diff --git a/.githooks/pre-push b/.githooks/pre-push index 73168e08ec4..ef5e77cbc79 100755 --- a/.githooks/pre-push +++ b/.githooks/pre-push @@ -6,14 +6,29 @@ RED='\033[0;31m' NC='\033[0m' # No Color +# Common prompts +INSTALL_PROMPT="Please install ZK Stack CLI using zkstackup from https://github.com/matter-labs/zksync-era/tree/main/zkstack_cli/zkstackup" +FORMAT_PROMPT="Please format the code via 'zkstack dev fmt', cannot push unformatted code" + # Check that prettier formatting rules are not violated. -if which zk_supervisor >/dev/null; then - if ! zk_supervisor fmt --check; then +if which zkstack >/dev/null; then + if ! zkstack dev fmt --check; then echo -e "${RED}Push error!${NC}" - echo "Please format the code via 'zks fmt', cannot push unformatted code" + echo -e "${FORMAT_PROMPT}" exit 1 fi else - echo "Please install zk_toolbox using zkup from https://github.com/matter-labs/zksync-era/tree/main/zk_toolbox/zkup, and then run ./bin/zkt from the zksync-era repository." - exit 1 + if which zk_supervisor >/dev/null; then + echo -e "${RED}WARNING: zkup, zk_inception/zki, and zk_supervisor/zks are DEPRECATED.${NC}" + echo -e "${RED}${INSTALL_PROMPT}${NC}" + + if ! zk_supervisor fmt --check; then + echo -e "${RED}Push error!${NC}" + echo -e "${FORMAT_PROMPT}" + exit 1 + fi + else + echo -e "${INSTALL_PROMPT}" + exit 1 + fi fi diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index a712db9f75b..d68b45e9d43 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -17,4 +17,4 @@ - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. -- [ ] Code has been formatted via `zk_supervisor fmt` and `zk_supervisor lint`. +- [ ] Code has been formatted via `zkstack dev fmt` and `zkstack dev lint`. 
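Note: Git only runs hooks from a versioned `.githooks/` directory if the clone is configured to look there; by default it uses `.git/hooks`. A minimal setup sketch, assuming the repository relies on the standard `core.hooksPath` mechanism for versioned hooks (the repo may provide its own bootstrap command instead):

```sh
# One-time setup after cloning: point Git at the versioned hooks directory
# so the pre-push hook above (and its `zkstack dev fmt --check` gate) runs.
git config core.hooksPath .githooks
chmod +x .githooks/pre-push

# `git push --dry-run` still invokes the pre-push hook, so it can be used
# to exercise the formatting gate without actually pushing anything.
git push --dry-run
```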
diff --git a/.github/release-please/config.json b/.github/release-please/config.json index 86839e804ca..358e249a18b 100644 --- a/.github/release-please/config.json +++ b/.github/release-please/config.json @@ -20,9 +20,9 @@ "release-type": "simple", "component": "prover" }, - "zk_toolbox": { + "zkstack_cli": { "release-type": "simple", - "component": "zk_toolbox", + "component": "zkstack_cli", "plugins": [ "cargo-workspace" ] diff --git a/.github/release-please/manifest.json b/.github/release-please/manifest.json index 44e10fb13fd..a0d1d73bdda 100644 --- a/.github/release-please/manifest.json +++ b/.github/release-please/manifest.json @@ -1,5 +1,5 @@ { - "core": "24.27.0", + "core": "25.0.0", "prover": "16.5.0", - "zk_toolbox": "0.1.2" + "zkstack_cli": "0.1.2" } diff --git a/.github/workflows/build-contract-verifier-template.yml b/.github/workflows/build-contract-verifier-template.yml index bb385b2797b..1481e542de5 100644 --- a/.github/workflows/build-contract-verifier-template.yml +++ b/.github/workflows/build-contract-verifier-template.yml @@ -101,7 +101,6 @@ jobs: - name: start-services run: | echo "IMAGE_TAG_SUFFIX=${{ env.IMAGE_TAG_SUFFIX }}" >> .env - mkdir -p ./volumes/postgres run_retried docker compose pull zk postgres docker compose up -d zk postgres ci_run pre_download_compilers.sh @@ -113,15 +112,19 @@ jobs: ci_run git config --global --add safe.directory /usr/src/zksync/sdk/binaryen ci_run git config --global --add safe.directory /usr/src/zksync/contracts/system-contracts ci_run git config --global --add safe.directory /usr/src/zksync/contracts - ci_run ./bin/zkt || true ci_run ./bin/zk || true ci_run run_retried curl -LO https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2\^26.key + - name: install zkstack + run: | + ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup || true + ci_run zkstackup -g --local + - name: build contracts if: env.BUILD_CONTRACTS == 'true' run: | ci_run cp etc/tokens/{test,localhost}.json - ci_run zk_supervisor contracts + ci_run zkstack dev contracts - name: Login to Docker registries if: ${{ inputs.action == 'push' }} diff --git a/.github/workflows/build-core-template.yml b/.github/workflows/build-core-template.yml index 9d00f98b181..15d4432191d 100644 --- a/.github/workflows/build-core-template.yml +++ b/.github/workflows/build-core-template.yml @@ -71,11 +71,15 @@ jobs: if [ $(jq length <<<"$tags") -eq 0 ]; then echo "No tag found on all pages." echo "BUILD_CONTRACTS=true" >> "$GITHUB_ENV" + # TODO Remove it when we migrate to foundry inside contracts repository + mkdir -p contracts/l1-contracts/artifacts/ exit 0 fi filtered_tag=$(jq -r --arg commit_sha "$commit_sha" 'map(select(.commit.sha == $commit_sha)) | .[].name' <<<"$tags") if [[ ! 
-z "$filtered_tag" ]]; then echo "BUILD_CONTRACTS=false" >> "$GITHUB_ENV" + # TODO Remove it when we migrate to foundry inside contracts repository + mkdir -p contracts/l1-contracts/out break fi ((page++)) @@ -110,7 +114,6 @@ jobs: - name: start-services run: | echo "IMAGE_TAG_SUFFIX=${{ env.IMAGE_TAG_SUFFIX }}" >> .env - mkdir -p ./volumes/postgres run_retried docker compose pull zk postgres docker compose up -d zk postgres ci_run pre_download_compilers.sh @@ -123,14 +126,19 @@ jobs: ci_run git config --global --add safe.directory /usr/src/zksync/contracts/system-contracts ci_run git config --global --add safe.directory /usr/src/zksync/contracts ci_run ./bin/zk || true - ci_run ./bin/zkt || true ci_run run_retried curl -LO https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2\^26.key + - name: Install zkstack + if: env.BUILD_CONTRACTS == 'true' + run: | + ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup || true + ci_run zkstackup -g --local + - name: build contracts if: env.BUILD_CONTRACTS == 'true' run: | ci_run cp etc/tokens/{test,localhost}.json - ci_run zk_supervisor contracts + ci_run zkstack dev contracts --system-contracts --l1-contracts --l2-contracts - name: Login to Docker registries if: ${{ inputs.action == 'push' }} diff --git a/.github/workflows/build-local-node-docker.yml b/.github/workflows/build-local-node-docker.yml index f664bfaaa00..cbb4239b572 100644 --- a/.github/workflows/build-local-node-docker.yml +++ b/.github/workflows/build-local-node-docker.yml @@ -50,9 +50,13 @@ jobs: - name: start-services run: | - mkdir -p ./volumes/postgres run_retried docker compose pull zk postgres docker compose up -d zk postgres + + - name: Install zkstack + run: | + ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup || true + ci_run zkstackup -g - name: init run: | @@ -61,9 +65,11 @@ jobs: ci_run git config --global --add safe.directory /usr/src/zksync/contracts ci_run zk - ci_run zkt ci_run cp etc/tokens/{test,localhost}.json - ci_run zk_supervisor contracts + + - name: build contracts + run: | + ci_run zkstack dev contracts - name: update-image run: | diff --git a/.github/workflows/build-prover-fri-gpu-gar-and-circuit-prover-gpu-gar.yml b/.github/workflows/build-prover-fri-gpu-gar-and-circuit-prover-gpu-gar.yml index b92fb8e8111..30990889caf 100644 --- a/.github/workflows/build-prover-fri-gpu-gar-and-circuit-prover-gpu-gar.yml +++ b/.github/workflows/build-prover-fri-gpu-gar-and-circuit-prover-gpu-gar.yml @@ -28,7 +28,6 @@ jobs: - name: Download Setup data run: | gsutil -m rsync -r gs://matterlabs-setup-data-us/${{ inputs.setup_keys_id }} docker/prover-gpu-fri-gar - cp -v docker/prover-gpu-fri-gar/*.bin docker/circuit-prover-gpu-gar/ - name: Login to us-central1 GAR run: | @@ -70,6 +69,10 @@ jobs: --tag europe-docker.pkg.dev/matterlabs-infra/matterlabs-docker/prover-fri-gpu-gar:2.0-${{ inputs.protocol_version }}-${{ inputs.image_tag_suffix }} \ us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/prover-fri-gpu-gar:2.0-${{ inputs.protocol_version }}-${{ inputs.image_tag_suffix }} + - name: Move Setup data from prover-gpu-fri-gar to circuit-prover-gpu-gar + run: | + mv -v docker/prover-gpu-fri-gar/*.bin docker/circuit-prover-gpu-gar/ + - name: Build and push circuit-prover-gpu-gar uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0 with: diff --git a/.github/workflows/build-prover-template.yml b/.github/workflows/build-prover-template.yml index d6ec61114c7..91de5dd51ec 
100644 --- a/.github/workflows/build-prover-template.yml +++ b/.github/workflows/build-prover-template.yml @@ -45,7 +45,7 @@ jobs: RUNNER_COMPOSE_FILE: "docker-compose-runner-nightly.yml" ERA_BELLMAN_CUDA_RELEASE: ${{ inputs.ERA_BELLMAN_CUDA_RELEASE }} CUDA_ARCH: ${{ inputs.CUDA_ARCH }} - runs-on: [ matterlabs-ci-runner-high-performance ] + runs-on: [matterlabs-ci-runner-high-performance] strategy: matrix: component: @@ -56,6 +56,7 @@ jobs: - prover-fri-gateway - prover-job-monitor - proof-fri-gpu-compressor + - prover-autoscaler outputs: protocol_version: ${{ steps.protocolversion.outputs.protocol_version }} steps: @@ -74,7 +75,6 @@ jobs: - name: start-services run: | echo "IMAGE_TAG_SUFFIX=${{ env.IMAGE_TAG_SUFFIX }}" >> .env - mkdir -p ./volumes/postgres run_retried docker compose pull zk postgres docker compose up -d zk postgres ci_run sccache --start-server @@ -91,7 +91,6 @@ jobs: run: | ci_run run_retried curl -LO https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2\^24.key - - name: login to Docker registries if: github.event_name != 'pull_request' && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/')) run: | diff --git a/.github/workflows/build-witness-generator-template.yml b/.github/workflows/build-witness-generator-template.yml index 33d78b3cf2f..d9493f97cae 100644 --- a/.github/workflows/build-witness-generator-template.yml +++ b/.github/workflows/build-witness-generator-template.yml @@ -75,7 +75,6 @@ jobs: - name: start-services run: | echo "IMAGE_TAG_SUFFIX=${{ env.IMAGE_TAG_SUFFIX }}" >> .env - mkdir -p ./volumes/postgres run_retried docker compose pull zk postgres docker compose up -d zk postgres ci_run sccache --start-server diff --git a/.github/workflows/ci-common-reusable.yml b/.github/workflows/ci-common-reusable.yml index 2f51229aeaf..ea91fc4a7cd 100644 --- a/.github/workflows/ci-common-reusable.yml +++ b/.github/workflows/ci-common-reusable.yml @@ -27,15 +27,15 @@ jobs: - name: Start services run: | run_retried docker-compose -f ${RUNNER_COMPOSE_FILE} pull - mkdir -p ./volumes/postgres docker-compose -f ${RUNNER_COMPOSE_FILE} up --build -d zk postgres - - - name: Init + + - name: Install zkstack run: | - ci_run zkt + ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup + ci_run zkstackup -g --local # This does both linting and "building". 
We're using `zk lint prover` as it's common practice within our repo # `zk lint prover` = cargo clippy, which does cargo check behind the scenes, which is a lightweight version of cargo build - name: Lints - run: ci_run zk_supervisor lint -t rs --check + run: ci_run zkstack dev lint -t rs --check diff --git a/.github/workflows/ci-core-lint-reusable.yml b/.github/workflows/ci-core-lint-reusable.yml index 6d0785fe46f..0babbd1c9db 100644 --- a/.github/workflows/ci-core-lint-reusable.yml +++ b/.github/workflows/ci-core-lint-reusable.yml @@ -26,24 +26,31 @@ jobs: - name: Start services run: | ci_localnet_up + + - name: Install zkstack + run: | + ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup + ci_run zkstackup -g --local - name: Build run: | - ci_run ./bin/zkt ci_run yarn install ci_run git config --global --add safe.directory /usr/src/zksync - ci_run zk_supervisor db setup --prover-url=${{ env.prover_url }} --core-url=${{ env.core_url }} + - name: Database setup + run: | + ci_run zkstack dev db setup --prover-url=${{ env.prover_url }} --core-url=${{ env.core_url }} - name: Lints run: | - ci_run zk_supervisor fmt --check - ci_run zk_supervisor lint -t md --check - ci_run zk_supervisor lint -t sol --check - ci_run zk_supervisor lint -t js --check - ci_run zk_supervisor lint -t ts --check - ci_run zk_supervisor lint -t rs --check + ci_run zkstack dev fmt --check + ci_run zkstack dev lint -t md --check + ci_run zkstack dev lint -t sol --check + ci_run zkstack dev lint -t js --check + ci_run zkstack dev lint -t ts --check + ci_run zkstack dev lint -t rs --check + ci_run zkstack dev lint -t autocompletion --check - name: Check Database run: | - ci_run zk_supervisor database check-sqlx-data --prover-url=${{ env.prover_url }} --core-url=${{ env.core_url }} + ci_run zkstack dev database check-sqlx-data --prover-url=${{ env.prover_url }} --core-url=${{ env.core_url }} diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml index fed26bbbb3b..0e1c69ae4db 100644 --- a/.github/workflows/ci-core-reusable.yml +++ b/.github/workflows/ci-core-reusable.yml @@ -8,11 +8,14 @@ on: required: false default: '[{ "zksolc": ["1.3.14", "1.3.16", "1.3.17", "1.3.1", "1.3.7", "1.3.18", "1.3.19", "1.3.21"] } , { "zkvyper": ["1.3.13"] }]' +env: + RUST_BACKTRACE: 1 + PASSED_ENV_VARS: RUST_BACKTRACE + jobs: lint: name: lint uses: ./.github/workflows/ci-core-lint-reusable.yml - unit-tests: runs-on: [ matterlabs-ci-runner-highmem-long ] @@ -57,102 +60,113 @@ jobs: - name: Init run: | ci_run run_retried rustup show - ci_run ./bin/zkt - ci_run zk_supervisor contracts - # FIXME: enable contract tests once tehy are stable - #- name: Contracts unit tests - # run: ci_run yarn l1-contracts test + - name: Install zkstack + run: | + ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup + ci_run zkstackup -g --local + + - name: Build contracts + run: | + ci_run zkstack dev contracts + +# - name: Contracts unit tests +# run: ci_run yarn l1-contracts test - name: Rust unit tests run: | - ci_run zk_supervisor test rust + ci_run zkstack dev test rust # Benchmarks are not tested by `cargo nextest` unless specified explicitly, and even then `criterion` harness is incompatible # with how `cargo nextest` runs tests. Thus, we run criterion-based benchmark tests manually. - ci_run cargo test --release -p vm-benchmark --bench oneshot --bench batch - - # FIXME: support loadtest together with sync layer. 
- # loadtest: - # runs-on: [ matterlabs-ci-runner-high-performance ] - # strategy: - # fail-fast: false - # matrix: - # # FIXME: support new VM mode - # vm_mode: ["OLD"] - - # steps: - # - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 - # with: - # submodules: "recursive" - # fetch-depth: 0 - - # - name: Setup environment - # run: | - # echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV - # echo $(pwd)/bin >> $GITHUB_PATH - # echo IN_DOCKER=1 >> .env - # echo "SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage" >> .env - # echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env - # echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env - # echo "RUSTC_WRAPPER=sccache" >> .env - - # - name: Loadtest configuration - # run: | - # echo EXPECTED_TX_COUNT=${{ matrix.vm_mode == 'NEW' && 21000 || 16000 }} >> .env - # echo ACCOUNTS_AMOUNT="100" >> .env - # echo MAX_INFLIGHT_TXS="10" >> .env - # echo SYNC_API_REQUESTS_LIMIT="15" >> .env - # echo FAIL_FAST=true >> .env - # echo IN_DOCKER=1 >> .env - - # - name: Start services - # run: | - # ci_localnet_up - # ci_run sccache --start-server - - # - name: Init - # run: | - # ci_run git config --global --add safe.directory /usr/src/zksync - # ci_run git config --global --add safe.directory /usr/src/zksync/sdk/binaryen - # ci_run git config --global --add safe.directory /usr/src/zksync/contracts/system-contracts - # ci_run git config --global --add safe.directory /usr/src/zksync/contracts - - # ci_run ./bin/zkt - # ci_run zk_inception chain create \ - # --chain-name legacy \ - # --chain-id sequential \ - # --prover-mode no-proofs \ - # --wallet-creation localhost \ - # --l1-batch-commit-data-generator-mode rollup \ - # --base-token-address 0x0000000000000000000000000000000000000001 \ - # --base-token-price-nominator 1 \ - # --base-token-price-denominator 1 \ - # --set-as-default false \ - # --ignore-prerequisites \ - # --legacy-bridge - - # ci_run zk_inception ecosystem init --dev --verbose - # ci_run zk_supervisor contracts --test-contracts - - # # `sleep 60` because we need to wait until server added all the tokens - # - name: Run server - # run: | - # ci_run zk_supervisor config-writer --path ${{ matrix.vm_mode == 'NEW' && 'etc/env/file_based/overrides/tests/loadtest-new.yaml' || 'etc/env/file_based/overrides/tests/loadtest-old.yaml' }} --chain legacy - # ci_run zk_inception server --uring --chain=legacy --components api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads &>server.log & - # ci_run sleep 60 - - # - name: Perform loadtest - # run: ci_run zk_supervisor t loadtest -v --chain=legacy - - # - name: Show server.log logs - # if: always() - # run: ci_run cat server.log || true - - # - name: Show sccache logs - # if: always() - # run: | - # ci_run sccache --show-stats || true - # ci_run cat /tmp/sccache_log.txt || true +# ci_run cargo test --release -p vm-benchmark --bench oneshot --bench batch + +# loadtest: +# runs-on: [ matterlabs-ci-runner-high-performance ] +# strategy: +# fail-fast: false +# matrix: +# vm_mode: [ "OLD", "NEW" ] +# +# steps: +# - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 +# with: +# submodules: "recursive" +# fetch-depth: 0 +# +# - name: Setup environment +# run: | +# echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV +# echo $(pwd)/bin >> $GITHUB_PATH +# echo IN_DOCKER=1 >> .env +# echo "SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage" >> .env +# echo 
"SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env +# echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env +# echo "RUSTC_WRAPPER=sccache" >> .env +# +# - name: Loadtest configuration +# run: | +# echo EXPECTED_TX_COUNT=${{ matrix.vm_mode == 'NEW' && 21000 || 16000 }} >> .env +# echo ACCOUNTS_AMOUNT="100" >> .env +# echo MAX_INFLIGHT_TXS="10" >> .env +# echo SYNC_API_REQUESTS_LIMIT="15" >> .env +# echo FAIL_FAST=true >> .env +# echo IN_DOCKER=1 >> .env +# +# - name: Start services +# run: | +# ci_localnet_up +# ci_run sccache --start-server +# +# - name: Init +# run: | +# ci_run git config --global --add safe.directory /usr/src/zksync +# ci_run git config --global --add safe.directory /usr/src/zksync/sdk/binaryen +# ci_run git config --global --add safe.directory /usr/src/zksync/contracts/system-contracts +# ci_run git config --global --add safe.directory /usr/src/zksync/contracts +# +# - name: Install zkstack +# run: | +# ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup || true +# ci_run zkstackup -g --local +# +# +# - name: Create and initialize legacy chain +# run: | +# ci_run zkstack chain create \ +# --chain-name legacy \ +# --chain-id sequential \ +# --prover-mode no-proofs \ +# --wallet-creation localhost \ +# --l1-batch-commit-data-generator-mode rollup \ +# --base-token-address 0x0000000000000000000000000000000000000001 \ +# --base-token-price-nominator 1 \ +# --base-token-price-denominator 1 \ +# --set-as-default false \ +# --ignore-prerequisites \ +# --legacy-bridge +# +# ci_run zkstack ecosystem init --dev --verbose +# ci_run zkstack dev contracts --test-contracts +# +# # `sleep 60` because we need to wait until server added all the tokens +# - name: Run server +# run: | +# ci_run zkstack dev config-writer --path ${{ matrix.vm_mode == 'NEW' && 'etc/env/file_based/overrides/tests/loadtest-new.yaml' || 'etc/env/file_based/overrides/tests/loadtest-old.yaml' }} --chain legacy +# ci_run zkstack server --uring --chain=legacy --components api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads &>server.log & +# ci_run sleep 60 +# +# - name: Perform loadtest +# run: ci_run zkstack dev t loadtest -v --chain=legacy +# +# - name: Show server.log logs +# if: always() +# run: ci_run cat server.log || true +# +# - name: Show sccache logs +# if: always() +# run: | +# ci_run sccache --show-stats || true +# ci_run cat /tmp/sccache_log.txt || true integration-tests: runs-on: [ matterlabs-ci-runner-ultra-performance ] @@ -172,14 +186,17 @@ jobs: echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env echo "RUSTC_WRAPPER=sccache" >> .env + echo "GITHUB_TOKEN=${{ secrets.GITHUB_TOKEN }}" >> .env echo RUN_CONTRACT_VERIFICATION_TEST=true >> $GITHUB_ENV - name: Start services run: | ci_localnet_up - - name: Build zk_toolbox - run: ci_run bash -c "./bin/zkt" + - name: Install zkstack + run: | + ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup || true + ci_run zkstackup -g --local - name: Create log directories run: | @@ -209,21 +226,19 @@ jobs: echo "EXTERNAL_NODE_LOGS_DIR=$EXTERNAL_NODE_LOGS_DIR" >> $GITHUB_ENV echo "FEES_LOGS_DIR=$FEES_LOGS_DIR" >> $GITHUB_ENV echo "REVERT_LOGS_DIR=$REVERT_LOGS_DIR" >> $GITHUB_ENV -# FIXME: restore tests for all the various types of chains + - name: Initialize ecosystem run: | ci_run git config --global --add safe.directory /usr/src/zksync ci_run 
git config --global --add safe.directory /usr/src/zksync/contracts/system-contracts ci_run git config --global --add safe.directory /usr/src/zksync/contracts - ci_run zk_inception ecosystem init --deploy-paymaster --deploy-erc20 \ - --deploy-ecosystem --l1-rpc-url=http://localhost:8545 \ - --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - --server-db-name=zksync_server_localhost_era \ - --prover-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - --prover-db-name=zksync_prover_localhost_era \ - --ignore-prerequisites --verbose \ - --observability=false + ci_run zkstack ecosystem init --deploy-paymaster --deploy-erc20 \ + --deploy-ecosystem --l1-rpc-url=http://localhost:8545 \ + --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ + --server-db-name=zksync_server_localhost_era \ + --ignore-prerequisites --verbose \ + --observability=false - name: Read Custom Token address and set as environment variable run: | @@ -233,7 +248,7 @@ jobs: - name: Create and initialize Validium chain run: | - ci_run zk_inception chain create \ + ci_run zkstack chain create \ --chain-name validium \ --chain-id sequential \ --prover-mode no-proofs \ @@ -245,18 +260,16 @@ jobs: --set-as-default false \ --ignore-prerequisites - ci_run zk_inception chain init \ + ci_run zkstack chain init \ --deploy-paymaster \ --l1-rpc-url=http://localhost:8545 \ --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ --server-db-name=zksync_server_localhost_validium \ - --prover-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - --prover-db-name=zksync_prover_localhost_validium \ --chain validium - name: Create and initialize chain with Custom Token run: | - ci_run zk_inception chain create \ + ci_run zkstack chain create \ --chain-name custom_token \ --chain-id sequential \ --prover-mode no-proofs \ @@ -268,86 +281,201 @@ jobs: --set-as-default false \ --ignore-prerequisites - ci_run zk_inception chain init \ + ci_run zkstack chain init \ --deploy-paymaster \ --l1-rpc-url=http://localhost:8545 \ --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ --server-db-name=zksync_server_localhost_custom_token \ - --prover-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - --prover-db-name=zksync_prover_localhost_custom_token \ --chain custom_token +# - name: Create and register chain with transactions signed "offline" +# run: | +# ci_run zkstack chain create \ +# --chain-name offline_chain \ +# --chain-id sequential \ +# --prover-mode no-proofs \ +# --wallet-creation localhost \ +# --l1-batch-commit-data-generator-mode rollup \ +# --base-token-address 0x0000000000000000000000000000000000000001 \ +# --base-token-price-nominator 1 \ +# --base-token-price-denominator 1 \ +# --set-as-default false \ +# --ignore-prerequisites +# +# ci_run zkstack chain build-transactions --chain offline_chain --l1-rpc-url http://127.0.0.1:8545 +# +# governor_pk=$(awk '/governor:/ {flag=1} flag && /private_key:/ {print $2; exit}' ./configs/wallets.yaml) +# +# ci_run zkstack dev send-transactions \ +# --file ./transactions/chain/offline_chain/register-hyperchain-txns.json \ +# --l1-rpc-url http://127.0.0.1:8545 \ +# --private-key $governor_pk +# +# bridge_hub=$(awk '/bridgehub_proxy_addr/ {print $2}' ./configs/contracts.yaml) +# chain_id=$(awk '/chain_id:/ {print $2}' ./chains/offline_chain/ZkStack.yaml) +# +# hyperchain_output=$(ci_run cast call $bridge_hub "getHyperchain(uint256)" $chain_id) +# +# if [[ $hyperchain_output == 0x* && 
${#hyperchain_output} -eq 66 ]]; then +# echo "Chain successfully registered: $hyperchain_output" +# else +# echo "Failed to register chain: $hyperchain_output" +# exit 1 +# fi + + - name: Create and initialize Consensus chain + run: | + ci_run zkstack chain create \ + --chain-name consensus \ + --chain-id sequential \ + --prover-mode no-proofs \ + --wallet-creation localhost \ + --l1-batch-commit-data-generator-mode validium \ + --base-token-address ${{ env.CUSTOM_TOKEN_ADDRESS }} \ + --base-token-price-nominator 3 \ + --base-token-price-denominator 2 \ + --set-as-default false \ + --ignore-prerequisites + + ci_run zkstack chain init \ + --deploy-paymaster \ + --l1-rpc-url=http://localhost:8545 \ + --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ + --server-db-name=zksync_server_localhost_consensus \ + --chain consensus + + - name: Export chain list to environment variable + run: | + CHAINS="era,validium,custom_token,consensus" + echo "CHAINS=$CHAINS" >> $GITHUB_ENV + - name: Initialize gateway chain run: | - ci_run zk_inception chain create \ - --chain-name gateway \ - --chain-id 505 \ - --prover-mode no-proofs \ - --wallet-creation localhost \ - --l1-batch-commit-data-generator-mode rollup \ - --base-token-address 0x0000000000000000000000000000000000000001 \ - --base-token-price-nominator 1 \ - --base-token-price-denominator 1 \ - --set-as-default false \ - --ignore-prerequisites - - ci_run zk_inception chain init \ - --deploy-paymaster \ - --l1-rpc-url=http://localhost:8545 \ - --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - --server-db-name=zksync_server_localhost_gateway \ - --prover-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - --prover-db-name=zksync_prover_localhost_gateway \ - --chain gateway - - ci_run zk_inception chain convert-to-gateway --chain gateway --ignore-prerequisites + ci_run zkstack chain create \ + --chain-name gateway \ + --chain-id 505 \ + --prover-mode no-proofs \ + --wallet-creation localhost \ + --l1-batch-commit-data-generator-mode rollup \ + --base-token-address 0x0000000000000000000000000000000000000001 \ + --base-token-price-nominator 1 \ + --base-token-price-denominator 1 \ + --set-as-default false \ + --ignore-prerequisites + + ci_run zkstack chain init \ + --deploy-paymaster \ + --l1-rpc-url=http://localhost:8545 \ + --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ + --server-db-name=zksync_server_localhost_gateway \ + --chain gateway + + ci_run zkstack chain convert-to-gateway --chain gateway --ignore-prerequisites - name: Run gateway run: | - ci_run zk_inception server --ignore-prerequisites --chain gateway &> ${{ env.SERVER_LOGS_DIR }}/gateway.log & - + ci_run zkstack server --ignore-prerequisites --chain gateway &> ${{ env.SERVER_LOGS_DIR }}/gateway.log & ci_run sleep 5 - name: Migrate chains to gateway run: | - ci_run zk_inception chain migrate-to-gateway --chain era --gateway-chain-name gateway - ci_run zk_inception chain migrate-to-gateway --chain validium --gateway-chain-name gateway - ci_run zk_inception chain migrate-to-gateway --chain custom_token --gateway-chain-name gateway - + ci_run zkstack chain migrate-to-gateway --chain era --gateway-chain-name gateway + ci_run zkstack chain migrate-to-gateway --chain validium --gateway-chain-name gateway + ci_run zkstack chain migrate-to-gateway --chain custom_token --gateway-chain-name gateway + ci_run zkstack chain migrate-to-gateway --chain consensus --gateway-chain-name gateway + - name: Migrate back era run: | - 
ci_run zk_inception chain migrate-from-gateway --chain era --gateway-chain-name gateway + ci_run zkstack chain migrate-from-gateway --chain era --gateway-chain-name gateway - name: Migrate to gateway again run: | - ci_run zk_inception chain migrate-to-gateway --chain era --gateway-chain-name gateway + ci_run zkstack chain migrate-to-gateway --chain era --gateway-chain-name gateway - name: Build test dependencies run: | - ci_run zk_supervisor test build + ci_run zkstack dev test build - - name: Run chains + - name: Initialize Contract verifier run: | - ci_run zk_inception server --ignore-prerequisites --chain era &> ${{ env.SERVER_LOGS_DIR }}/rollup.log & - ci_run zk_inception server --ignore-prerequisites --chain validium &> ${{ env.SERVER_LOGS_DIR }}/validium.log & - ci_run zk_inception server --ignore-prerequisites --chain custom_token &> ${{ env.SERVER_LOGS_DIR }}/custom_token.log & + ci_run zkstack contract-verifier init --zksolc-version=v1.5.3 --zkvyper-version=v1.5.4 --solc-version=0.8.26 --vyper-version=v0.3.10 --era-vm-solc-version=0.8.26-1.0.1 --only --chain era + ci_run zkstack contract-verifier run --chain era &> ${{ env.SERVER_LOGS_DIR }}/contract-verifier-rollup.log & + + - name: Run servers + run: | + ci_run zkstack server --ignore-prerequisites --chain era &> ${{ env.SERVER_LOGS_DIR }}/rollup.log & + ci_run zkstack server --ignore-prerequisites --chain validium &> ${{ env.SERVER_LOGS_DIR }}/validium.log & + ci_run zkstack server --ignore-prerequisites --chain custom_token &> ${{ env.SERVER_LOGS_DIR }}/custom_token.log & + ci_run zkstack server --ignore-prerequisites --chain consensus \ + --components=api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads,vm_runner_bwip,vm_playground,da_dispatcher,consensus \ + &> ${{ env.SERVER_LOGS_DIR }}/consensus.log & ci_run sleep 5 - - name: Run integration tests + - name: Setup attester committee for the consensus chain run: | - ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --chain era &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/rollup.log & - PID1=$! - - ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --chain validium &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/validium.log & - PID2=$! + ci_run zkstack consensus set-attester-committee --chain consensus --from-genesis &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/consensus.log - ci_run zk_supervisor test integration --no-deps --ignore-prerequisites --chain custom_token &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/custom_token.log & - PID3=$! 
+ - name: Run integration tests + run: | + ci_run ./bin/run_on_all_chains.sh "zkstack dev test integration --no-deps --ignore-prerequisites" ${{ env.CHAINS }} ${{ env.INTEGRATION_TESTS_LOGS_DIR }} + +# - name: Init external nodes +# run: | +# ci_run zkstack external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \ +# --db-name=zksync_en_localhost_era_rollup --l1-rpc-url=http://localhost:8545 --chain era +# ci_run zkstack external-node init --ignore-prerequisites --chain era +# +# ci_run zkstack external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \ +# --db-name=zksync_en_localhost_era_validium1 --l1-rpc-url=http://localhost:8545 --chain validium +# ci_run zkstack external-node init --ignore-prerequisites --chain validium +# +# ci_run zkstack external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \ +# --db-name=zksync_en_localhost_era_custom_token --l1-rpc-url=http://localhost:8545 --chain custom_token +# ci_run zkstack external-node init --ignore-prerequisites --chain custom_token +# +# ci_run zkstack external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \ +# --db-name=zksync_en_localhost_era_consensus --l1-rpc-url=http://localhost:8545 --chain consensus +# ci_run zkstack external-node init --ignore-prerequisites --chain consensus +# +# - name: Run recovery tests (from snapshot) +# run: | +# ci_run ./bin/run_on_all_chains.sh "zkstack dev test recovery --snapshot --no-deps --ignore-prerequisites --verbose" ${{ env.CHAINS }} ${{ env.INTEGRATION_TESTS_LOGS_DIR }} +# +# - name: Run recovery tests (from genesis) +# run: | +# ci_run ./bin/run_on_all_chains.sh "zkstack dev test recovery --no-deps --no-kill --ignore-prerequisites --verbose" ${{ env.CHAINS }} ${{ env.INTEGRATION_TESTS_LOGS_DIR }} +# +# - name: Run external node server +# run: | +# ci_run zkstack external-node run --ignore-prerequisites --chain era &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/rollup.log & +# ci_run zkstack external-node run --ignore-prerequisites --chain validium &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/validium.log & +# ci_run zkstack external-node run --ignore-prerequisites --chain custom_token &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/custom_token.log & +# ci_run zkstack external-node run --ignore-prerequisites --chain consensus --enable-consensus &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/consensus.log & +# +# - name: Run integration tests en +# run: | +# ci_run ./bin/run_on_all_chains.sh "zkstack dev test integration --no-deps --ignore-prerequisites --external-node" ${{ env.CHAINS }} ${{ env.INTEGRATION_TESTS_LOGS_DIR }} +# +# - name: Fee projection tests +# run: | +# ci_run killall -INT zksync_server || true +# ci_run ./bin/run_on_all_chains.sh "zkstack dev test fees --no-deps --no-kill" ${{ env.CHAINS }} ${{ env.FEES_LOGS_DIR }} +# +# - name: Run revert tests +# run: | +# ci_run killall -INT zksync_server || true +# ci_run killall -INT zksync_external_node || true +# +# ci_run ./bin/run_on_all_chains.sh "zkstack dev test revert --no-deps --external-node --no-kill --ignore-prerequisites" ${{ env.CHAINS }} ${{ env.INTEGRATION_TESTS_LOGS_DIR }} +# +# # Upgrade tests should run last, because as soon as they +# # finish the bootloader will be different +# # TODO make upgrade tests safe to run multiple times +# - name: Run upgrade test +# run: | +# ci_run zkstack dev test upgrade --no-deps --chain era - wait $PID1 - wait $PID2 - wait $PID3 - name: Upload logs uses: 
actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0 diff --git a/.github/workflows/ci-docs-reusable.yml b/.github/workflows/ci-docs-reusable.yml index 5b1d5a9bcdf..e1a9cf78df7 100644 --- a/.github/workflows/ci-docs-reusable.yml +++ b/.github/workflows/ci-docs-reusable.yml @@ -27,12 +27,17 @@ jobs: run_retried docker compose pull zk docker compose up -d zk + - name: Install zkstack + run: | + ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup + ci_run zkstackup -g --local + - name: Build run: | - ci_run ./bin/zkt ci_run yarn install ci_run git config --global --add safe.directory /usr/src/zksync - name: Lints run: | - ci_run zk_supervisor lint -t md --check + ci_run zkstack dev fmt --check + ci_run zkstack dev lint -t md --check diff --git a/.github/workflows/ci-prover-e2e.yml b/.github/workflows/ci-prover-e2e.yml new file mode 100644 index 00000000000..e69945eaaf2 --- /dev/null +++ b/.github/workflows/ci-prover-e2e.yml @@ -0,0 +1,126 @@ +name: Workflow for testing prover component end-to-end +on: + workflow_call: + +jobs: + e2e-test: + runs-on: [ matterlabs-ci-gpu-l4-runner-prover-tests ] + env: + RUNNER_COMPOSE_FILE: "docker-compose-gpu-runner-cuda-12-0.yml" + + steps: + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 + with: + submodules: "recursive" + fetch-depth: 0 + + - name: Setup environment + run: | + echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV + echo $(pwd)/bin >> $GITHUB_PATH + echo IN_DOCKER=1 >> .env + echo "SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage" >> .env + echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env + echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env + echo "RUSTC_WRAPPER=sccache" >> .env + + mkdir -p prover_logs + +# - name: Start services +# run: | +# run_retried docker-compose -f ${RUNNER_COMPOSE_FILE} pull +# docker-compose -f ${RUNNER_COMPOSE_FILE} --profile runner up -d --wait +# ci_run sccache --start-server +# +# - name: Init +# run: | +# ci_run git config --global --add safe.directory "*" +# ci_run chmod -R +x ./bin +# +# ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup || true +# ci_run zkstackup -g --local +# +# ci_run zkstack chain create \ +# --chain-name proving_chain \ +# --chain-id sequential \ +# --prover-mode gpu \ +# --wallet-creation localhost \ +# --l1-batch-commit-data-generator-mode rollup \ +# --base-token-address 0x0000000000000000000000000000000000000001 \ +# --base-token-price-nominator 1 \ +# --base-token-price-denominator 1 \ +# --set-as-default true \ +# --ignore-prerequisites +# +# ci_run zkstack ecosystem init --dev --verbose +# ci_run zkstack prover init --dev --verbose +# +# echo "URL=$(grep "http_url" ./chains/proving_chain/configs/general.yaml | awk '{ print $2 }')" >> $GITHUB_ENV +# - name: Build prover binaries +# run: | +# ci_run cargo build --release --workspace --manifest-path=prover/Cargo.toml +# - name: Prepare prover subsystem +# run: | +# ci_run zkstack prover init-bellman-cuda --clone --verbose +# ci_run zkstack prover setup-keys --mode=download --region=us --verbose +# - name: Run server +# run: | +# ci_run zkstack server --uring --chain=proving_chain --components=api,tree,eth,state_keeper,commitment_generator,proof_data_handler,vm_runner_protective_reads,vm_runner_bwip &>prover_logs/server.log & +# - name: Run Gateway +# run: | +# ci_run zkstack prover run --component=gateway --docker=false &>prover_logs/gateway.log & +# - name: Run Prover Job Monitor +# run: | 
+# ci_run zkstack prover run --component=prover-job-monitor --docker=false &>prover_logs/prover-job-monitor.log & +# - name: Wait for batch to be passed through gateway +# env: +# DATABASE_URL: postgres://postgres:notsecurepassword@localhost:5432/zksync_prover_localhost_proving_chain +# BATCH_NUMBER: 1 +# INTERVAL: 30 +# TIMEOUT: 300 +# run: | +# PASSED_ENV_VARS="DATABASE_URL,BATCH_NUMBER,INTERVAL,TIMEOUT" \ +# ci_run ./bin/prover_checkers/batch_availability_checker +# - name: Run Witness Generator +# run: | +# ci_run zkstack prover run --component=witness-generator --round=all-rounds --docker=false &>prover_logs/witness-generator.log & +# - name: Run Circuit Prover +# run: | +# ci_run zkstack prover run --component=circuit-prover --witness-vector-generator-count=10 --docker=false &>prover_logs/circuit_prover.log & +# - name: Wait for prover jobs to finish +# env: +# DATABASE_URL: postgres://postgres:notsecurepassword@localhost:5432/zksync_prover_localhost_proving_chain +# BATCH_NUMBER: 1 +# INTERVAL: 30 +# TIMEOUT: 1200 +# run: | +# PASSED_ENV_VARS="DATABASE_URL,BATCH_NUMBER,INTERVAL,TIMEOUT" \ +# ci_run ./bin/prover_checkers/prover_jobs_status_checker +# +# - name: Kill prover & start compressor +# run: | +# sudo ./bin/prover_checkers/kill_prover +# +# ci_run zkstack prover run --component=compressor --docker=false &>prover_logs/compressor.log & +# - name: Wait for batch to be executed on L1 +# env: +# DATABASE_URL: postgres://postgres:notsecurepassword@localhost:5432/zksync_prover_localhost_proving_chain +# BATCH_NUMBER: 1 +# INTERVAL: 30 +# TIMEOUT: 600 +# run: | +# PASSED_ENV_VARS="BATCH_NUMBER,DATABASE_URL,URL,INTERVAL,TIMEOUT" \ +# ci_run ./bin/prover_checkers/batch_l1_status_checker +# +# - name: Upload logs +# uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0 +# if: always() +# with: +# name: prover_logs +# path: prover_logs +# +# - name: Show sccache logs +# if: always() +# run: | +# ci_run sccache --show-stats || true +# ci_run cat /tmp/sccache_log.txt || true diff --git a/.github/workflows/ci-prover-reusable.yml b/.github/workflows/ci-prover-reusable.yml index 3f842b23488..7f719b2240d 100644 --- a/.github/workflows/ci-prover-reusable.yml +++ b/.github/workflows/ci-prover-reusable.yml @@ -27,16 +27,21 @@ jobs: - name: Start services run: | run_retried docker-compose -f ${RUNNER_COMPOSE_FILE} pull - mkdir -p ./volumes/postgres docker-compose -f ${RUNNER_COMPOSE_FILE} up --build -d zk postgres - - name: Init + - name: Install zkstack + run: | + ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup + ci_run zkstackup -g --local + + - name: Database setup run: | - ci_run zkt - ci_run zk_supervisor db setup --prover-url=${{ env.prover_url }} --core-url=${{ env.core_url }} + ci_run zkstack dev db setup --prover-url=${{ env.prover_url }} --core-url=${{ env.core_url }} - name: Formatting - run: ci_run bash -c "cd prover && cargo fmt --check" + run: | + ci_run git config --global --add safe.directory /usr/src/zksync + ci_run zkstack dev fmt --check rustfmt unit-tests: runs-on: [ matterlabs-ci-runner-highmem-long ] @@ -62,15 +67,18 @@ jobs: - name: Start services run: | run_retried docker-compose -f ${RUNNER_COMPOSE_FILE} pull - mkdir -p ./volumes/postgres docker-compose -f ${RUNNER_COMPOSE_FILE} up --build -d zk postgres + - name: Install zkstack + run: | + ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup || true + ci_run zkstackup -g --local + - name: Init run: | - ci_run zkt ci_run run_retried 
rustup show - name: Prover unit tests run: | # Not all tests are enabled, since prover and setup_key_generator_and_server requires bellman-cuda to be present - ci_run zk_supervisor test prover + ci_run zkstack dev test prover diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9c8817cf5cc..e4bf1596d48 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -21,7 +21,7 @@ jobs: outputs: core: ${{ steps.changed-files.outputs.core_any_changed }} prover: ${{ steps.changed-files.outputs.prover_any_changed }} - zk_toolbox: ${{ steps.changed-files.outputs.zk_toolbox_any_changed }} + zkstack_cli: ${{ steps.changed-files.outputs.zkstack_cli_any_changed }} docs: ${{ steps.changed-files.outputs.docs_any_changed }} all: ${{ steps.changed-files.outputs.all_any_changed }} steps: @@ -43,6 +43,9 @@ jobs: - '!prover/extract-setup-data-keys.sh' - 'docker/prover*/**' - '.github/workflows/build-prover-template.yml' + - '.github/workflows/new-build-prover-template.yml' + - '.github/workflows/build-witness-generator-template.yml' + - '.github/workflows/new-build-witness-generator-template.yml' - '.github/workflows/ci-prover-reusable.yml' - 'docker-compose-runner-nightly.yml' - '!**/*.md' @@ -54,12 +57,14 @@ jobs: - 'docker/external-node/**' - 'docker/server/**' - '.github/workflows/build-core-template.yml' + - '.github/workflows/new-build-core-template.yml' - '.github/workflows/build-contract-verifier-template.yml' + - '.github/workflows/new-build-contract-verifier-template.yml' - '.github/workflows/ci-core-reusable.yml' - '.github/workflows/ci-core-lint-reusable.yml' - 'Cargo.toml' - 'Cargo.lock' - - 'zk_toolbox/**' + - 'zkstack_cli/**' - '!**/*.md' - '!**/*.MD' - 'docker-compose.yml' @@ -95,6 +100,12 @@ jobs: name: CI for Prover Components uses: ./.github/workflows/ci-prover-reusable.yml + e2e-for-prover: + name: E2E Test for Prover Components + needs: changed_files + if: ${{(needs.changed_files.outputs.prover == 'true' || needs.changed_files.outputs.all == 'true') && !contains(github.ref_name, 'release-please--branches') }} + uses: ./.github/workflows/ci-prover-e2e.yml + ci-for-docs: needs: changed_files if: needs.changed_files.outputs.docs == 'true' diff --git a/.github/workflows/new-build-contract-verifier-template.yml b/.github/workflows/new-build-contract-verifier-template.yml index 42791eab666..0d6e2049ad0 100644 --- a/.github/workflows/new-build-contract-verifier-template.yml +++ b/.github/workflows/new-build-contract-verifier-template.yml @@ -38,6 +38,7 @@ jobs: echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV echo CI=1 >> $GITHUB_ENV echo $(pwd)/bin >> $GITHUB_PATH + echo $HOME/.local/bin >> $GITHUB_PATH echo CI=1 >> .env echo IN_DOCKER=1 >> .env @@ -73,39 +74,39 @@ jobs: tar -C ./contracts -zxf system-contracts.tar.gz - name: Install Apt dependencies + if: env.BUILD_CONTRACTS == 'true' shell: bash run: | sudo apt-get update && sudo apt-get install -y libssl-dev pkg-config - name: Install Node + if: env.BUILD_CONTRACTS == 'true' uses: actions/setup-node@1e60f620b9541d16bece96c5465dc8ee9832be0b # v4.0.3 with: node-version: 20 cache: 'npm' - name: Install Yarn + if: env.BUILD_CONTRACTS == 'true' run: npm install -g yarn - name: Setup rust + if: env.BUILD_CONTRACTS == 'true' uses: actions-rust-lang/setup-rust-toolchain@1fbea72663f6d4c03efaab13560c8a24cfd2a7cc # v1.9.0 with: toolchain: nightly-2024-08-01 - - name: Install cargo-nextest from crates.io - uses: baptiste0928/cargo-install@904927dbe77864e0f2281519fe9d5bd097a220b3 # v3.1.1 - with: - crate: cargo-nextest - - - name: 
Install sqlx-cli from crates.io - uses: baptiste0928/cargo-install@904927dbe77864e0f2281519fe9d5bd097a220b3 # v3.1.1 - with: - crate: sqlx-cli - tag: 0.8.1 - - - name: Install Foundry - uses: foundry-rs/foundry-toolchain@8f1998e9878d786675189ef566a2e4bf24869773 # v1.2.0 + - name: Install foundry-zksync + if: env.BUILD_CONTRACTS == 'true' + run: | + mkdir ./foundry-zksync + curl -LO https://github.com/matter-labs/foundry-zksync/releases/download/nightly-15bec2f861b3b4c71e58f85e2b2c9dd722585aa8/foundry_nightly_linux_amd64.tar.gz + tar zxf foundry_nightly_linux_amd64.tar.gz -C ./foundry-zksync + chmod +x ./foundry-zksync/forge ./foundry-zksync/cast + echo "$PWD/foundry-zksync" >> $GITHUB_PATH - name: Pre-download compilers + if: env.BUILD_CONTRACTS == 'true' shell: bash run: | # Download needed versions of vyper compiler @@ -123,18 +124,18 @@ jobs: chmod +x "./hardhat-nodejs/compilers-v2/$compiler/${compiler}-v${version}" done - - name: init - shell: bash + - name: Install zkstack + if: env.BUILD_CONTRACTS == 'true' run: | - mkdir -p ./volumes/postgres - docker compose up -d postgres - zkt || true + ./zkstack_cli/zkstackup/install --path ./zkstack_cli/zkstackup/zkstackup + zkstackup --local || true - name: build contracts + if: env.BUILD_CONTRACTS == 'true' shell: bash run: | cp etc/tokens/{test,localhost}.json - zk_supervisor contracts + zkstack dev contracts - name: Upload contracts uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0 @@ -143,129 +144,129 @@ jobs: path: | ./contracts - build-images: - name: Build and Push Docker Images - needs: prepare-contracts - runs-on: ${{ fromJSON('["matterlabs-ci-runner-high-performance", "matterlabs-ci-runner-arm"]')[contains(matrix.platforms, 'arm')] }} - strategy: - matrix: - components: - - contract-verifier - - verified-sources-fetcher - platforms: - - linux/amd64 - - steps: - - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - with: - submodules: "recursive" - - - name: Set up Docker Buildx - uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 - - - name: Setup env - shell: bash - run: | - echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV - echo CI=1 >> $GITHUB_ENV - echo $(pwd)/bin >> $GITHUB_PATH - echo CI=1 >> .env - echo IN_DOCKER=1 >> .env - - - name: Download setup key - shell: bash - run: | - run_retried curl -LO https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2\^26.key - - - name: Set env vars - shell: bash - run: | - echo PLATFORM=$(echo ${{ matrix.platforms }} | tr '/' '-') >> $GITHUB_ENV - echo IMAGE_TAG_SHA=$(git rev-parse --short HEAD) >> $GITHUB_ENV - # Support for custom tag suffix - if [ -n "${{ inputs.image_tag_suffix }}" ]; then - echo IMAGE_TAG_SHA_TS="${{ inputs.image_tag_suffix }}" >> $GITHUB_ENV - else - echo IMAGE_TAG_SHA_TS=$(git rev-parse --short HEAD)-$(date +%s) >> $GITHUB_ENV - fi - - - name: Download contracts - uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 - with: - name: contacts-verifier - path: | - ./contracts - - - name: login to Docker registries - if: ${{ inputs.action == 'push' }} - shell: bash - run: | - docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }} - gcloud auth configure-docker us-docker.pkg.dev -q - - - name: Build and push - uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0 - with: - context: . 
- push: ${{ inputs.action == 'push' }} - file: docker/${{ matrix.components }}/Dockerfile - build-args: | - SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage - SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com - SCCACHE_GCS_RW_MODE=READ_WRITE - RUSTC_WRAPPER=sccache - tags: | - us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:latest - matterlabs/${{ matrix.components }}:latest - us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:latest2.0 - matterlabs/${{ matrix.components }}:latest2.0 - us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA }} - matterlabs/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA }} - us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA }} - matterlabs/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA }} - us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }} - matterlabs/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }} - us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }} - matterlabs/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }} - us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }} - matterlabs/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }}-${{ env.PLATFORM }} - us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }} - matterlabs/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }}-${{ env.PLATFORM }} - - create_manifest: - name: Create release manifest - runs-on: matterlabs-ci-runner - needs: build-images - if: ${{ inputs.action == 'push' }} - strategy: - matrix: - component: - - name: contract-verifier - platform: linux/amd64 - - name: verified-sources-fetcher - platform: linux/amd64 - env: - IMAGE_TAG_SUFFIX: ${{ inputs.image_tag_suffix }} - steps: - - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 - - - name: login to Docker registries - run: | - docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }} - gcloud auth configure-docker us-docker.pkg.dev -q - - - name: Create Docker manifest - run: | - docker_repositories=("matterlabs/${{ matrix.component.name }}" "us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component.name }}") - platforms=${{ matrix.component.platform }} - for repo in "${docker_repositories[@]}"; do - platform_tags="" - for platform in ${platforms//,/ }; do - platform=$(echo $platform | tr '/' '-') - platform_tags+=" --amend ${repo}:${IMAGE_TAG_SUFFIX}-${platform}" - done - for manifest in "${repo}:${IMAGE_TAG_SUFFIX}" "${repo}:2.0-${IMAGE_TAG_SUFFIX}" "${repo}:latest" "${repo}:latest2.0"; do - docker manifest create ${manifest} ${platform_tags} - docker manifest push ${manifest} - done - done +# build-images: +# name: Build and Push Docker Images +# needs: prepare-contracts +# runs-on: ${{ fromJSON('["matterlabs-ci-runner-high-performance", "matterlabs-ci-runner-arm"]')[contains(matrix.platforms, 'arm')] }} +# strategy: +# matrix: +# components: +# - contract-verifier +# - verified-sources-fetcher +# platforms: +# - linux/amd64 +# +# steps: +# - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 +# with: +# submodules: "recursive" +# +# - name: Set up Docker Buildx +# uses: 
docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 +# +# - name: Setup env +# shell: bash +# run: | +# echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV +# echo CI=1 >> $GITHUB_ENV +# echo $(pwd)/bin >> $GITHUB_PATH +# echo CI=1 >> .env +# echo IN_DOCKER=1 >> .env +# +# - name: Download setup key +# shell: bash +# run: | +# run_retried curl -LO https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2\^26.key +# +# - name: Set env vars +# shell: bash +# run: | +# echo PLATFORM=$(echo ${{ matrix.platforms }} | tr '/' '-') >> $GITHUB_ENV +# echo IMAGE_TAG_SHA=$(git rev-parse --short HEAD) >> $GITHUB_ENV +# # Support for custom tag suffix +# if [ -n "${{ inputs.image_tag_suffix }}" ]; then +# echo IMAGE_TAG_SHA_TS="${{ inputs.image_tag_suffix }}" >> $GITHUB_ENV +# else +# echo IMAGE_TAG_SHA_TS=$(git rev-parse --short HEAD)-$(date +%s) >> $GITHUB_ENV +# fi +# +# - name: Download contracts +# uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 +# with: +# name: contacts-verifier +# path: | +# ./contracts +# +# - name: login to Docker registries +# if: ${{ inputs.action == 'push' }} +# shell: bash +# run: | +# docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }} +# gcloud auth configure-docker us-docker.pkg.dev -q +# +# - name: Build and push +# uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0 +# with: +# context: . +# push: ${{ inputs.action == 'push' }} +# file: docker/${{ matrix.components }}/Dockerfile +# build-args: | +# SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage +# SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com +# SCCACHE_GCS_RW_MODE=READ_WRITE +# RUSTC_WRAPPER=sccache +# tags: | +# us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:latest +# matterlabs/${{ matrix.components }}:latest +# us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:latest2.0 +# matterlabs/${{ matrix.components }}:latest2.0 +# us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA }} +# matterlabs/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA }} +# us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA }} +# matterlabs/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA }} +# us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }} +# matterlabs/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }} +# us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }} +# matterlabs/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }} +# us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }} +# matterlabs/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }}-${{ env.PLATFORM }} +# us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }} +# matterlabs/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }}-${{ env.PLATFORM }} +# +# create_manifest: +# name: Create release manifest +# runs-on: matterlabs-ci-runner +# needs: build-images +# if: ${{ inputs.action == 'push' }} +# strategy: +# matrix: +# component: +# - name: contract-verifier +# platform: linux/amd64 +# - name: verified-sources-fetcher +# platform: linux/amd64 +# env: +# IMAGE_TAG_SUFFIX: ${{ inputs.image_tag_suffix }} +# steps: +# - uses: 
actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 +# +# - name: login to Docker registries +# run: | +# docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }} +# gcloud auth configure-docker us-docker.pkg.dev -q +# +# - name: Create Docker manifest +# run: | +# docker_repositories=("matterlabs/${{ matrix.component.name }}" "us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component.name }}") +# platforms=${{ matrix.component.platform }} +# for repo in "${docker_repositories[@]}"; do +# platform_tags="" +# for platform in ${platforms//,/ }; do +# platform=$(echo $platform | tr '/' '-') +# platform_tags+=" --amend ${repo}:${IMAGE_TAG_SUFFIX}-${platform}" +# done +# for manifest in "${repo}:${IMAGE_TAG_SUFFIX}" "${repo}:2.0-${IMAGE_TAG_SUFFIX}" "${repo}:latest" "${repo}:latest2.0"; do +# docker manifest create ${manifest} ${platform_tags} +# docker manifest push ${manifest} +# done +# done diff --git a/.github/workflows/new-build-core-template.yml b/.github/workflows/new-build-core-template.yml index fba6a68b8ee..c4aeb9180fd 100644 --- a/.github/workflows/new-build-core-template.yml +++ b/.github/workflows/new-build-core-template.yml @@ -43,6 +43,7 @@ jobs: echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV echo CI=1 >> $GITHUB_ENV echo $(pwd)/bin >> $GITHUB_PATH + echo $HOME/.local/bin >> $GITHUB_PATH echo CI=1 >> .env echo IN_DOCKER=1 >> .env @@ -78,39 +79,39 @@ jobs: tar -C ./contracts -zxf system-contracts.tar.gz - name: Install Apt dependencies + if: env.BUILD_CONTRACTS == 'true' shell: bash run: | sudo apt-get update && sudo apt-get install -y libssl-dev pkg-config - name: Install Node + if: env.BUILD_CONTRACTS == 'true' uses: actions/setup-node@1e60f620b9541d16bece96c5465dc8ee9832be0b # v4.0.3 with: node-version: 20 cache: 'npm' - name: Install Yarn + if: env.BUILD_CONTRACTS == 'true' run: npm install -g yarn - name: Setup rust + if: env.BUILD_CONTRACTS == 'true' uses: actions-rust-lang/setup-rust-toolchain@1fbea72663f6d4c03efaab13560c8a24cfd2a7cc # v1.9.0 with: toolchain: nightly-2024-08-01 - - name: Install cargo-nextest from crates.io - uses: baptiste0928/cargo-install@904927dbe77864e0f2281519fe9d5bd097a220b3 # v3.1.1 - with: - crate: cargo-nextest - - - name: Install sqlx-cli from crates.io - uses: baptiste0928/cargo-install@904927dbe77864e0f2281519fe9d5bd097a220b3 # v3.1.1 - with: - crate: sqlx-cli - tag: 0.8.1 - - - name: Install Foundry - uses: foundry-rs/foundry-toolchain@8f1998e9878d786675189ef566a2e4bf24869773 # v1.2.0 + - name: Install foundry-zksync + if: env.BUILD_CONTRACTS == 'true' + run: | + mkdir ./foundry-zksync + curl -LO https://github.com/matter-labs/foundry-zksync/releases/download/nightly-15bec2f861b3b4c71e58f85e2b2c9dd722585aa8/foundry_nightly_linux_amd64.tar.gz + tar zxf foundry_nightly_linux_amd64.tar.gz -C ./foundry-zksync + chmod +x ./foundry-zksync/forge ./foundry-zksync/cast + echo "$PWD/foundry-zksync" >> $GITHUB_PATH - name: Pre-download compilers + if: env.BUILD_CONTRACTS == 'true' shell: bash run: | # Download needed versions of vyper compiler @@ -128,18 +129,18 @@ jobs: chmod +x "./hardhat-nodejs/compilers-v2/$compiler/${compiler}-v${version}" done - - name: init - shell: bash + - name: Install zkstack + if: env.BUILD_CONTRACTS == 'true' run: | - mkdir -p ./volumes/postgres - docker compose up -d postgres - zkt || true + ./zkstack_cli/zkstackup/install --path ./zkstack_cli/zkstackup/zkstackup + zkstackup --local || true - name: build contracts + if: env.BUILD_CONTRACTS == 'true' shell: bash run: | cp 
etc/tokens/{test,localhost}.json - zk_supervisor contracts + zkstack dev contracts - name: Upload contracts uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0 diff --git a/.github/workflows/new-build-prover-template.yml b/.github/workflows/new-build-prover-template.yml index 60c152213e6..5d42696c0b2 100644 --- a/.github/workflows/new-build-prover-template.yml +++ b/.github/workflows/new-build-prover-template.yml @@ -40,7 +40,7 @@ on: jobs: get-protocol-version: name: Get protocol version - runs-on: [ matterlabs-ci-runner-high-performance ] + runs-on: [matterlabs-ci-runner-high-performance] outputs: protocol_version: ${{ steps.protocolversion.outputs.protocol_version }} steps: @@ -86,7 +86,7 @@ jobs: needs: get-protocol-version env: PROTOCOL_VERSION: ${{ needs.get-protocol-version.outputs.protocol_version }} - runs-on: [ matterlabs-ci-runner-high-performance ] + runs-on: [matterlabs-ci-runner-high-performance] strategy: matrix: components: @@ -96,6 +96,7 @@ jobs: - prover-fri-gateway - prover-job-monitor - proof-fri-gpu-compressor + - prover-autoscaler steps: - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 with: @@ -166,7 +167,7 @@ jobs: copy-images: name: Copy images between docker registries - needs: [ build-images, get-protocol-version ] + needs: [build-images, get-protocol-version] env: PROTOCOL_VERSION: ${{ needs.get-protocol-version.outputs.protocol_version }} runs-on: matterlabs-ci-runner @@ -187,12 +188,12 @@ jobs: run: | gcloud auth print-access-token --lifetime=7200 --impersonate-service-account=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com | docker login -u oauth2accesstoken --password-stdin https://asia-docker.pkg.dev docker buildx imagetools create \ - --tag asia-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ needs.build-images.outputs.protocol_version }}-${{ inputs.image_tag_suffix }} \ - us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ needs.build-images.outputs.protocol_version }}-${{ inputs.image_tag_suffix }} + --tag asia-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ env.PROTOCOL_VERSION }}-${{ inputs.image_tag_suffix }} \ + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ env.PROTOCOL_VERSION }}-${{ inputs.image_tag_suffix }} - name: Login and push to Europe GAR run: | gcloud auth print-access-token --lifetime=7200 --impersonate-service-account=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com | docker login -u oauth2accesstoken --password-stdin https://europe-docker.pkg.dev docker buildx imagetools create \ - --tag europe-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ needs.build-images.outputs.protocol_version }}-${{ inputs.image_tag_suffix }} \ - us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ needs.build-images.outputs.protocol_version }}-${{ inputs.image_tag_suffix }} + --tag europe-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ env.PROTOCOL_VERSION }}-${{ inputs.image_tag_suffix }} \ + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component }}:2.0-${{ env.PROTOCOL_VERSION }}-${{ inputs.image_tag_suffix }} diff --git a/.github/workflows/release-test-stage.yml b/.github/workflows/release-test-stage.yml index 11a844fdd2b..18708420dab 100644 --- a/.github/workflows/release-test-stage.yml +++ 
b/.github/workflows/release-test-stage.yml @@ -39,7 +39,7 @@ jobs: - '!prover/**' setup: name: Setup - runs-on: [ matterlabs-deployer-stage ] + runs-on: [matterlabs-deployer-stage] outputs: image_tag_suffix: ${{ steps.generate-tag-suffix.outputs.image_tag_suffix }} prover_fri_gpu_key_id: ${{ steps.extract-prover-fri-setup-key-ids.outputs.gpu_short_commit_sha }} @@ -58,10 +58,9 @@ jobs: run: | ./prover/extract-setup-data-keys.sh >> $GITHUB_OUTPUT - build-push-core-images: name: Build and push images - needs: [ setup, changed_files ] + needs: [setup, changed_files] uses: ./.github/workflows/build-core-template.yml if: needs.changed_files.outputs.core == 'true' || needs.changed_files.outputs.all == 'true' with: @@ -72,7 +71,7 @@ jobs: build-push-tee-prover-images: name: Build and push images - needs: [ setup, changed_files ] + needs: [setup, changed_files] uses: ./.github/workflows/build-tee-prover-template.yml if: needs.changed_files.outputs.core == 'true' || needs.changed_files.outputs.all == 'true' with: @@ -84,7 +83,7 @@ jobs: build-push-contract-verifier: name: Build and push images - needs: [ setup, changed_files ] + needs: [setup, changed_files] uses: ./.github/workflows/build-contract-verifier-template.yml if: needs.changed_files.outputs.core == 'true' || needs.changed_files.outputs.all == 'true' with: @@ -95,7 +94,7 @@ jobs: build-push-prover-images: name: Build and push images - needs: [ setup, changed_files ] + needs: [setup, changed_files] uses: ./.github/workflows/build-prover-template.yml if: needs.changed_files.outputs.prover == 'true' || needs.changed_files.outputs.all == 'true' with: @@ -108,7 +107,7 @@ jobs: build-push-witness-generator-image-avx512: name: Build and push prover images with avx512 instructions - needs: [ setup, changed_files ] + needs: [setup, changed_files] uses: ./.github/workflows/build-witness-generator-template.yml if: needs.changed_files.outputs.prover == 'true' || needs.changed_files.outputs.all == 'true' with: @@ -122,7 +121,7 @@ jobs: build-gar-prover-fri-gpu-and-circuit-prover-gpu-gar: name: Build GAR prover FRI GPU - needs: [ setup, build-push-prover-images ] + needs: [setup, build-push-prover-images] uses: ./.github/workflows/build-prover-fri-gpu-gar-and-circuit-prover-gpu-gar.yml if: needs.changed_files.outputs.prover == 'true' || needs.changed_files.outputs.all == 'true' with: diff --git a/.github/workflows/vm-perf-comparison.yml b/.github/workflows/vm-perf-comparison.yml index a57bed3006a..ae7e5ee671b 100644 --- a/.github/workflows/vm-perf-comparison.yml +++ b/.github/workflows/vm-perf-comparison.yml @@ -18,65 +18,66 @@ jobs: fetch-depth: 0 ref: ${{ github.base_ref }} - # - name: fetch PR branch - # run: | - # git remote add pr_repo ${{ github.event.pull_request.head.repo.clone_url }} - # git fetch pr_repo ${{ github.event.pull_request.head.ref }} - - # - name: fetch merge-base SHA - # id: merge_base - # run: echo "sha=$(git merge-base HEAD FETCH_HEAD)" >> $GITHUB_OUTPUT - - # - name: checkout divergence point - # run: git checkout ${{ steps.merge_base.outputs.sha }} --recurse-submodules - - # - name: setup-env - # run: | - # touch .env - # echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV - # echo $(pwd)/bin >> $GITHUB_PATH - # echo "SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage" >> .env - # echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env - # echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env - # echo "RUSTC_WRAPPER=sccache" >> .env - - # - name: init - # run: | - # run_retried docker compose pull zk - 
# docker compose up -d zk - - # - name: run benchmarks on base branch - # shell: bash - # run: | - # ci_run zkt - # ci_run zk_supervisor contracts --system-contracts - # ci_run cargo bench --package vm-benchmark --bench iai | tee base-iai - # ci_run cargo run --package vm-benchmark --release --bin instruction_counts | tee base-opcodes || touch base-opcodes - - # - name: checkout PR - # run: | - # git checkout --force FETCH_HEAD --recurse-submodules - - # - name: run benchmarks on PR - # shell: bash - # run: | - # ci_run zkt - # ci_run zk_supervisor contracts --system-contracts - # ci_run cargo bench --package vm-benchmark --bench iai | tee pr-iai - # ci_run cargo run --package vm-benchmark --release --bin instruction_counts | tee pr-opcodes || touch pr-opcodes - - # EOF=$(dd if=/dev/urandom bs=15 count=1 status=none | base64) - # echo "speedup<<$EOF" >> $GITHUB_OUTPUT - # ci_run cargo run --package vm-benchmark --release --bin compare_iai_results base-iai pr-iai base-opcodes pr-opcodes >> $GITHUB_OUTPUT - # echo "$EOF" >> $GITHUB_OUTPUT - # id: comparison - - # - name: Comment on PR - # uses: thollander/actions-comment-pull-request@fabd468d3a1a0b97feee5f6b9e499eab0dd903f6 # v2.5.0 - # with: - # message: | - # ${{ steps.comparison.outputs.speedup == '' && '## No performance difference detected (anymore)' || '## Detected VM performance changes' }} - # ${{ steps.comparison.outputs.speedup }} - # comment_tag: vm-performance-changes - # mode: recreate - # create_if_not_exists: ${{ steps.comparison.outputs.speedup != '' }} +# - name: fetch PR branch +# run: | +# git remote add pr_repo ${{ github.event.pull_request.head.repo.clone_url }} +# git fetch pr_repo ${{ github.event.pull_request.head.ref }} +# +# - name: fetch merge-base SHA +# id: merge_base +# run: echo "sha=$(git merge-base HEAD FETCH_HEAD)" >> $GITHUB_OUTPUT +# +# - name: checkout divergence point +# run: git checkout ${{ steps.merge_base.outputs.sha }} --recurse-submodules +# +# - name: setup-env +# run: | +# touch .env +# echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV +# echo $(pwd)/bin >> $GITHUB_PATH +# echo $(pwd)/zkstack_cli/zkstackup >> $GITHUB_PATH +# echo "SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage" >> .env +# echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env +# echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env +# echo "RUSTC_WRAPPER=sccache" >> .env +# +# - name: init +# run: | +# run_retried docker compose pull zk +# docker compose up -d zk +# +# - name: run benchmarks on base branch +# shell: bash +# run: | +# ci_run zkstackup -g --local +# ci_run zkstack dev contracts --system-contracts +# ci_run cargo bench --package vm-benchmark --bench iai | tee base-iai +# ci_run cargo run --package vm-benchmark --release --bin instruction_counts | tee base-opcodes || touch base-opcodes +# +# - name: checkout PR +# run: | +# git checkout --force FETCH_HEAD --recurse-submodules +# +# - name: run benchmarks on PR +# shell: bash +# run: | +# ci_run zkstackup -g --local +# ci_run zkstack dev contracts --system-contracts +# ci_run cargo bench --package vm-benchmark --bench iai | tee pr-iai +# ci_run cargo run --package vm-benchmark --release --bin instruction_counts | tee pr-opcodes || touch pr-opcodes +# +# EOF=$(dd if=/dev/urandom bs=15 count=1 status=none | base64) +# echo "speedup<<$EOF" >> $GITHUB_OUTPUT +# ci_run cargo run --package vm-benchmark --release --bin compare_iai_results base-iai pr-iai base-opcodes pr-opcodes >> $GITHUB_OUTPUT +# echo "$EOF" >> $GITHUB_OUTPUT +# id: comparison 
+# +# - name: Comment on PR +# uses: thollander/actions-comment-pull-request@fabd468d3a1a0b97feee5f6b9e499eab0dd903f6 # v2.5.0 +# with: +# message: | +# ${{ steps.comparison.outputs.speedup == '' && '## No performance difference detected (anymore)' || '## Detected VM performance changes' }} +# ${{ steps.comparison.outputs.speedup }} +# comment_tag: vm-performance-changes +# mode: recreate +# create_if_not_exists: ${{ steps.comparison.outputs.speedup != '' }} diff --git a/.github/workflows/vm-perf-to-prometheus.yml b/.github/workflows/vm-perf-to-prometheus.yml index 4c8c90a0d8f..d336a1472e4 100644 --- a/.github/workflows/vm-perf-to-prometheus.yml +++ b/.github/workflows/vm-perf-to-prometheus.yml @@ -33,8 +33,15 @@ jobs: run: | run_retried docker compose pull zk docker compose up -d zk - ci_run zkt - ci_run zk_supervisor contracts + + - name: Install zkstack + run: | + ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup || true + ci_run zkstackup -g --local + + - name: build contracts + run: | + ci_run zkstack dev contracts - name: run benchmarks run: | diff --git a/.github/workflows/zk-environment-publish.yml b/.github/workflows/zk-environment-publish.yml index 5a08dff178c..b9321c8f5d6 100644 --- a/.github/workflows/zk-environment-publish.yml +++ b/.github/workflows/zk-environment-publish.yml @@ -4,6 +4,12 @@ on: # Workflow dispatch, to allow building and pushing new environments. # It will NOT mark them as latest. workflow_dispatch: + inputs: + build_cuda: + description: "Build CUDA images or not" + type: boolean + required: false + default: false push: branches: @@ -43,10 +49,10 @@ jobs: - docker/zk-environment/Dockerfile - .github/workflows/zk-environment-publish.yml zk_env_cuda_11_8: - - docker/zk-environment/20.04_amd64_cuda_11_8.Dockerfile + - docker/zk-environment/22.04_amd64_cuda_11_8.Dockerfile - .github/workflows/zk-environment-publish.yml zk_env_cuda_12: - - docker/zk-environment/20.04_amd64_cuda_12_0.Dockerfile + - docker/zk-environment/22.04_amd64_cuda_12_0.Dockerfile - .github/workflows/zk-environment-publish.yml get_short_sha: @@ -202,25 +208,25 @@ jobs: echo "should_run=$changed_files_output" >> "$GITHUB_OUTPUT" - name: Checkout code - if: steps.condition.outputs.should_run == 'true' + if: ${{ (steps.condition.outputs.should_run == 'true') || (github.event_name == 'workflow_dispatch' && inputs.build_cuda) }} uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 with: submodules: "recursive" - name: Log in to US GAR - if: steps.condition.outputs.should_run == 'true' && github.event_name == 'push' && github.ref == 'refs/heads/main' + if: ${{ (steps.condition.outputs.should_run == 'true' && github.event_name == 'push' && github.ref == 'refs/heads/main') || (github.event_name == 'workflow_dispatch' && inputs.build_cuda) }} run: | gcloud auth print-access-token --lifetime=7200 --impersonate-service-account=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com | docker login -u oauth2accesstoken --password-stdin https://us-docker.pkg.dev - name: Log in to Docker Hub - if: steps.condition.outputs.should_run == 'true' && github.event_name == 'push' && github.ref == 'refs/heads/main' + if: ${{ (steps.condition.outputs.should_run == 'true' && github.event_name == 'push' && github.ref == 'refs/heads/main') || (github.event_name == 'workflow_dispatch' && inputs.build_cuda) }} uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 with: username: ${{ secrets.DOCKERHUB_USER }} password: ${{ secrets.DOCKERHUB_TOKEN }} 
- name: Login to GitHub Container Registry - if: steps.condition.outputs.should_run == 'true' && github.event_name == 'push' && github.ref == 'refs/heads/main' + if: ${{ (steps.condition.outputs.should_run == 'true' && github.event_name == 'push' && github.ref == 'refs/heads/main') || (github.event_name == 'workflow_dispatch' && inputs.build_cuda) }} uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 with: registry: ghcr.io @@ -228,19 +234,19 @@ jobs: password: ${{ secrets.GITHUB_TOKEN }} - name: Set up QEMU - if: steps.condition.outputs.should_run == 'true' + if: ${{ (steps.condition.outputs.should_run == 'true') || (github.event_name == 'workflow_dispatch' && inputs.build_cuda) }} uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf # v3.2.0 - name: Set up Docker Buildx - if: steps.condition.outputs.should_run == 'true' + if: ${{ (steps.condition.outputs.should_run == 'true') || (github.event_name == 'workflow_dispatch' && inputs.build_cuda) }} uses: docker/setup-buildx-action@aa33708b10e362ff993539393ff100fa93ed6a27 # v3.5.0 - name: Build and optionally push - if: steps.condition.outputs.should_run == 'true' + if: ${{ (steps.condition.outputs.should_run == 'true') || (github.event_name == 'workflow_dispatch' && inputs.build_cuda) }} uses: docker/build-push-action@5176d81f87c23d6fc96624dfdbcd9f3830bbe445 # v6.5.0 with: - file: docker/zk-environment/20.04_amd64_cuda_${{ matrix.cuda_version }}.Dockerfile - push: ${{ github.event_name == 'push' && github.ref == 'refs/heads/main' }} + file: docker/zk-environment/22.04_amd64_cuda_${{ matrix.cuda_version }}.Dockerfile + push: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/main' ) || (github.event_name == 'workflow_dispatch' && inputs.build_cuda) }} tags: | us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/zk-environment-cuda-${{ matrix.cuda_version }}:latest matterlabs/zk-environment:cuda-${{ matrix.cuda_version }}-latest diff --git a/.gitignore b/.gitignore index d60a93bba74..ea01fe127aa 100644 --- a/.gitignore +++ b/.gitignore @@ -30,7 +30,7 @@ Cargo.lock !/Cargo.lock !/infrastructure/zksync-crypto/Cargo.lock !/prover/Cargo.lock -!/zk_toolbox/Cargo.lock +!/zkstack_cli/Cargo.lock /etc/env/target/* /etc/env/.current @@ -116,9 +116,10 @@ hyperchain-*.yml prover/crates/bin/vk_setup_data_generator_server_fri/data/setup_* prover/data/keys/setup_* -# Zk Toolbox +# ZK Stack CLI chains/era/configs/* chains/gateway/* +chains/avail/* configs/* era-observability/ core/tests/ts-integration/deployments-zk diff --git a/Cargo.lock b/Cargo.lock index 9a51d81a257..629869af757 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -292,6 +292,20 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "async-compression" +version = "0.4.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd066d0b4ef8ecb03a55319dc13aa6910616d0f44008a045bb1835af830abff5" +dependencies = [ + "futures-core", + "memchr", + "pin-project-lite", + "tokio", + "zstd", + "zstd-safe", +] + [[package]] name = "async-executor" version = "1.13.1" @@ -1313,14 +1327,14 @@ dependencies = [ [[package]] name = "circuit_encodings" -version = "0.150.5" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e67617688c66640c84f9b98ff26d48f7898dca4faeb45241a4f21ec333788e7b" +checksum = "f5128d4b8fbb27ac453f573a95601058e74487bdafd22a3168cded66bf340c28" dependencies = [ "derivative", "serde", - "zk_evm 0.150.5", - "zkevm_circuits 0.150.5", + "zk_evm 0.150.6", + 
"zkevm_circuits 0.150.6", ] [[package]] @@ -1380,11 +1394,11 @@ dependencies = [ [[package]] name = "circuit_sequencer_api" -version = "0.150.5" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21017310971d4a051e4a52ad70eed11d1ae69defeca8314f73a3a4bad16705a9" +checksum = "093d0c2c0b39144ddb4e1e88d73d95067ce34ec7750808b2eed01edbb510b88e" dependencies = [ - "circuit_encodings 0.150.5", + "circuit_encodings 0.150.6", "derivative", "rayon", "serde", @@ -1539,6 +1553,15 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "const-decoder" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b381abde2cdc1bc3817e394b24e05667a2dc89f37570cbd34d9c397d99e56e3f" +dependencies = [ + "compile-fmt", +] + [[package]] name = "const-oid" version = "0.9.6" @@ -3303,6 +3326,12 @@ dependencies = [ "url", ] +[[package]] +name = "human-repr" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f58b778a5761513caf593693f8951c97a5b610841e754788400f32102eefdff1" + [[package]] name = "hyper" version = "0.14.30" @@ -5871,6 +5900,7 @@ version = "0.12.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f8f4955649ef5c38cc7f9e8aa41761d48fb9677197daea9984dc54f56aad5e63" dependencies = [ + "async-compression", "base64 0.22.1", "bytes", "encoding_rs", @@ -8294,13 +8324,16 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5" dependencies = [ + "async-compression", "bitflags 2.6.0", "bytes", + "futures-core", "http 1.1.0", "http-body 1.0.1", "http-body-util", "pin-project-lite", "tokio", + "tokio-util", "tower-layer", "tower-service", ] @@ -8728,6 +8761,7 @@ dependencies = [ "zksync_types", "zksync_utils", "zksync_vlog", + "zksync_vm2", ] [[package]] @@ -9326,9 +9360,9 @@ dependencies = [ [[package]] name = "zk_evm" -version = "0.150.5" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a6e69931f24db5cf333b714721e8d80ff88bfdb7da8c3dc7882612ffddb8d27" +checksum = "c14bda6c101389145cd01fac900f1392876bc0284d98faf7f376237baa2cb19d" dependencies = [ "anyhow", "lazy_static", @@ -9336,7 +9370,7 @@ dependencies = [ "serde", "serde_json", "static_assertions", - "zk_evm_abstractions 0.150.5", + "zk_evm_abstractions 0.150.6", ] [[package]] @@ -9367,15 +9401,15 @@ dependencies = [ [[package]] name = "zk_evm_abstractions" -version = "0.150.5" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93d6b0720261ab55490fe3a96e96de30d5d7b277940b52ea7f52dbf564eb1748" +checksum = "a008f2442fc6a508bdd1f902380242cb6ff11b8b27acdac2677c6d9f75cbb004" dependencies = [ "anyhow", "num_enum 0.6.1", "serde", "static_assertions", - "zkevm_opcode_defs 0.150.5", + "zkevm_opcode_defs 0.150.6", ] [[package]] @@ -9424,9 +9458,9 @@ dependencies = [ [[package]] name = "zkevm_circuits" -version = "0.150.5" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "784fa7cfb51e17c5ced112bca43da30b3468b2347b7af0427ad9638759fb140e" +checksum = "1f68518aedd5358b17224771bb78bacd912cf66011aeda98b1f887cfb9e0972f" dependencies = [ "arrayvec 0.7.6", "boojum", @@ -9438,7 +9472,7 @@ dependencies = [ "seq-macro", "serde", "smallvec", - "zkevm_opcode_defs 0.150.5", + "zkevm_opcode_defs 0.150.6", "zksync_cs_derive", ] @@ -9486,9 +9520,9 @@ dependencies = [ 
[[package]] name = "zkevm_opcode_defs" -version = "0.150.5" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79055eae1b6c1ab80793ed9d77d2964c9c896afa4b5dfed278cf58cd10acfe8f" +checksum = "762b5f1c1b283c5388995a85d40a05aef1c14f50eb904998b7e9364739f5b899" dependencies = [ "bitflags 2.6.0", "blake2 0.10.6", @@ -9612,7 +9646,7 @@ dependencies = [ "anyhow", "circuit_sequencer_api 0.140.3", "circuit_sequencer_api 0.141.2", - "circuit_sequencer_api 0.150.5", + "circuit_sequencer_api 0.150.6", "futures 0.3.30", "itertools 0.10.5", "num_cpus", @@ -9624,7 +9658,7 @@ dependencies = [ "vise", "zk_evm 0.133.0", "zk_evm 0.141.0", - "zk_evm 0.150.5", + "zk_evm 0.150.6", "zksync_contracts", "zksync_dal", "zksync_eth_client", @@ -9633,6 +9667,7 @@ dependencies = [ "zksync_multivm", "zksync_node_genesis", "zksync_node_test_utils", + "zksync_system_constants", "zksync_types", "zksync_utils", "zksync_web3_decl", @@ -9640,9 +9675,9 @@ dependencies = [ [[package]] name = "zksync_concurrency" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4724d51934e475c846ba9e6ed169e25587385188b928a9ecfbbf616092a1c17" +checksum = "035269d811b3770debca372141ab64cad067dce8e58cb39a48cb7617d30c626b" dependencies = [ "anyhow", "once_cell", @@ -9666,8 +9701,12 @@ dependencies = [ "secrecy", "serde", "serde_json", + "strum", + "strum_macros", + "time", "tracing", "url", + "vise", "zksync_basic_types", "zksync_concurrency", "zksync_consensus_utils", @@ -9677,9 +9716,9 @@ dependencies = [ [[package]] name = "zksync_consensus_bft" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1e7199c07aa14d9c3319839b98ad0496aac6e72327e70ded77ddb66329766db" +checksum = "a8001633dee671134cf572175a6c4f817904ce5f8d92e9b51f49891c5184a831" dependencies = [ "anyhow", "async-trait", @@ -9699,9 +9738,9 @@ dependencies = [ [[package]] name = "zksync_consensus_crypto" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7760e7a140f16f0435fbf2ad9a4b09feaad74568d05b553751d222f4803a42e" +checksum = "49e38d1b5ed28c66e785caff53ea4863375555d818aafa03290397192dd3e665" dependencies = [ "anyhow", "blst", @@ -9720,9 +9759,9 @@ dependencies = [ [[package]] name = "zksync_consensus_executor" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db07f7329b29737d8fd6860b350c809ae1b56ad53e26a7d0eddf3664ccb9dacb" +checksum = "061546668dd779ecb08302d2c84a6419e0093ad42aaa279bf20a8fa2ffda1be4" dependencies = [ "anyhow", "async-trait", @@ -9742,9 +9781,9 @@ dependencies = [ [[package]] name = "zksync_consensus_network" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a89a2d60db1ccd41438d29724a8d0d57fcf9506eb4443ea4b9205fd78c9c8e59" +checksum = "4e9789b5be26d20511bd7930bd9916d91122ff6cb09a28898563152a52f9f5eb" dependencies = [ "anyhow", "async-trait", @@ -9752,6 +9791,7 @@ dependencies = [ "build_html", "bytesize", "http-body-util", + "human-repr", "hyper 1.4.1", "hyper-util", "im", @@ -9778,9 +9818,9 @@ dependencies = [ [[package]] name = "zksync_consensus_roles" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96f903187836210602beba27655e111e22efb229ef90bd2a95a3d6799b31685c" +checksum = 
"e49fbd4e69b276058f3dfc06cf6ada0e8caa6ed826e81289e4d596da95a0f17a" dependencies = [ "anyhow", "bit-vec", @@ -9800,9 +9840,9 @@ dependencies = [ [[package]] name = "zksync_consensus_storage" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff43cfd03ea205c763e74362dc6ec5a4d74b6b1baef0fb134dde92a8880397f7" +checksum = "b2b2aab4ed18b13cd584f4edcc2546c8da82f89ac62e525063e12935ff28c9be" dependencies = [ "anyhow", "async-trait", @@ -9820,9 +9860,9 @@ dependencies = [ [[package]] name = "zksync_consensus_utils" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1020308512c01ab80327fb874b5b61c6fd513a6b26c8a5fce3e077600da04e4b" +checksum = "10bac8f471b182d4fa3d40cf158aac3624fe636a1ff0b4cf3fe26a0e20c68a42" dependencies = [ "anyhow", "rand 0.8.5", @@ -9989,14 +10029,17 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", + "backon", "base58", "blake2 0.10.6", "blake2b_simd", + "bytes", "flate2", "futures 0.3.30", "hex", "jsonrpsee 0.23.2", "parity-scale-codec", + "reqwest 0.12.7", "scale-encode", "serde", "serde_json", @@ -10050,10 +10093,13 @@ dependencies = [ "tracing", "vise", "zksync_concurrency", + "zksync_consensus_crypto", "zksync_consensus_roles", "zksync_consensus_storage", + "zksync_consensus_utils", "zksync_contracts", "zksync_db_connection", + "zksync_l1_contract_interface", "zksync_protobuf", "zksync_protobuf_build", "zksync_system_constants", @@ -10152,8 +10198,8 @@ dependencies = [ "async-trait", "rlp", "thiserror", - "tokio", - "zksync_types", + "zksync_basic_types", + "zksync_crypto_primitives", ] [[package]] @@ -10180,7 +10226,7 @@ dependencies = [ [[package]] name = "zksync_external_node" -version = "24.27.0" +version = "25.0.0" dependencies = [ "anyhow", "assert_matches", @@ -10247,7 +10293,9 @@ dependencies = [ "rand 0.8.5", "reqwest 0.12.7", "serde", + "serde_json", "tokio", + "tracing", "url", "zksync_config", "zksync_types", @@ -10331,9 +10379,9 @@ dependencies = [ [[package]] name = "zksync_kzg" -version = "0.150.5" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edb8a9c76c172a6d639855ee342b9a670e3ba472f5ae302f771b1c3ee777dc88" +checksum = "6c006b6b7a27cc50ff0c515b6d0b197dbb907bbf65d1d2ea42fc3ed21b315642" dependencies = [ "boojum", "derivative", @@ -10343,15 +10391,17 @@ dependencies = [ "serde", "serde_json", "serde_with", - "zkevm_circuits 0.150.5", + "zkevm_circuits 0.150.6", ] [[package]] name = "zksync_l1_contract_interface" version = "0.1.0" dependencies = [ + "anyhow", "hex", "once_cell", + "rand 0.8.5", "serde", "serde_json", "serde_with", @@ -10470,20 +10520,21 @@ dependencies = [ "circuit_sequencer_api 0.140.3", "circuit_sequencer_api 0.141.2", "circuit_sequencer_api 0.142.2", - "circuit_sequencer_api 0.150.5", + "circuit_sequencer_api 0.150.6", "ethabi", "hex", "itertools 0.10.5", "once_cell", + "pretty_assertions", + "test-casing", "thiserror", - "tokio", "tracing", "vise", "zk_evm 0.131.0-rc.2", "zk_evm 0.133.0", "zk_evm 0.140.0", "zk_evm 0.141.0", - "zk_evm 0.150.5", + "zk_evm 0.150.6", "zksync_contracts", "zksync_eth_signer", "zksync_mini_merkle_tree", @@ -10504,6 +10555,7 @@ dependencies = [ "async-trait", "axum", "chrono", + "const-decoder", "futures 0.3.30", "governor", "hex", @@ -10524,7 +10576,7 @@ dependencies = [ "tower-http", "tracing", "vise", - "zk_evm 0.150.5", + "zk_evm 0.150.6", "zksync_config", "zksync_consensus_roles", "zksync_contracts", @@ -10623,7 
+10675,6 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", - "bigdecimal", "test-casing", "tokio", "tracing", @@ -10632,7 +10683,6 @@ dependencies = [ "zksync_dal", "zksync_eth_client", "zksync_types", - "zksync_utils", "zksync_web3_decl", ] @@ -10689,7 +10739,6 @@ dependencies = [ "zksync_state", "zksync_state_keeper", "zksync_storage", - "zksync_tee_verifier_input_producer", "zksync_types", "zksync_utils", "zksync_vlog", @@ -10757,6 +10806,7 @@ dependencies = [ "anyhow", "assert_matches", "async-trait", + "backon", "chrono", "futures 0.3.30", "once_cell", @@ -10848,6 +10898,7 @@ dependencies = [ "serde_json", "tokio", "tower 0.4.13", + "tower-http", "tracing", "vise", "zksync_basic_types", @@ -10858,13 +10909,15 @@ dependencies = [ "zksync_object_store", "zksync_prover_interface", "zksync_types", + "zksync_utils", + "zksync_vm_executor", ] [[package]] name = "zksync_protobuf" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d2d9ce9b9697daae6023c8da5cfe8764690a9d9c91ff32b8e1e54a7c8301fb3" +checksum = "abd55c64f54cb10967a435422f66ff5880ae14a232b245517c7ce38da32e0cab" dependencies = [ "anyhow", "bit-vec", @@ -10883,9 +10936,9 @@ dependencies = [ [[package]] name = "zksync_protobuf_build" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "903c23a12e160a703f9b68d0dd961daa24156af912ca1bc9efb74969f3acc645" +checksum = "4121952bcaf711005dd554612fc6e2de9b30cb58088508df87f1d38046ce8ac8" dependencies = [ "anyhow", "heck 0.5.0", @@ -10909,6 +10962,7 @@ dependencies = [ "secrecy", "serde_json", "serde_yaml", + "time", "tracing", "zksync_basic_types", "zksync_config", @@ -10923,7 +10977,7 @@ version = "0.1.0" dependencies = [ "bincode", "chrono", - "circuit_sequencer_api 0.150.5", + "circuit_sequencer_api 0.150.6", "serde", "serde_json", "serde_with", @@ -11166,6 +11220,8 @@ name = "zksync_tee_verifier" version = "0.1.0" dependencies = [ "anyhow", + "bincode", + "once_cell", "serde", "tracing", "zksync_config", @@ -11173,29 +11229,9 @@ dependencies = [ "zksync_crypto_primitives", "zksync_merkle_tree", "zksync_multivm", - "zksync_object_store", - "zksync_prover_interface", - "zksync_types", - "zksync_utils", -] - -[[package]] -name = "zksync_tee_verifier_input_producer" -version = "0.1.0" -dependencies = [ - "anyhow", - "async-trait", - "tokio", - "tracing", - "vise", - "zksync_dal", - "zksync_object_store", "zksync_prover_interface", - "zksync_queued_job_processor", - "zksync_tee_verifier", "zksync_types", "zksync_utils", - "zksync_vm_executor", ] [[package]] @@ -11231,7 +11267,6 @@ dependencies = [ "once_cell", "prost 0.12.6", "rlp", - "secp256k1", "serde", "serde_json", "serde_with", @@ -11240,7 +11275,6 @@ dependencies = [ "tokio", "tracing", "zksync_basic_types", - "zksync_config", "zksync_contracts", "zksync_crypto_primitives", "zksync_mini_merkle_tree", @@ -11301,20 +11335,20 @@ dependencies = [ [[package]] name = "zksync_vm2" -version = "0.1.0" -source = "git+https://github.com/matter-labs/vm2.git?rev=74577d9be13b1bff9d1a712389731f669b179e47#74577d9be13b1bff9d1a712389731f669b179e47" +version = "0.2.1" +source = "git+https://github.com/matter-labs/vm2.git?rev=df5bec3d04d64d434f9b0ccb285ba4681008f7b3#df5bec3d04d64d434f9b0ccb285ba4681008f7b3" dependencies = [ "enum_dispatch", "primitive-types", - "zk_evm_abstractions 0.150.5", - "zkevm_opcode_defs 0.150.5", + "zk_evm_abstractions 0.150.6", + "zkevm_opcode_defs 0.150.6", "zksync_vm2_interface", ] 
[[package]] name = "zksync_vm2_interface" -version = "0.1.0" -source = "git+https://github.com/matter-labs/vm2.git?rev=74577d9be13b1bff9d1a712389731f669b179e47#74577d9be13b1bff9d1a712389731f669b179e47" +version = "0.2.1" +source = "git+https://github.com/matter-labs/vm2.git?rev=df5bec3d04d64d434f9b0ccb285ba4681008f7b3#df5bec3d04d64d434f9b0ccb285ba4681008f7b3" dependencies = [ "primitive-types", ] @@ -11324,6 +11358,7 @@ name = "zksync_vm_executor" version = "0.1.0" dependencies = [ "anyhow", + "assert_matches", "async-trait", "once_cell", "tokio", @@ -11413,6 +11448,24 @@ dependencies = [ "zksync_types", ] +[[package]] +name = "zstd" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcf2b778a664581e31e389454a7072dab1647606d44f7feea22cd5abb9c9f3f9" +dependencies = [ + "zstd-safe", +] + +[[package]] +name = "zstd-safe" +version = "7.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "54a3ab4db68cea366acc5c897c7b4d4d1b8994a9cd6e6f841f8964566a419059" +dependencies = [ + "zstd-sys", +] + [[package]] name = "zstd-sys" version = "2.0.13+zstd.1.5.6" diff --git a/Cargo.toml b/Cargo.toml index 94fadb25968..0f8e6ba77ae 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,85 +1,84 @@ [workspace] members = [ - # Binaries - "core/bin/block_reverter", - "core/bin/contract-verifier", - "core/bin/external_node", - "core/bin/merkle_tree_consistency_checker", - "core/bin/snapshots_creator", - "core/bin/selector_generator", - "core/bin/system-constants-generator", - "core/bin/verified_sources_fetcher", - "core/bin/zksync_server", - "core/bin/genesis_generator", - "core/bin/zksync_tee_prover", - # Node services - "core/node/node_framework", - "core/node/proof_data_handler", - "core/node/block_reverter", - "core/node/commitment_generator", - "core/node/house_keeper", - "core/node/genesis", - "core/node/shared_metrics", - "core/node/db_pruner", - "core/node/fee_model", - "core/node/da_dispatcher", - "core/node/eth_sender", - "core/node/vm_runner", - "core/node/test_utils", - "core/node/state_keeper", - "core/node/reorg_detector", - "core/node/consistency_checker", - "core/node/metadata_calculator", - "core/node/node_sync", - "core/node/node_storage_init", - "core/node/consensus", - "core/node/contract_verification_server", - "core/node/api_server", - "core/node/tee_verifier_input_producer", - "core/node/base_token_adjuster", - "core/node/external_proof_integration_api", - "core/node/logs_bloom_backfill", - "core/node/da_clients", - # Libraries - "core/lib/db_connection", - "core/lib/zksync_core_leftovers", - "core/lib/basic_types", - "core/lib/config", - "core/lib/constants", - "core/lib/contract_verifier", - "core/lib/contracts", - "core/lib/circuit_breaker", - "core/lib/dal", - "core/lib/env_config", - "core/lib/da_client", - "core/lib/eth_client", - "core/lib/eth_signer", - "core/lib/l1_contract_interface", - "core/lib/mempool", - "core/lib/merkle_tree", - "core/lib/mini_merkle_tree", - "core/lib/node_framework_derive", - "core/lib/object_store", - "core/lib/prover_interface", - "core/lib/queued_job_processor", - "core/lib/state", - "core/lib/storage", - "core/lib/tee_verifier", - "core/lib/types", - "core/lib/protobuf_config", - "core/lib/utils", - "core/lib/vlog", - "core/lib/multivm", - "core/lib/vm_interface", - "core/lib/vm_executor", - "core/lib/web3_decl", - "core/lib/snapshots_applier", - "core/lib/crypto_primitives", - "core/lib/external_price_api", - # Test infrastructure - "core/tests/test_account", - "core/tests/loadnext", 
- "core/tests/vm-benchmark", + # Binaries + "core/bin/block_reverter", + "core/bin/contract-verifier", + "core/bin/external_node", + "core/bin/merkle_tree_consistency_checker", + "core/bin/snapshots_creator", + "core/bin/selector_generator", + "core/bin/system-constants-generator", + "core/bin/verified_sources_fetcher", + "core/bin/zksync_server", + "core/bin/genesis_generator", + "core/bin/zksync_tee_prover", + # Node services + "core/node/node_framework", + "core/node/proof_data_handler", + "core/node/block_reverter", + "core/node/commitment_generator", + "core/node/house_keeper", + "core/node/genesis", + "core/node/shared_metrics", + "core/node/db_pruner", + "core/node/fee_model", + "core/node/da_dispatcher", + "core/node/eth_sender", + "core/node/vm_runner", + "core/node/test_utils", + "core/node/state_keeper", + "core/node/reorg_detector", + "core/node/consistency_checker", + "core/node/metadata_calculator", + "core/node/node_sync", + "core/node/node_storage_init", + "core/node/consensus", + "core/node/contract_verification_server", + "core/node/api_server", + "core/node/base_token_adjuster", + "core/node/external_proof_integration_api", + "core/node/logs_bloom_backfill", + "core/node/da_clients", + # Libraries + "core/lib/db_connection", + "core/lib/zksync_core_leftovers", + "core/lib/basic_types", + "core/lib/config", + "core/lib/constants", + "core/lib/contract_verifier", + "core/lib/contracts", + "core/lib/circuit_breaker", + "core/lib/dal", + "core/lib/env_config", + "core/lib/da_client", + "core/lib/eth_client", + "core/lib/eth_signer", + "core/lib/l1_contract_interface", + "core/lib/mempool", + "core/lib/merkle_tree", + "core/lib/mini_merkle_tree", + "core/lib/node_framework_derive", + "core/lib/object_store", + "core/lib/prover_interface", + "core/lib/queued_job_processor", + "core/lib/state", + "core/lib/storage", + "core/lib/tee_verifier", + "core/lib/types", + "core/lib/protobuf_config", + "core/lib/utils", + "core/lib/vlog", + "core/lib/multivm", + "core/lib/vm_interface", + "core/lib/vm_executor", + "core/lib/web3_decl", + "core/lib/snapshots_applier", + "core/lib/crypto_primitives", + "core/lib/external_price_api", + # Test infrastructure + "core/tests/test_account", + "core/tests/loadnext", + "core/tests/vm-benchmark", ] resolver = "2" @@ -111,9 +110,11 @@ backon = "0.4.4" bigdecimal = "0.4.5" bincode = "1" blake2 = "0.10" +bytes = "1" chrono = "0.4" clap = "4.2.2" codegen = "0.2.0" +const-decoder = "0.4.0" criterion = "0.4.0" ctrlc = "3.1" dashmap = "5.5.3" @@ -155,7 +156,7 @@ rayon = "1.3.1" regex = "1" reqwest = "0.12" rlp = "0.5" -rocksdb = "0.21.0" +rocksdb = "0.21" rustc_version = "0.4.0" rustls = "0.23" secp256k1 = { version = "0.27.0", features = ["recovery", "global-context"] } @@ -172,6 +173,7 @@ sqlx = "0.8.1" static_assertions = "1.1" structopt = "0.3.20" strum = "0.26" +strum_macros = "0.26.4" tempfile = "3.0.2" test-casing = "0.1.2" test-log = "0.2.15" @@ -185,7 +187,7 @@ tower-http = "0.5.2" tracing = "0.1" tracing-subscriber = "0.3" tracing-opentelemetry = "0.25.0" -time = "0.3.36" # Has to be same as used by `tracing-subscriber` +time = "0.3.36" # Has to be same as used by `tracing-subscriber` url = "2" web3 = "0.19.0" fraction = "0.15.3" @@ -217,30 +219,30 @@ circuit_sequencer_api_1_3_3 = { package = "circuit_sequencer_api", version = "0. 
circuit_sequencer_api_1_4_0 = { package = "circuit_sequencer_api", version = "0.140" } circuit_sequencer_api_1_4_1 = { package = "circuit_sequencer_api", version = "0.141" } circuit_sequencer_api_1_4_2 = { package = "circuit_sequencer_api", version = "0.142" } -circuit_sequencer_api_1_5_0 = { package = "circuit_sequencer_api", version = "=0.150.5" } +circuit_sequencer_api_1_5_0 = { package = "circuit_sequencer_api", version = "=0.150.6" } crypto_codegen = { package = "zksync_solidity_vk_codegen", version = "=0.30.1" } -kzg = { package = "zksync_kzg", version = "=0.150.5" } +kzg = { package = "zksync_kzg", version = "=0.150.6" } zk_evm = { version = "=0.133.0" } zk_evm_1_3_1 = { package = "zk_evm", version = "0.131.0-rc.2" } zk_evm_1_3_3 = { package = "zk_evm", version = "0.133" } zk_evm_1_4_0 = { package = "zk_evm", version = "0.140" } zk_evm_1_4_1 = { package = "zk_evm", version = "0.141" } -zk_evm_1_5_0 = { package = "zk_evm", version = "=0.150.5" } +zk_evm_1_5_0 = { package = "zk_evm", version = "=0.150.6" } # New VM; pinned to a specific commit because of instability -zksync_vm2 = { git = "https://github.com/matter-labs/vm2.git", rev = "74577d9be13b1bff9d1a712389731f669b179e47" } +zksync_vm2 = { git = "https://github.com/matter-labs/vm2.git", rev = "df5bec3d04d64d434f9b0ccb285ba4681008f7b3" } # Consensus dependencies. -zksync_concurrency = "=0.3.0" -zksync_consensus_bft = "=0.3.0" -zksync_consensus_crypto = "=0.3.0" -zksync_consensus_executor = "=0.3.0" -zksync_consensus_network = "=0.3.0" -zksync_consensus_roles = "=0.3.0" -zksync_consensus_storage = "=0.3.0" -zksync_consensus_utils = "=0.3.0" -zksync_protobuf = "=0.3.0" -zksync_protobuf_build = "=0.3.0" +zksync_concurrency = "=0.5.0" +zksync_consensus_bft = "=0.5.0" +zksync_consensus_crypto = "=0.5.0" +zksync_consensus_executor = "=0.5.0" +zksync_consensus_network = "=0.5.0" +zksync_consensus_roles = "=0.5.0" +zksync_consensus_storage = "=0.5.0" +zksync_consensus_utils = "=0.5.0" +zksync_protobuf = "=0.5.0" +zksync_protobuf_build = "=0.5.0" # "Local" dependencies zksync_multivm = { version = "0.1.0", path = "core/lib/multivm" } @@ -307,6 +309,5 @@ zksync_node_storage_init = { version = "0.1.0", path = "core/node/node_storage_i zksync_node_consensus = { version = "0.1.0", path = "core/node/consensus" } zksync_contract_verification_server = { version = "0.1.0", path = "core/node/contract_verification_server" } zksync_node_api_server = { version = "0.1.0", path = "core/node/api_server" } -zksync_tee_verifier_input_producer = { version = "0.1.0", path = "core/node/tee_verifier_input_producer" } zksync_base_token_adjuster = { version = "0.1.0", path = "core/node/base_token_adjuster" } zksync_logs_bloom_backfill = { version = "0.1.0", path = "core/node/logs_bloom_backfill" } diff --git a/bin/ci_localnet_up b/bin/ci_localnet_up index 8673a909af7..c399de410d7 100755 --- a/bin/ci_localnet_up +++ b/bin/ci_localnet_up @@ -4,6 +4,5 @@ set -e cd $ZKSYNC_HOME -mkdir -p ./volumes/postgres ./volumes/reth/data run_retried docker-compose pull docker-compose --profile runner up -d --wait diff --git a/bin/prover_checkers/batch_availability_checker b/bin/prover_checkers/batch_availability_checker new file mode 100644 index 00000000000..ae7aade2f68 --- /dev/null +++ b/bin/prover_checkers/batch_availability_checker @@ -0,0 +1,40 @@ +#!/usr/bin/env bash + +set -o errexit +set -o pipefail + +# Configuration +# DATABASE_URL - The URL of the prover database to connect to +# BATCH_NUMBER - The batch number to check availability for +# INTERVAL - Time interval 
for polling in seconds
+# TIMEOUT - Timeout of script in seconds
+
+# Start timer
+START_TIME=$(date +%s)
+
+# Loop to query periodically
+while true; do
+  # Calculate the elapsed time
+  CURRENT_TIME=$(date +%s)
+  ELAPSED_TIME=$((CURRENT_TIME - START_TIME))
+
+  # Check if the timeout has been reached
+  if [ $ELAPSED_TIME -ge $TIMEOUT ]; then
+    echo "Timeout reached. Failing CI..."
+    exit 1 # Exit with non-zero status to fail CI
+  fi
+
+  # Run the SQL query and capture the result
+  RESULT=$(psql $DATABASE_URL -c "SELECT count(*) FROM witness_inputs_fri WHERE l1_batch_number = $BATCH_NUMBER;" -t -A)
+
+  # Check if the result is 1
+  if [ "$RESULT" -eq 1 ]; then
+    echo "Query result is 1. Success!"
+    exit 0 # Exit with zero status to succeed CI
+  else
+    echo "Batch is not available yet. Retrying in $INTERVAL seconds..."
+  fi
+
+  # Wait for the next interval
+  sleep $INTERVAL
+done
diff --git a/bin/prover_checkers/batch_l1_status_checker b/bin/prover_checkers/batch_l1_status_checker
new file mode 100755
index 00000000000..24f26e354ea
--- /dev/null
+++ b/bin/prover_checkers/batch_l1_status_checker
@@ -0,0 +1,56 @@
+#!/usr/bin/env bash
+
+set -o errexit
+set -o pipefail
+
+# Configuration
+# URL - URL of the API endpoint
+# INTERVAL - Time interval for polling in seconds
+# TIMEOUT - Timeout of script in seconds
+# DATABASE_URL - The URL of the prover database (used only for the fallback status report below)
+# BATCH_NUMBER - The batch number whose proof compression status is reported on retries
+
+# Start timer
+START_TIME=$(date +%s)
+
+echo "URL: $URL"
+
+# Loop to query periodically
+while true; do
+  # Calculate the elapsed time
+  CURRENT_TIME=$(date +%s)
+  ELAPSED_TIME=$((CURRENT_TIME - START_TIME))
+
+  # Check if the timeout has been reached
+  if [ $ELAPSED_TIME -ge $TIMEOUT ]; then
+    echo "Timeout reached. Failing CI..."
+    exit 1 # Exit with non-zero status to fail CI
+  fi
+
+  # Run the curl request and capture the response
+  RESPONSE=$(curl --silent --request POST \
+    --url $URL \
+    --header 'Content-Type: application/json' \
+    --data '{
+      "jsonrpc": "2.0",
+      "id": 1,
+      "method": "zks_getBlockDetails",
+      "params": [1]
+    }')
+
+  # Parse the executedAt field using jq
+  EXECUTED_AT=$(echo $RESPONSE | jq -r '.result.executedAt')
+
+  # Check if executedAt is not null
+  if [ "$EXECUTED_AT" != "null" ] && [ -n "$EXECUTED_AT" ]; then
+    echo "executedAt is not null: $EXECUTED_AT"
+    echo "true"
+    exit 0 # Exit with zero status to succeed CI
+  else
+    DATABASE_STATUS=$(psql $DATABASE_URL -c "SELECT status FROM proof_compression_jobs_fri WHERE l1_batch_number = $BATCH_NUMBER;" -t -A)
+    echo "executedAt is null, database status is $DATABASE_STATUS, retrying in $INTERVAL seconds..."
+  fi
+
+  # Wait for the next interval
+  sleep $INTERVAL
+done
diff --git a/bin/prover_checkers/kill_prover b/bin/prover_checkers/kill_prover
new file mode 100644
index 00000000000..2a65aea2d67
--- /dev/null
+++ b/bin/prover_checkers/kill_prover
@@ -0,0 +1,18 @@
+#!/usr/bin/env bash
+
+set -o errexit
+set -o pipefail
+
+# Use pkill to find and kill any running circuit prover processes (these are the processes occupying the GPU)
+if ! pkill -f 'zksync_circuit_prover|zkstack prover run --component=circuit-prover'; then
+  echo "No processes are currently using the GPU."
+  exit 0
+fi
+
+echo "All GPU-related processes have been killed."
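+
+# The checkers in this directory are configured entirely through environment variables.
+# Illustrative invocations (the values below are examples, not defaults shipped with the repo):
+#   DATABASE_URL=postgres://... BATCH_NUMBER=1 INTERVAL=30 TIMEOUT=1200 ./bin/prover_checkers/batch_availability_checker
+#   URL=http://localhost:3050 INTERVAL=30 TIMEOUT=1200 ./bin/prover_checkers/batch_l1_status_checker
+#   ./bin/prover_checkers/kill_prover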
diff --git a/bin/prover_checkers/prover_jobs_status_checker b/bin/prover_checkers/prover_jobs_status_checker
new file mode 100755
index 00000000000..6816d9a2d14
--- /dev/null
+++ b/bin/prover_checkers/prover_jobs_status_checker
@@ -0,0 +1,42 @@
+#!/usr/bin/env bash
+
+set -o errexit
+set -o pipefail
+
+# Configuration
+# DATABASE_URL - The URL of the prover database to connect to
+# BATCH_NUMBER - The batch number to check readiness for
+# INTERVAL - Time interval for polling in seconds
+# TIMEOUT - Timeout of script in seconds
+
+# Start timer
+START_TIME=$(date +%s)
+
+# Loop to query periodically
+while true; do
+  # Calculate the elapsed time
+  CURRENT_TIME=$(date +%s)
+  ELAPSED_TIME=$((CURRENT_TIME - START_TIME))
+
+  # Check if the timeout has been reached
+  if [ $ELAPSED_TIME -ge $TIMEOUT ]; then
+    echo "Timeout reached. Failing CI..."
+    exit 1 # Exit with non-zero status to fail CI
+  fi
+
+  # Run the SQL query and capture the result
+  RESULT=$(psql $DATABASE_URL -c "SELECT count(*) FROM proof_compression_jobs_fri WHERE l1_batch_number = $BATCH_NUMBER AND status = 'queued';" -t -A)
+
+  # Check if the result is 1
+  if [ "$RESULT" -eq 1 ]; then
+    echo "Query result is 1. Success!"
+    exit 0 # Exit with zero status to succeed CI
+  else
+    STATUS=$(psql $DATABASE_URL -c "SELECT COUNT(*), status FROM prover_jobs_fri WHERE l1_batch_number = $BATCH_NUMBER GROUP BY status;" -t -A)
+    echo "Current status is $STATUS"
+    echo "Retrying in $INTERVAL seconds..."
+  fi
+
+  # Wait for the next interval
+  sleep $INTERVAL
+done
diff --git a/bin/run_on_all_chains.sh b/bin/run_on_all_chains.sh
new file mode 100755
index 00000000000..68b6e81662f
--- /dev/null
+++ b/bin/run_on_all_chains.sh
@@ -0,0 +1,44 @@
+#!/bin/bash
+
+# Runs the given command once per chain, in parallel, and reports per-chain success/failure.
+# Usage: run_on_all_chains.sh <command> <comma-separated chain list> <log directory>
+
+# Colors for the terminal output
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+NC='\033[0m' # No Color
+
+
+command=$1
+chain_list=$2
+log_dir=$3
+IFS=',' read -r -a chains <<< "$chain_list"
+pids=()
+statuses=()
+
+# Start background processes
+for i in "${!chains[@]}"; do
+  eval "$command --chain ${chains[$i]} &> ${log_dir}/${chains[$i]}.log" &
+  pids+=($!)
+done
+
+# Wait for all processes to complete and capture their exit statuses
+for i in "${!pids[@]}"; do
+  wait ${pids[$i]}
+  statuses[$i]=$?
+done
+
+# Check exit statuses and set overall status
+overall_status=0
+
+for i in "${!statuses[@]}"; do
+  if [ ${statuses[$i]} -ne 0 ]; then
+    overall_status=1
+    echo -e "${RED}✗ ERROR (exit code ${statuses[$i]}): ${chains[$i]}${NC}"
+  else
+    echo -e "${GREEN}✓ SUCCESS: ${chains[$i]}${NC}"
+  fi
+done
+
+# Exit with overall status
+exit $overall_status
diff --git a/bin/zk b/bin/zk
index 868c4e338cd..f3b927de8f8 100755
--- a/bin/zk
+++ b/bin/zk
@@ -39,6 +39,7 @@ check_yarn_version() {
 # and it will be hard for them to see what went wrong.
 check_subdirectory
 check_yarn_version
+
 if [ -z "$1" ]; then
     cd $ZKSYNC_HOME
     run_retried yarn install --frozen-lockfile && yarn utils build && yarn zk build
diff --git a/bin/zkt b/bin/zkt
deleted file mode 100755
index f781ca67528..00000000000
--- a/bin/zkt
+++ /dev/null
@@ -1,17 +0,0 @@
-#!/usr/bin/env bash
-
-cd $(dirname $0)
-
-if which zkup >/dev/null; then
-    cargo uninstall zk_inception
-    cargo uninstall zk_supervisor
-    git config --local core.hooksPath ||
-        git config --local core.hooksPath ./.githooks
-    zkup -p ..
--alias -else - echo zkup is not installed, please install it https://github.com/matter-labs/zksync-era/tree/main/zk_toolbox/zkup - cd ../zk_toolbox - cargo install --path ./crates/zk_inception --force - cargo install --path ./crates/zk_supervisor --force -fi - diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md index 6cf2ff4419a..56239303cd4 100644 --- a/core/CHANGELOG.md +++ b/core/CHANGELOG.md @@ -1,5 +1,103 @@ # Changelog +## [25.0.0](https://github.com/matter-labs/zksync-era/compare/core-v24.29.0...core-v25.0.0) (2024-10-23) + + +### ⚠ BREAKING CHANGES + +* **contracts:** integrate protocol defense changes ([#2737](https://github.com/matter-labs/zksync-era/issues/2737)) + +### Features + +* Add CoinMarketCap external API ([#2971](https://github.com/matter-labs/zksync-era/issues/2971)) ([c1cb30e](https://github.com/matter-labs/zksync-era/commit/c1cb30e59ca1d0b5fea5fe0980082aea0eb04aa2)) +* **api:** Implement eth_maxPriorityFeePerGas ([#3135](https://github.com/matter-labs/zksync-era/issues/3135)) ([35e84cc](https://github.com/matter-labs/zksync-era/commit/35e84cc03a7fdd315932fb3020fe41c95a6e4bca)) +* **api:** Make acceptable values cache lag configurable ([#3028](https://github.com/matter-labs/zksync-era/issues/3028)) ([6747529](https://github.com/matter-labs/zksync-era/commit/67475292ff770d2edd6884be27f976a4144778ae)) +* **contracts:** integrate protocol defense changes ([#2737](https://github.com/matter-labs/zksync-era/issues/2737)) ([c60a348](https://github.com/matter-labs/zksync-era/commit/c60a3482ee09b3e371163e62f49e83bc6d6f4548)) +* **external-node:** save protocol version before opening a batch ([#3136](https://github.com/matter-labs/zksync-era/issues/3136)) ([d6de4f4](https://github.com/matter-labs/zksync-era/commit/d6de4f40ddce339c760c95e2bf4b8aceb571af7f)) +* Prover e2e test ([#2975](https://github.com/matter-labs/zksync-era/issues/2975)) ([0edd796](https://github.com/matter-labs/zksync-era/commit/0edd7962429b3530ae751bd7cc947c97193dd0ca)) +* **prover:** Add min_provers and dry_run features. Improve metrics and test. 
([#3129](https://github.com/matter-labs/zksync-era/issues/3129)) ([7c28964](https://github.com/matter-labs/zksync-era/commit/7c289649b7b3c418c7193a35b51c264cf4970f3c)) +* **tee_verifier:** speedup SQL query for new jobs ([#3133](https://github.com/matter-labs/zksync-era/issues/3133)) ([30ceee8](https://github.com/matter-labs/zksync-era/commit/30ceee8a48046e349ff0234ebb24d468a0e0876c)) +* vm2 tracers can access storage ([#3114](https://github.com/matter-labs/zksync-era/issues/3114)) ([e466b52](https://github.com/matter-labs/zksync-era/commit/e466b52948e3c4ed1cb5af4fd999a52028e4d216)) +* **vm:** Return compressed bytecodes from `push_transaction()` ([#3126](https://github.com/matter-labs/zksync-era/issues/3126)) ([37f209f](https://github.com/matter-labs/zksync-era/commit/37f209fec8e7cb65c0e60003d46b9ea69c43caf1)) + + +### Bug Fixes + +* **call_tracer:** Flat call tracer fixes for blocks ([#3095](https://github.com/matter-labs/zksync-era/issues/3095)) ([30ddb29](https://github.com/matter-labs/zksync-era/commit/30ddb292977340beab37a81f75c35480cbdd59d3)) +* **consensus:** preventing config update reverts ([#3148](https://github.com/matter-labs/zksync-era/issues/3148)) ([caee55f](https://github.com/matter-labs/zksync-era/commit/caee55fef4eed0ec58cceaeba277bbdedf5c6f51)) +* **en:** Return `SyncState` health check ([#3142](https://github.com/matter-labs/zksync-era/issues/3142)) ([abeee81](https://github.com/matter-labs/zksync-era/commit/abeee8190d3c3a5e577d71024bdfb30ff516ad03)) +* **external-node:** delete empty unsealed batch on EN initialization ([#3125](https://github.com/matter-labs/zksync-era/issues/3125)) ([5d5214b](https://github.com/matter-labs/zksync-era/commit/5d5214ba983823b306495d34fdd1d46abacce07a)) +* Fix counter metric type to be Counter. ([#3153](https://github.com/matter-labs/zksync-era/issues/3153)) ([08a3fe7](https://github.com/matter-labs/zksync-era/commit/08a3fe7ffd0410c51334193068649905337d5e84)) +* **mempool:** minor mempool improvements ([#3113](https://github.com/matter-labs/zksync-era/issues/3113)) ([cd16083](https://github.com/matter-labs/zksync-era/commit/cd160830a0b7ebe5af4ecbd944da1cd51af3528a)) +* **prover:** Run for zero queue to allow scaling down to 0 ([#3115](https://github.com/matter-labs/zksync-era/issues/3115)) ([bbe1919](https://github.com/matter-labs/zksync-era/commit/bbe191937fa5c5711a7164fd4f0c2ae65cda0833)) +* restore instruction count functionality ([#3081](https://github.com/matter-labs/zksync-era/issues/3081)) ([6159f75](https://github.com/matter-labs/zksync-era/commit/6159f7531a0340a69c4926c4e0325811ed7cabb8)) +* **state-keeper:** save call trace for upgrade txs ([#3132](https://github.com/matter-labs/zksync-era/issues/3132)) ([e1c363f](https://github.com/matter-labs/zksync-era/commit/e1c363f8f5e03c8d62bba1523f17b87d6a0e25ad)) +* **tee_prover:** add zstd compression ([#3144](https://github.com/matter-labs/zksync-era/issues/3144)) ([7241ae1](https://github.com/matter-labs/zksync-era/commit/7241ae139b2b6bf9a9966eaa2f22203583a3786f)) +* **tee_verifier:** correctly initialize storage for re-execution ([#3017](https://github.com/matter-labs/zksync-era/issues/3017)) ([9d88373](https://github.com/matter-labs/zksync-era/commit/9d88373f1b745c489e98e5ef542644a70e815498)) + +## [24.29.0](https://github.com/matter-labs/zksync-era/compare/core-v24.28.0...core-v24.29.0) (2024-10-14) + + +### Features + +* Add initial version prover_autoscaler ([#2993](https://github.com/matter-labs/zksync-era/issues/2993)) 
([ebf9604](https://github.com/matter-labs/zksync-era/commit/ebf9604c5ab2a1cae1ffd2f9c922f35a1d0ad876)) +* add metric to track current cbt ratio ([#3020](https://github.com/matter-labs/zksync-era/issues/3020)) ([3fd2fb1](https://github.com/matter-labs/zksync-era/commit/3fd2fb14e7283c6858731e162522e70051a8e162)) +* **configs:** Add port parameter to ConsensusConfig ([#2986](https://github.com/matter-labs/zksync-era/issues/2986)) ([25112df](https://github.com/matter-labs/zksync-era/commit/25112df39d052f083bc45964f0298b3af5842cac)) +* **configs:** Add port parameter to ConsensusConfig ([#3051](https://github.com/matter-labs/zksync-era/issues/3051)) ([038c397](https://github.com/matter-labs/zksync-era/commit/038c397ce842601da5109c460b09dbf9d51cf2fc)) +* **consensus:** smooth transition to p2p syncing (BFT-515) ([#3075](https://github.com/matter-labs/zksync-era/issues/3075)) ([5d339b4](https://github.com/matter-labs/zksync-era/commit/5d339b46fee66bc3a45493586626d318380680dd)) +* **consensus:** Support for syncing blocks before consensus genesis over p2p network ([#3040](https://github.com/matter-labs/zksync-era/issues/3040)) ([d3edc3d](https://github.com/matter-labs/zksync-era/commit/d3edc3d817c151ed00d4fa822fdae0a746e33356)) +* **en:** periodically fetch bridge addresses ([#2949](https://github.com/matter-labs/zksync-era/issues/2949)) ([e984bfb](https://github.com/matter-labs/zksync-era/commit/e984bfb8a243bc746549ab9347dc0a367fe02790)) +* **eth-sender:** add time_in_mempool_cap config ([#3018](https://github.com/matter-labs/zksync-era/issues/3018)) ([f6d86bd](https://github.com/matter-labs/zksync-era/commit/f6d86bd7935a1cdbb528b13437424031fda3cb8e)) +* **eth-watch:** catch another reth error ([#3026](https://github.com/matter-labs/zksync-era/issues/3026)) ([4640c42](https://github.com/matter-labs/zksync-era/commit/4640c4233af46c97f207d2dbce5fedd1bcb66c43)) +* Handle new yul compilation flow ([#3038](https://github.com/matter-labs/zksync-era/issues/3038)) ([4035361](https://github.com/matter-labs/zksync-era/commit/40353616f278800dc80fcbe5f2a6483019033b20)) +* **state-keeper:** pre-insert unsealed L1 batches ([#2846](https://github.com/matter-labs/zksync-era/issues/2846)) ([e5b5a3b](https://github.com/matter-labs/zksync-era/commit/e5b5a3b7b62e8d4035fe89c2a287bf3606d17bc5)) +* **vm:** EVM emulator support – base ([#2979](https://github.com/matter-labs/zksync-era/issues/2979)) ([deafa46](https://github.com/matter-labs/zksync-era/commit/deafa460715334a77edf9fe8aa76fa90029342c4)) +* **zk_toolbox:** added support for setting attester committee defined in a separate file ([#2992](https://github.com/matter-labs/zksync-era/issues/2992)) ([6105514](https://github.com/matter-labs/zksync-era/commit/610551427d5ab129f91e69b5efb318da917457d7)) +* **zk_toolbox:** Redesign zk_toolbox commands ([#3003](https://github.com/matter-labs/zksync-era/issues/3003)) ([114834f](https://github.com/matter-labs/zksync-era/commit/114834f357421c62d596a1954fac8ce615cfde49)) +* **zktoolbox:** added checking the contract owner in set-attester-committee command ([#3061](https://github.com/matter-labs/zksync-era/issues/3061)) ([9b0a606](https://github.com/matter-labs/zksync-era/commit/9b0a6067923c5276f560f3abccedc4e6a5167dda)) + + +### Bug Fixes + +* **api:** Accept integer block count in `eth_feeHistory` ([#3077](https://github.com/matter-labs/zksync-era/issues/3077)) ([4d527d4](https://github.com/matter-labs/zksync-era/commit/4d527d4b44b6b083e2a813d48c79d8021ea6f843)) +* **api:** Adapt `eth_getCode` to EVM emulator 
([#3073](https://github.com/matter-labs/zksync-era/issues/3073)) ([15fe5a6](https://github.com/matter-labs/zksync-era/commit/15fe5a62f03cd103afd7fa5eb03e27db25686ba9)) +* bincode deserialization for VM run data ([#3044](https://github.com/matter-labs/zksync-era/issues/3044)) ([b0ec79f](https://github.com/matter-labs/zksync-era/commit/b0ec79fcb7fa120f095d987f53c67fdab92e2c79)) +* bincode deserialize for WitnessInputData ([#3055](https://github.com/matter-labs/zksync-era/issues/3055)) ([91d0595](https://github.com/matter-labs/zksync-era/commit/91d0595631cc5f5bffc42a4b04d5015d2be659b1)) +* **external-node:** make fetcher rely on unsealed batches ([#3088](https://github.com/matter-labs/zksync-era/issues/3088)) ([bb5d147](https://github.com/matter-labs/zksync-era/commit/bb5d1470d5e1e8e69d9b79c60284ea8adaee4038)) +* **state-keeper:** ensure unsealed batch is present during IO init ([#3071](https://github.com/matter-labs/zksync-era/issues/3071)) ([bdeb411](https://github.com/matter-labs/zksync-era/commit/bdeb411c593ac3d5e16158e64c4210bb00edcb0c)) +* **vm:** Check protocol version for fast VM ([#3080](https://github.com/matter-labs/zksync-era/issues/3080)) ([a089f3f](https://github.com/matter-labs/zksync-era/commit/a089f3feb916ccc9007d9c32ec909db694b7d9f4)) +* **vm:** Prepare new VM for use in API server and fix divergences ([#2994](https://github.com/matter-labs/zksync-era/issues/2994)) ([741b77e](https://github.com/matter-labs/zksync-era/commit/741b77e080f75c6a93d3ee779b1c9ce4297618f9)) + + +### Reverts + +* **configs:** Add port parameter to ConsensusConfig ([#2986](https://github.com/matter-labs/zksync-era/issues/2986)) ([#3046](https://github.com/matter-labs/zksync-era/issues/3046)) ([abe35bf](https://github.com/matter-labs/zksync-era/commit/abe35bf7aea1120b77fdbd413d927e45da48d26c)) + +## [24.28.0](https://github.com/matter-labs/zksync-era/compare/core-v24.27.0...core-v24.28.0) (2024-10-02) + + +### Features + +* **da-clients:** add secrets ([#2954](https://github.com/matter-labs/zksync-era/issues/2954)) ([f4631e4](https://github.com/matter-labs/zksync-era/commit/f4631e4466de620cc1401b326d864cdb8b48a05d)) +* **eth-sender:** add a cap to time_in_mempool ([#2978](https://github.com/matter-labs/zksync-era/issues/2978)) ([650d42f](https://github.com/matter-labs/zksync-era/commit/650d42fea6124d80b60a8270a303d72ad6ac741e)) +* **eth-watch:** redesign to support multiple chains ([#2867](https://github.com/matter-labs/zksync-era/issues/2867)) ([aa72d84](https://github.com/matter-labs/zksync-era/commit/aa72d849c24a664acd083eba73795ddc5d31d55f)) +* Expose http debug page ([#2952](https://github.com/matter-labs/zksync-era/issues/2952)) ([e0b6488](https://github.com/matter-labs/zksync-era/commit/e0b64888aae7324aec2d40fa0cd51ea7e1450cd9)) +* **zk_toolbox:** add fees integration test to toolbox ([#2898](https://github.com/matter-labs/zksync-era/issues/2898)) ([e7ead76](https://github.com/matter-labs/zksync-era/commit/e7ead760ce0417dd36af3839ac557f7e9ab238a4)) +* **zk_toolbox:** Add SQL format for zk supervisor ([#2950](https://github.com/matter-labs/zksync-era/issues/2950)) ([540e5d7](https://github.com/matter-labs/zksync-era/commit/540e5d7554f54e80d52f1bfae37e03ca8f787baf)) + + +### Bug Fixes + +* **api:** Fix batch fee input for `debug` namespace ([#2948](https://github.com/matter-labs/zksync-era/issues/2948)) ([79b6fcf](https://github.com/matter-labs/zksync-era/commit/79b6fcf8b5d10a0ccdceb846370dd6870b6a32b5)) +* chainstack block limit exceeded 
([#2974](https://github.com/matter-labs/zksync-era/issues/2974)) ([4ffbf42](https://github.com/matter-labs/zksync-era/commit/4ffbf426de166c11aaf5d7b5ed7d199644fba229)) +* **eth-watch:** add missing check that from_block is not larger than finalized_block ([#2969](https://github.com/matter-labs/zksync-era/issues/2969)) ([3f406c7](https://github.com/matter-labs/zksync-era/commit/3f406c7d0c0e76d798c2d838abde57ca692822c0)) +* ignore unknown fields in rpc json response ([#2962](https://github.com/matter-labs/zksync-era/issues/2962)) ([692ea73](https://github.com/matter-labs/zksync-era/commit/692ea73f75a5fb9db2b4ac33ad24d20568638742)) + + +### Performance Improvements + +* **api:** More efficient gas estimation ([#2937](https://github.com/matter-labs/zksync-era/issues/2937)) ([3b69e37](https://github.com/matter-labs/zksync-era/commit/3b69e37e470dab859a55787f6cc971e7083de2fd)) + ## [24.27.0](https://github.com/matter-labs/zksync-era/compare/core-v24.26.0...core-v24.27.0) (2024-09-25) diff --git a/core/bin/external_node/Cargo.toml b/core/bin/external_node/Cargo.toml index d841ee5b42e..4e3dc548cf8 100644 --- a/core/bin/external_node/Cargo.toml +++ b/core/bin/external_node/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "zksync_external_node" description = "Non-validator ZKsync node" -version = "24.27.0" # x-release-please-version +version = "25.0.0" # x-release-please-version edition.workspace = true authors.workspace = true homepage.workspace = true diff --git a/core/bin/external_node/src/config/mod.rs b/core/bin/external_node/src/config/mod.rs index af26d5e80ce..420a6941c81 100644 --- a/core/bin/external_node/src/config/mod.rs +++ b/core/bin/external_node/src/config/mod.rs @@ -462,6 +462,8 @@ pub(crate) struct OptionalENConfig { /// Gateway RPC URL, needed for operating during migration. #[allow(dead_code)] pub gateway_url: Option, + /// Interval for bridge addresses refreshing in seconds. 
+ bridge_addresses_refresh_interval_sec: Option<NonZeroU64>, } impl OptionalENConfig { @@ -692,6 +694,7 @@ impl OptionalENConfig { api_namespaces, contracts_diamond_proxy_addr: None, gateway_url: enconfig.gateway_url.clone(), + bridge_addresses_refresh_interval_sec: enconfig.bridge_addresses_refresh_interval_sec, }) } @@ -918,6 +921,11 @@ impl OptionalENConfig { Duration::from_secs(self.pruning_data_retention_sec) } + pub fn bridge_addresses_refresh_interval(&self) -> Option<Duration> { + self.bridge_addresses_refresh_interval_sec + .map(|n| Duration::from_secs(n.get())) + } + #[cfg(test)] fn mock() -> Self { // Set all values to their defaults @@ -1416,9 +1424,9 @@ impl From<&ExternalNodeConfig> for InternalApiConfig { l2_erc20_default_bridge: config.remote.l2_erc20_bridge_addr, l1_shared_default_bridge: config.remote.l1_shared_bridge_proxy_addr, l2_shared_default_bridge: config.remote.l2_shared_bridge_addr, + l2_legacy_shared_bridge: config.remote.l2_legacy_shared_bridge_addr, l1_weth_bridge: config.remote.l1_weth_bridge_addr, l2_weth_bridge: config.remote.l2_weth_bridge_addr, - l2_legacy_shared_bridge: config.remote.l2_legacy_shared_bridge_addr, }, bridgehub_proxy_addr: config.remote.bridgehub_proxy_addr, state_transition_proxy_addr: config.remote.state_transition_proxy_addr, diff --git a/core/bin/external_node/src/node_builder.rs index e6284cb7f24..3a43d9d492d 100644 --- a/core/bin/external_node/src/node_builder.rs +++ b/core/bin/external_node/src/node_builder.rs @@ -11,7 +11,9 @@ use zksync_config::{ }, PostgresConfig, }; -use zksync_metadata_calculator::{MetadataCalculatorConfig, MetadataCalculatorRecoveryConfig}; +use zksync_metadata_calculator::{ + MerkleTreeReaderConfig, MetadataCalculatorConfig, MetadataCalculatorRecoveryConfig, +}; use zksync_node_api_server::web3::Namespace; use zksync_node_framework::{ implementations::layers::{ @@ -25,7 +27,7 @@ use zksync_node_framework::{ logs_bloom_backfill::LogsBloomBackfillLayer, main_node_client::MainNodeClientLayer, main_node_fee_params_fetcher::MainNodeFeeParamsFetcherLayer, - metadata_calculator::MetadataCalculatorLayer, + metadata_calculator::{MetadataCalculatorLayer, TreeApiServerLayer}, node_storage_init::{ external_node_strategy::{ExternalNodeInitStrategyLayer, SnapshotRecoveryConfig}, NodeStorageInitializerLayer, @@ -55,6 +57,7 @@ use zksync_node_framework::{ service::{ZkStackService, ZkStackServiceBuilder}, }; use zksync_state::RocksdbStorageOptions; +use zksync_types::L2_NATIVE_TOKEN_VAULT_ADDRESS; use crate::{config::ExternalNodeConfig, metrics::framework::ExternalNodeMetricsLayer, Component}; @@ -191,8 +194,22 @@ impl ExternalNodeBuilder { // compression. const OPTIONAL_BYTECODE_COMPRESSION: bool = true; + let l2_shared_bridge_addr = self + .config + .remote + .l2_shared_bridge_addr + .context("Missing `l2_shared_bridge_addr`")?; + let l2_legacy_shared_bridge_addr = if l2_shared_bridge_addr == L2_NATIVE_TOKEN_VAULT_ADDRESS + { + // System has migrated to `L2_NATIVE_TOKEN_VAULT_ADDRESS`, use legacy shared bridge address from main node. + self.config.remote.l2_legacy_shared_bridge_addr + } else { + // System hasn't migrated to `L2_NATIVE_TOKEN_VAULT_ADDRESS`, so we can safely use `l2_shared_bridge_addr`. + Some(l2_shared_bridge_addr) + }; + let persistence_layer = OutputHandlerLayer::new( - self.config.remote.l2_legacy_shared_bridge_addr, + l2_legacy_shared_bridge_addr, self.config.optional.l2_block_seal_queue_capacity, ) .with_pre_insert_txs(true) // EN requires txs to be pre-inserted.
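The legacy-bridge selection above is easy to misread, so here is a minimal free-standing sketch of the same rule. The `Address` alias and the vault constant below are illustrative stand-ins, not the crate's definitions:

// Sketch of the legacy shared bridge fallback; the type alias and constant value are stand-ins.
type Address = [u8; 20];
const L2_NATIVE_TOKEN_VAULT_ADDRESS: Address = [0x01; 20]; // placeholder value

fn legacy_shared_bridge_addr(
    l2_shared_bridge_addr: Address,
    remote_legacy_addr: Option<Address>,
) -> Option<Address> {
    if l2_shared_bridge_addr == L2_NATIVE_TOKEN_VAULT_ADDRESS {
        // Migrated chain: the legacy address must come from the main node.
        remote_legacy_addr
    } else {
        // Pre-migration: the shared bridge address doubles as the legacy one.
        Some(l2_shared_bridge_addr)
    }
}

fn main() {
    let migrated = [0x01; 20];
    assert_eq!(legacy_shared_bridge_addr(migrated, None), None);
    let not_migrated = [0x02; 20];
    assert_eq!(legacy_shared_bridge_addr(not_migrated, None), Some(not_migrated));
}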
@@ -369,11 +386,35 @@ impl ExternalNodeBuilder { Ok(self) } + fn add_isolated_tree_api_layer(mut self) -> anyhow::Result { + let reader_config = MerkleTreeReaderConfig { + db_path: self.config.required.merkle_tree_path.clone(), + max_open_files: self.config.optional.merkle_tree_max_open_files, + multi_get_chunk_size: self.config.optional.merkle_tree_multi_get_chunk_size, + block_cache_capacity: self.config.optional.merkle_tree_block_cache_size(), + include_indices_and_filters_in_block_cache: self + .config + .optional + .merkle_tree_include_indices_and_filters_in_block_cache, + }; + let api_config = MerkleTreeApiConfig { + port: self + .config + .tree_component + .api_port + .context("should contain tree api port")?, + }; + self.node + .add_layer(TreeApiServerLayer::new(reader_config, api_config)); + Ok(self) + } + fn add_tx_sender_layer(mut self) -> anyhow::Result { let postgres_storage_config = PostgresStorageCachesConfig { factory_deps_cache_size: self.config.optional.factory_deps_cache_size() as u64, initial_writes_cache_size: self.config.optional.initial_writes_cache_size() as u64, latest_values_cache_size: self.config.optional.latest_values_cache_size() as u64, + latest_values_max_block_lag: 20, // reasonable default }; let max_vm_concurrency = self.config.optional.vm_concurrency_limit; let tx_sender_layer = TxSenderLayer::new( @@ -426,6 +467,10 @@ impl ExternalNodeBuilder { response_body_size_limit: Some(self.config.optional.max_response_body_size()), with_extended_tracing: self.config.optional.extended_rpc_tracing, pruning_info_refresh_interval: Some(pruning_info_refresh_interval), + bridge_addresses_refresh_interval: self + .config + .optional + .bridge_addresses_refresh_interval(), polling_interval: Some(self.config.optional.polling_interval()), websocket_requests_per_minute_limit: None, // To be set by WS server layer method if required. replication_lag_limit: None, // TODO: Support replication lag limit @@ -586,11 +631,11 @@ impl ExternalNodeBuilder { self = self.add_metadata_calculator_layer(with_tree_api)?; } Component::TreeApi => { - anyhow::ensure!( - components.contains(&Component::Tree), - "Merkle tree API cannot be started without a tree component" - ); - // Do nothing, will be handled by the `Tree` component. + if components.contains(&Component::Tree) { + // Do nothing, will be handled by the `Tree` component. 
+ } else { + self = self.add_isolated_tree_api_layer()?; + } } Component::TreeFetcher => { self = self.add_tree_data_fetcher_layer()?; diff --git a/core/bin/external_node/src/tests/mod.rs b/core/bin/external_node/src/tests/mod.rs index efd76d4fa42..2155de7c020 100644 --- a/core/bin/external_node/src/tests/mod.rs +++ b/core/bin/external_node/src/tests/mod.rs @@ -17,15 +17,23 @@ mod utils; const SHUTDOWN_TIMEOUT: Duration = Duration::from_secs(10); const POLL_INTERVAL: Duration = Duration::from_millis(100); -#[test_casing(3, ["all", "core", "api"])] +#[test_casing(4, ["all", "core", "api", "core,tree_api"])] #[tokio::test] #[tracing::instrument] // Add args to the test logs async fn external_node_basics(components_str: &'static str) { let _guard = zksync_vlog::ObservabilityBuilder::new().try_build().ok(); // Enable logging to simplify debugging - let (env, env_handles) = utils::TestEnvironment::with_genesis_block(components_str).await; - let expected_health_components = utils::expected_health_components(&env.components); + let mut expected_health_components = utils::expected_health_components(&env.components); + let expected_shutdown_components = expected_health_components.clone(); + let has_core_or_api = env.components.0.iter().any(|component| { + [Component::Core, Component::HttpApi, Component::WsApi].contains(component) + }); + if has_core_or_api { + // The `sync_state` component doesn't signal its shutdown, but should be present in the list of components + expected_health_components.push("sync_state"); + } + let l2_client = utils::mock_l2_client(&env); let eth_client = utils::mock_eth_client(env.config.remote.user_facing_diamond_proxy); @@ -84,7 +92,7 @@ async fn external_node_basics(components_str: &'static str) { let health_data = app_health.check_health().await; tracing::info!(?health_data, "final health data"); assert_matches!(health_data.inner().status(), HealthStatus::ShutDown); - for name in expected_health_components { + for name in expected_shutdown_components { let component_health = &health_data.components()[name]; assert_matches!(component_health.status(), HealthStatus::ShutDown); } @@ -162,40 +170,3 @@ async fn running_tree_without_core_is_not_allowed() { err ); } - -#[tokio::test] -async fn running_tree_api_without_tree_is_not_allowed() { - let _guard = zksync_vlog::ObservabilityBuilder::new().try_build().ok(); // Enable logging to simplify debugging - let (env, _env_handles) = utils::TestEnvironment::with_genesis_block("core,tree_api").await; - - let l2_client = utils::mock_l2_client(&env); - let eth_client = utils::mock_eth_client(env.config.remote.user_facing_diamond_proxy); - - let node_handle = tokio::task::spawn_blocking(move || { - std::thread::spawn(move || { - let mut node = ExternalNodeBuilder::new(env.config)?; - inject_test_layers( - &mut node, - env.sigint_receiver, - env.app_health_sender, - eth_client, - l2_client, - ); - - // We're only interested in the error, so we drop the result. - node.build(env.components.0.into_iter().collect()).map(drop) - }) - .join() - .unwrap() - }); - - // Check that we cannot build the node without the core component. 
- let result = node_handle.await.expect("Building the node panicked"); - let err = result.expect_err("Building the node with tree api but without tree should fail"); - assert!( - err.to_string() - .contains("Merkle tree API cannot be started without a tree component"), - "Unexpected errror: {}", - err - ); -} diff --git a/core/bin/genesis_generator/src/main.rs b/core/bin/genesis_generator/src/main.rs index 4f8200b3af7..2a96cdc6c6c 100644 --- a/core/bin/genesis_generator/src/main.rs +++ b/core/bin/genesis_generator/src/main.rs @@ -87,6 +87,7 @@ async fn generate_new_config( genesis_commitment: None, bootloader_hash: Some(base_system_contracts.bootloader), default_aa_hash: Some(base_system_contracts.default_aa), + evm_emulator_hash: base_system_contracts.evm_emulator, ..genesis_config }; diff --git a/core/bin/system-constants-generator/src/utils.rs b/core/bin/system-constants-generator/src/utils.rs index 084c8037e2c..16167975cf0 100644 --- a/core/bin/system-constants-generator/src/utils.rs +++ b/core/bin/system-constants-generator/src/utils.rs @@ -3,13 +3,13 @@ use std::{cell::RefCell, rc::Rc}; use once_cell::sync::Lazy; use zksync_contracts::{ load_sys_contract, read_bootloader_code, read_bytecode_from_path, read_sys_contract_bytecode, - BaseSystemContracts, ContractLanguage, SystemContractCode, + read_yul_bytecode, BaseSystemContracts, ContractLanguage, SystemContractCode, }; use zksync_multivm::{ interface::{ storage::{InMemoryStorage, StorageView, WriteStorage}, tracer::VmExecutionStopReason, - L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmFactory, + InspectExecutionMode, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmFactory, VmInterface, VmInterfaceExt, }, tracers::dynamic::vm_1_5_0::DynTracer, @@ -71,12 +71,14 @@ pub static GAS_TEST_SYSTEM_CONTRACTS: Lazy = Lazy::new(|| { let bytecode = read_sys_contract_bytecode("", "DefaultAccount", ContractLanguage::Sol); let hash = hash_bytecode(&bytecode); + BaseSystemContracts { default_aa: SystemContractCode { code: bytes_to_be_words(bytecode), hash, }, bootloader, + evm_emulator: None, } }); @@ -169,9 +171,16 @@ pub(super) fn get_l1_txs(number_of_txs: usize) -> (Vec, Vec Vec { - read_bytecode_from_path(format!( + if let Some(contract) = read_bytecode_from_path(format!( "contracts/system-contracts/zkout/{test}.yul/contracts-preprocessed/bootloader/{test}.yul.json", - )) + )){ + contract + } else { + read_yul_bytecode( + "contracts/system-contracts/bootloader/tests/artifacts", + test + ) + } } fn default_l1_batch() -> L1BatchEnv { @@ -221,6 +230,7 @@ pub(super) fn execute_internal_transfer_test() -> u32 { let base_system_smart_contracts = BaseSystemContracts { bootloader, default_aa, + evm_emulator: None, }; let system_env = SystemEnv { @@ -231,7 +241,6 @@ pub(super) fn execute_internal_transfer_test() -> u32 { execution_mode: TxExecutionMode::VerifyExecute, default_validation_computational_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, chain_id: L2ChainId::default(), - pubdata_params: Default::default(), }; let eth_token_sys_contract = load_sys_contract("L2BaseToken"); @@ -262,8 +271,9 @@ pub(super) fn execute_internal_transfer_test() -> u32 { output: tracer_result.clone(), } .into_tracer_pointer(); + let mut vm: Vm<_, HistoryEnabled> = Vm::new(l1_batch, system_env, storage_view.to_rc_ptr()); - let result = vm.inspect(&mut tracer.into(), VmExecutionMode::Bootloader); + let result = vm.inspect(&mut tracer.into(), InspectExecutionMode::Bootloader); assert!(!result.result.is_failed(), "The internal call has reverted"); 
tracer_result.take() @@ -314,7 +324,6 @@ pub(super) fn execute_user_txs_in_test_gas_vm( execution_mode: TxExecutionMode::VerifyExecute, default_validation_computational_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, chain_id: L2ChainId::default(), - pubdata_params: Default::default(), }; let mut vm: Vm<_, HistoryEnabled> = @@ -323,7 +332,7 @@ pub(super) fn execute_user_txs_in_test_gas_vm( let mut total_gas_refunded = 0; for tx in txs { vm.push_transaction(tx); - let tx_execution_result = vm.execute(VmExecutionMode::OneTx); + let tx_execution_result = vm.execute(InspectExecutionMode::OneTx); total_gas_refunded += tx_execution_result.refunds.gas_refunded; if !accept_failure { @@ -335,7 +344,7 @@ pub(super) fn execute_user_txs_in_test_gas_vm( } } - let result = vm.execute(VmExecutionMode::Bootloader); + let result = vm.execute(InspectExecutionMode::Bootloader); let metrics = result.get_execution_metrics(None); VmSpentResourcesResult { diff --git a/core/bin/zksync_server/src/main.rs b/core/bin/zksync_server/src/main.rs index 22db202585d..72fdc8de5cd 100644 --- a/core/bin/zksync_server/src/main.rs +++ b/core/bin/zksync_server/src/main.rs @@ -49,7 +49,7 @@ struct Cli { /// Comma-separated list of components to launch. #[arg( long, - default_value = "api,tree,eth,state_keeper,housekeeper,tee_verifier_input_producer,commitment_generator,da_dispatcher,vm_runner_protective_reads" + default_value = "api,tree,eth,state_keeper,housekeeper,commitment_generator,da_dispatcher,vm_runner_protective_reads" )] components: ComponentsToRun, /// Path to the yaml config. If set, it will be used instead of env vars. diff --git a/core/bin/zksync_server/src/node_builder.rs b/core/bin/zksync_server/src/node_builder.rs index e684a72a45c..0ac50e624cd 100644 --- a/core/bin/zksync_server/src/node_builder.rs +++ b/core/bin/zksync_server/src/node_builder.rs @@ -4,8 +4,8 @@ use anyhow::Context; use zksync_config::{ configs::{ - da_client::DAClientConfig, eth_sender::PubdataSendingMode, gateway::GatewayChainConfig, - secrets::DataAvailabilitySecrets, wallets::Wallets, GeneralConfig, Secrets, + da_client::DAClientConfig, gateway::GatewayChainConfig, secrets::DataAvailabilitySecrets, + wallets::Wallets, GeneralConfig, Secrets, }, ContractsConfig, GenesisConfig, }; @@ -19,9 +19,7 @@ use zksync_node_framework::{ implementations::layers::{ base_token::{ base_token_ratio_persister::BaseTokenRatioPersisterLayer, - base_token_ratio_provider::BaseTokenRatioProviderLayer, - coingecko_client::CoingeckoClientLayer, forced_price_client::ForcedPriceClientLayer, - no_op_external_price_api_client::NoOpExternalPriceApiClientLayer, + base_token_ratio_provider::BaseTokenRatioProviderLayer, ExternalPriceApiLayer, }, circuit_breaker_checker::CircuitBreakerCheckerLayer, commitment_generator::CommitmentGeneratorLayer, @@ -57,7 +55,6 @@ use zksync_node_framework::{ main_batch_executor::MainBatchExecutorLayer, mempool_io::MempoolIOLayer, output_handler::OutputHandlerLayer, RocksdbStorageOptions, StateKeeperLayer, }, - tee_verifier_input_producer::TeeVerifierInputProducerLayer, vm_runner::{ bwip::BasicWitnessInputProducerLayer, playground::VmPlaygroundLayer, protective_reads::ProtectiveReadsWriterLayer, @@ -72,7 +69,9 @@ use zksync_node_framework::{ }, service::{ZkStackService, ZkStackServiceBuilder}, }; -use zksync_types::{settlement::SettlementMode, SHARED_BRIDGE_ETHER_TOKEN_ADDRESS}; +use zksync_types::{ + pubdata_da::PubdataSendingMode, settlement::SettlementMode, SHARED_BRIDGE_ETHER_TOKEN_ADDRESS, +}; use 
zksync_vlog::prometheus::PrometheusExporterConfig; /// Macro that looks into a path to fetch an optional config, @@ -192,7 +191,7 @@ impl MainNodeBuilder { .add_layer(BaseTokenRatioProviderLayer::new(base_token_adjuster_config)); } let state_keeper_config = try_load_config!(self.configs.state_keeper_config); - let l1_gas_layer = L1GasLayer::new(state_keeper_config); + let l1_gas_layer = L1GasLayer::new(&state_keeper_config); self.node.add_layer(l1_gas_layer); Ok(self) } @@ -247,9 +246,9 @@ impl MainNodeBuilder { self.genesis_config.l2_chain_id, sk_config.clone(), try_load_config!(self.configs.mempool_config), - self.contracts_config.clone(), - self.genesis_config.clone(), try_load_config!(wallets.state_keeper), + self.contracts_config.l2_da_validator_addr, + self.genesis_config.l1_batch_commit_data_generator_mode, ); let db_config = try_load_config!(self.configs.db_config); let experimental_vm_config = self @@ -296,6 +295,7 @@ impl MainNodeBuilder { self.node.add_layer(ProofDataHandlerLayer::new( try_load_config!(self.configs.proof_data_handler_config), self.genesis_config.l1_batch_commit_data_generator_mode, + self.genesis_config.l2_chain_id, )); Ok(self) } @@ -313,6 +313,7 @@ impl MainNodeBuilder { factory_deps_cache_size: rpc_config.factory_deps_cache_size() as u64, initial_writes_cache_size: rpc_config.initial_writes_cache_size() as u64, latest_values_cache_size: rpc_config.latest_values_cache_size() as u64, + latest_values_max_block_lag: rpc_config.latest_values_max_block_lag(), }; // On main node we always use master pool sink. @@ -500,14 +501,6 @@ impl MainNodeBuilder { Ok(self) } - fn add_tee_verifier_input_producer_layer(mut self) -> anyhow::Result { - self.node.add_layer(TeeVerifierInputProducerLayer::new( - self.genesis_config.l2_chain_id, - )); - - Ok(self) - } - fn add_da_client_layer(mut self) -> anyhow::Result { let Some(da_client_config) = self.configs.da_client_config.clone() else { tracing::warn!("No config for DA client, using the NoDA client"); @@ -563,24 +556,8 @@ impl MainNodeBuilder { fn add_external_api_client_layer(mut self) -> anyhow::Result { let config = try_load_config!(self.configs.external_price_api_client_config); - match config.source.as_str() { - CoingeckoClientLayer::CLIENT_NAME => { - self.node.add_layer(CoingeckoClientLayer::new(config)); - } - NoOpExternalPriceApiClientLayer::CLIENT_NAME => { - self.node.add_layer(NoOpExternalPriceApiClientLayer); - } - ForcedPriceClientLayer::CLIENT_NAME => { - self.node.add_layer(ForcedPriceClientLayer::new(config)); - } - _ => { - anyhow::bail!( - "Unknown external price API client source: {}", - config.source - ); - } - } - + self.node + .add_layer(ExternalPriceApiLayer::try_from(config)?); Ok(self) } @@ -750,9 +727,6 @@ impl MainNodeBuilder { Component::EthTxManager => { self = self.add_eth_tx_manager_layer()?; } - Component::TeeVerifierInputProducer => { - self = self.add_tee_verifier_input_producer_layer()?; - } Component::Housekeeper => { self = self .add_house_keeper_layer()? 
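The `ExternalPriceApiLayer::try_from(config)?` call above replaces the open-coded match over client sources that was deleted from `add_external_api_client_layer`. A minimal sketch of that pattern, folding the dispatch into a `TryFrom` impl; `PriceApiConfig` and `PriceApiLayer` are invented stand-ins, and the source strings here may not match the real client names:

// Sketch of per-source dispatch moved into TryFrom; all names are stand-ins.
struct PriceApiConfig {
    source: String,
}

#[derive(Debug)]
enum PriceApiLayer {
    Coingecko,
    Forced,
    NoOp,
}

impl TryFrom<PriceApiConfig> for PriceApiLayer {
    type Error = String;

    fn try_from(config: PriceApiConfig) -> Result<Self, Self::Error> {
        // Mirrors the removed match arms: known sources map to a layer,
        // unknown sources become an error at the conversion boundary.
        match config.source.as_str() {
            "coingecko" => Ok(Self::Coingecko),
            "forced" => Ok(Self::Forced),
            "no-op" => Ok(Self::NoOp),
            other => Err(format!("unknown external price API client source: {other}")),
        }
    }
}

fn main() {
    let layer = PriceApiLayer::try_from(PriceApiConfig { source: "forced".into() });
    assert!(matches!(layer, Ok(PriceApiLayer::Forced)));
    assert!(PriceApiLayer::try_from(PriceApiConfig { source: "bogus".into() }).is_err());
}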
diff --git a/core/bin/zksync_tee_prover/Cargo.toml index 85908eebeaa..b853da348ee 100644 --- a/core/bin/zksync_tee_prover/Cargo.toml +++ b/core/bin/zksync_tee_prover/Cargo.toml @@ -15,7 +15,7 @@ publish = false anyhow.workspace = true async-trait.workspace = true envy.workspace = true -reqwest.workspace = true +reqwest = { workspace = true, features = ["zstd"] } secp256k1 = { workspace = true, features = ["serde"] } serde = { workspace = true, features = ["derive"] } thiserror.workspace = true diff --git a/core/bin/zksync_tee_prover/src/api_client.rs index 13fbc1ba886..ffc2839b8d3 100644 --- a/core/bin/zksync_tee_prover/src/api_client.rs +++ b/core/bin/zksync_tee_prover/src/api_client.rs @@ -1,13 +1,10 @@ -use reqwest::Client; +use reqwest::{Client, Response, StatusCode}; use secp256k1::{ecdsa::Signature, PublicKey}; -use serde::{de::DeserializeOwned, Serialize}; +use serde::Serialize; use url::Url; use zksync_basic_types::H256; use zksync_prover_interface::{ - api::{ - RegisterTeeAttestationRequest, RegisterTeeAttestationResponse, SubmitTeeProofRequest, - SubmitTeeProofResponse, TeeProofGenerationDataRequest, TeeProofGenerationDataResponse, - }, + api::{RegisterTeeAttestationRequest, SubmitTeeProofRequest, TeeProofGenerationDataRequest}, inputs::TeeVerifierInput, outputs::L1BatchTeeProofForL1, }; @@ -31,10 +28,9 @@ impl TeeApiClient { } } - async fn post<Req, Resp, S>(&self, endpoint: S, request: Req) -> Result<Resp, TeeProverError> + async fn post<Req, S>(&self, endpoint: S, request: Req) -> Result<Response, TeeProverError> where Req: Serialize + std::fmt::Debug, - Resp: DeserializeOwned, S: AsRef<str>, { let url = self.api_base_url.join(endpoint.as_ref()).unwrap(); @@ -46,9 +42,7 @@ impl TeeApiClient { .json(&request) .send() .await? - .error_for_status()? - .json::<Resp>() - .await + .error_for_status() } /// Registers the attestation quote with the TEE prover interface API, effectively proving that @@ -63,8 +57,7 @@ impl TeeApiClient { attestation: attestation_quote_bytes, pubkey: public_key.serialize().to_vec(), }; - self.post::<_, RegisterTeeAttestationResponse, _>("/tee/register_attestation", request) - .await?; + self.post("/tee/register_attestation", request).await?; tracing::info!( "Attestation quote was successfully registered for the public key {}", public_key @@ -77,12 +70,17 @@ impl TeeApiClient { pub async fn get_job( &self, tee_type: TeeType, - ) -> Result<Option<Box<TeeVerifierInput>>, TeeProverError> { + ) -> Result<Option<TeeVerifierInput>, TeeProverError> { let request = TeeProofGenerationDataRequest { tee_type }; - let response = self - .post::<_, TeeProofGenerationDataResponse, _>("/tee/proof_inputs", request) - .await?; - Ok(response.0) + let response = self.post("/tee/proof_inputs", request).await?; + match response.status() { + StatusCode::OK => Ok(Some(response.json::<TeeVerifierInput>().await?)), + StatusCode::NO_CONTENT => Ok(None), + _ => response + .json::<Option<TeeVerifierInput>>() + .await + .map_err(TeeProverError::Request), + } } /// Submits the successfully verified proof to the TEE prover interface API.
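The reworked client above returns the raw `reqwest::Response` from `post`, so `get_job` can branch on the status code: 200 OK carries a proof input, 204 No Content means the queue is empty, and anything else surfaces as an error. A self-contained sketch of that decision table, using plain `u16` codes instead of `reqwest::StatusCode`:

// Decision table for /tee/proof_inputs responses; a simplified stand-in
// for the reqwest-based handling above.
#[derive(Debug, PartialEq)]
enum JobPoll {
    Job(&'static str), // stand-in for the deserialized TeeVerifierInput
    Empty,             // 204: nothing to prove right now
    Error(u16),        // any other status bubbles up as an error
}

fn classify(status: u16, body: &'static str) -> JobPoll {
    match status {
        200 => JobPoll::Job(body),
        204 => JobPoll::Empty,
        other => JobPoll::Error(other),
    }
}

fn main() {
    assert_eq!(classify(200, "proof input"), JobPoll::Job("proof input"));
    assert_eq!(classify(204, ""), JobPoll::Empty);
    assert_eq!(classify(503, ""), JobPoll::Error(503));
}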
@@ -101,7 +99,7 @@ impl TeeApiClient { tee_type, })); let observer = METRICS.proof_submitting_time.start(); - self.post::<_, SubmitTeeProofResponse, _>( + self.post( format!("/tee/submit_proofs/{batch_number}").as_str(), request, ) diff --git a/core/bin/zksync_tee_prover/src/main.rs b/core/bin/zksync_tee_prover/src/main.rs index 70c6f888185..aa0881011da 100644 --- a/core/bin/zksync_tee_prover/src/main.rs +++ b/core/bin/zksync_tee_prover/src/main.rs @@ -45,11 +45,12 @@ fn main() -> anyhow::Result<()> { .add_layer(SigintHandlerLayer) .add_layer(TeeProverLayer::new(tee_prover_config)); - if let Some(gateway) = prometheus_config.gateway_endpoint() { - let exporter_config = - PrometheusExporterConfig::push(gateway, prometheus_config.push_interval()); - builder.add_layer(PrometheusExporterLayer(exporter_config)); - } + let exporter_config = if let Some(gateway) = prometheus_config.gateway_endpoint() { + PrometheusExporterConfig::push(gateway, prometheus_config.push_interval()) + } else { + PrometheusExporterConfig::pull(prometheus_config.listener_port) + }; + builder.add_layer(PrometheusExporterLayer(exporter_config)); builder.build().run(observability_guard)?; Ok(()) diff --git a/core/bin/zksync_tee_prover/src/metrics.rs b/core/bin/zksync_tee_prover/src/metrics.rs index 9f535967f79..769a8bbc7e0 100644 --- a/core/bin/zksync_tee_prover/src/metrics.rs +++ b/core/bin/zksync_tee_prover/src/metrics.rs @@ -2,7 +2,7 @@ use std::time::Duration; -use vise::{Buckets, Gauge, Histogram, Metrics, Unit}; +use vise::{Buckets, Counter, Gauge, Histogram, Metrics, Unit}; #[derive(Debug, Metrics)] #[metrics(prefix = "tee_prover")] @@ -13,7 +13,7 @@ pub(crate) struct TeeProverMetrics { pub proof_generation_time: Histogram, #[metrics(buckets = Buckets::LATENCIES, unit = Unit::Seconds)] pub proof_submitting_time: Histogram, - pub network_errors_counter: Gauge, + pub network_errors_counter: Counter, pub last_batch_number_processed: Gauge, } diff --git a/core/bin/zksync_tee_prover/src/tee_prover.rs b/core/bin/zksync_tee_prover/src/tee_prover.rs index 1511f0c88e3..5d22d1e7c63 100644 --- a/core/bin/zksync_tee_prover/src/tee_prover.rs +++ b/core/bin/zksync_tee_prover/src/tee_prover.rs @@ -90,9 +90,9 @@ impl TeeProver { } async fn step(&self, public_key: &PublicKey) -> Result, TeeProverError> { - match self.api_client.get_job(self.config.tee_type).await? 
{ - Some(job) => { - let (signature, batch_number, root_hash) = self.verify(*job)?; + match self.api_client.get_job(self.config.tee_type).await { + Ok(Some(job)) => { + let (signature, batch_number, root_hash) = self.verify(job)?; self.api_client .submit_proof( batch_number, @@ -104,10 +104,11 @@ impl TeeProver { .await?; Ok(Some(batch_number)) } - None => { + Ok(None) => { tracing::trace!("There are currently no pending batches to be proven"); Ok(None) } + Err(err) => Err(err), } } } @@ -154,7 +155,7 @@ impl Task for TeeProver { } } Err(err) => { - METRICS.network_errors_counter.inc_by(1); + METRICS.network_errors_counter.inc(); if !err.is_retriable() || retries > config.max_retries { return Err(err.into()); } diff --git a/core/lib/basic_types/src/api_key.rs b/core/lib/basic_types/src/api_key.rs new file mode 100644 index 00000000000..eadf4e9051b --- /dev/null +++ b/core/lib/basic_types/src/api_key.rs @@ -0,0 +1,20 @@ +use std::str::FromStr; + +use secrecy::{ExposeSecret, Secret}; + +#[derive(Debug, Clone)] +pub struct APIKey(pub Secret); + +impl PartialEq for APIKey { + fn eq(&self, other: &Self) -> bool { + self.0.expose_secret().eq(other.0.expose_secret()) + } +} + +impl FromStr for APIKey { + type Err = anyhow::Error; + + fn from_str(s: &str) -> Result { + Ok(APIKey(s.parse()?)) + } +} diff --git a/core/lib/basic_types/src/commitment.rs b/core/lib/basic_types/src/commitment.rs index 56d36b22aff..0eed46aad78 100644 --- a/core/lib/basic_types/src/commitment.rs +++ b/core/lib/basic_types/src/commitment.rs @@ -1,13 +1,12 @@ use std::str::FromStr; -use ethabi::Address; use serde::{Deserialize, Serialize}; use strum::{Display, EnumIter}; use crate::{ ethabi, web3::contract::{Detokenize, Error as ContractError}, - U256, + Address, U256, }; #[derive(Debug, Clone, Copy, PartialEq, Eq, Default, Serialize, Deserialize, EnumIter, Display)] @@ -62,6 +61,5 @@ impl FromStr for L1BatchCommitmentMode { #[derive(Default, Copy, Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct PubdataParams { pub l2_da_validator_address: Address, - // TOODO: maybe rename / use new type. pub pubdata_type: L1BatchCommitmentMode, } diff --git a/core/lib/basic_types/src/lib.rs b/core/lib/basic_types/src/lib.rs index b94f7fbcd27..d1180048efb 100644 --- a/core/lib/basic_types/src/lib.rs +++ b/core/lib/basic_types/src/lib.rs @@ -13,6 +13,7 @@ use std::{ str::FromStr, }; +use anyhow::Context as _; pub use ethabi::{ self, ethereum_types::{ @@ -23,11 +24,13 @@ use serde::{de, Deserialize, Deserializer, Serialize}; #[macro_use] mod macros; +pub mod api_key; pub mod basic_fri_types; pub mod commitment; pub mod network; pub mod protocol_version; pub mod prover_dal; +pub mod pubdata_da; pub mod seed_phrase; pub mod settlement; pub mod tee_types; @@ -35,6 +38,21 @@ pub mod url; pub mod vm; pub mod web3; +/// Parses H256 from a slice of bytes. +pub fn parse_h256(bytes: &[u8]) -> anyhow::Result { + Ok(<[u8; 32]>::try_from(bytes).context("invalid size")?.into()) +} + +/// Parses H256 from an optional slice of bytes. +pub fn parse_h256_opt(bytes: Option<&[u8]>) -> anyhow::Result { + parse_h256(bytes.context("missing data")?) +} + +/// Parses H160 from a slice of bytes. +pub fn parse_h160(bytes: &[u8]) -> anyhow::Result { + Ok(<[u8; 20]>::try_from(bytes).context("invalid size")?.into()) +} + /// Account place in the global state tree is uniquely identified by its address. /// Binary this type is represented by 160 bit big-endian representation of account address. 
#[derive(Debug, Clone, Copy, Eq, PartialEq, Serialize, Deserialize, Hash, Ord, PartialOrd)] diff --git a/core/lib/basic_types/src/protocol_version.rs b/core/lib/basic_types/src/protocol_version.rs index 132b78b51b5..88513360916 100644 --- a/core/lib/basic_types/src/protocol_version.rs +++ b/core/lib/basic_types/src/protocol_version.rs @@ -69,15 +69,17 @@ pub enum ProtocolVersionId { Version24, Version25, Version26, + Version27, + Version28, } impl ProtocolVersionId { pub const fn latest() -> Self { - Self::Version25 + Self::Version27 } pub const fn next() -> Self { - Self::Version26 + Self::Version28 } pub fn try_from_packed_semver(packed_semver: U256) -> Result { @@ -120,8 +122,10 @@ impl ProtocolVersionId { ProtocolVersionId::Version22 => VmVersion::Vm1_4_2, ProtocolVersionId::Version23 => VmVersion::Vm1_5_0SmallBootloaderMemory, ProtocolVersionId::Version24 => VmVersion::Vm1_5_0IncreasedBootloaderMemory, - ProtocolVersionId::Version25 => VmVersion::VmGateway, - ProtocolVersionId::Version26 => VmVersion::VmGateway, + ProtocolVersionId::Version25 => VmVersion::Vm1_5_0IncreasedBootloaderMemory, + ProtocolVersionId::Version26 => VmVersion::Vm1_5_0IncreasedBootloaderMemory, + ProtocolVersionId::Version27 => VmVersion::VmGateway, + ProtocolVersionId::Version28 => VmVersion::VmGateway, } } @@ -140,7 +144,7 @@ impl ProtocolVersionId { } pub fn is_pre_gateway(&self) -> bool { - self <= &Self::Version24 + self <= &Self::Version26 } pub fn is_1_4_0(&self) -> bool { @@ -280,8 +284,10 @@ impl From for VmVersion { ProtocolVersionId::Version22 => VmVersion::Vm1_4_2, ProtocolVersionId::Version23 => VmVersion::Vm1_5_0SmallBootloaderMemory, ProtocolVersionId::Version24 => VmVersion::Vm1_5_0IncreasedBootloaderMemory, - ProtocolVersionId::Version25 => VmVersion::VmGateway, - ProtocolVersionId::Version26 => VmVersion::VmGateway, + ProtocolVersionId::Version25 => VmVersion::Vm1_5_0IncreasedBootloaderMemory, + ProtocolVersionId::Version26 => VmVersion::Vm1_5_0IncreasedBootloaderMemory, + ProtocolVersionId::Version27 => VmVersion::VmGateway, + ProtocolVersionId::Version28 => VmVersion::VmGateway, } } } diff --git a/core/lib/types/src/pubdata_da.rs b/core/lib/basic_types/src/pubdata_da.rs similarity index 54% rename from core/lib/types/src/pubdata_da.rs rename to core/lib/basic_types/src/pubdata_da.rs index bc7dc55e53d..3f042da98ac 100644 --- a/core/lib/types/src/pubdata_da.rs +++ b/core/lib/basic_types/src/pubdata_da.rs @@ -1,15 +1,17 @@ +//! Types related to data availability. + use chrono::{DateTime, Utc}; use num_enum::TryFromPrimitive; use serde::{Deserialize, Serialize}; -use zksync_basic_types::L1BatchNumber; -use zksync_config::configs::eth_sender::PubdataSendingMode; + +use crate::L1BatchNumber; /// Enum holding the current values used for DA Layers. #[repr(u8)] -#[derive(Debug, Clone, Copy, Deserialize, PartialEq, Serialize)] -#[derive(TryFromPrimitive)] -pub enum PubdataDA { +#[derive(Debug, Clone, Copy, Default, PartialEq, Deserialize, Serialize, TryFromPrimitive)] +pub enum PubdataSendingMode { /// Pubdata is sent to the L1 as a tx calldata. + #[default] Calldata = 0, /// Pubdata is sent to L1 as EIP-4844 blobs. 
Blobs, @@ -19,17 +21,6 @@ pub enum PubdataDA { RelayedL2Calldata, } -impl From<PubdataSendingMode> for PubdataDA { - fn from(value: PubdataSendingMode) -> Self { - match value { - PubdataSendingMode::Calldata => PubdataDA::Calldata, - PubdataSendingMode::Blobs => PubdataDA::Blobs, - PubdataSendingMode::Custom => PubdataDA::Custom, - PubdataSendingMode::RelayedL2Calldata => PubdataDA::RelayedL2Calldata, - } - } -} - /// Represents a blob in the data availability layer. #[derive(Debug, Clone)] pub struct DataAvailabilityBlob { diff --git a/core/lib/basic_types/src/web3/mod.rs index ecbe73f785b..aa7c4967033 100644 --- a/core/lib/basic_types/src/web3/mod.rs +++ b/core/lib/basic_types/src/web3/mod.rs @@ -21,6 +21,35 @@ mod tests; pub type Index = U64; +/// Number that can be either hex-encoded or decimal. +#[derive(Debug, Clone, Copy, Serialize, Deserialize)] +#[serde(untagged)] +pub enum U64Number { + Hex(U64), + Number(u64), +} + +impl From<U64Number> for u64 { + fn from(value: U64Number) -> Self { + match value { + U64Number::Hex(number) => number.as_u64(), + U64Number::Number(number) => number, + } + } +} + +impl From<u64> for U64Number { + fn from(value: u64) -> Self { + Self::Number(value) + } +} + +impl From<U64> for U64Number { + fn from(value: U64) -> Self { + Self::Hex(value) + } +} + // `Signature`, `keccak256`: from `web3::signing` /// A struct that represents the components of a secp256k1 signature. diff --git a/core/lib/basic_types/src/web3/tests.rs index 7f85bf12eb8..70805ab8b39 100644 --- a/core/lib/basic_types/src/web3/tests.rs +++ b/core/lib/basic_types/src/web3/tests.rs @@ -128,3 +128,13 @@ fn test_bytes_serde_json() { let decoded: Bytes = serde_json::from_str(&encoded).unwrap(); assert_eq!(original, decoded); } + +#[test] +fn deserializing_u64_number() { + let number: U64Number = serde_json::from_value(serde_json::json!(123)).unwrap(); + assert_eq!(u64::from(number), 123); + let number: U64Number = serde_json::from_value(serde_json::json!("0x123")).unwrap(); + assert_eq!(u64::from(number), 0x123); + let number: U64Number = serde_json::from_value(serde_json::json!("123")).unwrap(); + assert_eq!(u64::from(number), 0x123); +} diff --git a/core/lib/config/Cargo.toml index d1ab5ce8438..af39e5159ba 100644 --- a/core/lib/config/Cargo.toml +++ b/core/lib/config/Cargo.toml @@ -23,6 +23,10 @@ anyhow.workspace = true rand.workspace = true secrecy.workspace = true serde = { workspace = true, features = ["derive"] } +time = { workspace = true, features = ["serde-human-readable"] } +strum.workspace = true +strum_macros.workspace = true +vise.workspace = true [dev-dependencies] serde_json.workspace = true diff --git a/core/lib/config/src/configs/api.rs index dab4c4fa037..21cf44cc073 100644 --- a/core/lib/config/src/configs/api.rs +++ b/core/lib/config/src/configs/api.rs @@ -189,6 +189,10 @@ pub struct Web3JsonRpcConfig { /// Latest values cache size in MiBs. The default value is 128 MiB. If set to 0, the latest /// values cache will be disabled. pub latest_values_cache_size_mb: Option, + /// Maximum lag in the number of blocks for the latest values cache after which the cache is reset. Greater values + /// lead to increased cache update latency, i.e., fewer storage queries being processed by the cache. OTOH, smaller values + /// can lead to spurious resets when Postgres lags for whatever reason (e.g., when sealing L1 batches).
+ pub latest_values_max_block_lag: Option, /// Limit for fee history block range. pub fee_history_limit: Option, /// Maximum number of requests in a single batch JSON RPC request. Default is 500. @@ -246,20 +250,21 @@ impl Web3JsonRpcConfig { estimate_gas_acceptable_overestimation: 1000, estimate_gas_optimize_search: false, max_tx_size: 1000000, - vm_execution_cache_misses_limit: Default::default(), - vm_concurrency_limit: Default::default(), - factory_deps_cache_size_mb: Default::default(), - initial_writes_cache_size_mb: Default::default(), - latest_values_cache_size_mb: Default::default(), - fee_history_limit: Default::default(), - max_batch_request_size: Default::default(), - max_response_body_size_mb: Default::default(), + vm_execution_cache_misses_limit: None, + vm_concurrency_limit: None, + factory_deps_cache_size_mb: None, + initial_writes_cache_size_mb: None, + latest_values_cache_size_mb: None, + latest_values_max_block_lag: None, + fee_history_limit: None, + max_batch_request_size: None, + max_response_body_size_mb: None, max_response_body_size_overrides_mb: MaxResponseSizeOverrides::empty(), - websocket_requests_per_minute_limit: Default::default(), - mempool_cache_update_interval: Default::default(), - mempool_cache_size: Default::default(), + websocket_requests_per_minute_limit: None, + mempool_cache_update_interval: None, + mempool_cache_size: None, tree_api_url: None, - whitelisted_tokens_for_aa: Default::default(), + whitelisted_tokens_for_aa: vec![], api_namespaces: None, extended_api_tracing: false, settlement_layer_url: None, @@ -312,6 +317,11 @@ impl Web3JsonRpcConfig { self.latest_values_cache_size_mb.unwrap_or(128) * super::BYTES_IN_MEGABYTE } + /// Returns the maximum lag in the number of blocks for the latest values cache. + pub fn latest_values_max_block_lag(&self) -> u32 { + self.latest_values_max_block_lag.map_or(20, NonZeroU32::get) + } + pub fn fee_history_limit(&self) -> u64 { self.fee_history_limit.unwrap_or(1024) } diff --git a/core/lib/config/src/configs/chain.rs b/core/lib/config/src/configs/chain.rs index 7e33f6964bb..c117064dbc4 100644 --- a/core/lib/config/src/configs/chain.rs +++ b/core/lib/config/src/configs/chain.rs @@ -138,6 +138,8 @@ pub struct StateKeeperConfig { pub bootloader_hash: Option, #[deprecated(note = "Use GenesisConfig::default_aa_hash instead")] pub default_aa_hash: Option, + #[deprecated(note = "Use GenesisConfig::evm_emulator_hash instead")] + pub evm_emulator_hash: Option, #[deprecated(note = "Use GenesisConfig::l1_batch_commit_data_generator_mode instead")] #[serde(default)] pub l1_batch_commit_data_generator_mode: L1BatchCommitmentMode, @@ -178,6 +180,7 @@ impl StateKeeperConfig { protective_reads_persistence_enabled: true, bootloader_hash: None, default_aa_hash: None, + evm_emulator_hash: None, l1_batch_commit_data_generator_mode: L1BatchCommitmentMode::Rollup, } } diff --git a/core/lib/config/src/configs/consensus.rs b/core/lib/config/src/configs/consensus.rs index 918d8f4adab..7f5a0f56aa1 100644 --- a/core/lib/config/src/configs/consensus.rs +++ b/core/lib/config/src/configs/consensus.rs @@ -115,6 +115,7 @@ impl RpcConfig { /// Config (shared between main node and external node). #[derive(Clone, Debug, PartialEq)] pub struct ConsensusConfig { + pub port: Option, /// Local socket address to listen for the incoming connections. 
pub server_addr: std::net::SocketAddr, /// Public address of this node (should forward to `server_addr`) diff --git a/core/lib/config/src/configs/contracts.rs index 24f50243758..1d49a09d213 100644 --- a/core/lib/config/src/configs/contracts.rs +++ b/core/lib/config/src/configs/contracts.rs @@ -45,16 +45,12 @@ pub struct ContractsConfig { pub ecosystem_contracts: Option, // Used by the RPC API and by the node builder in wiring the BaseTokenRatioProvider layer. pub base_token_addr: Option<Address>, - // FIXME: maybe refactor pub user_facing_bridgehub_proxy_addr: Option<Address>, pub user_facing_diamond_proxy_addr: Option<Address>, - - pub l2_da_validator_addr: Option<Address>, - pub chain_admin_addr: Option<Address>, - pub settlement_layer: Option, + pub l2_da_validator_addr: Option<Address>
, } impl ContractsConfig { @@ -68,7 +64,7 @@ impl ContractsConfig { l2_erc20_bridge_addr: Some(Address::repeat_byte(0x0c)), l1_shared_bridge_proxy_addr: Some(Address::repeat_byte(0x0e)), l2_shared_bridge_addr: Some(Address::repeat_byte(0x0f)), - l2_legacy_shared_bridge_addr: Some(Address::repeat_byte(0xff)), + l2_legacy_shared_bridge_addr: Some(Address::repeat_byte(0x19)), l1_weth_bridge_proxy_addr: Some(Address::repeat_byte(0x0b)), l2_weth_bridge_addr: Some(Address::repeat_byte(0x0c)), l2_testnet_paymaster_addr: Some(Address::repeat_byte(0x11)), @@ -79,8 +75,8 @@ impl ContractsConfig { user_facing_bridgehub_proxy_addr: Some(Address::repeat_byte(0x15)), user_facing_diamond_proxy_addr: Some(Address::repeat_byte(0x16)), chain_admin_addr: Some(Address::repeat_byte(0x18)), - l2_da_validator_addr: Some(Address::repeat_byte(0x19)), settlement_layer: Some(0), + l2_da_validator_addr: Some(Address::repeat_byte(0x1a)), } } } diff --git a/core/lib/config/src/configs/da_client/avail.rs b/core/lib/config/src/configs/da_client/avail.rs index 590dc5fef18..b8e9db0f393 100644 --- a/core/lib/config/src/configs/da_client/avail.rs +++ b/core/lib/config/src/configs/da_client/avail.rs @@ -1,16 +1,38 @@ use serde::Deserialize; -use zksync_basic_types::seed_phrase::SeedPhrase; +use zksync_basic_types::{api_key::APIKey, seed_phrase::SeedPhrase}; + +pub const AVAIL_GAS_RELAY_CLIENT_NAME: &str = "GasRelay"; +pub const AVAIL_FULL_CLIENT_NAME: &str = "FullClient"; + +#[derive(Clone, Debug, PartialEq, Deserialize)] +#[serde(tag = "avail_client")] +pub enum AvailClientConfig { + FullClient(AvailDefaultConfig), + GasRelay(AvailGasRelayConfig), +} #[derive(Clone, Debug, PartialEq, Deserialize)] pub struct AvailConfig { - pub api_node_url: String, pub bridge_api_url: String, - pub app_id: u32, pub timeout: usize, + #[serde(flatten)] + pub config: AvailClientConfig, +} + +#[derive(Clone, Debug, PartialEq, Deserialize)] +pub struct AvailDefaultConfig { + pub api_node_url: String, + pub app_id: u32, +} + +#[derive(Clone, Debug, PartialEq, Deserialize)] +pub struct AvailGasRelayConfig { + pub gas_relay_api_url: String, pub max_retries: usize, } #[derive(Clone, Debug, PartialEq)] pub struct AvailSecrets { pub seed_phrase: Option, + pub gas_relay_api_key: Option, } diff --git a/core/lib/config/src/configs/en_config.rs b/core/lib/config/src/configs/en_config.rs index 7f130e3539a..4cab47b0779 100644 --- a/core/lib/config/src/configs/en_config.rs +++ b/core/lib/config/src/configs/en_config.rs @@ -1,4 +1,4 @@ -use std::num::NonZeroUsize; +use std::num::{NonZeroU64, NonZeroUsize}; use serde::Deserialize; use zksync_basic_types::{ @@ -19,4 +19,5 @@ pub struct ENConfig { pub main_node_rate_limit_rps: Option, pub gateway_url: Option, + pub bridge_addresses_refresh_interval_sec: Option, } diff --git a/core/lib/config/src/configs/eth_sender.rs b/core/lib/config/src/configs/eth_sender.rs index 51f7736fbb7..ab12642c7ba 100644 --- a/core/lib/config/src/configs/eth_sender.rs +++ b/core/lib/config/src/configs/eth_sender.rs @@ -2,7 +2,7 @@ use std::time::Duration; use anyhow::Context as _; use serde::Deserialize; -use zksync_basic_types::{settlement::SettlementMode, H256}; +use zksync_basic_types::{pubdata_da::PubdataSendingMode, settlement::SettlementMode, H256}; use zksync_crypto_primitives::K256PrivateKey; use crate::EthWatchConfig; @@ -44,6 +44,7 @@ impl EthConfig { tx_aggregation_only_prove_and_execute: false, ignore_db_nonce: None, priority_tree_start_index: Some(0), + time_in_mempool_in_l1_blocks_cap: 1800, }), gas_adjuster: 
Some(GasAdjusterConfig { default_priority_fee_per_gas: 1000000000, @@ -81,15 +82,6 @@ pub enum ProofLoadingMode { FriProofFromGcs, } -#[derive(Debug, Deserialize, Clone, Copy, PartialEq, Default)] -pub enum PubdataSendingMode { - #[default] - Calldata, - Blobs, - Custom, - RelayedL2Calldata, -} - #[derive(Debug, Deserialize, Clone, PartialEq)] pub struct SenderConfig { pub aggregated_proof_sizes: Vec<usize>, @@ -129,11 +121,13 @@ pub struct SenderConfig { /// special mode specifically for gateway migration to decrease number of non-executed batches #[serde(default = "SenderConfig::default_tx_aggregation_only_prove_and_execute")] pub tx_aggregation_only_prove_and_execute: bool, - /// Used to ignore db nonce check for sender and only use the RPC one. pub ignore_db_nonce: Option<bool>, /// Index of the priority operation to start building the `PriorityMerkleTree` from. pub priority_tree_start_index: Option<usize>, + /// Cap on the time-in-mempool value used in price calculations + #[serde(default = "SenderConfig::default_time_in_mempool_in_l1_blocks_cap")] + pub time_in_mempool_in_l1_blocks_cap: u32, } impl SenderConfig { @@ -183,6 +177,13 @@ impl SenderConfig { const fn default_tx_aggregation_only_prove_and_execute() -> bool { false } + + pub const fn default_time_in_mempool_in_l1_blocks_cap() -> u32 { + let blocks_per_hour = 3600 / 12; + // We cap it at 6h to avoid nearly infinite values when a tx is stuck for a long time. + // 1.001 ^ 1800 ~= 6, so by default we cap the exponential price formula at roughly median * 6. + blocks_per_hour * 6 + } } #[derive(Debug, Deserialize, Copy, Clone, PartialEq, Default)] @@ -192,8 +193,10 @@ pub struct GasAdjusterConfig { /// Number of blocks collected by GasAdjuster from which base_fee median is taken pub max_base_fee_samples: usize, /// Parameter of the transaction base_fee_per_gas pricing formula + #[serde(default = "GasAdjusterConfig::default_pricing_formula_parameter_a")] pub pricing_formula_parameter_a: f64, /// Parameter of the transaction base_fee_per_gas pricing formula + #[serde(default = "GasAdjusterConfig::default_pricing_formula_parameter_b")] pub pricing_formula_parameter_b: f64, /// Parameter by which the base fee will be multiplied for internal purposes pub internal_l1_pricing_multiplier: f64, @@ -240,4 +243,12 @@ impl GasAdjusterConfig { pub const fn default_internal_pubdata_pricing_multiplier() -> f64 { 1.0 } + + pub const fn default_pricing_formula_parameter_a() -> f64 { + 1.1 + } + + pub const fn default_pricing_formula_parameter_b() -> f64 { + 1.001 + } } diff --git a/core/lib/config/src/configs/genesis.rs index 6c4bacc3a6e..9e1ffbd87cb 100644 --- a/core/lib/config/src/configs/genesis.rs +++ b/core/lib/config/src/configs/genesis.rs @@ -17,6 +17,7 @@ pub struct GenesisConfig { pub genesis_commitment: Option<H256>, pub bootloader_hash: Option<H256>, pub default_aa_hash: Option<H256>, + pub evm_emulator_hash: Option<H256>, pub l1_chain_id: L1ChainId, pub sl_chain_id: Option, pub l2_chain_id: L2ChainId, @@ -49,6 +50,7 @@ impl GenesisConfig { genesis_commitment: Some(H256::repeat_byte(0x17)), bootloader_hash: Default::default(), default_aa_hash: Default::default(), + evm_emulator_hash: Default::default(), l1_chain_id: L1ChainId(9), sl_chain_id: None, protocol_version: Some(ProtocolSemanticVersion { diff --git a/core/lib/config/src/configs/mod.rs index 9ece81dc7cd..ac570589d9c 100644 --- a/core/lib/config/src/configs/mod.rs +++ b/core/lib/config/src/configs/mod.rs @@ -23,7 +23,7 @@ pub use self::{ genesis::GenesisConfig,
object_store::ObjectStoreConfig, observability::{ObservabilityConfig, OpentelemetryConfig}, - proof_data_handler::ProofDataHandlerConfig, + proof_data_handler::{ProofDataHandlerConfig, TeeConfig}, prover_job_monitor::ProverJobMonitorConfig, pruning::PruningConfig, secrets::{DatabaseSecrets, L1Secrets, Secrets}, @@ -62,6 +62,7 @@ pub mod house_keeper; pub mod object_store; pub mod observability; pub mod proof_data_handler; +pub mod prover_autoscaler; pub mod prover_job_monitor; pub mod pruning; pub mod secrets; diff --git a/core/lib/config/src/configs/proof_data_handler.rs index de7f6969b05..1094b1bb180 100644 --- a/core/lib/config/src/configs/proof_data_handler.rs +++ b/core/lib/config/src/configs/proof_data_handler.rs @@ -1,12 +1,43 @@ use std::time::Duration; use serde::Deserialize; +use zksync_basic_types::L1BatchNumber; + +#[derive(Debug, Deserialize, Clone, PartialEq)] +pub struct TeeConfig { + /// If true, TEE support is enabled. + pub tee_support: bool, + /// All batches before this one are considered to be processed. + pub first_tee_processed_batch: L1BatchNumber, +} + +impl Default for TeeConfig { + fn default() -> Self { + TeeConfig { + tee_support: Self::default_tee_support(), + first_tee_processed_batch: Self::default_first_tee_processed_batch(), + } + } +} + +impl TeeConfig { + pub fn default_tee_support() -> bool { + false + } + + pub fn default_first_tee_processed_batch() -> L1BatchNumber { + L1BatchNumber(0) + } +} #[derive(Debug, Deserialize, Clone, PartialEq)] pub struct ProofDataHandlerConfig { pub http_port: u16, pub proof_generation_timeout_in_secs: u16, - pub tee_support: bool, + #[serde(skip)] + // ^ Filled in separately in `Self::from_env()`. We cannot use `serde(flatten)` because it + // doesn't work with `envy`: https://github.com/softprops/envy/issues/26 + pub tee_config: TeeConfig, } impl ProofDataHandlerConfig { diff --git a/core/lib/config/src/configs/prover_autoscaler.rs new file mode 100644 index 00000000000..b24a1a26651 --- /dev/null +++ b/core/lib/config/src/configs/prover_autoscaler.rs @@ -0,0 +1,128 @@ +use std::collections::HashMap; + +use serde::Deserialize; +use strum::Display; +use strum_macros::EnumString; +use time::Duration; +use vise::EncodeLabelValue; + +use crate::configs::ObservabilityConfig; + +/// Config used for running ProverAutoscaler (both Scaler and Agent). +#[derive(Debug, Clone, PartialEq)] +pub struct ProverAutoscalerConfig { + /// Amount of time the ProverAutoscaler will wait for all its tasks to finish. + // TODO: find a way to use #[serde(with = "humantime_serde")] with time::Duration. + pub graceful_shutdown_timeout: Duration, + pub agent_config: Option<ProverAutoscalerAgentConfig>, + pub scaler_config: Option<ProverAutoscalerScalerConfig>, + pub observability: Option<ObservabilityConfig>, +} + +#[derive(Debug, Clone, PartialEq, Deserialize)] +pub struct ProverAutoscalerAgentConfig { + /// Port for prometheus metrics connection. + pub prometheus_port: u16, + /// HTTP port for global Scaler to connect to the Agent running in a cluster. + pub http_port: u16, + /// List of namespaces to watch. + #[serde(default = "ProverAutoscalerAgentConfig::default_namespaces")] + pub namespaces: Vec<String>, + /// Watched cluster name. Also can be set via flag. + pub cluster_name: Option<String>, + /// If dry-run is enabled, don't do any k8s updates, just report success.
+ #[serde(default = "ProverAutoscalerAgentConfig::default_dry_run")] + pub dry_run: bool, +} + +#[derive(Debug, Clone, PartialEq, Deserialize, Default)] +pub struct ProverAutoscalerScalerConfig { + /// Port for prometheus metrics connection. + pub prometheus_port: u16, + /// The interval between runs for global Scaler. + #[serde(default = "ProverAutoscalerScalerConfig::default_scaler_run_interval")] + pub scaler_run_interval: Duration, + /// URL to get queue reports from. + /// In production should be "http://prover-job-monitor.stage2.svc.cluster.local:3074/queue_report". + #[serde(default = "ProverAutoscalerScalerConfig::default_prover_job_monitor_url")] + pub prover_job_monitor_url: String, + /// List of ProverAutoscaler Agents to get cluster data from. + pub agents: Vec<String>, + /// Mapping of namespaces to protocol versions. + pub protocol_versions: HashMap<String, String>, + /// Default priorities determining which cluster to prefer when there is no other information. + pub cluster_priorities: HashMap<String, u32>, + /// Prover speed per GPU. Used to calculate the desired number of provers for a given queue size. + pub prover_speed: HashMap<Gpu, u32>, + /// Maximum number of provers which can be run per cluster/GPU. + pub max_provers: HashMap<String, HashMap<Gpu, u32>>, + /// Minimum number of provers per namespace. + pub min_provers: HashMap<String, u32>, + /// Duration after which a pending pod is considered long-pending. + #[serde(default = "ProverAutoscalerScalerConfig::default_long_pending_duration")] + pub long_pending_duration: Duration, +} + +#[derive( + Default, + Debug, + Display, + Hash, + PartialEq, + Eq, + Clone, + Copy, + Ord, + PartialOrd, + EnumString, + EncodeLabelValue, + Deserialize, +)] +pub enum Gpu { + #[default] + Unknown, + #[strum(ascii_case_insensitive)] + L4, + #[strum(ascii_case_insensitive)] + T4, + #[strum(ascii_case_insensitive)] + V100, + #[strum(ascii_case_insensitive)] + P100, + #[strum(ascii_case_insensitive)] + A100, +} + +impl ProverAutoscalerConfig { + /// Default graceful shutdown timeout -- 5 seconds + pub fn default_graceful_shutdown_timeout() -> Duration { + Duration::seconds(5) + } +} + +impl ProverAutoscalerAgentConfig { + pub fn default_namespaces() -> Vec<String> { + vec!["prover-blue".to_string(), "prover-red".to_string()] + } + + pub fn default_dry_run() -> bool { + true + } +} + +impl ProverAutoscalerScalerConfig { + /// Default scaler_run_interval -- 10s + pub fn default_scaler_run_interval() -> Duration { + Duration::seconds(10) + } + + /// Default prover_job_monitor_url -- cluster local URL + pub fn default_prover_job_monitor_url() -> String { + "http://localhost:3074/queue_report".to_string() + } + + /// Default long_pending_duration -- 10m + pub fn default_long_pending_duration() -> Duration { + Duration::minutes(10) + } +} diff --git a/core/lib/config/src/testonly.rs index 0dca7335d1b..9d72b4ab367 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -3,10 +3,12 @@ use std::num::NonZeroUsize; use rand::{distributions::Distribution, Rng}; use secrecy::Secret; use zksync_basic_types::{ + api_key::APIKey, basic_fri_types::CircuitIdRoundTuple, commitment::L1BatchCommitmentMode, network::Network, protocol_version::{ProtocolSemanticVersion, ProtocolVersionId, VersionPatch}, + pubdata_da::PubdataSendingMode, seed_phrase::SeedPhrase, vm::FastVmMode, L1BatchNumber, L1ChainId, L2ChainId, @@ -16,7 +18,11 @@ use zksync_crypto_primitives::K256PrivateKey; use crate::{ configs::{ - self, da_client::DAClientConfig::Avail, eth_sender::PubdataSendingMode, + self, + da_client::{
diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs
index 0dca7335d1b..9d72b4ab367 100644
--- a/core/lib/config/src/testonly.rs
+++ b/core/lib/config/src/testonly.rs
@@ -3,10 +3,12 @@ use std::num::NonZeroUsize;
use rand::{distributions::Distribution, Rng};
use secrecy::Secret;
use zksync_basic_types::{
+    api_key::APIKey,
    basic_fri_types::CircuitIdRoundTuple,
    commitment::L1BatchCommitmentMode,
    network::Network,
    protocol_version::{ProtocolSemanticVersion, ProtocolVersionId, VersionPatch},
+    pubdata_da::PubdataSendingMode,
    seed_phrase::SeedPhrase,
    vm::FastVmMode,
    L1BatchNumber, L1ChainId, L2ChainId,
@@ -16,7 +18,11 @@ use zksync_crypto_primitives::K256PrivateKey;

use crate::{
    configs::{
-        self, da_client::DAClientConfig::Avail, eth_sender::PubdataSendingMode,
+        self,
+        da_client::{
+            avail::{AvailClientConfig, AvailDefaultConfig},
+            DAClientConfig::Avail,
+        },
        external_price_api_client::ForcedPriceClientConfig,
    },
    AvailConfig,
@@ -86,6 +92,7 @@ impl Distribution for EncodeDist {
            factory_deps_cache_size_mb: self.sample(rng),
            initial_writes_cache_size_mb: self.sample(rng),
            latest_values_cache_size_mb: self.sample(rng),
+            latest_values_max_block_lag: self.sample(rng),
            fee_history_limit: self.sample(rng),
            max_batch_request_size: self.sample(rng),
            max_response_body_size_mb: self.sample(rng),
@@ -193,6 +200,7 @@ impl Distribution for EncodeDist {
            fee_account_addr: None,
            bootloader_hash: None,
            default_aa_hash: None,
+            evm_emulator_hash: None,
            l1_batch_commit_data_generator_mode: Default::default(),
        }
    }
@@ -263,10 +271,10 @@ impl Distribution for EncodeDist {
            ecosystem_contracts: self.sample(rng),
            user_facing_bridgehub_proxy_addr: rng.gen(),
            user_facing_diamond_proxy_addr: rng.gen(),
-            l2_da_validator_addr: rng.gen(),
            base_token_addr: self.sample_opt(|| rng.gen()),
            chain_admin_addr: self.sample_opt(|| rng.gen()),
            settlement_layer: self.sample_opt(|| rng.gen()),
+            l2_da_validator_addr: self.sample_opt(|| rng.gen()),
        }
    }
}
@@ -392,17 +400,6 @@ impl Distribution for EncodeDist {
    }
}

-impl Distribution<configs::eth_sender::PubdataSendingMode> for EncodeDist {
-    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> configs::eth_sender::PubdataSendingMode {
-        type T = configs::eth_sender::PubdataSendingMode;
-        match rng.gen_range(0..3) {
-            0 => T::Calldata,
-            1 => T::Blobs,
-            _ => T::Custom,
-        }
-    }
-}
-
impl Distribution<configs::eth_sender::SenderConfig> for EncodeDist {
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> configs::eth_sender::SenderConfig {
        configs::eth_sender::SenderConfig {
@@ -427,6 +424,7 @@ impl Distribution for EncodeDist {
            tx_aggregation_only_prove_and_execute: false,
            ignore_db_nonce: None,
            priority_tree_start_index: self.sample(rng),
+            time_in_mempool_in_l1_blocks_cap: self.sample(rng),
        }
    }
}
@@ -685,7 +683,10 @@ impl Distribution for EncodeDist {
        configs::ProofDataHandlerConfig {
            http_port: self.sample(rng),
            proof_generation_timeout_in_secs: self.sample(rng),
-            tee_support: self.sample(rng),
+            tee_config: configs::TeeConfig {
+                tee_support: self.sample(rng),
+                first_tee_processed_batch: L1BatchNumber(rng.gen()),
+            },
        }
    }
}
@@ -739,6 +740,7 @@ impl Distribution for EncodeDist {
            genesis_commitment: Some(rng.gen()),
            bootloader_hash: Some(rng.gen()),
            default_aa_hash: Some(rng.gen()),
+            evm_emulator_hash: Some(rng.gen()),
            fee_account: rng.gen(),
            l1_chain_id: L1ChainId(self.sample(rng)),
            sl_chain_id: None,
@@ -807,6 +809,7 @@ impl Distribution for EncodeDist {
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> configs::consensus::ConsensusConfig {
        use configs::consensus::{ConsensusConfig, Host, NodePublicKey};
        ConsensusConfig {
+            port: self.sample(rng),
            server_addr: self.sample(rng),
            public_addr: Host(self.sample(rng)),
            max_payload_size: self.sample(rng),
@@ -943,6 +946,7 @@ impl Distribution for EncodeDist {
            main_node_rate_limit_rps: self.sample_opt(|| rng.gen()),
            gateway_url: self
                .sample_opt(|| format!("localhost:{}", rng.gen::<u16>()).parse().unwrap()),
+            bridge_addresses_refresh_interval_sec: self.sample_opt(|| rng.gen()),
        }
    }
}
@@ -950,11 +954,12 @@ impl Distribution for EncodeDist {
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> configs::da_client::DAClientConfig {
        Avail(AvailConfig {
-            api_node_url: self.sample(rng),
            bridge_api_url: self.sample(rng),
-            app_id: self.sample(rng),
            timeout: self.sample(rng),
-            max_retries: self.sample(rng),
+            config: AvailClientConfig::FullClient(AvailDefaultConfig {
+                api_node_url: self.sample(rng),
+                app_id: self.sample(rng),
+            }),
        })
    }
}
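For context, these impls are consumed through rand's `Distribution` trait to produce randomized configs for encoding round-trip tests. A usage sketch (the helper name and deterministic seeding are assumptions, not code from this file):

use rand::{distributions::Distribution, rngs::StdRng, SeedableRng};

// Draw one randomized config value from any `Distribution` impl above;
// a fixed seed keeps such a test reproducible.
fn sample_config<T>(dist: &impl Distribution<T>) -> T {
    let mut rng = StdRng::seed_from_u64(42);
    dist.sample(&mut rng)
}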
@@ -963,6 +968,7 @@ impl Distribution for EncodeDist {
    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> configs::secrets::DataAvailabilitySecrets {
        configs::secrets::DataAvailabilitySecrets::Avail(configs::da_client::avail::AvailSecrets {
            seed_phrase: Some(SeedPhrase(Secret::new(self.sample(rng)))),
+            gas_relay_api_key: Some(APIKey(Secret::new(self.sample(rng)))),
        })
    }
}
diff --git a/core/lib/constants/src/contracts.rs b/core/lib/constants/src/contracts.rs
index 1518d2d9f77..f9138b2bbf1 100644
--- a/core/lib/constants/src/contracts.rs
+++ b/core/lib/constants/src/contracts.rs
@@ -130,6 +130,11 @@ pub const CODE_ORACLE_ADDRESS: Address = H160([
    0x00, 0x00, 0x80, 0x12,
]);

+pub const EVM_GAS_MANAGER_ADDRESS: Address = H160([
+    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+    0x00, 0x00, 0x80, 0x13,
+]);
+
/// Note, that the `Create2Factory` and higher are explicitly deployed on a non-system-contract address.
pub const CREATE2_FACTORY_ADDRESS: Address = H160([
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -140,18 +145,22 @@ pub const L2_GENESIS_UPGRADE_ADDRESS: Address = H160([
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x01, 0x00, 0x01,
]);
+
pub const L2_BRIDGEHUB_ADDRESS: Address = H160([
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x01, 0x00, 0x02,
]);
+
pub const L2_ASSET_ROUTER_ADDRESS: Address = H160([
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x01, 0x00, 0x03,
]);
+
pub const L2_NATIVE_TOKEN_VAULT_ADDRESS: Address = H160([
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x01, 0x00, 0x04,
]);
+
pub const L2_MESSAGE_ROOT_ADDRESS: Address = H160([
    0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
    0x00, 0x01, 0x00, 0x05,
diff --git a/core/lib/constants/src/lib.rs b/core/lib/constants/src/lib.rs
index 6aab79ad71f..30ae6a7b582 100644
--- a/core/lib/constants/src/lib.rs
+++ b/core/lib/constants/src/lib.rs
@@ -3,6 +3,7 @@ pub mod contracts;
pub mod crypto;
pub mod ethereum;
pub mod fees;
+pub mod message_root;
pub mod system_context;
pub mod system_logs;
pub mod trusted_slots;
diff --git a/core/lib/constants/src/message_root.rs b/core/lib/constants/src/message_root.rs
new file mode 100644
index 00000000000..a8f4a034fb9
--- /dev/null
+++ b/core/lib/constants/src/message_root.rs
@@ -0,0 +1,5 @@
+// Position of `FullTree::_height` in `MessageRoot`'s storage layout.
+pub const AGG_TREE_HEIGHT_KEY: usize = 3;
+
+// Position of `FullTree::nodes` in `MessageRoot`'s storage layout.
+pub const AGG_TREE_NODES_KEY: usize = 5;
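These constants are raw Solidity slot indices; a consumer still has to combine them with the `MessageRoot` address (`0x…10005` above) to obtain a storage key. An illustrative sketch, assuming the usual `zksync_types` primitives and import paths (not code from this PR):

use zksync_types::{AccountTreeId, Address, StorageKey, H256};

// Hypothetical helper: storage key of `FullTree::_height`, i.e. slot
// AGG_TREE_HEIGHT_KEY (= 3), under the MessageRoot contract account.
fn agg_tree_height_storage_key(message_root_addr: Address) -> StorageKey {
    StorageKey::new(
        AccountTreeId::new(message_root_addr),
        H256::from_low_u64_be(3), // AGG_TREE_HEIGHT_KEY
    )
}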
diff --git a/core/lib/constants/src/system_logs.rs b/core/lib/constants/src/system_logs.rs
index e2cc58444f3..aa2c2cc156c 100644
--- a/core/lib/constants/src/system_logs.rs
+++ b/core/lib/constants/src/system_logs.rs
@@ -2,10 +2,7 @@ pub const L2_TO_L1_LOGS_TREE_ROOT_KEY: u32 = 0;

/// The key of the system log with value of the state diff hash for pre-gateway protocol versions
-pub const STATE_DIFF_HASH_KEY_PRE_GATEWAY: u64 = 2;
+pub const STATE_DIFF_HASH_KEY_PRE_GATEWAY: u32 = 2;

-/// The key of the system log with value of the first blob linear hash
-pub const BLOB1_LINEAR_HASH_KEY: u32 = 7;
-
-/// The key of the system log with value of the second blob linear hash
-pub const BLOB2_LINEAR_HASH_KEY: u32 = 8;
+/// The key of the system log with value of the first blob linear hash for pre-gateway protocol versions
+pub const BLOB1_LINEAR_HASH_KEY_PRE_GATEWAY: u32 = 7;
diff --git a/core/lib/contracts/src/lib.rs b/core/lib/contracts/src/lib.rs
index 06d6a235337..af9b5fe99f2 100644
--- a/core/lib/contracts/src/lib.rs
+++ b/core/lib/contracts/src/lib.rs
@@ -69,20 +69,21 @@ fn home_path() -> PathBuf {
    Workspace::locate().core()
}

-fn read_file_to_json_value(path: impl AsRef<Path> + std::fmt::Debug) -> serde_json::Value {
+fn read_file_to_json_value(path: impl AsRef<Path> + std::fmt::Debug) -> Option<serde_json::Value> {
    let zksync_home = home_path();
    let path = Path::new(&zksync_home).join(path);
-    let file =
-        File::open(&path).unwrap_or_else(|e| panic!("Failed to open file {:?}: {}", path, e));
-    serde_json::from_reader(BufReader::new(file))
-        .unwrap_or_else(|e| panic!("Failed to parse file {:?}: {}", path, e))
+    let file = File::open(&path).ok()?;
+    Some(
+        serde_json::from_reader(BufReader::new(file))
+            .unwrap_or_else(|e| panic!("Failed to parse file {:?}: {}", path, e)),
+    )
}

fn load_contract_if_present<P: AsRef<Path> + std::fmt::Debug>(path: P) -> Option<Contract> {
    let zksync_home = home_path();
    let path = Path::new(&zksync_home).join(path);
    path.exists().then(|| {
-        serde_json::from_value(read_file_to_json_value(&path)["abi"].take())
+        serde_json::from_value(read_file_to_json_value(&path).unwrap()["abi"].take())
            .unwrap_or_else(|e| panic!("Failed to parse contract abi from file {:?}: {}", path, e))
    })
}
@@ -114,24 +115,26 @@ pub fn load_contract<P: AsRef<Path> + std::fmt::Debug>(path: P) -> Contract {
}

pub fn load_sys_contract(contract_name: &str) -> Contract {
-    load_contract(format!(
+    if let Some(contract) = load_contract_if_present(format!(
        "contracts/system-contracts/artifacts-zk/contracts-preprocessed/{0}.sol/{0}.json",
        contract_name
-    ))
-}
-
-pub fn load_sys_contract_interface(contract_name: &str) -> Contract {
-    load_contract(format!(
-        "contracts/system-contracts/artifacts-zk/contracts-preprocessed/interfaces/{0}.sol/{0}.json",
-        contract_name
-    ))
+    )) {
+        contract
+    } else {
+        load_contract(format!(
+            "contracts/system-contracts/zkout/{0}.sol/{0}.json",
+            contract_name
+        ))
+    }
}

-pub fn read_contract_abi(path: impl AsRef<Path> + std::fmt::Debug) -> String {
-    read_file_to_json_value(path)["abi"]
-        .as_str()
-        .expect("Failed to parse abi")
-        .to_string()
+pub fn read_contract_abi(path: impl AsRef<Path> + std::fmt::Debug) -> Option<String> {
+    Some(
+        read_file_to_json_value(path)?["abi"]
+            .as_str()
+            .expect("Failed to parse abi")
+            .to_string(),
+    )
}

pub fn bridgehub_contract() -> Contract {
@@ -212,12 +215,12 @@ pub fn l2_message_root() -> Contract {
}

pub fn l2_rollup_da_validator_bytecode() -> Vec<u8> {
-    read_bytecode_from_path("contracts/l2-contracts/artifacts-zk/contracts/data-availability/RollupL2DAValidator.sol/RollupL2DAValidator.json")
+    read_bytecode("contracts/l2-contracts/artifacts-zk/contracts/data-availability/RollupL2DAValidator.sol/RollupL2DAValidator.json")
}

/// Reads bytecode from the path RELATIVE to the Cargo workspace location.
pub fn read_bytecode(relative_path: impl AsRef<Path> + std::fmt::Debug) -> Vec<u8> {
-    read_bytecode_from_path(relative_path)
+    read_bytecode_from_path(relative_path).expect("Exists")
}

pub fn eth_contract() -> Contract {
@@ -229,17 +232,25 @@ pub fn known_codes_contract() -> Contract {
}

/// Reads bytecode from a given path.
-pub fn read_bytecode_from_path(artifact_path: impl AsRef<Path> + std::fmt::Debug) -> Vec<u8> {
-    let artifact = read_file_to_json_value(&artifact_path);
-
-    let bytecode = artifact["bytecode"]
-        .as_str()
-        .unwrap_or_else(|| panic!("Bytecode not found in {:?}", artifact_path))
-        .strip_prefix("0x")
-        .unwrap_or_else(|| panic!("Bytecode in {:?} is not hex", artifact_path));
+pub fn read_bytecode_from_path(
+    artifact_path: impl AsRef<Path> + std::fmt::Debug,
+) -> Option<Vec<u8>> {
+    let artifact = read_file_to_json_value(&artifact_path)?;
+
+    let bytecode = if let Some(bytecode) = artifact["bytecode"].as_str() {
+        bytecode
+            .strip_prefix("0x")
+            .unwrap_or_else(|| panic!("Bytecode in {:?} is not hex", artifact_path))
+    } else {
+        artifact["bytecode"]["object"]
+            .as_str()
+            .unwrap_or_else(|| panic!("Bytecode not found in {:?}", artifact_path))
+    };

-    hex::decode(bytecode)
-        .unwrap_or_else(|err| panic!("Can't decode bytecode in {:?}: {}", artifact_path, err))
+    Some(
+        hex::decode(bytecode)
+            .unwrap_or_else(|err| panic!("Can't decode bytecode in {:?}: {}", artifact_path, err)),
+    )
}
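The branching above exists because two artifact shapes are in circulation. A self-contained illustration (toy values assumed) of both layouts and the shared decoding path:

use serde_json::json;

fn main() {
    // hardhat-style artifact: flat, 0x-prefixed hex string.
    let hardhat = json!({ "bytecode": "0x01020304" });
    // zkout/forge-style artifact: hex nested under `object`, no prefix.
    let zkout = json!({ "bytecode": { "object": "01020304" } });
    for artifact in [hardhat, zkout] {
        let bytecode = artifact["bytecode"]
            .as_str()
            .map(|s| s.strip_prefix("0x").unwrap_or(s).to_owned())
            .or_else(|| artifact["bytecode"]["object"].as_str().map(str::to_owned))
            .expect("bytecode not found");
        assert_eq!(hex::decode(bytecode).unwrap(), vec![1, 2, 3, 4]);
    }
}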
pub fn read_sys_contract_bytecode(directory: &str, name: &str, lang: ContractLanguage) -> Vec<u8> {
@@ -247,7 +258,7 @@ pub fn read_sys_contract_bytecode(directory: &str, name: &str, lang: ContractLan
}

static DEFAULT_SYSTEM_CONTRACTS_REPO: Lazy<SystemContractsRepo> =
-    Lazy::new(SystemContractsRepo::from_env);
+    Lazy::new(SystemContractsRepo::default);

/// Structure representing a system contract repository - that allows
/// fetching contracts that are located there.
@@ -257,14 +268,16 @@ pub struct SystemContractsRepo {
    pub root: PathBuf,
}

-impl SystemContractsRepo {
+impl Default for SystemContractsRepo {
    /// Returns the default system contracts repository with directory based on the Cargo workspace location.
-    pub fn from_env() -> Self {
+    fn default() -> Self {
        SystemContractsRepo {
            root: home_path().join("contracts/system-contracts"),
        }
    }
+}

+impl SystemContractsRepo {
    pub fn read_sys_contract_bytecode(
        &self,
        directory: &str,
@@ -272,23 +285,62 @@ impl SystemContractsRepo {
        lang: ContractLanguage,
    ) -> Vec<u8> {
        match lang {
-            ContractLanguage::Sol => read_bytecode_from_path(self.root.join(format!(
-                "artifacts-zk/contracts-preprocessed/{0}{1}.sol/{1}.json",
-                directory, name
-            ))),
-            ContractLanguage::Yul => read_zbin_bytecode_from_path(self.root.join(format!(
-                "contracts-preprocessed/{0}artifacts/{1}.yul.zbin",
-                directory, name
-            ))),
+            ContractLanguage::Sol => {
+                if let Some(contracts) = read_bytecode_from_path(
+                    self.root
+                        .join(format!("zkout/{0}{1}.sol/{1}.json", directory, name)),
+                ) {
+                    contracts
+                } else {
+                    read_bytecode_from_path(self.root.join(format!(
+                        "artifacts-zk/contracts-preprocessed/{0}{1}.sol/{1}.json",
+                        directory, name
+                    )))
+                    .unwrap_or_else(|| {
+                        panic!("One of the outputs should exist for {directory}{name}");
+                    })
+                }
+            }
+            ContractLanguage::Yul => {
+                if let Some(contract) = read_bytecode_from_path(self.root.join(format!(
+                    "zkout/{name}.yul/contracts-preprocessed/{directory}/{name}.yul.json",
+                ))) {
+                    contract
+                } else {
+                    read_yul_bytecode_by_path(
+                        self.root
+                            .join(format!("contracts-preprocessed/{directory}artifacts")),
+                        name,
+                    )
+                }
+            }
        }
    }
}

pub fn read_bootloader_code(bootloader_type: &str) -> Vec<u8> {
-    read_zbin_bytecode(format!(
-        "contracts/system-contracts/bootloader/build/artifacts/{}.yul.zbin",
-        bootloader_type
-    ))
+    if let Some(contract) =
+        read_bytecode_from_path(home_path().join("contracts/system-contracts").join(format!(
+            "zkout/{bootloader_type}.yul/contracts-preprocessed/bootloader/{bootloader_type}.yul.json",
+        )))
+    {
+        return contract;
+    };
+
+    let artifacts_path =
+        Path::new(&home_path()).join("contracts/system-contracts/bootloader/build/artifacts");
+    let bytecode_path = artifacts_path.join(format!("{bootloader_type}.yul.zbin"));
+    if fs::exists(bytecode_path).unwrap_or_default() {
+        read_yul_bytecode(
+            "contracts/system-contracts/bootloader/build/artifacts",
+            bootloader_type,
+        )
+    } else {
+        read_yul_bytecode(
+            "contracts/system-contracts/bootloader/tests/artifacts",
+            bootloader_type,
+        )
+    }
}

fn read_proved_batch_bootloader_bytecode() -> Vec<u8> {
@@ -305,11 +357,48 @@ pub fn read_zbin_bytecode(relative_zbin_path: impl AsRef<Path>) -> Vec<u8> {
    read_zbin_bytecode_from_path(bytecode_path)
}

+pub fn read_yul_bytecode(relative_artifacts_path: &str, name: &str) -> Vec<u8> {
+    let artifacts_path = Path::new(&home_path()).join(relative_artifacts_path);
+    read_yul_bytecode_by_path(artifacts_path, name)
+}
+
+pub fn read_yul_bytecode_by_path(artifacts_path: PathBuf, name: &str) -> Vec<u8> {
+    let bytecode_path = artifacts_path.join(format!("{name}.yul/{name}.yul.zbin"));
+
+    // Legacy versions of zksolc use the following path for output data if a yul file is being compiled: <name>.yul.zbin
+    // New zksolc versions use <name>.yul/<name>.yul.zbin, for consistency with solidity files compilation.
+    // In addition, the output of the legacy zksolc in this case is a binary file, while in new versions it is hex encoded.
+    if fs::exists(&bytecode_path)
+        .unwrap_or_else(|err| panic!("Invalid path: {bytecode_path:?}, {err}"))
+    {
+        read_zbin_bytecode_from_hex_file(bytecode_path)
+    } else {
+        let bytecode_path_legacy = artifacts_path.join(format!("{name}.yul.zbin"));
+
+        if fs::exists(&bytecode_path_legacy)
+            .unwrap_or_else(|err| panic!("Invalid path: {bytecode_path_legacy:?}, {err}"))
+        {
+            read_zbin_bytecode_from_path(bytecode_path_legacy)
+        } else {
+            panic!("Can't find bytecode for '{name}' yul contract at {artifacts_path:?}")
+        }
+    }
+}
+
/// Reads zbin bytecode from a given path.
fn read_zbin_bytecode_from_path(bytecode_path: PathBuf) -> Vec<u8> {
    fs::read(&bytecode_path)
-        .unwrap_or_else(|err| panic!("Can't read .zbin bytecode at {:?}: {}", bytecode_path, err))
+        .unwrap_or_else(|err| panic!("Can't read .zbin bytecode at {bytecode_path:?}: {err}"))
+}
+
+/// Reads zbin bytecode from a given path as a utf8 text file.
+fn read_zbin_bytecode_from_hex_file(bytecode_path: PathBuf) -> Vec<u8> {
+    let bytes = fs::read(&bytecode_path)
+        .unwrap_or_else(|err| panic!("Can't read .zbin bytecode at {bytecode_path:?}: {err}"));
+
+    hex::decode(bytes).unwrap_or_else(|err| panic!("Invalid input file: {bytecode_path:?}, {err}"))
}
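The lookup order implemented in `read_yul_bytecode_by_path` above, condensed into a stand-alone sketch (the helper name is invented; the paths and the hex-vs-binary split mirror the code):

use std::{fs, path::Path};

// New zksolc: hex-encoded text at <name>.yul/<name>.yul.zbin;
// legacy zksolc: raw binary at <name>.yul.zbin.
fn try_read_yul_bytecode(artifacts: &Path, name: &str) -> Option<Vec<u8>> {
    let new_path = artifacts.join(format!("{name}.yul/{name}.yul.zbin"));
    if new_path.exists() {
        return hex::decode(fs::read(new_path).ok()?).ok();
    }
    let legacy_path = artifacts.join(format!("{name}.yul.zbin"));
    legacy_path.exists().then(|| fs::read(legacy_path).ok()).flatten()
}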
+
/// Hash of code and code which consists of 32 bytes words
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SystemContractCode {
@@ -321,18 +410,23 @@ pub struct BaseSystemContracts {
    pub bootloader: SystemContractCode,
    pub default_aa: SystemContractCode,
+    /// Never filled in constructors for now. The only way to get the EVM emulator enabled is to call [`Self::with_latest_evm_emulator()`].
+    pub evm_emulator: Option<SystemContractCode>,
}

#[derive(Debug, Clone, Copy, Default, Serialize, Deserialize, PartialEq)]
pub struct BaseSystemContractsHashes {
    pub bootloader: H256,
    pub default_aa: H256,
+    pub evm_emulator: Option<H256>,
}

impl PartialEq for BaseSystemContracts {
    fn eq(&self, other: &Self) -> bool {
        self.bootloader.hash == other.bootloader.hash
            && self.default_aa.hash == other.default_aa.hash
+            && self.evm_emulator.as_ref().map(|contract| contract.hash)
+                == other.evm_emulator.as_ref().map(|contract| contract.hash)
    }
}

@@ -356,14 +450,27 @@ impl BaseSystemContracts {
        BaseSystemContracts {
            bootloader,
            default_aa,
+            evm_emulator: None,
        }
    }
-    // BaseSystemContracts with proved bootloader - for handling transactions.
+
+    /// BaseSystemContracts with proved bootloader - for handling transactions.
    pub fn load_from_disk() -> Self {
        let bootloader_bytecode = read_proved_batch_bootloader_bytecode();
        BaseSystemContracts::load_with_bootloader(bootloader_bytecode)
    }

+    /// Loads the latest EVM emulator for these base system contracts. Logically, it only makes sense to do this for the latest protocol version.
+    pub fn with_latest_evm_emulator(mut self) -> Self {
+        let bytecode = read_sys_contract_bytecode("", "EvmEmulator", ContractLanguage::Yul);
+        let hash = hash_bytecode(&bytecode);
+        self.evm_emulator = Some(SystemContractCode {
+            code: bytes_to_be_words(bytecode),
+            hash,
+        });
+        self
+    }
+
    /// BaseSystemContracts with playground bootloader - used for handling eth_calls.
pub fn playground() -> Self { let bootloader_bytecode = read_playground_batch_bootloader_bytecode(); @@ -425,6 +532,13 @@ impl BaseSystemContracts { BaseSystemContracts::load_with_bootloader(bootloader_bytecode) } + pub fn playground_post_protocol_defense() -> Self { + let bootloader_bytecode = read_zbin_bytecode( + "etc/multivm_bootloaders/vm_protocol_defense/playground_batch.yul/playground_batch.yul.zbin", + ); + BaseSystemContracts::load_with_bootloader(bootloader_bytecode) + } + pub fn playground_gateway() -> Self { let bootloader_bytecode = read_zbin_bytecode( "contracts/system-contracts/bootloader/build/artifacts/playground_batch.yul.zbin", @@ -496,6 +610,13 @@ impl BaseSystemContracts { BaseSystemContracts::load_with_bootloader(bootloader_bytecode) } + pub fn estimate_gas_post_protocol_defense() -> Self { + let bootloader_bytecode = read_zbin_bytecode( + "etc/multivm_bootloaders/vm_protocol_defense/fee_estimate.yul/fee_estimate.yul.zbin", + ); + BaseSystemContracts::load_with_bootloader(bootloader_bytecode) + } + pub fn estimate_gas_gateway() -> Self { let bootloader_bytecode = read_zbin_bytecode( "contracts/system-contracts/bootloader/build/artifacts/fee_estimate.yul.zbin", @@ -508,6 +629,7 @@ impl BaseSystemContracts { BaseSystemContractsHashes { bootloader: self.bootloader.hash, default_aa: self.default_aa.hash, + evm_emulator: self.evm_emulator.as_ref().map(|contract| contract.hash), } } } diff --git a/core/lib/dal/.sqlx/query-0237d9a26654e7c409785c73c2b16fe37110ebc3fb3981b2626a0bf2edd00e69.json b/core/lib/dal/.sqlx/query-0237d9a26654e7c409785c73c2b16fe37110ebc3fb3981b2626a0bf2edd00e69.json new file mode 100644 index 00000000000..189e28f565d --- /dev/null +++ b/core/lib/dal/.sqlx/query-0237d9a26654e7c409785c73c2b16fe37110ebc3fb3981b2626a0bf2edd00e69.json @@ -0,0 +1,40 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n protocol_version,\n index_in_block,\n miniblocks.number AS \"miniblock_number!\",\n miniblocks.hash AS \"miniblocks_hash!\"\n FROM\n transactions\n INNER JOIN miniblocks ON transactions.miniblock_number = miniblocks.number\n WHERE\n transactions.hash = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "protocol_version", + "type_info": "Int4" + }, + { + "ordinal": 1, + "name": "index_in_block", + "type_info": "Int4" + }, + { + "ordinal": 2, + "name": "miniblock_number!", + "type_info": "Int8" + }, + { + "ordinal": 3, + "name": "miniblocks_hash!", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Bytea" + ] + }, + "nullable": [ + true, + true, + false, + false + ] + }, + "hash": "0237d9a26654e7c409785c73c2b16fe37110ebc3fb3981b2626a0bf2edd00e69" +} diff --git a/core/lib/dal/.sqlx/query-0fbdf8da9a000c433c5475d57f6ad2574cd1310dff1d1bf06825d5634ba25f04.json b/core/lib/dal/.sqlx/query-0fbdf8da9a000c433c5475d57f6ad2574cd1310dff1d1bf06825d5634ba25f04.json deleted file mode 100644 index 05b94ad249a..00000000000 --- a/core/lib/dal/.sqlx/query-0fbdf8da9a000c433c5475d57f6ad2574cd1310dff1d1bf06825d5634ba25f04.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE tee_verifier_input_producer_jobs\n SET\n status = $1,\n updated_at = NOW(),\n time_taken = $3,\n input_blob_url = $4\n WHERE\n l1_batch_number = $2\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - { - "Custom": { - "name": "tee_verifier_input_producer_job_status", - "kind": { - "Enum": [ - "Queued", - "ManuallySkipped", - "InProgress", - "Successful", - "Failed" - ] - } - } - }, - "Int8", - "Time", - "Text" - ] - }, - 
"nullable": [] - }, - "hash": "0fbdf8da9a000c433c5475d57f6ad2574cd1310dff1d1bf06825d5634ba25f04" -} diff --git a/core/lib/dal/.sqlx/query-1ed2d7e5e98b15420a21650809d710ce910d0c9138d85cb55e16459c757dea03.json b/core/lib/dal/.sqlx/query-16ea3cd5a006576fa1ab5895212098869d490ea0a96aa9d5b9a22b34ab0f8f47.json similarity index 53% rename from core/lib/dal/.sqlx/query-1ed2d7e5e98b15420a21650809d710ce910d0c9138d85cb55e16459c757dea03.json rename to core/lib/dal/.sqlx/query-16ea3cd5a006576fa1ab5895212098869d490ea0a96aa9d5b9a22b34ab0f8f47.json index 9cf4cc1e68e..36879466039 100644 --- a/core/lib/dal/.sqlx/query-1ed2d7e5e98b15420a21650809d710ce910d0c9138d85cb55e16459c757dea03.json +++ b/core/lib/dal/.sqlx/query-16ea3cd5a006576fa1ab5895212098869d490ea0a96aa9d5b9a22b34ab0f8f47.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n protocol_version\n FROM\n l1_batches\n ORDER BY\n number DESC\n LIMIT\n 1\n ", + "query": "\n SELECT\n protocol_version\n FROM\n l1_batches\n WHERE\n is_sealed\n ORDER BY\n number DESC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -16,5 +16,5 @@ true ] }, - "hash": "1ed2d7e5e98b15420a21650809d710ce910d0c9138d85cb55e16459c757dea03" + "hash": "16ea3cd5a006576fa1ab5895212098869d490ea0a96aa9d5b9a22b34ab0f8f47" } diff --git a/core/lib/dal/.sqlx/query-180cc8d88563a42423ca1d4b92181f4625ebd593aa4cd2bae79bcc0637387d78.json b/core/lib/dal/.sqlx/query-180cc8d88563a42423ca1d4b92181f4625ebd593aa4cd2bae79bcc0637387d78.json new file mode 100644 index 00000000000..b40bdca666b --- /dev/null +++ b/core/lib/dal/.sqlx/query-180cc8d88563a42423ca1d4b92181f4625ebd593aa4cd2bae79bcc0637387d78.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n DELETE FROM l1_batches\n WHERE\n number > $1\n AND NOT is_sealed\n RETURNING number\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "number", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + false + ] + }, + "hash": "180cc8d88563a42423ca1d4b92181f4625ebd593aa4cd2bae79bcc0637387d78" +} diff --git a/core/lib/dal/.sqlx/query-ce6d5796dcc7c105fe3b3081b70327982ab744c7566645e9b0c69364f7021c5a.json b/core/lib/dal/.sqlx/query-1cb61327bed4d65a3fc81aa2229e01396dacefc0cea8cbcf5807185eb00fc0f7.json similarity index 75% rename from core/lib/dal/.sqlx/query-ce6d5796dcc7c105fe3b3081b70327982ab744c7566645e9b0c69364f7021c5a.json rename to core/lib/dal/.sqlx/query-1cb61327bed4d65a3fc81aa2229e01396dacefc0cea8cbcf5807185eb00fc0f7.json index c3a238fb8ba..48adcd41267 100644 --- a/core/lib/dal/.sqlx/query-ce6d5796dcc7c105fe3b3081b70327982ab744c7566645e9b0c69364f7021c5a.json +++ b/core/lib/dal/.sqlx/query-1cb61327bed4d65a3fc81aa2229e01396dacefc0cea8cbcf5807185eb00fc0f7.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n 
data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n number = 0\n OR eth_commit_tx_id IS NOT NULL\n AND commitment IS NOT NULL\n ORDER BY\n number DESC\n LIMIT\n 1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n number = 0\n OR eth_commit_tx_id IS NOT NULL\n AND commitment IS NOT NULL\n ORDER BY\n number DESC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -90,66 +90,76 @@ }, { "ordinal": 17, - "name": "aux_data_hash", + "name": "evm_emulator_code_hash", "type_info": "Bytea" }, { "ordinal": 18, - "name": "pass_through_data_hash", + "name": "aux_data_hash", "type_info": "Bytea" }, { "ordinal": 19, - "name": "meta_parameters_hash", + "name": "pass_through_data_hash", "type_info": "Bytea" }, { "ordinal": 20, + "name": "meta_parameters_hash", + "type_info": "Bytea" + }, + { + "ordinal": 21, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 21, + "ordinal": 22, "name": "compressed_state_diffs", "type_info": "Bytea" }, { - "ordinal": 22, + "ordinal": 23, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 23, + "ordinal": 24, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 24, + "ordinal": 25, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 25, + "ordinal": 26, "name": "pubdata_input", "type_info": "Bytea" }, { - "ordinal": 26, + "ordinal": 27, + "name": "fee_address", + "type_info": "Bytea" + }, + { + "ordinal": 28, "name": "aggregation_root", "type_info": "Bytea" }, { - "ordinal": 27, + "ordinal": 29, "name": "local_root", "type_info": "Bytea" }, { - "ordinal": 28, + "ordinal": 30, "name": "state_diff_hash", "type_info": "Bytea" }, { - "ordinal": 29, + "ordinal": 31, "name": "inclusion_data", "type_info": "Bytea" } @@ -180,15 +190,17 @@ true, true, true, + true, false, true, true, true, + false, true, true, true, true ] }, - "hash": "ce6d5796dcc7c105fe3b3081b70327982ab744c7566645e9b0c69364f7021c5a" + "hash": "1cb61327bed4d65a3fc81aa2229e01396dacefc0cea8cbcf5807185eb00fc0f7" } diff --git a/core/lib/dal/.sqlx/query-cf20dfb2b3d6a770b6f56417d407ad3caf76ed9fed031da9e04313073af2fb4a.json b/core/lib/dal/.sqlx/query-1eb34ecfbe49d5ba063a8f8842eced820f8869acb6f59aa6dd704c0f5b4e45ec.json similarity index 73% rename from core/lib/dal/.sqlx/query-cf20dfb2b3d6a770b6f56417d407ad3caf76ed9fed031da9e04313073af2fb4a.json rename to core/lib/dal/.sqlx/query-1eb34ecfbe49d5ba063a8f8842eced820f8869acb6f59aa6dd704c0f5b4e45ec.json index 853acb9f71a..a101edbb9ea 100644 --- a/core/lib/dal/.sqlx/query-cf20dfb2b3d6a770b6f56417d407ad3caf76ed9fed031da9e04313073af2fb4a.json +++ 
b/core/lib/dal/.sqlx/query-1eb34ecfbe49d5ba063a8f8842eced820f8869acb6f59aa6dd704c0f5b4e45ec.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n storage_refunds,\n pubdata_costs\n FROM\n l1_batches\n WHERE\n number = $1\n ", + "query": "\n SELECT\n storage_refunds,\n pubdata_costs\n FROM\n l1_batches\n WHERE\n is_sealed\n AND number = $1\n ", "describe": { "columns": [ { @@ -24,5 +24,5 @@ true ] }, - "hash": "cf20dfb2b3d6a770b6f56417d407ad3caf76ed9fed031da9e04313073af2fb4a" + "hash": "1eb34ecfbe49d5ba063a8f8842eced820f8869acb6f59aa6dd704c0f5b4e45ec" } diff --git a/core/lib/dal/.sqlx/query-a2d02b71e3dcc29a2c0c20b44392cfbaf09164aecfa5eed8d7142518ad96abea.json b/core/lib/dal/.sqlx/query-1ec14bf6f71bbab04275ffd90bc17791290f3bfff4de742f2a918a3fd4e5608c.json similarity index 71% rename from core/lib/dal/.sqlx/query-a2d02b71e3dcc29a2c0c20b44392cfbaf09164aecfa5eed8d7142518ad96abea.json rename to core/lib/dal/.sqlx/query-1ec14bf6f71bbab04275ffd90bc17791290f3bfff4de742f2a918a3fd4e5608c.json index fc36e47b54c..1078e0b57f6 100644 --- a/core/lib/dal/.sqlx/query-a2d02b71e3dcc29a2c0c20b44392cfbaf09164aecfa5eed8d7142518ad96abea.json +++ b/core/lib/dal/.sqlx/query-1ec14bf6f71bbab04275ffd90bc17791290f3bfff4de742f2a918a3fd4e5608c.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n initial_bootloader_heap_content\n FROM\n l1_batches\n WHERE\n number = $1\n ", + "query": "\n SELECT\n initial_bootloader_heap_content\n FROM\n l1_batches\n WHERE\n is_sealed\n AND number = $1\n ", "describe": { "columns": [ { @@ -18,5 +18,5 @@ false ] }, - "hash": "a2d02b71e3dcc29a2c0c20b44392cfbaf09164aecfa5eed8d7142518ad96abea" + "hash": "1ec14bf6f71bbab04275ffd90bc17791290f3bfff4de742f2a918a3fd4e5608c" } diff --git a/core/lib/dal/.sqlx/query-c9e05ebc7b61c1f409c330bc110bed26c831730944237b74bed98869c83b3ca5.json b/core/lib/dal/.sqlx/query-1fa64372eff16b29f9694e54ac7ef7cd9c8486d1613319e1f6bc038ddff539f8.json similarity index 62% rename from core/lib/dal/.sqlx/query-c9e05ebc7b61c1f409c330bc110bed26c831730944237b74bed98869c83b3ca5.json rename to core/lib/dal/.sqlx/query-1fa64372eff16b29f9694e54ac7ef7cd9c8486d1613319e1f6bc038ddff539f8.json index 433564c6ae0..aa657582690 100644 --- a/core/lib/dal/.sqlx/query-c9e05ebc7b61c1f409c330bc110bed26c831730944237b74bed98869c83b3ca5.json +++ b/core/lib/dal/.sqlx/query-1fa64372eff16b29f9694e54ac7ef7cd9c8486d1613319e1f6bc038ddff539f8.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n (\n SELECT\n l1_batch_number\n FROM\n miniblocks\n WHERE\n number = $1\n ) AS \"block_batch?\",\n COALESCE(\n (\n SELECT\n MAX(number) + 1\n FROM\n l1_batches\n ),\n (\n SELECT\n MAX(l1_batch_number) + 1\n FROM\n snapshot_recovery\n ),\n 0\n ) AS \"pending_batch!\"\n ", + "query": "\n SELECT\n (\n SELECT\n l1_batch_number\n FROM\n miniblocks\n WHERE\n number = $1\n ) AS \"block_batch?\",\n COALESCE(\n (\n SELECT\n MAX(number) + 1\n FROM\n l1_batches\n WHERE\n is_sealed\n ),\n (\n SELECT\n MAX(l1_batch_number) + 1\n FROM\n snapshot_recovery\n ),\n 0\n ) AS \"pending_batch!\"\n ", "describe": { "columns": [ { @@ -24,5 +24,5 @@ null ] }, - "hash": "c9e05ebc7b61c1f409c330bc110bed26c831730944237b74bed98869c83b3ca5" + "hash": "1fa64372eff16b29f9694e54ac7ef7cd9c8486d1613319e1f6bc038ddff539f8" } diff --git a/core/lib/dal/.sqlx/query-894665c2c467bd1aaeb331b112c567e2667c63a033baa6b427bd8a0898c08bf2.json b/core/lib/dal/.sqlx/query-2076bee41f2db1534bb7e15043629027b18d108a05f5855115ba36045e3b1850.json similarity index 50% rename from 
core/lib/dal/.sqlx/query-894665c2c467bd1aaeb331b112c567e2667c63a033baa6b427bd8a0898c08bf2.json rename to core/lib/dal/.sqlx/query-2076bee41f2db1534bb7e15043629027b18d108a05f5855115ba36045e3b1850.json index 06d3461c3fa..4f44879b6ec 100644 --- a/core/lib/dal/.sqlx/query-894665c2c467bd1aaeb331b112c567e2667c63a033baa6b427bd8a0898c08bf2.json +++ b/core/lib/dal/.sqlx/query-2076bee41f2db1534bb7e15043629027b18d108a05f5855115ba36045e3b1850.json @@ -1,12 +1,17 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n protocol_version\n FROM\n miniblocks\n WHERE\n number = $1\n ", + "query": "\n SELECT\n protocol_version,\n hash\n FROM\n miniblocks\n WHERE\n number = $1\n ", "describe": { "columns": [ { "ordinal": 0, "name": "protocol_version", "type_info": "Int4" + }, + { + "ordinal": 1, + "name": "hash", + "type_info": "Bytea" } ], "parameters": { @@ -15,8 +20,9 @@ ] }, "nullable": [ - true + true, + false ] }, - "hash": "894665c2c467bd1aaeb331b112c567e2667c63a033baa6b427bd8a0898c08bf2" + "hash": "2076bee41f2db1534bb7e15043629027b18d108a05f5855115ba36045e3b1850" } diff --git a/core/lib/dal/.sqlx/query-269e5901aaa362ed011a2e968d2bc8cc8877e5d1d9c2d9b04953fa7d89155b40.json b/core/lib/dal/.sqlx/query-250cc655f48144137906a72490680cc9e176729744c779fee97ca9392ae8a8c8.json similarity index 70% rename from core/lib/dal/.sqlx/query-269e5901aaa362ed011a2e968d2bc8cc8877e5d1d9c2d9b04953fa7d89155b40.json rename to core/lib/dal/.sqlx/query-250cc655f48144137906a72490680cc9e176729744c779fee97ca9392ae8a8c8.json index 2b094a5f24f..5c4ce3d6a4e 100644 --- a/core/lib/dal/.sqlx/query-269e5901aaa362ed011a2e968d2bc8cc8877e5d1d9c2d9b04953fa7d89155b40.json +++ b/core/lib/dal/.sqlx/query-250cc655f48144137906a72490680cc9e176729744c779fee97ca9392ae8a8c8.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address AS \"fee_account_address!\",\n l2_da_validator_address AS \"l2_da_validator_address!\",\n pubdata_type AS \"pubdata_type!\",\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n logs_bloom\n FROM\n miniblocks\n ORDER BY\n number DESC\n LIMIT\n 1\n ", + "query": "\n SELECT\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address AS \"fee_account_address!\",\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n logs_bloom,\n l2_da_validator_address,\n pubdata_type\n FROM\n miniblocks\n ORDER BY\n number DESC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -35,68 +35,73 @@ }, { "ordinal": 6, - "name": "l2_da_validator_address!", - "type_info": "Bytea" - }, - { - "ordinal": 7, - "name": "pubdata_type!", - "type_info": "Text" - }, - { - "ordinal": 8, "name": "base_fee_per_gas", "type_info": "Numeric" }, { - "ordinal": 9, + "ordinal": 7, "name": "l1_gas_price", "type_info": "Int8" }, { - "ordinal": 10, + "ordinal": 8, "name": "l2_fair_gas_price", "type_info": "Int8" }, { - "ordinal": 11, + "ordinal": 9, "name": "gas_per_pubdata_limit", "type_info": "Int8" }, { - "ordinal": 12, + "ordinal": 10, "name": "bootloader_code_hash", "type_info": "Bytea" }, { - "ordinal": 13, + "ordinal": 11, "name": "default_aa_code_hash", "type_info": "Bytea" }, { - "ordinal": 14, + "ordinal": 12, + "name": 
"evm_emulator_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 13, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 15, + "ordinal": 14, "name": "virtual_blocks", "type_info": "Int8" }, { - "ordinal": 16, + "ordinal": 15, "name": "fair_pubdata_price", "type_info": "Int8" }, { - "ordinal": 17, + "ordinal": 16, "name": "gas_limit", "type_info": "Int8" }, { - "ordinal": 18, + "ordinal": 17, "name": "logs_bloom", "type_info": "Bytea" + }, + { + "ordinal": 18, + "name": "l2_da_validator_address", + "type_info": "Bytea" + }, + { + "ordinal": 19, + "name": "pubdata_type", + "type_info": "Text" } ], "parameters": { @@ -113,16 +118,17 @@ false, false, false, - false, - false, + true, true, true, true, false, true, true, - true + true, + false, + false ] }, - "hash": "269e5901aaa362ed011a2e968d2bc8cc8877e5d1d9c2d9b04953fa7d89155b40" + "hash": "250cc655f48144137906a72490680cc9e176729744c779fee97ca9392ae8a8c8" } diff --git a/core/lib/dal/.sqlx/query-2589f98459979f776ce5fd6e3ff94ec52b3eb68c346492a8fed98f20f2a0381d.json b/core/lib/dal/.sqlx/query-2589f98459979f776ce5fd6e3ff94ec52b3eb68c346492a8fed98f20f2a0381d.json deleted file mode 100644 index 9d8cc36189f..00000000000 --- a/core/lib/dal/.sqlx/query-2589f98459979f776ce5fd6e3ff94ec52b3eb68c346492a8fed98f20f2a0381d.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n proofs.l1_batch_number\n FROM\n tee_proof_generation_details AS proofs\n JOIN\n tee_verifier_input_producer_jobs AS inputs\n ON proofs.l1_batch_number = inputs.l1_batch_number\n WHERE\n inputs.status = $1\n AND proofs.status = $2\n ORDER BY\n proofs.l1_batch_number ASC\n LIMIT\n 1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "l1_batch_number", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - { - "Custom": { - "name": "tee_verifier_input_producer_job_status", - "kind": { - "Enum": [ - "Queued", - "ManuallySkipped", - "InProgress", - "Successful", - "Failed" - ] - } - } - }, - "Text" - ] - }, - "nullable": [ - false - ] - }, - "hash": "2589f98459979f776ce5fd6e3ff94ec52b3eb68c346492a8fed98f20f2a0381d" -} diff --git a/core/lib/dal/.sqlx/query-2ffa321700ef1f70a1c3f516f3162af196d586cc08ea0f23d2c568527e94b41d.json b/core/lib/dal/.sqlx/query-2ffa321700ef1f70a1c3f516f3162af196d586cc08ea0f23d2c568527e94b41d.json deleted file mode 100644 index a273eb249a4..00000000000 --- a/core/lib/dal/.sqlx/query-2ffa321700ef1f70a1c3f516f3162af196d586cc08ea0f23d2c568527e94b41d.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n DELETE FROM tee_verifier_input_producer_jobs\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [] - }, - "nullable": [] - }, - "hash": "2ffa321700ef1f70a1c3f516f3162af196d586cc08ea0f23d2c568527e94b41d" -} diff --git a/core/lib/dal/.sqlx/query-3010d02e02391732c622a7515e1e494ce1d8e3b6cfb0b897745fb596f283be79.json b/core/lib/dal/.sqlx/query-3010d02e02391732c622a7515e1e494ce1d8e3b6cfb0b897745fb596f283be79.json deleted file mode 100644 index 6012c632651..00000000000 --- a/core/lib/dal/.sqlx/query-3010d02e02391732c622a7515e1e494ce1d8e3b6cfb0b897745fb596f283be79.json +++ /dev/null @@ -1,52 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE tee_verifier_input_producer_jobs\n SET\n status = $1,\n updated_at = NOW(),\n time_taken = $3,\n error = $4\n WHERE\n l1_batch_number = $2\n AND status != $5\n RETURNING\n tee_verifier_input_producer_jobs.attempts\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "attempts", - "type_info": "Int2" - } 
- ], - "parameters": { - "Left": [ - { - "Custom": { - "name": "tee_verifier_input_producer_job_status", - "kind": { - "Enum": [ - "Queued", - "ManuallySkipped", - "InProgress", - "Successful", - "Failed" - ] - } - } - }, - "Int8", - "Time", - "Text", - { - "Custom": { - "name": "tee_verifier_input_producer_job_status", - "kind": { - "Enum": [ - "Queued", - "ManuallySkipped", - "InProgress", - "Successful", - "Failed" - ] - } - } - } - ] - }, - "nullable": [ - false - ] - }, - "hash": "3010d02e02391732c622a7515e1e494ce1d8e3b6cfb0b897745fb596f283be79" -} diff --git a/core/lib/dal/.sqlx/query-369f8f652335176ab22ee45fd6f1717e73c5e6b063be3553d82bfecb98334980.json b/core/lib/dal/.sqlx/query-369f8f652335176ab22ee45fd6f1717e73c5e6b063be3553d82bfecb98334980.json deleted file mode 100644 index 7245fa3059e..00000000000 --- a/core/lib/dal/.sqlx/query-369f8f652335176ab22ee45fd6f1717e73c5e6b063be3553d82bfecb98334980.json +++ /dev/null @@ -1,24 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n bytecode\n FROM\n (\n SELECT\n *\n FROM\n storage_logs\n WHERE\n storage_logs.hashed_key = $1\n AND storage_logs.miniblock_number <= $2\n ORDER BY\n storage_logs.miniblock_number DESC,\n storage_logs.operation_number DESC\n LIMIT\n 1\n ) t\n JOIN factory_deps ON value = factory_deps.bytecode_hash\n WHERE\n value != $3\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "bytecode", - "type_info": "Bytea" - } - ], - "parameters": { - "Left": [ - "Bytea", - "Int8", - "Bytea" - ] - }, - "nullable": [ - false - ] - }, - "hash": "369f8f652335176ab22ee45fd6f1717e73c5e6b063be3553d82bfecb98334980" -} diff --git a/core/lib/dal/.sqlx/query-4d1d409b2405a4105feb140720abb480be336b68c127e442ee1bfd177597bd8b.json b/core/lib/dal/.sqlx/query-45154c2efc8d07c4f83ae3e229f9892118f5732374e62f35e27800422afb5746.json similarity index 68% rename from core/lib/dal/.sqlx/query-4d1d409b2405a4105feb140720abb480be336b68c127e442ee1bfd177597bd8b.json rename to core/lib/dal/.sqlx/query-45154c2efc8d07c4f83ae3e229f9892118f5732374e62f35e27800422afb5746.json index c5b5afe85e4..11bff110293 100644 --- a/core/lib/dal/.sqlx/query-4d1d409b2405a4105feb140720abb480be336b68c127e442ee1bfd177597bd8b.json +++ b/core/lib/dal/.sqlx/query-45154c2efc8d07c4f83ae3e229f9892118f5732374e62f35e27800422afb5746.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR 
protocol_versions.upgrade_tx_hash IS NULL\n )\n AND events_queue_commitment IS NOT NULL\n AND bootloader_initial_content_commitment IS NOT NULL\n AND (\n data_availability.inclusion_data IS NOT NULL\n OR $4 IS FALSE\n )\n ORDER BY\n number\n LIMIT\n $5\n ", + "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n AND events_queue_commitment IS NOT NULL\n AND bootloader_initial_content_commitment IS NOT NULL\n AND (\n data_availability.inclusion_data IS NOT NULL\n OR $4 IS FALSE\n )\n ORDER BY\n number\n LIMIT\n $5\n ", "describe": { "columns": [ { @@ -90,66 +90,76 @@ }, { "ordinal": 17, - "name": "aux_data_hash", + "name": "evm_emulator_code_hash", "type_info": "Bytea" }, { "ordinal": 18, - "name": "pass_through_data_hash", + "name": "aux_data_hash", "type_info": "Bytea" }, { "ordinal": 19, - "name": "meta_parameters_hash", + "name": "pass_through_data_hash", "type_info": "Bytea" }, { "ordinal": 20, + "name": "meta_parameters_hash", + "type_info": "Bytea" + }, + { + "ordinal": 21, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 21, + "ordinal": 22, "name": "compressed_state_diffs", "type_info": "Bytea" }, { - "ordinal": 22, + "ordinal": 23, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 23, + "ordinal": 24, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 24, + "ordinal": 25, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 25, + "ordinal": 26, "name": "pubdata_input", "type_info": "Bytea" }, { - "ordinal": 26, + "ordinal": 27, + "name": "fee_address", + "type_info": "Bytea" + }, + { + "ordinal": 28, "name": "aggregation_root", "type_info": "Bytea" }, { - "ordinal": 27, + "ordinal": 29, "name": "local_root", "type_info": "Bytea" }, { - "ordinal": 28, + "ordinal": 30, "name": "state_diff_hash", "type_info": "Bytea" }, { - "ordinal": 29, + "ordinal": 31, "name": "inclusion_data", "type_info": "Bytea" } @@ -186,15 +196,17 @@ true, true, true, + true, false, true, true, true, + false, true, true, true, true ] }, - "hash": "4d1d409b2405a4105feb140720abb480be336b68c127e442ee1bfd177597bd8b" + "hash": "45154c2efc8d07c4f83ae3e229f9892118f5732374e62f35e27800422afb5746" } diff --git a/core/lib/dal/.sqlx/query-4e1db16b582aa347dc33fccd8d8afa60b5ca8ce096bfb79172b1b55264f6c987.json 
b/core/lib/dal/.sqlx/query-4bd1a4e612d10f2ca26068c140442f38816f163a3e3fba4fdbb81076b969e970.json similarity index 75% rename from core/lib/dal/.sqlx/query-4e1db16b582aa347dc33fccd8d8afa60b5ca8ce096bfb79172b1b55264f6c987.json rename to core/lib/dal/.sqlx/query-4bd1a4e612d10f2ca26068c140442f38816f163a3e3fba4fdbb81076b969e970.json index 0c6bb9ca4d2..66d3e18075b 100644 --- a/core/lib/dal/.sqlx/query-4e1db16b582aa347dc33fccd8d8afa60b5ca8ce096bfb79172b1b55264f6c987.json +++ b/core/lib/dal/.sqlx/query-4bd1a4e612d10f2ca26068c140442f38816f163a3e3fba4fdbb81076b969e970.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n number BETWEEN $1 AND $2\n ORDER BY\n number\n LIMIT\n $3\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n number BETWEEN $1 AND $2\n ORDER BY\n number\n LIMIT\n $3\n ", "describe": { "columns": [ { @@ -90,66 +90,76 @@ }, { "ordinal": 17, - "name": "aux_data_hash", + "name": "evm_emulator_code_hash", "type_info": "Bytea" }, { "ordinal": 18, - "name": "pass_through_data_hash", + "name": "aux_data_hash", "type_info": "Bytea" }, { "ordinal": 19, - "name": "meta_parameters_hash", + "name": "pass_through_data_hash", "type_info": "Bytea" }, { "ordinal": 20, + "name": "meta_parameters_hash", + "type_info": "Bytea" + }, + { + "ordinal": 21, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 21, + "ordinal": 22, "name": "compressed_state_diffs", "type_info": "Bytea" }, { - "ordinal": 22, + "ordinal": 23, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 23, + "ordinal": 24, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 24, + "ordinal": 25, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 25, + "ordinal": 26, "name": "pubdata_input", "type_info": "Bytea" }, { - "ordinal": 26, + "ordinal": 27, + "name": "fee_address", + 
"type_info": "Bytea" + }, + { + "ordinal": 28, "name": "aggregation_root", "type_info": "Bytea" }, { - "ordinal": 27, + "ordinal": 29, "name": "local_root", "type_info": "Bytea" }, { - "ordinal": 28, + "ordinal": 30, "name": "state_diff_hash", "type_info": "Bytea" }, { - "ordinal": 29, + "ordinal": 31, "name": "inclusion_data", "type_info": "Bytea" } @@ -184,15 +194,17 @@ true, true, true, + true, false, true, true, true, + false, true, true, true, true ] }, - "hash": "4e1db16b582aa347dc33fccd8d8afa60b5ca8ce096bfb79172b1b55264f6c987" + "hash": "4bd1a4e612d10f2ca26068c140442f38816f163a3e3fba4fdbb81076b969e970" } diff --git a/core/lib/dal/.sqlx/query-454e16ddb5e85285d0c4b9013bcce5d464ecc55c80b54bc16040226df7e297bd.json b/core/lib/dal/.sqlx/query-4e994d519b9c75e64a74423f8c19fbde6eb6634d7a63005081ffc1eb6c28e9ec.json similarity index 76% rename from core/lib/dal/.sqlx/query-454e16ddb5e85285d0c4b9013bcce5d464ecc55c80b54bc16040226df7e297bd.json rename to core/lib/dal/.sqlx/query-4e994d519b9c75e64a74423f8c19fbde6eb6634d7a63005081ffc1eb6c28e9ec.json index 4a73fde57e2..804318120fc 100644 --- a/core/lib/dal/.sqlx/query-454e16ddb5e85285d0c4b9013bcce5d464ecc55c80b54bc16040226df7e297bd.json +++ b/core/lib/dal/.sqlx/query-4e994d519b9c75e64a74423f8c19fbde6eb6634d7a63005081ffc1eb6c28e9ec.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n l1_tx_count,\n l2_tx_count,\n timestamp,\n l2_to_l1_messages,\n bloom,\n priority_ops_onchain_data,\n used_contract_hashes,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n system_logs,\n pubdata_input\n FROM\n l1_batches\n WHERE\n number = $1\n ", + "query": "\n SELECT\n number,\n l1_tx_count,\n l2_tx_count,\n timestamp,\n l2_to_l1_messages,\n bloom,\n priority_ops_onchain_data,\n used_contract_hashes,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n protocol_version,\n system_logs,\n pubdata_input,\n fee_address\n FROM\n l1_batches\n WHERE\n is_sealed\n AND number = $1\n ", "describe": { "columns": [ { @@ -55,18 +55,28 @@ }, { "ordinal": 10, + "name": "evm_emulator_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 11, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 11, + "ordinal": 12, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 12, + "ordinal": 13, "name": "pubdata_input", "type_info": "Bytea" + }, + { + "ordinal": 14, + "name": "fee_address", + "type_info": "Bytea" } ], "parameters": { @@ -86,9 +96,11 @@ true, true, true, + true, false, - true + true, + false ] }, - "hash": "454e16ddb5e85285d0c4b9013bcce5d464ecc55c80b54bc16040226df7e297bd" + "hash": "4e994d519b9c75e64a74423f8c19fbde6eb6634d7a63005081ffc1eb6c28e9ec" } diff --git a/core/lib/dal/.sqlx/query-55c0349569786bac0204272961f3010bea7cee09a9538a4e275ea89f67704966.json b/core/lib/dal/.sqlx/query-55c0349569786bac0204272961f3010bea7cee09a9538a4e275ea89f67704966.json new file mode 100644 index 00000000000..2cd528a9f53 --- /dev/null +++ b/core/lib/dal/.sqlx/query-55c0349569786bac0204272961f3010bea7cee09a9538a4e275ea89f67704966.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "\n WITH\n available_batches AS (\n SELECT\n MAX(number) AS \"last_batch\"\n FROM\n l1_batches\n WHERE\n is_sealed\n ),\n \n processed_batches AS (\n SELECT\n COALESCE(MAX(l1_batch_number), $1) + $2 AS \"last_ready_batch\"\n FROM\n vm_runner_bwip\n WHERE\n time_taken IS NOT NULL\n )\n \n SELECT\n LEAST(last_batch, last_ready_batch) AS \"last_ready_batch!\"\n FROM\n available_batches\n FULL JOIN 
processed_batches ON TRUE\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "last_ready_batch!", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + }, + "nullable": [ + true + ] + }, + "hash": "55c0349569786bac0204272961f3010bea7cee09a9538a4e275ea89f67704966" +} diff --git a/core/lib/dal/.sqlx/query-5c7409ff9e413e7684cea5df6046f1a607a0bcc6864490c5961dd4e2ee12ed78.json b/core/lib/dal/.sqlx/query-5c7409ff9e413e7684cea5df6046f1a607a0bcc6864490c5961dd4e2ee12ed78.json deleted file mode 100644 index f34c4a548cb..00000000000 --- a/core/lib/dal/.sqlx/query-5c7409ff9e413e7684cea5df6046f1a607a0bcc6864490c5961dd4e2ee12ed78.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n attempts\n FROM\n tee_verifier_input_producer_jobs\n WHERE\n l1_batch_number = $1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "attempts", - "type_info": "Int2" - } - ], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [ - false - ] - }, - "hash": "5c7409ff9e413e7684cea5df6046f1a607a0bcc6864490c5961dd4e2ee12ed78" -} diff --git a/core/lib/dal/.sqlx/query-5dba5f8b178decfd77db0ca4f415e94ad6bdd84c31e4b2e0c629e51857533974.json b/core/lib/dal/.sqlx/query-5dba5f8b178decfd77db0ca4f415e94ad6bdd84c31e4b2e0c629e51857533974.json deleted file mode 100644 index c2d9fe2e1ac..00000000000 --- a/core/lib/dal/.sqlx/query-5dba5f8b178decfd77db0ca4f415e94ad6bdd84c31e4b2e0c629e51857533974.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n WITH\n available_batches AS (\n SELECT\n MAX(number) AS \"last_batch\"\n FROM\n l1_batches\n ),\n \n processed_batches AS (\n SELECT\n COALESCE(MAX(l1_batch_number), $1) + $2 AS \"last_ready_batch\"\n FROM\n vm_runner_bwip\n WHERE\n time_taken IS NOT NULL\n )\n \n SELECT\n LEAST(last_batch, last_ready_batch) AS \"last_ready_batch!\"\n FROM\n available_batches\n FULL JOIN processed_batches ON TRUE\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "last_ready_batch!", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Int8", - "Int8" - ] - }, - "nullable": [ - true - ] - }, - "hash": "5dba5f8b178decfd77db0ca4f415e94ad6bdd84c31e4b2e0c629e51857533974" -} diff --git a/core/lib/dal/.sqlx/query-5ddf39d930c11e13311c8a88b72f7cae7565e28f72f5ab3d0eb653fa0fbdff0a.json b/core/lib/dal/.sqlx/query-5ddf39d930c11e13311c8a88b72f7cae7565e28f72f5ab3d0eb653fa0fbdff0a.json new file mode 100644 index 00000000000..c95a5bc6bd4 --- /dev/null +++ b/core/lib/dal/.sqlx/query-5ddf39d930c11e13311c8a88b72f7cae7565e28f72f5ab3d0eb653fa0fbdff0a.json @@ -0,0 +1,23 @@ +{ + "db_name": "PostgreSQL", + "query": "\n WITH\n available_batches AS (\n SELECT\n MAX(number) AS \"last_batch\"\n FROM\n l1_batches\n WHERE\n is_sealed\n ),\n \n processed_batches AS (\n SELECT\n COALESCE(MAX(l1_batch_number), $1) + $2 AS \"last_ready_batch\"\n FROM\n vm_runner_protective_reads\n WHERE\n time_taken IS NOT NULL\n )\n \n SELECT\n LEAST(last_batch, last_ready_batch) AS \"last_ready_batch!\"\n FROM\n available_batches\n FULL JOIN processed_batches ON TRUE\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "last_ready_batch!", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8", + "Int8" + ] + }, + "nullable": [ + true + ] + }, + "hash": "5ddf39d930c11e13311c8a88b72f7cae7565e28f72f5ab3d0eb653fa0fbdff0a" +} diff --git a/core/lib/dal/.sqlx/query-2955e976281f9cbd98b7378c5ab52964b268b93c32fd280c49bf9f932884300d.json 
b/core/lib/dal/.sqlx/query-5e8fc8ee5b143a7e0053f7c6f8c93ba84478b6e56c95dfae6d8cc84e938e80c6.json similarity index 57% rename from core/lib/dal/.sqlx/query-2955e976281f9cbd98b7378c5ab52964b268b93c32fd280c49bf9f932884300d.json rename to core/lib/dal/.sqlx/query-5e8fc8ee5b143a7e0053f7c6f8c93ba84478b6e56c95dfae6d8cc84e938e80c6.json index 7c3a261d1f6..95957160124 100644 --- a/core/lib/dal/.sqlx/query-2955e976281f9cbd98b7378c5ab52964b268b93c32fd280c49bf9f932884300d.json +++ b/core/lib/dal/.sqlx/query-5e8fc8ee5b143a7e0053f7c6f8c93ba84478b6e56c95dfae6d8cc84e938e80c6.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n timestamp\n FROM\n l1_batches\n WHERE\n eth_prove_tx_id IS NULL\n AND number > 0\n ORDER BY\n number\n LIMIT\n 1\n ", + "query": "\n SELECT\n timestamp\n FROM\n l1_batches\n WHERE\n is_sealed\n AND eth_execute_tx_id IS NULL\n AND number > 0\n ORDER BY\n number\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -16,5 +16,5 @@ false ] }, - "hash": "2955e976281f9cbd98b7378c5ab52964b268b93c32fd280c49bf9f932884300d" + "hash": "5e8fc8ee5b143a7e0053f7c6f8c93ba84478b6e56c95dfae6d8cc84e938e80c6" } diff --git a/core/lib/dal/.sqlx/query-670f7d170122b6165ea521c482f2ec32d637a8c11af6472b9b390c6ca2b68495.json b/core/lib/dal/.sqlx/query-62e8330881b73917394384adbf73911add046315e5f8877bc57a34e3dadf9e37.json similarity index 76% rename from core/lib/dal/.sqlx/query-670f7d170122b6165ea521c482f2ec32d637a8c11af6472b9b390c6ca2b68495.json rename to core/lib/dal/.sqlx/query-62e8330881b73917394384adbf73911add046315e5f8877bc57a34e3dadf9e37.json index 74a426f2039..dfdb4b6c82e 100644 --- a/core/lib/dal/.sqlx/query-670f7d170122b6165ea521c482f2ec32d637a8c11af6472b9b390c6ca2b68495.json +++ b/core/lib/dal/.sqlx/query-62e8330881b73917394384adbf73911add046315e5f8877bc57a34e3dadf9e37.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n system_logs,\n compressed_state_diffs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n number = $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n 
ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND eth_prove_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", "describe": { "columns": [ { @@ -90,28 +90,28 @@ }, { "ordinal": 17, - "name": "aux_data_hash", + "name": "evm_emulator_code_hash", "type_info": "Bytea" }, { "ordinal": 18, - "name": "pass_through_data_hash", + "name": "aux_data_hash", "type_info": "Bytea" }, { "ordinal": 19, - "name": "meta_parameters_hash", + "name": "pass_through_data_hash", "type_info": "Bytea" }, { "ordinal": 20, - "name": "protocol_version", - "type_info": "Int4" + "name": "meta_parameters_hash", + "type_info": "Bytea" }, { "ordinal": 21, - "name": "system_logs", - "type_info": "ByteaArray" + "name": "protocol_version", + "type_info": "Int4" }, { "ordinal": 22, @@ -120,36 +120,46 @@ }, { "ordinal": 23, + "name": "system_logs", + "type_info": "ByteaArray" + }, + { + "ordinal": 24, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 24, + "ordinal": 25, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 25, + "ordinal": 26, "name": "pubdata_input", "type_info": "Bytea" }, { - "ordinal": 26, + "ordinal": 27, + "name": "fee_address", + "type_info": "Bytea" + }, + { + "ordinal": 28, "name": "aggregation_root", "type_info": "Bytea" }, { - "ordinal": 27, + "ordinal": 29, "name": "local_root", "type_info": "Bytea" }, { - "ordinal": 28, + "ordinal": 30, "name": "state_diff_hash", "type_info": "Bytea" }, { - "ordinal": 29, + "ordinal": 31, "name": "inclusion_data", "type_info": "Bytea" } @@ -181,16 +191,18 @@ true, true, true, - false, true, true, + false, + true, true, true, + false, true, true, true, true ] }, - "hash": "670f7d170122b6165ea521c482f2ec32d637a8c11af6472b9b390c6ca2b68495" + "hash": "62e8330881b73917394384adbf73911add046315e5f8877bc57a34e3dadf9e37" } diff --git a/core/lib/dal/.sqlx/query-6307b42717586a13ac8bc30fc3e5aea6710351dea1f1e57d73447476c3fcd199.json b/core/lib/dal/.sqlx/query-6307b42717586a13ac8bc30fc3e5aea6710351dea1f1e57d73447476c3fcd199.json deleted file mode 100644 index 01ede1d8643..00000000000 --- a/core/lib/dal/.sqlx/query-6307b42717586a13ac8bc30fc3e5aea6710351dea1f1e57d73447476c3fcd199.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n tee_verifier_input_producer_jobs (\n l1_batch_number, status, created_at, updated_at\n )\n VALUES\n ($1, $2, NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO NOTHING\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - { - "Custom": { - "name": "tee_verifier_input_producer_job_status", - "kind": { - "Enum": [ - "Queued", - "ManuallySkipped", - "InProgress", - "Successful", - "Failed" - ] - } - } - } - ] - }, - "nullable": [] - }, - "hash": "6307b42717586a13ac8bc30fc3e5aea6710351dea1f1e57d73447476c3fcd199" -} diff --git a/core/lib/dal/.sqlx/query-746d8b62d576b4b9596458aa865e0294e53eb37c1a2dbcc3044b8311200d549a.json b/core/lib/dal/.sqlx/query-746d8b62d576b4b9596458aa865e0294e53eb37c1a2dbcc3044b8311200d549a.json new file mode 100644 index 00000000000..306f193861f --- /dev/null +++ b/core/lib/dal/.sqlx/query-746d8b62d576b4b9596458aa865e0294e53eb37c1a2dbcc3044b8311200d549a.json @@ -0,0 +1,33 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE l1_batches\n SET\n l1_tx_count = $2,\n l2_tx_count = $3,\n l2_to_l1_messages = $4,\n bloom = $5,\n priority_ops_onchain_data = $6,\n predicted_commit_gas_cost = $7,\n predicted_prove_gas_cost = $8,\n predicted_execute_gas_cost = $9,\n 
initial_bootloader_heap_content = $10,\n used_contract_hashes = $11,\n bootloader_code_hash = $12,\n default_aa_code_hash = $13,\n evm_emulator_code_hash = $14,\n protocol_version = $15,\n system_logs = $16,\n storage_refunds = $17,\n pubdata_costs = $18,\n pubdata_input = $19,\n predicted_circuits_by_type = $20,\n updated_at = NOW(),\n is_sealed = TRUE\n WHERE\n number = $1\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int4", + "Int4", + "ByteaArray", + "Bytea", + "ByteaArray", + "Int8", + "Int8", + "Int8", + "Jsonb", + "Jsonb", + "Bytea", + "Bytea", + "Bytea", + "Int4", + "ByteaArray", + "Int8Array", + "Int8Array", + "Bytea", + "Jsonb" + ] + }, + "nullable": [] + }, + "hash": "746d8b62d576b4b9596458aa865e0294e53eb37c1a2dbcc3044b8311200d549a" +} diff --git a/core/lib/dal/.sqlx/query-9ea417e4ffef9e5d158089723692ba43fe8560be0c4aa7baa49e71b2a28187e7.json b/core/lib/dal/.sqlx/query-7553d8013d101af0451830d26b7d7bd02627ebaf2df7c5ad517cb60a243182d2.json similarity index 60% rename from core/lib/dal/.sqlx/query-9ea417e4ffef9e5d158089723692ba43fe8560be0c4aa7baa49e71b2a28187e7.json rename to core/lib/dal/.sqlx/query-7553d8013d101af0451830d26b7d7bd02627ebaf2df7c5ad517cb60a243182d2.json index ebed8e99f5b..6cc2e22382d 100644 --- a/core/lib/dal/.sqlx/query-9ea417e4ffef9e5d158089723692ba43fe8560be0c4aa7baa49e71b2a28187e7.json +++ b/core/lib/dal/.sqlx/query-7553d8013d101af0451830d26b7d7bd02627ebaf2df7c5ad517cb60a243182d2.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n miniblocks.number,\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n ),\n (\n SELECT\n MAX(l1_batch_number) + 1\n FROM\n snapshot_recovery\n )\n ) AS \"l1_batch_number!\",\n (miniblocks.l1_tx_count + miniblocks.l2_tx_count) AS \"tx_count!\",\n miniblocks.timestamp,\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.fair_pubdata_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n miniblocks.virtual_blocks,\n miniblocks.hash,\n miniblocks.protocol_version AS \"protocol_version!\",\n miniblocks.fee_account_address AS \"fee_account_address!\",\n miniblocks.l2_da_validator_address AS \"l2_da_validator_address!\",\n miniblocks.pubdata_type AS \"pubdata_type!\"\n FROM\n miniblocks\n WHERE\n miniblocks.number BETWEEN $1 AND $2\n ", + "query": "\n SELECT\n miniblocks.number,\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n WHERE\n is_sealed\n ),\n (\n SELECT\n MAX(l1_batch_number) + 1\n FROM\n snapshot_recovery\n )\n ) AS \"l1_batch_number!\",\n (miniblocks.l1_tx_count + miniblocks.l2_tx_count) AS \"tx_count!\",\n miniblocks.timestamp,\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.fair_pubdata_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n miniblocks.evm_emulator_code_hash,\n miniblocks.virtual_blocks,\n miniblocks.hash,\n miniblocks.protocol_version AS \"protocol_version!\",\n miniblocks.fee_account_address AS \"fee_account_address!\",\n miniblocks.l2_da_validator_address AS \"l2_da_validator_address!\",\n miniblocks.pubdata_type AS \"pubdata_type!\"\n FROM\n miniblocks\n WHERE\n miniblocks.number BETWEEN $1 AND $2\n ", "describe": { "columns": [ { @@ -50,31 +50,36 @@ }, { "ordinal": 9, + "name": "evm_emulator_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 10, "name": "virtual_blocks", "type_info": "Int8" }, { - "ordinal": 10, + "ordinal": 11, "name": "hash", "type_info": "Bytea" }, 
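The 746d8b62 entry above is the sealing half of a new two-phase `l1_batches` lifecycle: instead of inserting a fully populated row at seal time (the old 9c6e1d3b INSERT, deleted further down), the row is created up front as unsealed and then finalized in place. A condensed sketch with the column list abbreviated (the cached query sets the full set via `$2..$20`):

```sql
-- Finalize the pre-inserted open batch row (condensed; see the full
-- parameter list in the 746d8b62 cache entry).
UPDATE l1_batches
SET
    l1_tx_count = $2,
    l2_tx_count = $3,
    -- ... code hashes, system logs, refunds, pubdata ($4..$20) ...
    updated_at = NOW(),
    is_sealed = TRUE  -- flips the row from "open" to "sealed"
WHERE number = $1;
```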
{ - "ordinal": 11, + "ordinal": 12, "name": "protocol_version!", "type_info": "Int4" }, { - "ordinal": 12, + "ordinal": 13, "name": "fee_account_address!", "type_info": "Bytea" }, { - "ordinal": 13, + "ordinal": 14, "name": "l2_da_validator_address!", "type_info": "Bytea" }, { - "ordinal": 14, + "ordinal": 15, "name": "pubdata_type!", "type_info": "Text" } @@ -95,6 +100,7 @@ true, true, true, + true, false, false, true, @@ -103,5 +109,5 @@ false ] }, - "hash": "9ea417e4ffef9e5d158089723692ba43fe8560be0c4aa7baa49e71b2a28187e7" + "hash": "7553d8013d101af0451830d26b7d7bd02627ebaf2df7c5ad517cb60a243182d2" } diff --git a/core/lib/dal/.sqlx/query-66c2d8f27715ee11b0a7c4b9fd7e2e6718eea8ba12757ec77889233542b15b40.json b/core/lib/dal/.sqlx/query-77864e5eb5eada8edf8f4457aa153369701d7cd5f75ca031bf77ca27d0437cb9.json similarity index 78% rename from core/lib/dal/.sqlx/query-66c2d8f27715ee11b0a7c4b9fd7e2e6718eea8ba12757ec77889233542b15b40.json rename to core/lib/dal/.sqlx/query-77864e5eb5eada8edf8f4457aa153369701d7cd5f75ca031bf77ca27d0437cb9.json index e26688c658e..f4e08abe31c 100644 --- a/core/lib/dal/.sqlx/query-66c2d8f27715ee11b0a7c4b9fd7e2e6718eea8ba12757ec77889233542b15b40.json +++ b/core/lib/dal/.sqlx/query-77864e5eb5eada8edf8f4457aa153369701d7cd5f75ca031bf77ca27d0437cb9.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND eth_prove_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n system_logs,\n compressed_state_diffs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n is_sealed\n AND number = $1\n ", "describe": { "columns": [ { @@ -90,28 +90,28 @@ }, { "ordinal": 17, - "name": "aux_data_hash", + "name": "evm_emulator_code_hash", "type_info": "Bytea" }, { "ordinal": 18, - "name": "pass_through_data_hash", + "name": "aux_data_hash", "type_info": "Bytea" }, { "ordinal": 19, - "name": "meta_parameters_hash", + "name": 
"pass_through_data_hash", "type_info": "Bytea" }, { "ordinal": 20, - "name": "protocol_version", - "type_info": "Int4" + "name": "meta_parameters_hash", + "type_info": "Bytea" }, { "ordinal": 21, - "name": "compressed_state_diffs", - "type_info": "Bytea" + "name": "protocol_version", + "type_info": "Int4" }, { "ordinal": 22, @@ -120,36 +120,46 @@ }, { "ordinal": 23, - "name": "events_queue_commitment", + "name": "compressed_state_diffs", "type_info": "Bytea" }, { "ordinal": 24, - "name": "bootloader_initial_content_commitment", + "name": "events_queue_commitment", "type_info": "Bytea" }, { "ordinal": 25, - "name": "pubdata_input", + "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { "ordinal": 26, - "name": "aggregation_root", + "name": "pubdata_input", "type_info": "Bytea" }, { "ordinal": 27, - "name": "local_root", + "name": "fee_address", "type_info": "Bytea" }, { "ordinal": 28, - "name": "state_diff_hash", + "name": "aggregation_root", "type_info": "Bytea" }, { "ordinal": 29, + "name": "local_root", + "type_info": "Bytea" + }, + { + "ordinal": 30, + "name": "state_diff_hash", + "type_info": "Bytea" + }, + { + "ordinal": 31, "name": "inclusion_data", "type_info": "Bytea" } @@ -187,10 +197,12 @@ true, true, true, + false, + true, true, true, true ] }, - "hash": "66c2d8f27715ee11b0a7c4b9fd7e2e6718eea8ba12757ec77889233542b15b40" + "hash": "77864e5eb5eada8edf8f4457aa153369701d7cd5f75ca031bf77ca27d0437cb9" } diff --git a/core/lib/dal/.sqlx/query-7d8c19c3568c03ec3e4a788b22c233f6503bc79cc9f809d35c558e275ba117ba.json b/core/lib/dal/.sqlx/query-7d8c19c3568c03ec3e4a788b22c233f6503bc79cc9f809d35c558e275ba117ba.json new file mode 100644 index 00000000000..f89f531c446 --- /dev/null +++ b/core/lib/dal/.sqlx/query-7d8c19c3568c03ec3e4a788b22c233f6503bc79cc9f809d35c558e275ba117ba.json @@ -0,0 +1,33 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n miniblocks (\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address,\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n logs_bloom,\n l2_da_validator_address,\n pubdata_type,\n created_at,\n updated_at\n )\n VALUES\n (\n $1,\n $2,\n $3,\n $4,\n $5,\n $6,\n $7,\n $8,\n $9,\n $10,\n $11,\n $12,\n $13,\n $14,\n $15,\n $16,\n $17,\n $18,\n $19,\n $20,\n NOW(),\n NOW()\n )\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int8", + "Bytea", + "Int4", + "Int4", + "Bytea", + "Numeric", + "Int8", + "Int8", + "Int8", + "Bytea", + "Bytea", + "Bytea", + "Int4", + "Int8", + "Int8", + "Int8", + "Bytea", + "Bytea", + "Text" + ] + }, + "nullable": [] + }, + "hash": "7d8c19c3568c03ec3e4a788b22c233f6503bc79cc9f809d35c558e275ba117ba" +} diff --git a/core/lib/dal/.sqlx/query-8435ed4ee2a9b962116ecfa522f4ba52c9a0e64d1badc39cc2fef29b1468621a.json b/core/lib/dal/.sqlx/query-8435ed4ee2a9b962116ecfa522f4ba52c9a0e64d1badc39cc2fef29b1468621a.json new file mode 100644 index 00000000000..df856b97702 --- /dev/null +++ b/core/lib/dal/.sqlx/query-8435ed4ee2a9b962116ecfa522f4ba52c9a0e64d1badc39cc2fef29b1468621a.json @@ -0,0 +1,56 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n number,\n timestamp,\n protocol_version,\n fee_address,\n l1_gas_price,\n l2_fair_gas_price,\n fair_pubdata_price\n FROM\n l1_batches\n WHERE\n NOT is_sealed\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "number", + 
"type_info": "Int8" + }, + { + "ordinal": 1, + "name": "timestamp", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "protocol_version", + "type_info": "Int4" + }, + { + "ordinal": 3, + "name": "fee_address", + "type_info": "Bytea" + }, + { + "ordinal": 4, + "name": "l1_gas_price", + "type_info": "Int8" + }, + { + "ordinal": 5, + "name": "l2_fair_gas_price", + "type_info": "Int8" + }, + { + "ordinal": 6, + "name": "fair_pubdata_price", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + false, + false, + true, + false, + false, + false, + false + ] + }, + "hash": "8435ed4ee2a9b962116ecfa522f4ba52c9a0e64d1badc39cc2fef29b1468621a" +} diff --git a/core/lib/dal/.sqlx/query-c4e81f78ef078cbed2d588be91418997f003e26768c5de663160d89af54a1ee7.json b/core/lib/dal/.sqlx/query-89e53b297b2b1c0dfb263f9175cb70e7a5fe02b60d5d23e4d153190138112c5b.json similarity index 62% rename from core/lib/dal/.sqlx/query-c4e81f78ef078cbed2d588be91418997f003e26768c5de663160d89af54a1ee7.json rename to core/lib/dal/.sqlx/query-89e53b297b2b1c0dfb263f9175cb70e7a5fe02b60d5d23e4d153190138112c5b.json index 162c722add9..d944b6abf9e 100644 --- a/core/lib/dal/.sqlx/query-c4e81f78ef078cbed2d588be91418997f003e26768c5de663160d89af54a1ee7.json +++ b/core/lib/dal/.sqlx/query-89e53b297b2b1c0dfb263f9175cb70e7a5fe02b60d5d23e4d153190138112c5b.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n protocol_versions.id AS \"minor!\",\n protocol_versions.timestamp,\n protocol_versions.bootloader_code_hash,\n protocol_versions.default_account_code_hash,\n protocol_patches.patch,\n protocol_patches.snark_wrapper_vk_hash\n FROM\n protocol_versions\n JOIN protocol_patches ON protocol_patches.minor = protocol_versions.id\n WHERE\n id = $1\n ORDER BY\n protocol_patches.patch DESC\n LIMIT\n 1\n ", + "query": "\n SELECT\n protocol_versions.id AS \"minor!\",\n protocol_versions.timestamp,\n protocol_versions.bootloader_code_hash,\n protocol_versions.default_account_code_hash,\n protocol_versions.evm_emulator_code_hash,\n protocol_patches.patch,\n protocol_patches.snark_wrapper_vk_hash\n FROM\n protocol_versions\n JOIN protocol_patches ON protocol_patches.minor = protocol_versions.id\n WHERE\n id = $1\n ORDER BY\n protocol_patches.patch DESC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -25,11 +25,16 @@ }, { "ordinal": 4, + "name": "evm_emulator_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 5, "name": "patch", "type_info": "Int4" }, { - "ordinal": 5, + "ordinal": 6, "name": "snark_wrapper_vk_hash", "type_info": "Bytea" } @@ -44,9 +49,10 @@ false, false, false, + true, false, false ] }, - "hash": "c4e81f78ef078cbed2d588be91418997f003e26768c5de663160d89af54a1ee7" + "hash": "89e53b297b2b1c0dfb263f9175cb70e7a5fe02b60d5d23e4d153190138112c5b" } diff --git a/core/lib/dal/.sqlx/query-0c95fbfb3a816bd49fd06e3a4f0a52daa202279bf612a9278f663deb78bc6e41.json b/core/lib/dal/.sqlx/query-8cfde47f25cf65030f34e70edf83d3b4c514a18d8a33ec978d3e8007af8d0c20.json similarity index 72% rename from core/lib/dal/.sqlx/query-0c95fbfb3a816bd49fd06e3a4f0a52daa202279bf612a9278f663deb78bc6e41.json rename to core/lib/dal/.sqlx/query-8cfde47f25cf65030f34e70edf83d3b4c514a18d8a33ec978d3e8007af8d0c20.json index 100761f54b4..ea2b51d69d1 100644 --- a/core/lib/dal/.sqlx/query-0c95fbfb3a816bd49fd06e3a4f0a52daa202279bf612a9278f663deb78bc6e41.json +++ b/core/lib/dal/.sqlx/query-8cfde47f25cf65030f34e70edf83d3b4c514a18d8a33ec978d3e8007af8d0c20.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n 
protocol_version\n FROM\n l1_batches\n WHERE\n number = $1\n ", + "query": "\n SELECT\n protocol_version\n FROM\n l1_batches\n WHERE\n is_sealed\n AND number = $1\n ", "describe": { "columns": [ { @@ -18,5 +18,5 @@ true ] }, - "hash": "0c95fbfb3a816bd49fd06e3a4f0a52daa202279bf612a9278f663deb78bc6e41" + "hash": "8cfde47f25cf65030f34e70edf83d3b4c514a18d8a33ec978d3e8007af8d0c20" } diff --git a/core/lib/dal/.sqlx/query-38a8b00e320b16e99f6ea0e5954e2f7e49cd6600bd3d56cf41795c2c9e082e4c.json b/core/lib/dal/.sqlx/query-8f588aa010f42c3b0b68efe6e0e8526c586708c812dc00b10bf3cd8aa871d9c2.json similarity index 73% rename from core/lib/dal/.sqlx/query-38a8b00e320b16e99f6ea0e5954e2f7e49cd6600bd3d56cf41795c2c9e082e4c.json rename to core/lib/dal/.sqlx/query-8f588aa010f42c3b0b68efe6e0e8526c586708c812dc00b10bf3cd8aa871d9c2.json index 9b989a9ba25..82af00b5606 100644 --- a/core/lib/dal/.sqlx/query-38a8b00e320b16e99f6ea0e5954e2f7e49cd6600bd3d56cf41795c2c9e082e4c.json +++ b/core/lib/dal/.sqlx/query-8f588aa010f42c3b0b68efe6e0e8526c586708c812dc00b10bf3cd8aa871d9c2.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n MAX(number) AS \"number\"\n FROM\n l1_batches\n ", + "query": "\n SELECT\n MAX(number) AS \"number\"\n FROM\n l1_batches\n WHERE\n is_sealed\n ", "describe": { "columns": [ { @@ -16,5 +16,5 @@ null ] }, - "hash": "38a8b00e320b16e99f6ea0e5954e2f7e49cd6600bd3d56cf41795c2c9e082e4c" + "hash": "8f588aa010f42c3b0b68efe6e0e8526c586708c812dc00b10bf3cd8aa871d9c2" } diff --git a/core/lib/dal/.sqlx/query-96adbd0c9a5786a6cca74324353c7d8bbdbee28d4ac2a2c0a331298c5e39b71d.json b/core/lib/dal/.sqlx/query-96adbd0c9a5786a6cca74324353c7d8bbdbee28d4ac2a2c0a331298c5e39b71d.json deleted file mode 100644 index 3b8accb4fda..00000000000 --- a/core/lib/dal/.sqlx/query-96adbd0c9a5786a6cca74324353c7d8bbdbee28d4ac2a2c0a331298c5e39b71d.json +++ /dev/null @@ -1,28 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n protocol_version,\n index_in_block\n FROM\n transactions\n INNER JOIN miniblocks ON transactions.miniblock_number = miniblocks.number\n WHERE\n transactions.hash = $1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "protocol_version", - "type_info": "Int4" - }, - { - "ordinal": 1, - "name": "index_in_block", - "type_info": "Int4" - } - ], - "parameters": { - "Left": [ - "Bytea" - ] - }, - "nullable": [ - true, - true - ] - }, - "hash": "96adbd0c9a5786a6cca74324353c7d8bbdbee28d4ac2a2c0a331298c5e39b71d" -} diff --git a/core/lib/dal/.sqlx/query-86cbe509988c8775bcf738d5cb1edac2f0db60c263c1564b64c717f8ae53e44d.json b/core/lib/dal/.sqlx/query-9b9e5bf97503ed64128dfb16564c019baa12323fd3ef381fdacf290a3db3ec77.json similarity index 69% rename from core/lib/dal/.sqlx/query-86cbe509988c8775bcf738d5cb1edac2f0db60c263c1564b64c717f8ae53e44d.json rename to core/lib/dal/.sqlx/query-9b9e5bf97503ed64128dfb16564c019baa12323fd3ef381fdacf290a3db3ec77.json index f9799079442..08e3b4b17a9 100644 --- a/core/lib/dal/.sqlx/query-86cbe509988c8775bcf738d5cb1edac2f0db60c263c1564b64c717f8ae53e44d.json +++ b/core/lib/dal/.sqlx/query-9b9e5bf97503ed64128dfb16564c019baa12323fd3ef381fdacf290a3db3ec77.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n MIN(number) AS \"min?\"\n FROM\n l1_batches\n WHERE\n protocol_version = $1\n ", + "query": "\n SELECT\n MIN(number) AS \"min?\"\n FROM\n l1_batches\n WHERE\n is_sealed\n AND protocol_version = $1\n ", "describe": { "columns": [ { @@ -18,5 +18,5 @@ null ] }, - "hash": "86cbe509988c8775bcf738d5cb1edac2f0db60c263c1564b64c717f8ae53e44d" + "hash": 
"9b9e5bf97503ed64128dfb16564c019baa12323fd3ef381fdacf290a3db3ec77" } diff --git a/core/lib/dal/.sqlx/query-9c6e1d3bd95d03ef32835dc454663f500b8358757e5a453cf0a87d5cd9620d7e.json b/core/lib/dal/.sqlx/query-9c6e1d3bd95d03ef32835dc454663f500b8358757e5a453cf0a87d5cd9620d7e.json deleted file mode 100644 index d2c999a70d4..00000000000 --- a/core/lib/dal/.sqlx/query-9c6e1d3bd95d03ef32835dc454663f500b8358757e5a453cf0a87d5cd9620d7e.json +++ /dev/null @@ -1,33 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n l1_batches (\n number,\n l1_tx_count,\n l2_tx_count,\n timestamp,\n l2_to_l1_messages,\n bloom,\n priority_ops_onchain_data,\n predicted_commit_gas_cost,\n predicted_prove_gas_cost,\n predicted_execute_gas_cost,\n initial_bootloader_heap_content,\n used_contract_hashes,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n system_logs,\n storage_refunds,\n pubdata_costs,\n pubdata_input,\n predicted_circuits_by_type,\n created_at,\n updated_at\n )\n VALUES\n (\n $1,\n $2,\n $3,\n $4,\n $5,\n $6,\n $7,\n $8,\n $9,\n $10,\n $11,\n $12,\n $13,\n $14,\n $15,\n $16,\n $17,\n $18,\n $19,\n $20,\n NOW(),\n NOW()\n )\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Int4", - "Int4", - "Int8", - "ByteaArray", - "Bytea", - "ByteaArray", - "Int8", - "Int8", - "Int8", - "Jsonb", - "Jsonb", - "Bytea", - "Bytea", - "Int4", - "ByteaArray", - "Int8Array", - "Int8Array", - "Bytea", - "Jsonb" - ] - }, - "nullable": [] - }, - "hash": "9c6e1d3bd95d03ef32835dc454663f500b8358757e5a453cf0a87d5cd9620d7e" -} diff --git a/core/lib/dal/.sqlx/query-dc9a3821560030a8daf8dbdafe5f52aed204a20c67a6b959b16c2a60c745321e.json b/core/lib/dal/.sqlx/query-a42121cd85daeb95ee268ba5cff1806fcc54d73216a7dc54be6ba210ef02d789.json similarity index 71% rename from core/lib/dal/.sqlx/query-dc9a3821560030a8daf8dbdafe5f52aed204a20c67a6b959b16c2a60c745321e.json rename to core/lib/dal/.sqlx/query-a42121cd85daeb95ee268ba5cff1806fcc54d73216a7dc54be6ba210ef02d789.json index 269c3ebb328..9a93ba45978 100644 --- a/core/lib/dal/.sqlx/query-dc9a3821560030a8daf8dbdafe5f52aed204a20c67a6b959b16c2a60c745321e.json +++ b/core/lib/dal/.sqlx/query-a42121cd85daeb95ee268ba5cff1806fcc54d73216a7dc54be6ba210ef02d789.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n system_logs,\n compressed_state_diffs,\n protocol_version,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n (\n SELECT\n l1_batches.*,\n ROW_NUMBER() OVER (\n ORDER BY\n number ASC\n ) AS row_number\n FROM\n l1_batches\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND l1_batches.skip_proof = TRUE\n AND l1_batches.number > $1\n ORDER BY\n number\n LIMIT\n $2\n ) inn\n LEFT JOIN commitments ON commitments.l1_batch_number = inn.number\n LEFT JOIN data_availability ON data_availability.l1_batch_number = inn.number\n WHERE\n number - row_number = $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n 
l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n system_logs,\n compressed_state_diffs,\n protocol_version,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n (\n SELECT\n l1_batches.*,\n ROW_NUMBER() OVER (\n ORDER BY\n number ASC\n ) AS row_number\n FROM\n l1_batches\n WHERE\n eth_commit_tx_id IS NOT NULL\n AND l1_batches.skip_proof = TRUE\n AND l1_batches.number > $1\n ORDER BY\n number\n LIMIT\n $2\n ) inn\n LEFT JOIN commitments ON commitments.l1_batch_number = inn.number\n LEFT JOIN data_availability ON data_availability.l1_batch_number = inn.number\n WHERE\n number - row_number = $1\n ", "describe": { "columns": [ { @@ -90,66 +90,76 @@ }, { "ordinal": 17, - "name": "aux_data_hash", + "name": "evm_emulator_code_hash", "type_info": "Bytea" }, { "ordinal": 18, - "name": "pass_through_data_hash", + "name": "aux_data_hash", "type_info": "Bytea" }, { "ordinal": 19, - "name": "meta_parameters_hash", + "name": "pass_through_data_hash", "type_info": "Bytea" }, { "ordinal": 20, + "name": "meta_parameters_hash", + "type_info": "Bytea" + }, + { + "ordinal": 21, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 21, + "ordinal": 22, "name": "compressed_state_diffs", "type_info": "Bytea" }, { - "ordinal": 22, + "ordinal": 23, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 23, + "ordinal": 24, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 24, + "ordinal": 25, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 25, + "ordinal": 26, "name": "pubdata_input", "type_info": "Bytea" }, { - "ordinal": 26, + "ordinal": 27, + "name": "fee_address", + "type_info": "Bytea" + }, + { + "ordinal": 28, "name": "aggregation_root", "type_info": "Bytea" }, { - "ordinal": 27, + "ordinal": 29, "name": "local_root", "type_info": "Bytea" }, { - "ordinal": 28, + "ordinal": 30, "name": "state_diff_hash", "type_info": "Bytea" }, { - "ordinal": 29, + "ordinal": 31, "name": "inclusion_data", "type_info": "Bytea" } @@ -181,17 +191,19 @@ true, true, true, + true, false, true, true, true, true, true, + false, true, true, true, true ] }, - "hash": "dc9a3821560030a8daf8dbdafe5f52aed204a20c67a6b959b16c2a60c745321e" + "hash": "a42121cd85daeb95ee268ba5cff1806fcc54d73216a7dc54be6ba210ef02d789" } diff --git a/core/lib/dal/.sqlx/query-43c7e352d09f69de1a182196aea4de79b67833f17d252b5b0e8e00cd6e75b5c1.json b/core/lib/dal/.sqlx/query-a47eee902a0109b072365178e073ac6f0b00c05229a0bd40902d5fcb1c1bf026.json similarity index 73% rename from core/lib/dal/.sqlx/query-43c7e352d09f69de1a182196aea4de79b67833f17d252b5b0e8e00cd6e75b5c1.json rename to core/lib/dal/.sqlx/query-a47eee902a0109b072365178e073ac6f0b00c05229a0bd40902d5fcb1c1bf026.json index 56fcdb38943..9a1b043e573 100644 --- a/core/lib/dal/.sqlx/query-43c7e352d09f69de1a182196aea4de79b67833f17d252b5b0e8e00cd6e75b5c1.json +++ b/core/lib/dal/.sqlx/query-a47eee902a0109b072365178e073ac6f0b00c05229a0bd40902d5fcb1c1bf026.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n MIN(number) AS \"number\"\n FROM\n l1_batches\n ", + "query": "\n SELECT\n MIN(number) AS \"number\"\n 
FROM\n l1_batches\n WHERE\n is_sealed\n ", "describe": { "columns": [ { @@ -16,5 +16,5 @@ null ] }, - "hash": "43c7e352d09f69de1a182196aea4de79b67833f17d252b5b0e8e00cd6e75b5c1" + "hash": "a47eee902a0109b072365178e073ac6f0b00c05229a0bd40902d5fcb1c1bf026" } diff --git a/core/lib/dal/.sqlx/query-2d467a96fd065137d1038998ae5f8dcb1642688940bc52831498479b250de2b1.json b/core/lib/dal/.sqlx/query-a88b113b5dc06ac990a66202b3c05e2c2f10d5cbdb03d02c3c541f7eaa1f58a6.json similarity index 73% rename from core/lib/dal/.sqlx/query-2d467a96fd065137d1038998ae5f8dcb1642688940bc52831498479b250de2b1.json rename to core/lib/dal/.sqlx/query-a88b113b5dc06ac990a66202b3c05e2c2f10d5cbdb03d02c3c541f7eaa1f58a6.json index 81ae6c590f9..28ffcc5ae46 100644 --- a/core/lib/dal/.sqlx/query-2d467a96fd065137d1038998ae5f8dcb1642688940bc52831498479b250de2b1.json +++ b/core/lib/dal/.sqlx/query-a88b113b5dc06ac990a66202b3c05e2c2f10d5cbdb03d02c3c541f7eaa1f58a6.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n WITH\n mb AS (\n SELECT\n l1_gas_price,\n l2_fair_gas_price,\n fair_pubdata_price\n FROM\n miniblocks\n WHERE\n l1_batch_number = $1\n LIMIT\n 1\n )\n \n SELECT\n l1_batches.number,\n l1_batches.timestamp,\n l1_batches.l1_tx_count,\n l1_batches.l2_tx_count,\n l1_batches.hash AS \"root_hash?\",\n commit_tx.tx_hash AS \"commit_tx_hash?\",\n commit_tx.confirmed_at AS \"committed_at?\",\n prove_tx.tx_hash AS \"prove_tx_hash?\",\n prove_tx.confirmed_at AS \"proven_at?\",\n execute_tx.tx_hash AS \"execute_tx_hash?\",\n execute_tx.confirmed_at AS \"executed_at?\",\n mb.l1_gas_price,\n mb.l2_fair_gas_price,\n mb.fair_pubdata_price,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash\n FROM\n l1_batches\n INNER JOIN mb ON TRUE\n LEFT JOIN eth_txs_history AS commit_tx\n ON (\n l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id\n AND commit_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS prove_tx\n ON (\n l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id\n AND prove_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS execute_tx\n ON (\n l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id\n AND execute_tx.confirmed_at IS NOT NULL\n )\n WHERE\n l1_batches.number = $1\n ", + "query": "\n WITH\n mb AS (\n SELECT\n l1_gas_price,\n l2_fair_gas_price,\n fair_pubdata_price\n FROM\n miniblocks\n WHERE\n l1_batch_number = $1\n LIMIT\n 1\n )\n \n SELECT\n l1_batches.number,\n l1_batches.timestamp,\n l1_batches.l1_tx_count,\n l1_batches.l2_tx_count,\n l1_batches.hash AS \"root_hash?\",\n commit_tx.tx_hash AS \"commit_tx_hash?\",\n commit_tx.confirmed_at AS \"committed_at?\",\n prove_tx.tx_hash AS \"prove_tx_hash?\",\n prove_tx.confirmed_at AS \"proven_at?\",\n execute_tx.tx_hash AS \"execute_tx_hash?\",\n execute_tx.confirmed_at AS \"executed_at?\",\n mb.l1_gas_price,\n mb.l2_fair_gas_price,\n mb.fair_pubdata_price,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash\n FROM\n l1_batches\n INNER JOIN mb ON TRUE\n LEFT JOIN eth_txs_history AS commit_tx\n ON (\n l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id\n AND commit_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS prove_tx\n ON (\n l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id\n AND prove_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS execute_tx\n ON (\n l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id\n AND execute_tx.confirmed_at IS NOT NULL\n )\n WHERE\n l1_batches.number = $1\n ", "describe": { "columns": [ { @@ -82,6 +82,11 @@ "ordinal": 15, "name": 
"default_aa_code_hash", "type_info": "Bytea" + }, + { + "ordinal": 16, + "name": "evm_emulator_code_hash", + "type_info": "Bytea" } ], "parameters": { @@ -105,8 +110,9 @@ false, true, true, + true, true ] }, - "hash": "2d467a96fd065137d1038998ae5f8dcb1642688940bc52831498479b250de2b1" + "hash": "a88b113b5dc06ac990a66202b3c05e2c2f10d5cbdb03d02c3c541f7eaa1f58a6" } diff --git a/core/lib/dal/.sqlx/query-aa68653ef4ff7e0cfddfc0a7c8f3c39e774c405508e73e77cdd0c01f924c97c0.json b/core/lib/dal/.sqlx/query-aa68653ef4ff7e0cfddfc0a7c8f3c39e774c405508e73e77cdd0c01f924c97c0.json deleted file mode 100644 index b17b5828211..00000000000 --- a/core/lib/dal/.sqlx/query-aa68653ef4ff7e0cfddfc0a7c8f3c39e774c405508e73e77cdd0c01f924c97c0.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE tee_proof_generation_details\n SET\n status = $1,\n updated_at = NOW(),\n prover_taken_at = NOW()\n WHERE\n tee_type = $2\n AND l1_batch_number = (\n SELECT\n proofs.l1_batch_number\n FROM\n tee_proof_generation_details AS proofs\n JOIN\n tee_verifier_input_producer_jobs AS inputs\n ON proofs.l1_batch_number = inputs.l1_batch_number\n WHERE\n inputs.status = $3\n AND (\n proofs.status = $4\n OR (\n proofs.status = $1\n AND proofs.prover_taken_at < NOW() - $5::INTERVAL\n )\n )\n AND proofs.l1_batch_number >= $6\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n tee_proof_generation_details.l1_batch_number\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "l1_batch_number", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Text", - "Text", - { - "Custom": { - "name": "tee_verifier_input_producer_job_status", - "kind": { - "Enum": [ - "Queued", - "ManuallySkipped", - "InProgress", - "Successful", - "Failed" - ] - } - } - }, - "Text", - "Interval", - "Int8" - ] - }, - "nullable": [ - false - ] - }, - "hash": "aa68653ef4ff7e0cfddfc0a7c8f3c39e774c405508e73e77cdd0c01f924c97c0" -} diff --git a/core/lib/dal/.sqlx/query-048f255042f0a75c04bc19369c4b0dd75acbe4248159d2fb62d9e9878c158016.json b/core/lib/dal/.sqlx/query-b23f9879be394270a0985c082fd2e82c5aa84c85b9486e81261d17901a786917.json similarity index 54% rename from core/lib/dal/.sqlx/query-048f255042f0a75c04bc19369c4b0dd75acbe4248159d2fb62d9e9878c158016.json rename to core/lib/dal/.sqlx/query-b23f9879be394270a0985c082fd2e82c5aa84c85b9486e81261d17901a786917.json index 8c41c0ab976..9d9fa72595d 100644 --- a/core/lib/dal/.sqlx/query-048f255042f0a75c04bc19369c4b0dd75acbe4248159d2fb62d9e9878c158016.json +++ b/core/lib/dal/.sqlx/query-b23f9879be394270a0985c082fd2e82c5aa84c85b9486e81261d17901a786917.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n protocol_versions (\n id,\n timestamp,\n bootloader_code_hash,\n default_account_code_hash,\n upgrade_tx_hash,\n created_at\n )\n VALUES\n ($1, $2, $3, $4, $5, NOW())\n ON CONFLICT DO NOTHING\n ", + "query": "\n INSERT INTO\n protocol_versions (\n id,\n timestamp,\n bootloader_code_hash,\n default_account_code_hash,\n evm_emulator_code_hash,\n upgrade_tx_hash,\n created_at\n )\n VALUES\n ($1, $2, $3, $4, $5, $6, NOW())\n ON CONFLICT DO NOTHING\n ", "describe": { "columns": [], "parameters": { @@ -9,10 +9,11 @@ "Int8", "Bytea", "Bytea", + "Bytea", "Bytea" ] }, "nullable": [] }, - "hash": "048f255042f0a75c04bc19369c4b0dd75acbe4248159d2fb62d9e9878c158016" + "hash": "b23f9879be394270a0985c082fd2e82c5aa84c85b9486e81261d17901a786917" } diff --git 
a/core/lib/dal/.sqlx/query-b282359f07eb8372e973a51a27e0a5b8081edf28fa1b67f71101d2e3621be798.json b/core/lib/dal/.sqlx/query-b282359f07eb8372e973a51a27e0a5b8081edf28fa1b67f71101d2e3621be798.json new file mode 100644 index 00000000000..78b913fcc36 --- /dev/null +++ b/core/lib/dal/.sqlx/query-b282359f07eb8372e973a51a27e0a5b8081edf28fa1b67f71101d2e3621be798.json @@ -0,0 +1,20 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n l1_batches (\n number,\n timestamp,\n protocol_version,\n fee_address,\n l1_gas_price,\n l2_fair_gas_price,\n fair_pubdata_price,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n initial_bootloader_heap_content,\n used_contract_hashes,\n created_at,\n updated_at,\n is_sealed\n )\n VALUES\n (\n $1,\n $2,\n $3,\n $4,\n $5,\n $6,\n $7,\n 0,\n 0,\n ''::bytea,\n '{}'::bytea [],\n '{}'::jsonb,\n '{}'::jsonb,\n NOW(),\n NOW(),\n FALSE\n )\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int8", + "Int4", + "Bytea", + "Int8", + "Int8", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "b282359f07eb8372e973a51a27e0a5b8081edf28fa1b67f71101d2e3621be798" +} diff --git a/core/lib/dal/.sqlx/query-b6bfb2d998857f165ee04e282e299bf19b587a16ad70a671b0de48fd608bf31c.json b/core/lib/dal/.sqlx/query-b6bfb2d998857f165ee04e282e299bf19b587a16ad70a671b0de48fd608bf31c.json deleted file mode 100644 index 120fac1021f..00000000000 --- a/core/lib/dal/.sqlx/query-b6bfb2d998857f165ee04e282e299bf19b587a16ad70a671b0de48fd608bf31c.json +++ /dev/null @@ -1,23 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n WITH\n available_batches AS (\n SELECT\n MAX(number) AS \"last_batch\"\n FROM\n l1_batches\n ),\n \n processed_batches AS (\n SELECT\n COALESCE(MAX(l1_batch_number), $1) + $2 AS \"last_ready_batch\"\n FROM\n vm_runner_protective_reads\n WHERE\n time_taken IS NOT NULL\n )\n \n SELECT\n LEAST(last_batch, last_ready_batch) AS \"last_ready_batch!\"\n FROM\n available_batches\n FULL JOIN processed_batches ON TRUE\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "last_ready_batch!", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Int8", - "Int8" - ] - }, - "nullable": [ - true - ] - }, - "hash": "b6bfb2d998857f165ee04e282e299bf19b587a16ad70a671b0de48fd608bf31c" -} diff --git a/core/lib/dal/.sqlx/query-31308e6469a98e9662ff284a89ce264ca7b68c54d894fad9d760324455321080.json b/core/lib/dal/.sqlx/query-b7d448837439a3e3dfe73070d3c20e9c138d0a6d35e9ce7fc396c5e76fbc25dd.json similarity index 71% rename from core/lib/dal/.sqlx/query-31308e6469a98e9662ff284a89ce264ca7b68c54d894fad9d760324455321080.json rename to core/lib/dal/.sqlx/query-b7d448837439a3e3dfe73070d3c20e9c138d0a6d35e9ce7fc396c5e76fbc25dd.json index 71da9df70be..8a68b1a9b9b 100644 --- a/core/lib/dal/.sqlx/query-31308e6469a98e9662ff284a89ce264ca7b68c54d894fad9d760324455321080.json +++ b/core/lib/dal/.sqlx/query-b7d448837439a3e3dfe73070d3c20e9c138d0a6d35e9ce7fc396c5e76fbc25dd.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n 
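The b282359f entry above is the opening half of the lifecycle: when a batch starts, a stub row is inserted with `is_sealed = FALSE`, real values for the fields known up front (number, timestamp, protocol version, fee address, gas prices), and typed empty placeholders for everything the sealing UPDATE fills in later. Reproduced with the noise trimmed:

```sql
INSERT INTO l1_batches (
    number, timestamp, protocol_version, fee_address,
    l1_gas_price, l2_fair_gas_price, fair_pubdata_price,
    l1_tx_count, l2_tx_count, bloom, priority_ops_onchain_data,
    initial_bootloader_heap_content, used_contract_hashes,
    created_at, updated_at, is_sealed
)
VALUES (
    $1, $2, $3, $4, $5, $6, $7,
    0, 0, ''::bytea, '{}'::bytea[],  -- placeholders until sealing
    '{}'::jsonb, '{}'::jsonb,
    NOW(), NOW(), FALSE              -- row starts out unsealed
);
```

The matching fetch is the 8435ed4e entry earlier in the diff, which selects exactly these columns `WHERE NOT is_sealed`.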
bootloader_initial_content_commitment,\n pubdata_input,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n ORDER BY\n number\n LIMIT\n $4\n ", + "query": "\n SELECT\n number,\n l1_batches.timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n l1_batches.bootloader_code_hash,\n l1_batches.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n JOIN protocol_versions ON protocol_versions.id = l1_batches.protocol_version\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_commit_tx_id IS NULL\n AND number != 0\n AND protocol_versions.bootloader_code_hash = $1\n AND protocol_versions.default_account_code_hash = $2\n AND commitment IS NOT NULL\n AND (\n protocol_versions.id = $3\n OR protocol_versions.upgrade_tx_hash IS NULL\n )\n ORDER BY\n number\n LIMIT\n $4\n ", "describe": { "columns": [ { @@ -90,66 +90,76 @@ }, { "ordinal": 17, - "name": "aux_data_hash", + "name": "evm_emulator_code_hash", "type_info": "Bytea" }, { "ordinal": 18, - "name": "pass_through_data_hash", + "name": "aux_data_hash", "type_info": "Bytea" }, { "ordinal": 19, - "name": "meta_parameters_hash", + "name": "pass_through_data_hash", "type_info": "Bytea" }, { "ordinal": 20, + "name": "meta_parameters_hash", + "type_info": "Bytea" + }, + { + "ordinal": 21, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 21, + "ordinal": 22, "name": "compressed_state_diffs", "type_info": "Bytea" }, { - "ordinal": 22, + "ordinal": 23, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 23, + "ordinal": 24, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 24, + "ordinal": 25, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 25, + "ordinal": 26, "name": "pubdata_input", "type_info": "Bytea" }, { - "ordinal": 26, + "ordinal": 27, + "name": "fee_address", + "type_info": "Bytea" + }, + { + "ordinal": 28, "name": "aggregation_root", "type_info": "Bytea" }, { - "ordinal": 27, + "ordinal": 29, "name": "local_root", "type_info": "Bytea" }, { - "ordinal": 28, + "ordinal": 30, "name": "state_diff_hash", "type_info": "Bytea" }, { - "ordinal": 29, + "ordinal": 31, "name": "inclusion_data", "type_info": "Bytea" } @@ -185,15 +195,17 @@ true, true, true, + true, false, true, true, true, + false, true, true, true, 
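The second recurring change in these entries is the new nullable `evm_emulator_code_hash` column, consistently inserted right after `default_aa_code_hash`; that placement is why every later ordinal shifts by one and each `nullable` array picks up an extra `true` (only the emulator hash is optional). The source of truth is `protocol_versions`, as in the bdd9b56f entry just below:

```sql
-- Base-system code hashes for a protocol version; only the EVM emulator
-- hash may be NULL (the emulator is optional).
SELECT
    bootloader_code_hash,
    default_account_code_hash,
    evm_emulator_code_hash
FROM protocol_versions
WHERE id = $1;
```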
true ] }, - "hash": "31308e6469a98e9662ff284a89ce264ca7b68c54d894fad9d760324455321080" + "hash": "b7d448837439a3e3dfe73070d3c20e9c138d0a6d35e9ce7fc396c5e76fbc25dd" } diff --git a/core/lib/dal/.sqlx/query-5d493cbce749cc5b56d4069423597b16599abaf51df0f19effe1a536376cf6a6.json b/core/lib/dal/.sqlx/query-bdd9b56fd8505170125d4e1271f865162bce330edd9b16587e8f9fdab17a8456.json similarity index 52% rename from core/lib/dal/.sqlx/query-5d493cbce749cc5b56d4069423597b16599abaf51df0f19effe1a536376cf6a6.json rename to core/lib/dal/.sqlx/query-bdd9b56fd8505170125d4e1271f865162bce330edd9b16587e8f9fdab17a8456.json index eba36994fb3..2689716c38a 100644 --- a/core/lib/dal/.sqlx/query-5d493cbce749cc5b56d4069423597b16599abaf51df0f19effe1a536376cf6a6.json +++ b/core/lib/dal/.sqlx/query-bdd9b56fd8505170125d4e1271f865162bce330edd9b16587e8f9fdab17a8456.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n bootloader_code_hash,\n default_account_code_hash\n FROM\n protocol_versions\n WHERE\n id = $1\n ", + "query": "\n SELECT\n bootloader_code_hash,\n default_account_code_hash,\n evm_emulator_code_hash\n FROM\n protocol_versions\n WHERE\n id = $1\n ", "describe": { "columns": [ { @@ -12,6 +12,11 @@ "ordinal": 1, "name": "default_account_code_hash", "type_info": "Bytea" + }, + { + "ordinal": 2, + "name": "evm_emulator_code_hash", + "type_info": "Bytea" } ], "parameters": { @@ -21,8 +26,9 @@ }, "nullable": [ false, - false + false, + true ] }, - "hash": "5d493cbce749cc5b56d4069423597b16599abaf51df0f19effe1a536376cf6a6" + "hash": "bdd9b56fd8505170125d4e1271f865162bce330edd9b16587e8f9fdab17a8456" } diff --git a/core/lib/dal/.sqlx/query-db3593883d5e1e636d65e25cc744637fc33467fbd64da5a431ecab194409371c.json b/core/lib/dal/.sqlx/query-c5aedd2b1871d8f6276a31482caa673e4b5bba059ebe07bbbb64578881db030b.json similarity index 73% rename from core/lib/dal/.sqlx/query-db3593883d5e1e636d65e25cc744637fc33467fbd64da5a431ecab194409371c.json rename to core/lib/dal/.sqlx/query-c5aedd2b1871d8f6276a31482caa673e4b5bba059ebe07bbbb64578881db030b.json index 40ed6af677a..f97ea8a6ccd 100644 --- a/core/lib/dal/.sqlx/query-db3593883d5e1e636d65e25cc744637fc33467fbd64da5a431ecab194409371c.json +++ b/core/lib/dal/.sqlx/query-c5aedd2b1871d8f6276a31482caa673e4b5bba059ebe07bbbb64578881db030b.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_prove_tx_id IS NOT NULL\n AND eth_execute_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n l1_tx_count,\n l2_tx_count,\n bloom,\n priority_ops_onchain_data,\n hash,\n commitment,\n l2_to_l1_messages,\n used_contract_hashes,\n compressed_initial_writes,\n compressed_repeated_writes,\n l2_l1_merkle_root,\n 
rollup_last_leaf_index,\n zkporter_is_available,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n aux_data_hash,\n pass_through_data_hash,\n meta_parameters_hash,\n protocol_version,\n compressed_state_diffs,\n system_logs,\n events_queue_commitment,\n bootloader_initial_content_commitment,\n pubdata_input,\n fee_address,\n aggregation_root,\n local_root,\n state_diff_hash,\n data_availability.inclusion_data\n FROM\n l1_batches\n LEFT JOIN commitments ON commitments.l1_batch_number = l1_batches.number\n LEFT JOIN\n data_availability\n ON data_availability.l1_batch_number = l1_batches.number\n WHERE\n eth_prove_tx_id IS NOT NULL\n AND eth_execute_tx_id IS NULL\n ORDER BY\n number\n LIMIT\n $1\n ", "describe": { "columns": [ { @@ -90,66 +90,76 @@ }, { "ordinal": 17, - "name": "aux_data_hash", + "name": "evm_emulator_code_hash", "type_info": "Bytea" }, { "ordinal": 18, - "name": "pass_through_data_hash", + "name": "aux_data_hash", "type_info": "Bytea" }, { "ordinal": 19, - "name": "meta_parameters_hash", + "name": "pass_through_data_hash", "type_info": "Bytea" }, { "ordinal": 20, + "name": "meta_parameters_hash", + "type_info": "Bytea" + }, + { + "ordinal": 21, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 21, + "ordinal": 22, "name": "compressed_state_diffs", "type_info": "Bytea" }, { - "ordinal": 22, + "ordinal": 23, "name": "system_logs", "type_info": "ByteaArray" }, { - "ordinal": 23, + "ordinal": 24, "name": "events_queue_commitment", "type_info": "Bytea" }, { - "ordinal": 24, + "ordinal": 25, "name": "bootloader_initial_content_commitment", "type_info": "Bytea" }, { - "ordinal": 25, + "ordinal": 26, "name": "pubdata_input", "type_info": "Bytea" }, { - "ordinal": 26, + "ordinal": 27, + "name": "fee_address", + "type_info": "Bytea" + }, + { + "ordinal": 28, "name": "aggregation_root", "type_info": "Bytea" }, { - "ordinal": 27, + "ordinal": 29, "name": "local_root", "type_info": "Bytea" }, { - "ordinal": 28, + "ordinal": 30, "name": "state_diff_hash", "type_info": "Bytea" }, { - "ordinal": 29, + "ordinal": 31, "name": "inclusion_data", "type_info": "Bytea" } @@ -182,15 +192,17 @@ true, true, true, + true, false, true, true, true, + false, true, true, true, true ] }, - "hash": "db3593883d5e1e636d65e25cc744637fc33467fbd64da5a431ecab194409371c" + "hash": "c5aedd2b1871d8f6276a31482caa673e4b5bba059ebe07bbbb64578881db030b" } diff --git a/core/lib/dal/.sqlx/query-c61b15a9591e65eab7d226f5b9035bfda52cc5bb5a4bfb11270a2a784491c967.json b/core/lib/dal/.sqlx/query-c61b15a9591e65eab7d226f5b9035bfda52cc5bb5a4bfb11270a2a784491c967.json new file mode 100644 index 00000000000..20b79199165 --- /dev/null +++ b/core/lib/dal/.sqlx/query-c61b15a9591e65eab7d226f5b9035bfda52cc5bb5a4bfb11270a2a784491c967.json @@ -0,0 +1,30 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n bytecode_hash,\n bytecode\n FROM\n (\n SELECT\n value\n FROM\n storage_logs\n WHERE\n storage_logs.hashed_key = $1\n AND storage_logs.miniblock_number <= $2\n ORDER BY\n storage_logs.miniblock_number DESC,\n storage_logs.operation_number DESC\n LIMIT\n 1\n ) deploy_log\n JOIN factory_deps ON value = factory_deps.bytecode_hash\n WHERE\n value != $3\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "bytecode_hash", + "type_info": "Bytea" + }, + { + "ordinal": 1, + "name": "bytecode", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Bytea", + "Int8", + "Bytea" + ] + }, + "nullable": [ + false, + false + ] + }, + "hash": 
"c61b15a9591e65eab7d226f5b9035bfda52cc5bb5a4bfb11270a2a784491c967" +} diff --git a/core/lib/dal/.sqlx/query-31f12a8c44124bb2ce31889ac5295f3823926f69cb1d54874878e6d6c301bfd8.json b/core/lib/dal/.sqlx/query-cbc0e202a6da5092251f278d7dc5c058f9ad703461a1f55c534bf3d9f48eb61b.json similarity index 74% rename from core/lib/dal/.sqlx/query-31f12a8c44124bb2ce31889ac5295f3823926f69cb1d54874878e6d6c301bfd8.json rename to core/lib/dal/.sqlx/query-cbc0e202a6da5092251f278d7dc5c058f9ad703461a1f55c534bf3d9f48eb61b.json index c63ea98db44..8f6d1cf7a5f 100644 --- a/core/lib/dal/.sqlx/query-31f12a8c44124bb2ce31889ac5295f3823926f69cb1d54874878e6d6c301bfd8.json +++ b/core/lib/dal/.sqlx/query-cbc0e202a6da5092251f278d7dc5c058f9ad703461a1f55c534bf3d9f48eb61b.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n COUNT(*) AS \"count!\"\n FROM\n l1_batches\n ", + "query": "\n SELECT\n COUNT(*) AS \"count!\"\n FROM\n l1_batches\n WHERE\n is_sealed\n ", "describe": { "columns": [ { @@ -16,5 +16,5 @@ null ] }, - "hash": "31f12a8c44124bb2ce31889ac5295f3823926f69cb1d54874878e6d6c301bfd8" + "hash": "cbc0e202a6da5092251f278d7dc5c058f9ad703461a1f55c534bf3d9f48eb61b" } diff --git a/core/lib/dal/.sqlx/query-cee7a608bd77815e9582531383481b01395cfd2a3e95fb4593229bd878163320.json b/core/lib/dal/.sqlx/query-cee7a608bd77815e9582531383481b01395cfd2a3e95fb4593229bd878163320.json new file mode 100644 index 00000000000..4b219bfee0a --- /dev/null +++ b/core/lib/dal/.sqlx/query-cee7a608bd77815e9582531383481b01395cfd2a3e95fb4593229bd878163320.json @@ -0,0 +1,26 @@ +{ + "db_name": "PostgreSQL", + "query": "\n WITH upsert AS (\n SELECT\n p.l1_batch_number\n FROM\n proof_generation_details p\n LEFT JOIN\n tee_proof_generation_details tee\n ON\n p.l1_batch_number = tee.l1_batch_number\n AND tee.tee_type = $1\n WHERE\n (\n p.l1_batch_number >= $5\n AND p.vm_run_data_blob_url IS NOT NULL\n AND p.proof_gen_data_blob_url IS NOT NULL\n )\n AND (\n tee.l1_batch_number IS NULL\n OR (\n tee.status = $3\n OR (\n tee.status = $2\n AND tee.prover_taken_at < NOW() - $4::INTERVAL\n )\n )\n )\n FETCH FIRST ROW ONLY\n )\n \n INSERT INTO\n tee_proof_generation_details (\n l1_batch_number, tee_type, status, created_at, updated_at, prover_taken_at\n )\n SELECT\n l1_batch_number,\n $1,\n $2,\n NOW(),\n NOW(),\n NOW()\n FROM\n upsert\n ON CONFLICT (l1_batch_number, tee_type) DO\n UPDATE\n SET\n status = $2,\n updated_at = NOW(),\n prover_taken_at = NOW()\n RETURNING\n l1_batch_number\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Text", + "Text", + "Text", + "Interval", + "Int8" + ] + }, + "nullable": [ + false + ] + }, + "hash": "cee7a608bd77815e9582531383481b01395cfd2a3e95fb4593229bd878163320" +} diff --git a/core/lib/dal/.sqlx/query-b3bccd33945c657856f12b9ced6addab6569d897e748d39abca9538872d986ad.json b/core/lib/dal/.sqlx/query-d3760406b7bf5d14a3fe6cbc9fb3926df634ebf0d8286181fa04884fb747cee8.json similarity index 55% rename from core/lib/dal/.sqlx/query-b3bccd33945c657856f12b9ced6addab6569d897e748d39abca9538872d986ad.json rename to core/lib/dal/.sqlx/query-d3760406b7bf5d14a3fe6cbc9fb3926df634ebf0d8286181fa04884fb747cee8.json index f440a265593..ed3270de573 100644 --- a/core/lib/dal/.sqlx/query-b3bccd33945c657856f12b9ced6addab6569d897e748d39abca9538872d986ad.json +++ b/core/lib/dal/.sqlx/query-d3760406b7bf5d14a3fe6cbc9fb3926df634ebf0d8286181fa04884fb747cee8.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n 
miniblocks.number,\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n )\n ) AS \"l1_batch_number!\",\n miniblocks.timestamp,\n miniblocks.l1_tx_count,\n miniblocks.l2_tx_count,\n miniblocks.hash AS \"root_hash?\",\n commit_tx.tx_hash AS \"commit_tx_hash?\",\n commit_tx.confirmed_at AS \"committed_at?\",\n prove_tx.tx_hash AS \"prove_tx_hash?\",\n prove_tx.confirmed_at AS \"proven_at?\",\n execute_tx.tx_hash AS \"execute_tx_hash?\",\n execute_tx.confirmed_at AS \"executed_at?\",\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.fair_pubdata_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n miniblocks.protocol_version,\n miniblocks.fee_account_address\n FROM\n miniblocks\n LEFT JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number\n LEFT JOIN eth_txs_history AS commit_tx\n ON (\n l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id\n AND commit_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS prove_tx\n ON (\n l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id\n AND prove_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS execute_tx\n ON (\n l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id\n AND execute_tx.confirmed_at IS NOT NULL\n )\n WHERE\n miniblocks.number = $1\n ", + "query": "\n SELECT\n miniblocks.number,\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n WHERE\n is_sealed\n )\n ) AS \"l1_batch_number!\",\n miniblocks.timestamp,\n miniblocks.l1_tx_count,\n miniblocks.l2_tx_count,\n miniblocks.hash AS \"root_hash?\",\n commit_tx.tx_hash AS \"commit_tx_hash?\",\n commit_tx.confirmed_at AS \"committed_at?\",\n prove_tx.tx_hash AS \"prove_tx_hash?\",\n prove_tx.confirmed_at AS \"proven_at?\",\n execute_tx.tx_hash AS \"execute_tx_hash?\",\n execute_tx.confirmed_at AS \"executed_at?\",\n miniblocks.l1_gas_price,\n miniblocks.l2_fair_gas_price,\n miniblocks.fair_pubdata_price,\n miniblocks.bootloader_code_hash,\n miniblocks.default_aa_code_hash,\n l1_batches.evm_emulator_code_hash,\n miniblocks.protocol_version,\n miniblocks.fee_account_address\n FROM\n miniblocks\n LEFT JOIN l1_batches ON miniblocks.l1_batch_number = l1_batches.number\n LEFT JOIN eth_txs_history AS commit_tx\n ON (\n l1_batches.eth_commit_tx_id = commit_tx.eth_tx_id\n AND commit_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS prove_tx\n ON (\n l1_batches.eth_prove_tx_id = prove_tx.eth_tx_id\n AND prove_tx.confirmed_at IS NOT NULL\n )\n LEFT JOIN eth_txs_history AS execute_tx\n ON (\n l1_batches.eth_execute_tx_id = execute_tx.eth_tx_id\n AND execute_tx.confirmed_at IS NOT NULL\n )\n WHERE\n miniblocks.number = $1\n ", "describe": { "columns": [ { @@ -90,11 +90,16 @@ }, { "ordinal": 17, + "name": "evm_emulator_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 18, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 18, + "ordinal": 19, "name": "fee_account_address", "type_info": "Bytea" } @@ -123,8 +128,9 @@ true, true, true, + true, false ] }, - "hash": "b3bccd33945c657856f12b9ced6addab6569d897e748d39abca9538872d986ad" + "hash": "d3760406b7bf5d14a3fe6cbc9fb3926df634ebf0d8286181fa04884fb747cee8" } diff --git a/core/lib/dal/.sqlx/query-d14b52df2cd9f9e484c60ba00383b438f14b68535111cf2cedd363fc646aac99.json b/core/lib/dal/.sqlx/query-d38116f1664a3ab88d285297e8caebfcd3b9c287fecde4376afa84c0566a55ef.json similarity index 57% rename from 
core/lib/dal/.sqlx/query-d14b52df2cd9f9e484c60ba00383b438f14b68535111cf2cedd363fc646aac99.json rename to core/lib/dal/.sqlx/query-d38116f1664a3ab88d285297e8caebfcd3b9c287fecde4376afa84c0566a55ef.json index 0370a63d65e..15d6096420f 100644 --- a/core/lib/dal/.sqlx/query-d14b52df2cd9f9e484c60ba00383b438f14b68535111cf2cedd363fc646aac99.json +++ b/core/lib/dal/.sqlx/query-d38116f1664a3ab88d285297e8caebfcd3b9c287fecde4376afa84c0566a55ef.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n timestamp\n FROM\n l1_batches\n WHERE\n eth_commit_tx_id IS NULL\n AND number > 0\n ORDER BY\n number\n LIMIT\n 1\n ", + "query": "\n SELECT\n timestamp\n FROM\n l1_batches\n WHERE\n is_sealed\n AND eth_commit_tx_id IS NULL\n AND number > 0\n ORDER BY\n number\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -16,5 +16,5 @@ false ] }, - "hash": "d14b52df2cd9f9e484c60ba00383b438f14b68535111cf2cedd363fc646aac99" + "hash": "d38116f1664a3ab88d285297e8caebfcd3b9c287fecde4376afa84c0566a55ef" } diff --git a/core/lib/dal/.sqlx/query-d4545d817e942dddde53f117e801cd8aa22592f0808f3c2f0555ca321459815e.json b/core/lib/dal/.sqlx/query-d4545d817e942dddde53f117e801cd8aa22592f0808f3c2f0555ca321459815e.json new file mode 100644 index 00000000000..0aac086f22a --- /dev/null +++ b/core/lib/dal/.sqlx/query-d4545d817e942dddde53f117e801cd8aa22592f0808f3c2f0555ca321459815e.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n WHERE\n is_sealed\n ),\n (\n SELECT\n MAX(l1_batch_number) + 1\n FROM\n snapshot_recovery\n )\n ) AS \"l1_batch_number!\"\n FROM\n miniblocks\n WHERE\n number = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number!", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + null + ] + }, + "hash": "d4545d817e942dddde53f117e801cd8aa22592f0808f3c2f0555ca321459815e" +} diff --git a/core/lib/dal/.sqlx/query-877d20634068170326ab5801b69c70aff49e60b7def3d93b9206e650c259168b.json b/core/lib/dal/.sqlx/query-d47226eb9b1abe6436f5ef76eba9ddb30ca0d9ea0190786b8e8472c622e98b9c.json similarity index 57% rename from core/lib/dal/.sqlx/query-877d20634068170326ab5801b69c70aff49e60b7def3d93b9206e650c259168b.json rename to core/lib/dal/.sqlx/query-d47226eb9b1abe6436f5ef76eba9ddb30ca0d9ea0190786b8e8472c622e98b9c.json index 3052b3a04d1..baabbdb4f24 100644 --- a/core/lib/dal/.sqlx/query-877d20634068170326ab5801b69c70aff49e60b7def3d93b9206e650c259168b.json +++ b/core/lib/dal/.sqlx/query-d47226eb9b1abe6436f5ef76eba9ddb30ca0d9ea0190786b8e8472c622e98b9c.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n timestamp\n FROM\n l1_batches\n WHERE\n eth_execute_tx_id IS NULL\n AND number > 0\n ORDER BY\n number\n LIMIT\n 1\n ", + "query": "\n SELECT\n timestamp\n FROM\n l1_batches\n WHERE\n is_sealed\n AND eth_prove_tx_id IS NULL\n AND number > 0\n ORDER BY\n number\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -16,5 +16,5 @@ false ] }, - "hash": "877d20634068170326ab5801b69c70aff49e60b7def3d93b9206e650c259168b" + "hash": "d47226eb9b1abe6436f5ef76eba9ddb30ca0d9ea0190786b8e8472c622e98b9c" } diff --git a/core/lib/dal/.sqlx/query-7af141a4533b332903b7ba5591b1c90ac9deb75cd2a542fe649d7830496a0756.json b/core/lib/dal/.sqlx/query-d4cdd4eed07dfdad2757c480903f7999eabb611338925abe9dc9e64c837183d9.json similarity index 72% rename from core/lib/dal/.sqlx/query-7af141a4533b332903b7ba5591b1c90ac9deb75cd2a542fe649d7830496a0756.json rename 
to core/lib/dal/.sqlx/query-d4cdd4eed07dfdad2757c480903f7999eabb611338925abe9dc9e64c837183d9.json index eb6d9c3640d..111234e02b7 100644 --- a/core/lib/dal/.sqlx/query-7af141a4533b332903b7ba5591b1c90ac9deb75cd2a542fe649d7830496a0756.json +++ b/core/lib/dal/.sqlx/query-d4cdd4eed07dfdad2757c480903f7999eabb611338925abe9dc9e64c837183d9.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address AS \"fee_account_address!\",\n l2_da_validator_address AS \"l2_da_validator_address!\",\n pubdata_type AS \"pubdata_type!\",\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n logs_bloom\n FROM\n miniblocks\n WHERE\n number = $1\n ", + "query": "\n SELECT\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address AS \"fee_account_address!\",\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n evm_emulator_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n logs_bloom,\n l2_da_validator_address,\n pubdata_type\n FROM\n miniblocks\n WHERE\n number = $1\n ", "describe": { "columns": [ { @@ -35,68 +35,73 @@ }, { "ordinal": 6, - "name": "l2_da_validator_address!", - "type_info": "Bytea" - }, - { - "ordinal": 7, - "name": "pubdata_type!", - "type_info": "Text" - }, - { - "ordinal": 8, "name": "base_fee_per_gas", "type_info": "Numeric" }, { - "ordinal": 9, + "ordinal": 7, "name": "l1_gas_price", "type_info": "Int8" }, { - "ordinal": 10, + "ordinal": 8, "name": "l2_fair_gas_price", "type_info": "Int8" }, { - "ordinal": 11, + "ordinal": 9, "name": "gas_per_pubdata_limit", "type_info": "Int8" }, { - "ordinal": 12, + "ordinal": 10, "name": "bootloader_code_hash", "type_info": "Bytea" }, { - "ordinal": 13, + "ordinal": 11, "name": "default_aa_code_hash", "type_info": "Bytea" }, { - "ordinal": 14, + "ordinal": 12, + "name": "evm_emulator_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 13, "name": "protocol_version", "type_info": "Int4" }, { - "ordinal": 15, + "ordinal": 14, "name": "virtual_blocks", "type_info": "Int8" }, { - "ordinal": 16, + "ordinal": 15, "name": "fair_pubdata_price", "type_info": "Int8" }, { - "ordinal": 17, + "ordinal": 16, "name": "gas_limit", "type_info": "Int8" }, { - "ordinal": 18, + "ordinal": 17, "name": "logs_bloom", "type_info": "Bytea" + }, + { + "ordinal": 18, + "name": "l2_da_validator_address", + "type_info": "Bytea" + }, + { + "ordinal": 19, + "name": "pubdata_type", + "type_info": "Text" } ], "parameters": { @@ -115,16 +120,17 @@ false, false, false, - false, - false, + true, true, true, true, false, true, true, - true + true, + false, + false ] }, - "hash": "7af141a4533b332903b7ba5591b1c90ac9deb75cd2a542fe649d7830496a0756" + "hash": "d4cdd4eed07dfdad2757c480903f7999eabb611338925abe9dc9e64c837183d9" } diff --git a/core/lib/dal/.sqlx/query-d89eabd1edb4ee4657249f9f4ab729b9149fee37c5ef7d69e259ee33cb8ca860.json b/core/lib/dal/.sqlx/query-d89eabd1edb4ee4657249f9f4ab729b9149fee37c5ef7d69e259ee33cb8ca860.json deleted file mode 100644 index fa1a5d6741a..00000000000 --- a/core/lib/dal/.sqlx/query-d89eabd1edb4ee4657249f9f4ab729b9149fee37c5ef7d69e259ee33cb8ca860.json +++ /dev/null @@ -1,65 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE tee_verifier_input_producer_jobs\n SET\n status = $1,\n 
attempts = attempts + 1,\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n l1_batch_number = (\n SELECT\n l1_batch_number\n FROM\n tee_verifier_input_producer_jobs\n WHERE\n status = $2\n OR (\n status = $1\n AND processing_started_at < NOW() - $4::INTERVAL\n )\n OR (\n status = $3\n AND attempts < $5\n )\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n tee_verifier_input_producer_jobs.l1_batch_number\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "l1_batch_number", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - { - "Custom": { - "name": "tee_verifier_input_producer_job_status", - "kind": { - "Enum": [ - "Queued", - "ManuallySkipped", - "InProgress", - "Successful", - "Failed" - ] - } - } - }, - { - "Custom": { - "name": "tee_verifier_input_producer_job_status", - "kind": { - "Enum": [ - "Queued", - "ManuallySkipped", - "InProgress", - "Successful", - "Failed" - ] - } - } - }, - { - "Custom": { - "name": "tee_verifier_input_producer_job_status", - "kind": { - "Enum": [ - "Queued", - "ManuallySkipped", - "InProgress", - "Successful", - "Failed" - ] - } - } - }, - "Interval", - "Int2" - ] - }, - "nullable": [ - false - ] - }, - "hash": "d89eabd1edb4ee4657249f9f4ab729b9149fee37c5ef7d69e259ee33cb8ca860" -} diff --git a/core/lib/dal/.sqlx/query-e475ff151b9f6c76f1e4e9ee2283cab780f7ed1d91199b4d34011cdc9376c005.json b/core/lib/dal/.sqlx/query-e475ff151b9f6c76f1e4e9ee2283cab780f7ed1d91199b4d34011cdc9376c005.json deleted file mode 100644 index 2598be6267d..00000000000 --- a/core/lib/dal/.sqlx/query-e475ff151b9f6c76f1e4e9ee2283cab780f7ed1d91199b4d34011cdc9376c005.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n COALESCE(\n miniblocks.l1_batch_number,\n (\n SELECT\n (MAX(number) + 1)\n FROM\n l1_batches\n ),\n (\n SELECT\n MAX(l1_batch_number) + 1\n FROM\n snapshot_recovery\n )\n ) AS \"l1_batch_number!\"\n FROM\n miniblocks\n WHERE\n number = $1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "l1_batch_number!", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [ - null - ] - }, - "hash": "e475ff151b9f6c76f1e4e9ee2283cab780f7ed1d91199b4d34011cdc9376c005" -} diff --git a/core/lib/dal/.sqlx/query-f461f21fcc8b8e88d7cb8cfc38a15f75badf7801f687af19163f5f533e20fbc7.json b/core/lib/dal/.sqlx/query-f461f21fcc8b8e88d7cb8cfc38a15f75badf7801f687af19163f5f533e20fbc7.json deleted file mode 100644 index 126a7bafc00..00000000000 --- a/core/lib/dal/.sqlx/query-f461f21fcc8b8e88d7cb8cfc38a15f75badf7801f687af19163f5f533e20fbc7.json +++ /dev/null @@ -1,32 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n miniblocks (\n number,\n timestamp,\n hash,\n l1_tx_count,\n l2_tx_count,\n fee_account_address,\n base_fee_per_gas,\n l1_gas_price,\n l2_fair_gas_price,\n gas_per_pubdata_limit,\n bootloader_code_hash,\n default_aa_code_hash,\n protocol_version,\n virtual_blocks,\n fair_pubdata_price,\n gas_limit,\n l2_da_validator_address,\n pubdata_type,\n logs_bloom,\n created_at,\n updated_at\n )\n VALUES\n (\n $1,\n $2,\n $3,\n $4,\n $5,\n $6,\n $7,\n $8,\n $9,\n $10,\n $11,\n $12,\n $13,\n $14,\n $15,\n $16,\n $17,\n $18,\n $19,\n NOW(),\n NOW()\n )\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Int8", - "Int8", - "Bytea", - "Int4", - "Int4", - "Bytea", - "Numeric", - "Int8", - "Int8", - "Int8", - "Bytea", - "Bytea", - "Int4", - "Int8", - "Int8", - "Int8", - "Bytea", - "Text", - "Bytea" - ] - }, - 
"nullable": [] - }, - "hash": "f461f21fcc8b8e88d7cb8cfc38a15f75badf7801f687af19163f5f533e20fbc7" -} diff --git a/core/lib/dal/.sqlx/query-5556ebdb040428b42c04ea9121b3c2a3d0a09c5ee88bdd671462904d4d27a355.json b/core/lib/dal/.sqlx/query-f9a47bd5532fc10dd0bc1be2af45b243bb067514b67daaf084353e5ada15b23a.json similarity index 71% rename from core/lib/dal/.sqlx/query-5556ebdb040428b42c04ea9121b3c2a3d0a09c5ee88bdd671462904d4d27a355.json rename to core/lib/dal/.sqlx/query-f9a47bd5532fc10dd0bc1be2af45b243bb067514b67daaf084353e5ada15b23a.json index 5e9051587bb..1b50a750dac 100644 --- a/core/lib/dal/.sqlx/query-5556ebdb040428b42c04ea9121b3c2a3d0a09c5ee88bdd671462904d4d27a355.json +++ b/core/lib/dal/.sqlx/query-f9a47bd5532fc10dd0bc1be2af45b243bb067514b67daaf084353e5ada15b23a.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n id AS \"minor!\",\n timestamp,\n bootloader_code_hash,\n default_account_code_hash,\n upgrade_tx_hash\n FROM\n protocol_versions\n WHERE\n id = $1\n ", + "query": "\n SELECT\n id AS \"minor!\",\n timestamp,\n bootloader_code_hash,\n default_account_code_hash,\n evm_emulator_code_hash,\n upgrade_tx_hash\n FROM\n protocol_versions\n WHERE\n id = $1\n ", "describe": { "columns": [ { @@ -25,6 +25,11 @@ }, { "ordinal": 4, + "name": "evm_emulator_code_hash", + "type_info": "Bytea" + }, + { + "ordinal": 5, "name": "upgrade_tx_hash", "type_info": "Bytea" } @@ -39,8 +44,9 @@ false, false, false, + true, true ] }, - "hash": "5556ebdb040428b42c04ea9121b3c2a3d0a09c5ee88bdd671462904d4d27a355" + "hash": "f9a47bd5532fc10dd0bc1be2af45b243bb067514b67daaf084353e5ada15b23a" } diff --git a/core/lib/dal/.sqlx/query-fb7c16bff10a6e7b4ff1c5975d79542f0dba42101b32e026751362e169381662.json b/core/lib/dal/.sqlx/query-fb7c16bff10a6e7b4ff1c5975d79542f0dba42101b32e026751362e169381662.json new file mode 100644 index 00000000000..12e28266fbc --- /dev/null +++ b/core/lib/dal/.sqlx/query-fb7c16bff10a6e7b4ff1c5975d79542f0dba42101b32e026751362e169381662.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n proofs.l1_batch_number\n FROM\n tee_proof_generation_details AS proofs\n WHERE\n proofs.status = $1\n ORDER BY\n proofs.l1_batch_number ASC\n LIMIT\n 1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Text" + ] + }, + "nullable": [ + false + ] + }, + "hash": "fb7c16bff10a6e7b4ff1c5975d79542f0dba42101b32e026751362e169381662" +} diff --git a/core/lib/dal/.sqlx/query-d3d472436f1f3a6cc61bc9d47de5731b755cf2e09d877dd4eb70d58a1d11a977.json b/core/lib/dal/.sqlx/query-fec7b791e371a4c58350b6537065223f4599d4128db588d8645f3d106de5f50b.json similarity index 58% rename from core/lib/dal/.sqlx/query-d3d472436f1f3a6cc61bc9d47de5731b755cf2e09d877dd4eb70d58a1d11a977.json rename to core/lib/dal/.sqlx/query-fec7b791e371a4c58350b6537065223f4599d4128db588d8645f3d106de5f50b.json index 61497cdb169..c34d38ac2d0 100644 --- a/core/lib/dal/.sqlx/query-d3d472436f1f3a6cc61bc9d47de5731b755cf2e09d877dd4eb70d58a1d11a977.json +++ b/core/lib/dal/.sqlx/query-fec7b791e371a4c58350b6537065223f4599d4128db588d8645f3d106de5f50b.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n certificate\n FROM\n miniblocks_consensus\n WHERE\n number >= $1\n ORDER BY\n number DESC\n LIMIT\n 1\n ", + "query": "\n SELECT\n certificate\n FROM\n miniblocks_consensus\n ORDER BY\n number DESC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -10,13 +10,11 @@ } ], "parameters": { - "Left": [ - "Int8" - ] + "Left": [] 
}, "nullable": [ false ] }, - "hash": "d3d472436f1f3a6cc61bc9d47de5731b755cf2e09d877dd4eb70d58a1d11a977" + "hash": "fec7b791e371a4c58350b6537065223f4599d4128db588d8645f3d106de5f50b" } diff --git a/core/lib/dal/Cargo.toml b/core/lib/dal/Cargo.toml index ccca49525e4..db03b8de982 100644 --- a/core/lib/dal/Cargo.toml +++ b/core/lib/dal/Cargo.toml @@ -22,8 +22,11 @@ zksync_types.workspace = true zksync_concurrency.workspace = true zksync_consensus_roles.workspace = true zksync_consensus_storage.workspace = true +zksync_consensus_crypto.workspace = true +zksync_consensus_utils.workspace = true zksync_protobuf.workspace = true zksync_db_connection.workspace = true +zksync_l1_contract_interface.workspace = true itertools.workspace = true thiserror.workspace = true diff --git a/core/lib/dal/migrations/20240910112120_unsealed_batches_in_db.down.sql b/core/lib/dal/migrations/20240910112120_unsealed_batches_in_db.down.sql new file mode 100644 index 00000000000..3706fc6630b --- /dev/null +++ b/core/lib/dal/migrations/20240910112120_unsealed_batches_in_db.down.sql @@ -0,0 +1,5 @@ +ALTER TABLE l1_batches + RENAME COLUMN is_sealed TO is_finished; +ALTER table l1_batches + DROP COLUMN fair_pubdata_price, + DROP COLUMN fee_address; diff --git a/core/lib/dal/migrations/20240910112120_unsealed_batches_in_db.up.sql b/core/lib/dal/migrations/20240910112120_unsealed_batches_in_db.up.sql new file mode 100644 index 00000000000..6b08546ea1e --- /dev/null +++ b/core/lib/dal/migrations/20240910112120_unsealed_batches_in_db.up.sql @@ -0,0 +1,5 @@ +ALTER TABLE l1_batches + RENAME COLUMN is_finished TO is_sealed; +ALTER table l1_batches + ADD COLUMN fair_pubdata_price bigint NOT NULL DEFAULT 0, + ADD COLUMN fee_address bytea NOT NULL DEFAULT '\x0000000000000000000000000000000000000000'::bytea; diff --git a/core/lib/dal/migrations/20240911161714_evm-simulator.down.sql b/core/lib/dal/migrations/20240911161714_evm-simulator.down.sql new file mode 100644 index 00000000000..74ac4e60383 --- /dev/null +++ b/core/lib/dal/migrations/20240911161714_evm-simulator.down.sql @@ -0,0 +1,3 @@ +ALTER TABLE protocol_versions DROP COLUMN IF EXISTS evm_emulator_code_hash; +ALTER TABLE l1_batches DROP COLUMN IF EXISTS evm_emulator_code_hash; +ALTER TABLE miniblocks DROP COLUMN IF EXISTS evm_emulator_code_hash; diff --git a/core/lib/dal/migrations/20240911161714_evm-simulator.up.sql b/core/lib/dal/migrations/20240911161714_evm-simulator.up.sql new file mode 100644 index 00000000000..43ae361e7ee --- /dev/null +++ b/core/lib/dal/migrations/20240911161714_evm-simulator.up.sql @@ -0,0 +1,4 @@ +ALTER TABLE protocol_versions ADD COLUMN IF NOT EXISTS evm_emulator_code_hash BYTEA; +ALTER TABLE l1_batches ADD COLUMN IF NOT EXISTS evm_emulator_code_hash BYTEA; +-- We need this column in `miniblocks` as well in order to store data for the pending L1 batch +ALTER TABLE miniblocks ADD COLUMN IF NOT EXISTS evm_emulator_code_hash BYTEA; diff --git a/core/lib/dal/migrations/20241001110000_remove_tee_verifier_input_producer_job.down.sql b/core/lib/dal/migrations/20241001110000_remove_tee_verifier_input_producer_job.down.sql new file mode 100644 index 00000000000..707ce306365 --- /dev/null +++ b/core/lib/dal/migrations/20241001110000_remove_tee_verifier_input_producer_job.down.sql @@ -0,0 +1,20 @@ +CREATE TABLE tee_verifier_input_producer_jobs ( + l1_batch_number BIGINT NOT NULL, + status TEXT NOT NULL, + signature BYTEA, + pubkey BYTEA, + proof BYTEA, + tee_type TEXT NOT NULL, + created_at TIMESTAMP NOT NULL, + updated_at TIMESTAMP NOT NULL, + 
prover_taken_at TIMESTAMP, + PRIMARY KEY (l1_batch_number, tee_type), + CONSTRAINT tee_proof_generation_details_l1_batch_number_fkey FOREIGN KEY (l1_batch_number) REFERENCES tee_verifier_input_producer_jobs(l1_batch_number) ON DELETE CASCADE, + CONSTRAINT tee_proof_generation_details_pubkey_fkey FOREIGN KEY (pubkey) REFERENCES tee_attestations(pubkey) ON DELETE SET NULL +); + +ALTER TABLE tee_proof_generation_details + ADD CONSTRAINT tee_proof_generation_details_l1_batch_number_fkey + FOREIGN KEY (l1_batch_number) + REFERENCES tee_verifier_input_producer_jobs(l1_batch_number) + ON DELETE CASCADE; diff --git a/core/lib/dal/migrations/20241001110000_remove_tee_verifier_input_producer_job.up.sql b/core/lib/dal/migrations/20241001110000_remove_tee_verifier_input_producer_job.up.sql new file mode 100644 index 00000000000..c2417ba86b3 --- /dev/null +++ b/core/lib/dal/migrations/20241001110000_remove_tee_verifier_input_producer_job.up.sql @@ -0,0 +1,3 @@ +ALTER TABLE tee_proof_generation_details DROP CONSTRAINT tee_proof_generation_details_l1_batch_number_fkey; + +DROP TABLE IF EXISTS tee_verifier_input_producer_jobs; diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index daa0fd8b79a..f1419865601 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -17,9 +17,10 @@ use zksync_types::{ aggregated_operations::AggregatedActionType, block::{ BlockGasCount, L1BatchHeader, L1BatchStatistics, L1BatchTreeData, L2BlockHeader, - StorageOracleInfo, + StorageOracleInfo, UnsealedL1BatchHeader, }, commitment::{L1BatchCommitmentArtifacts, L1BatchWithMetadata}, + fee_model::BatchFeeInput, l2_to_l1_log::UserL2ToL1Log, writes::TreeWrite, Address, Bloom, L1BatchNumber, L2BlockNumber, ProtocolVersionId, H256, U256, @@ -30,7 +31,9 @@ pub use crate::models::storage_block::{L1BatchMetadataError, L1BatchWithOptional use crate::{ models::{ parse_protocol_version, - storage_block::{StorageL1Batch, StorageL1BatchHeader, StorageL2BlockHeader}, + storage_block::{ + StorageL1Batch, StorageL1BatchHeader, StorageL2BlockHeader, UnsealedStorageL1Batch, + }, storage_event::StorageL2ToL1Log, storage_oracle_info::DbStorageOracleInfo, }, @@ -89,6 +92,8 @@ impl BlocksDal<'_, '_> { COUNT(*) AS "count!" 
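+                -- Only sealed batches count here; a still-open (unsealed) batch alone means genesis is still needed.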
FROM l1_batches + WHERE + is_sealed "# ) .instrument("is_genesis_needed") @@ -105,6 +110,8 @@ impl BlocksDal<'_, '_> { MAX(number) AS "number" FROM l1_batches + WHERE + is_sealed "# ) .instrument("get_sealed_l1_batch_number") @@ -140,6 +147,8 @@ impl BlocksDal<'_, '_> { MIN(number) AS "number" FROM l1_batches + WHERE + is_sealed "# ) .instrument("get_earliest_l1_batch_number") @@ -325,6 +334,7 @@ impl BlocksDal<'_, '_> { zkporter_is_available, bootloader_code_hash, default_aa_code_hash, + evm_emulator_code_hash, aux_data_hash, pass_through_data_hash, meta_parameters_hash, @@ -334,6 +344,7 @@ impl BlocksDal<'_, '_> { events_queue_commitment, bootloader_initial_content_commitment, pubdata_input, + fee_address, aggregation_root, local_root, state_diff_hash, @@ -345,7 +356,8 @@ impl BlocksDal<'_, '_> { data_availability ON data_availability.l1_batch_number = l1_batches.number WHERE - number = $1 + is_sealed + AND number = $1 "#, i64::from(number.0) ) @@ -373,13 +385,16 @@ impl BlocksDal<'_, '_> { used_contract_hashes, bootloader_code_hash, default_aa_code_hash, + evm_emulator_code_hash, protocol_version, system_logs, - pubdata_input + pubdata_input, + fee_address FROM l1_batches WHERE - number = $1 + is_sealed + AND number = $1 "#, i64::from(number.0) ) @@ -412,7 +427,8 @@ impl BlocksDal<'_, '_> { FROM l1_batches WHERE - number = $1 + is_sealed + AND number = $1 "#, i64::from(number.0) ) @@ -443,7 +459,8 @@ impl BlocksDal<'_, '_> { FROM l1_batches WHERE - number = $1 + is_sealed + AND number = $1 "#, i64::from(number.0) ) @@ -564,7 +581,109 @@ impl BlocksDal<'_, '_> { Ok(()) } + /// Inserts an unsealed L1 batch with some basic information (i.e. runtime related data is either + /// null or set to default value for the corresponding type). pub async fn insert_l1_batch( + &mut self, + unsealed_batch_header: UnsealedL1BatchHeader, + ) -> DalResult<()> { + Self::insert_l1_batch_inner(unsealed_batch_header, self.storage).await + } + + async fn insert_l1_batch_inner( + unsealed_batch_header: UnsealedL1BatchHeader, + conn: &mut Connection<'_, Core>, + ) -> DalResult<()> { + sqlx::query!( + r#" + INSERT INTO + l1_batches ( + number, + timestamp, + protocol_version, + fee_address, + l1_gas_price, + l2_fair_gas_price, + fair_pubdata_price, + l1_tx_count, + l2_tx_count, + bloom, + priority_ops_onchain_data, + initial_bootloader_heap_content, + used_contract_hashes, + created_at, + updated_at, + is_sealed + ) + VALUES + ( + $1, + $2, + $3, + $4, + $5, + $6, + $7, + 0, + 0, + ''::bytea, + '{}'::bytea [], + '{}'::jsonb, + '{}'::jsonb, + NOW(), + NOW(), + FALSE + ) + "#, + i64::from(unsealed_batch_header.number.0), + unsealed_batch_header.timestamp as i64, + unsealed_batch_header.protocol_version.map(|v| v as i32), + unsealed_batch_header.fee_address.as_bytes(), + unsealed_batch_header.fee_input.l1_gas_price() as i64, + unsealed_batch_header.fee_input.fair_l2_gas_price() as i64, + unsealed_batch_header.fee_input.fair_pubdata_price() as i64, + ) + .instrument("insert_l1_batch") + .with_arg("number", &unsealed_batch_header.number) + .execute(conn) + .await?; + Ok(()) + } + + pub async fn ensure_unsealed_l1_batch_exists( + &mut self, + unsealed_batch: UnsealedL1BatchHeader, + ) -> anyhow::Result<()> { + let mut transaction = self.storage.start_transaction().await?; + let unsealed_batch_fetched = Self::get_unsealed_l1_batch_inner(&mut transaction).await?; + + match unsealed_batch_fetched { + None => { + tracing::info!( + "Unsealed batch #{} could not be found; inserting", + unsealed_batch.number + ); + 
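+                // The insert happens inside the DB transaction opened above, so the
+                // "batch is missing" check and the insert commit together.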
Self::insert_l1_batch_inner(unsealed_batch, &mut transaction).await?; + } + Some(unsealed_batch_fetched) => { + if unsealed_batch_fetched.number != unsealed_batch.number { + anyhow::bail!( + "fetched unsealed L1 batch #{} does not conform to expected L1 batch #{}", + unsealed_batch_fetched.number, + unsealed_batch.number + ) + } + } + } + + transaction.commit().await?; + Ok(()) + } + + /// Marks provided L1 batch as sealed and populates it with all the runtime information. + /// + /// Errors if the batch does not exist. + pub async fn mark_l1_batch_as_sealed( &mut self, header: &L1BatchHeader, initial_bootloader_contents: &[(usize, U256)], @@ -572,9 +691,9 @@ impl BlocksDal<'_, '_> { storage_refunds: &[u32], pubdata_costs: &[i32], predicted_circuits_by_type: CircuitStatistic, // predicted number of circuits for each circuit type - ) -> DalResult<()> { + ) -> anyhow::Result<()> { let initial_bootloader_contents_len = initial_bootloader_contents.len(); - let instrumentation = Instrumented::new("insert_l1_batch") + let instrumentation = Instrumented::new("mark_l1_batch_as_sealed") .with_arg("number", &header.number) .with_arg( "initial_bootloader_contents.len", @@ -601,61 +720,35 @@ impl BlocksDal<'_, '_> { let query = sqlx::query!( r#" - INSERT INTO - l1_batches ( - number, - l1_tx_count, - l2_tx_count, - timestamp, - l2_to_l1_messages, - bloom, - priority_ops_onchain_data, - predicted_commit_gas_cost, - predicted_prove_gas_cost, - predicted_execute_gas_cost, - initial_bootloader_heap_content, - used_contract_hashes, - bootloader_code_hash, - default_aa_code_hash, - protocol_version, - system_logs, - storage_refunds, - pubdata_costs, - pubdata_input, - predicted_circuits_by_type, - created_at, - updated_at - ) - VALUES - ( - $1, - $2, - $3, - $4, - $5, - $6, - $7, - $8, - $9, - $10, - $11, - $12, - $13, - $14, - $15, - $16, - $17, - $18, - $19, - $20, - NOW(), - NOW() - ) + UPDATE l1_batches + SET + l1_tx_count = $2, + l2_tx_count = $3, + l2_to_l1_messages = $4, + bloom = $5, + priority_ops_onchain_data = $6, + predicted_commit_gas_cost = $7, + predicted_prove_gas_cost = $8, + predicted_execute_gas_cost = $9, + initial_bootloader_heap_content = $10, + used_contract_hashes = $11, + bootloader_code_hash = $12, + default_aa_code_hash = $13, + evm_emulator_code_hash = $14, + protocol_version = $15, + system_logs = $16, + storage_refunds = $17, + pubdata_costs = $18, + pubdata_input = $19, + predicted_circuits_by_type = $20, + updated_at = NOW(), + is_sealed = TRUE + WHERE + number = $1 "#, i64::from(header.number.0), i32::from(header.l1_tx_count), i32::from(header.l2_tx_count), - header.timestamp as i64, &header.l2_to_l1_messages, header.bloom.as_bytes(), &priority_onchain_data, @@ -666,6 +759,11 @@ impl BlocksDal<'_, '_> { used_contract_hashes, header.base_system_contracts_hashes.bootloader.as_bytes(), header.base_system_contracts_hashes.default_aa.as_bytes(), + header + .base_system_contracts_hashes + .evm_emulator + .as_ref() + .map(H256::as_bytes), header.protocol_version.map(|v| v as i32), &system_logs, &storage_refunds, @@ -673,13 +771,47 @@ impl BlocksDal<'_, '_> { pubdata_input, serde_json::to_value(predicted_circuits_by_type).unwrap(), ); + let update_result = instrumentation.with(query).execute(self.storage).await?; - let mut transaction = self.storage.start_transaction().await?; - instrumentation - .with(query) - .execute(&mut transaction) - .await?; - transaction.commit().await + if update_result.rows_affected() == 0 { + anyhow::bail!( + "L1 batch sealing failed: batch #{} was not 
found", + header.number + ); + } + + Ok(()) + } + + pub async fn get_unsealed_l1_batch(&mut self) -> DalResult> { + Self::get_unsealed_l1_batch_inner(self.storage).await + } + + async fn get_unsealed_l1_batch_inner( + conn: &mut Connection<'_, Core>, + ) -> DalResult> { + let batch = sqlx::query_as!( + UnsealedStorageL1Batch, + r#" + SELECT + number, + timestamp, + protocol_version, + fee_address, + l1_gas_price, + l2_fair_gas_price, + fair_pubdata_price + FROM + l1_batches + WHERE + NOT is_sealed + "#, + ) + .instrument("get_unsealed_l1_batch") + .fetch_optional(conn) + .await?; + + Ok(batch.map(|b| b.into())) } pub async fn insert_l2_block(&mut self, l2_block_header: &L2BlockHeader) -> DalResult<()> { @@ -710,13 +842,14 @@ impl BlocksDal<'_, '_> { gas_per_pubdata_limit, bootloader_code_hash, default_aa_code_hash, + evm_emulator_code_hash, protocol_version, virtual_blocks, fair_pubdata_price, gas_limit, + logs_bloom, l2_da_validator_address, pubdata_type, - logs_bloom, created_at, updated_at ) @@ -741,6 +874,7 @@ impl BlocksDal<'_, '_> { $17, $18, $19, + $20, NOW(), NOW() ) @@ -763,16 +897,21 @@ impl BlocksDal<'_, '_> { .base_system_contracts_hashes .default_aa .as_bytes(), + l2_block_header + .base_system_contracts_hashes + .evm_emulator + .as_ref() + .map(H256::as_bytes), l2_block_header.protocol_version.map(|v| v as i32), i64::from(l2_block_header.virtual_blocks), l2_block_header.batch_fee_input.fair_pubdata_price() as i64, l2_block_header.gas_limit as i64, + l2_block_header.logs_bloom.as_bytes(), l2_block_header .pubdata_params .l2_da_validator_address .as_bytes(), l2_block_header.pubdata_params.pubdata_type.to_string(), - l2_block_header.logs_bloom.as_bytes(), ); instrumentation.with(query).execute(self.storage).await?; @@ -790,19 +929,20 @@ impl BlocksDal<'_, '_> { l1_tx_count, l2_tx_count, fee_account_address AS "fee_account_address!", - l2_da_validator_address AS "l2_da_validator_address!", - pubdata_type AS "pubdata_type!", base_fee_per_gas, l1_gas_price, l2_fair_gas_price, gas_per_pubdata_limit, bootloader_code_hash, default_aa_code_hash, + evm_emulator_code_hash, protocol_version, virtual_blocks, fair_pubdata_price, gas_limit, - logs_bloom + logs_bloom, + l2_da_validator_address, + pubdata_type FROM miniblocks ORDER BY @@ -832,19 +972,20 @@ impl BlocksDal<'_, '_> { l1_tx_count, l2_tx_count, fee_account_address AS "fee_account_address!", - l2_da_validator_address AS "l2_da_validator_address!", - pubdata_type AS "pubdata_type!", base_fee_per_gas, l1_gas_price, l2_fair_gas_price, gas_per_pubdata_limit, bootloader_code_hash, default_aa_code_hash, + evm_emulator_code_hash, protocol_version, virtual_blocks, fair_pubdata_price, gas_limit, - logs_bloom + logs_bloom, + l2_da_validator_address, + pubdata_type FROM miniblocks WHERE @@ -1064,6 +1205,7 @@ impl BlocksDal<'_, '_> { zkporter_is_available, bootloader_code_hash, default_aa_code_hash, + evm_emulator_code_hash, aux_data_hash, pass_through_data_hash, meta_parameters_hash, @@ -1073,6 +1215,7 @@ impl BlocksDal<'_, '_> { events_queue_commitment, bootloader_initial_content_commitment, pubdata_input, + fee_address, aggregation_root, local_root, state_diff_hash, @@ -1257,6 +1400,7 @@ impl BlocksDal<'_, '_> { zkporter_is_available, bootloader_code_hash, default_aa_code_hash, + evm_emulator_code_hash, aux_data_hash, pass_through_data_hash, meta_parameters_hash, @@ -1266,6 +1410,7 @@ impl BlocksDal<'_, '_> { events_queue_commitment, bootloader_initial_content_commitment, pubdata_input, + fee_address, aggregation_root, local_root, 
state_diff_hash, @@ -1344,6 +1489,7 @@ impl BlocksDal<'_, '_> { zkporter_is_available, bootloader_code_hash, default_aa_code_hash, + evm_emulator_code_hash, aux_data_hash, pass_through_data_hash, meta_parameters_hash, @@ -1353,6 +1499,7 @@ impl BlocksDal<'_, '_> { events_queue_commitment, bootloader_initial_content_commitment, pubdata_input, + fee_address, aggregation_root, local_root, state_diff_hash, @@ -1422,6 +1569,7 @@ impl BlocksDal<'_, '_> { zkporter_is_available, bootloader_code_hash, default_aa_code_hash, + evm_emulator_code_hash, aux_data_hash, pass_through_data_hash, meta_parameters_hash, @@ -1431,6 +1579,7 @@ impl BlocksDal<'_, '_> { events_queue_commitment, bootloader_initial_content_commitment, pubdata_input, + fee_address, aggregation_root, local_root, state_diff_hash, @@ -1580,6 +1729,7 @@ impl BlocksDal<'_, '_> { zkporter_is_available, bootloader_code_hash, default_aa_code_hash, + evm_emulator_code_hash, aux_data_hash, pass_through_data_hash, meta_parameters_hash, @@ -1589,6 +1739,7 @@ impl BlocksDal<'_, '_> { events_queue_commitment, bootloader_initial_content_commitment, pubdata_input, + fee_address, aggregation_root, local_root, state_diff_hash, @@ -1651,6 +1802,7 @@ impl BlocksDal<'_, '_> { zkporter_is_available, l1_batches.bootloader_code_hash, l1_batches.default_aa_code_hash, + l1_batches.evm_emulator_code_hash, aux_data_hash, pass_through_data_hash, meta_parameters_hash, @@ -1660,6 +1812,7 @@ impl BlocksDal<'_, '_> { events_queue_commitment, bootloader_initial_content_commitment, pubdata_input, + fee_address, aggregation_root, local_root, state_diff_hash, @@ -1736,6 +1889,7 @@ impl BlocksDal<'_, '_> { zkporter_is_available, l1_batches.bootloader_code_hash, l1_batches.default_aa_code_hash, + l1_batches.evm_emulator_code_hash, aux_data_hash, pass_through_data_hash, meta_parameters_hash, @@ -1745,6 +1899,7 @@ impl BlocksDal<'_, '_> { events_queue_commitment, bootloader_initial_content_commitment, pubdata_input, + fee_address, aggregation_root, local_root, state_diff_hash, @@ -1997,6 +2152,37 @@ impl BlocksDal<'_, '_> { Ok(()) } + /// Deletes the unsealed L1 batch from the storage. Expects the caller to make sure there are no + /// associated L2 blocks. + /// + /// Accepts `batch_to_keep` as a safety mechanism. + pub async fn delete_unsealed_l1_batch( + &mut self, + batch_to_keep: L1BatchNumber, + ) -> DalResult<()> { + let deleted_row = sqlx::query!( + r#" + DELETE FROM l1_batches + WHERE + number > $1 + AND NOT is_sealed + RETURNING number + "#, + i64::from(batch_to_keep.0) + ) + .instrument("delete_unsealed_l1_batch") + .with_arg("batch_to_keep", &batch_to_keep) + .fetch_optional(self.storage) + .await?; + if let Some(deleted_row) = deleted_row { + tracing::info!( + l1_batch_number = %deleted_row.number, + "Deleted unsealed batch" + ); + } + Ok(()) + } + /// Deletes all L1 batches from the storage so that the specified batch number is the last one left. 
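+    // For reference: a minimal sketch (mirroring `insert_mock_l1_batch` and the tests
+    // below; `header` and `predicted_gas` are assumed to be in scope) of the two-phase
+    // lifecycle that `insert_l1_batch` / `mark_l1_batch_as_sealed` and the
+    // unsealed-batch helpers above implement:
+    //
+    //     let fee_input = BatchFeeInput::pubdata_independent(100, 100, 100);
+    //     conn.blocks_dal()
+    //         .insert_l1_batch(header.to_unsealed_header(fee_input)) // opens the batch (is_sealed = FALSE)
+    //         .await?;
+    //     conn.blocks_dal()
+    //         .mark_l1_batch_as_sealed(&header, &[], predicted_gas, &[], &[], Default::default())
+    //         .await?; // fills in runtime data and flips is_sealed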
pub async fn delete_l1_batches(&mut self, last_batch_to_keep: L1BatchNumber) -> DalResult<()> { self.delete_l1_batches_inner(Some(last_batch_to_keep)).await @@ -2147,7 +2333,8 @@ FROM l1_batches WHERE - eth_commit_tx_id IS NULL + is_sealed + AND eth_commit_tx_id IS NULL AND number > 0 ORDER BY number @@ -2169,7 +2356,8 @@ FROM l1_batches WHERE - eth_prove_tx_id IS NULL + is_sealed + AND eth_prove_tx_id IS NULL AND number > 0 ORDER BY number @@ -2191,7 +2379,8 @@ FROM l1_batches WHERE - eth_execute_tx_id IS NULL + is_sealed + AND eth_execute_tx_id IS NULL AND number > 0 ORDER BY number @@ -2216,7 +2405,8 @@ FROM l1_batches WHERE - number = $1 + is_sealed + AND number = $1 "#, i64::from(l1_batch_number.0) ) @@ -2286,7 +2476,8 @@ FROM l1_batches WHERE - protocol_version = $1 + is_sealed + AND protocol_version = $1 "#, protocol_version as i32 ) @@ -2592,8 +2783,12 @@ Ok(()) } - pub async fn insert_mock_l1_batch(&mut self, header: &L1BatchHeader) -> DalResult<()> { + pub async fn insert_mock_l1_batch(&mut self, header: &L1BatchHeader) -> anyhow::Result<()> { self.insert_l1_batch( + header.to_unsealed_header(BatchFeeInput::pubdata_independent(100, 100, 100)), + ) + .await?; + self.mark_l1_batch_as_sealed( header, &[], Default::default(), @@ -2789,6 +2984,40 @@ .is_err()); } + #[tokio::test] + async fn persisting_evm_emulator_hash() { + let pool = ConnectionPool::<Core>::test_pool().await; + let mut conn = pool.connection().await.unwrap(); + + conn.protocol_versions_dal() + .save_protocol_version_with_tx(&ProtocolVersion::default()) + .await + .unwrap(); + + let mut l2_block_header = create_l2_block_header(1); + l2_block_header.base_system_contracts_hashes.evm_emulator = Some(H256::repeat_byte(0x23)); + conn.blocks_dal() + .insert_l2_block(&l2_block_header) + .await + .unwrap(); + + let mut fetched_block_header = conn + .blocks_dal() + .get_last_sealed_l2_block_header() + .await + .unwrap() + .expect("no block"); + // Batch fee input isn't restored exactly + fetched_block_header.batch_fee_input = l2_block_header.batch_fee_input; + + assert_eq!(fetched_block_header, l2_block_header); + // ...and a sanity check just to be sure + assert!(fetched_block_header + .base_system_contracts_hashes + .evm_emulator + .is_some()); + } + #[tokio::test] async fn loading_l1_batch_header() { let pool = ConnectionPool::<Core>::test_pool().await; @@ -2870,7 +3099,13 @@ execute: 10, }; conn.blocks_dal() - .insert_l1_batch(&header, &[], predicted_gas, &[], &[], Default::default()) + .insert_l1_batch( + header.to_unsealed_header(BatchFeeInput::pubdata_independent(100, 100, 100)), + ) + .await + .unwrap(); + conn.blocks_dal() + .mark_l1_batch_as_sealed(&header, &[], predicted_gas, &[], &[], Default::default()) .await .unwrap(); + header.timestamp += 100; predicted_gas += predicted_gas; conn.blocks_dal() - .insert_l1_batch(&header, &[], predicted_gas, &[], &[], Default::default()) + .insert_l1_batch( + header.to_unsealed_header(BatchFeeInput::pubdata_independent(100, 100, 100)), + ) + .await + .unwrap(); + conn.blocks_dal() + .mark_l1_batch_as_sealed(&header, &[], predicted_gas, &[], &[], Default::default()) .await .unwrap(); diff --git a/core/lib/dal/src/blocks_web3_dal.rs index 904e167d1a6..4cb57798638 100644 --- a/core/lib/dal/src/blocks_web3_dal.rs +++
b/core/lib/dal/src/blocks_web3_dal.rs @@ -5,6 +5,7 @@ use zksync_db_connection::{ use zksync_system_constants::EMPTY_UNCLES_HASH; use zksync_types::{ api, + debug_flat_call::CallTraceMeta, fee_model::BatchFeeInput, l2_to_l1_log::L2ToL1Log, web3::{BlockHeader, Bytes}, @@ -531,11 +532,12 @@ impl BlocksWeb3Dal<'_, '_> { pub async fn get_traces_for_l2_block( &mut self, block_number: L2BlockNumber, - ) -> DalResult<Vec<(Call, H256, usize)>> { - let protocol_version = sqlx::query!( + ) -> DalResult<Vec<(Call, CallTraceMeta)>> { + let row = sqlx::query!( r#" SELECT - protocol_version + protocol_version, + hash FROM miniblocks WHERE @@ -543,14 +545,20 @@ "#, i64::from(block_number.0) ) - .try_map(|row| row.protocol_version.map(parse_protocol_version).transpose()) + .try_map(|row| { + row.protocol_version + .map(parse_protocol_version) + .transpose() + .map(|val| (val, H256::from_slice(&row.hash))) + }) .instrument("get_traces_for_l2_block#get_l2_block_protocol_version_id") .with_arg("l2_block_number", &block_number) .fetch_optional(self.storage) .await?; - let Some(protocol_version) = protocol_version else { + let Some((protocol_version, block_hash)) = row else { return Ok(Vec::new()); }; + let protocol_version = protocol_version.unwrap_or_else(ProtocolVersionId::last_potentially_undefined); @@ -577,9 +585,15 @@ .await? .into_iter() .map(|call_trace| { - let hash = H256::from_slice(&call_trace.tx_hash); + let tx_hash = H256::from_slice(&call_trace.tx_hash); let index = call_trace.tx_index_in_block.unwrap_or_default() as usize; - (call_trace.into_call(protocol_version), hash, index) + let meta = CallTraceMeta { + index_in_block: index, + tx_hash, + block_number: block_number.0, + block_hash, + }; + (call_trace.into_call(protocol_version), meta) }) .collect()) } @@ -656,6 +670,8 @@ (MAX(number) + 1) FROM l1_batches + WHERE + is_sealed ) ) AS "l1_batch_number!", miniblocks.timestamp, @@ -673,6 +689,7 @@ miniblocks.fair_pubdata_price, miniblocks.bootloader_code_hash, miniblocks.default_aa_code_hash, + l1_batches.evm_emulator_code_hash, miniblocks.protocol_version, miniblocks.fee_account_address FROM @@ -744,7 +761,8 @@ mb.l2_fair_gas_price, mb.fair_pubdata_price, l1_batches.bootloader_code_hash, - l1_batches.default_aa_code_hash + l1_batches.default_aa_code_hash, + l1_batches.evm_emulator_code_hash FROM l1_batches INNER JOIN mb ON TRUE @@ -1101,9 +1119,9 @@ .await .unwrap(); assert_eq!(traces.len(), 2); - for ((trace, hash, _index), tx_result) in traces.iter().zip(&tx_results) { + for ((trace, meta), tx_result) in traces.iter().zip(&tx_results) { let expected_trace = tx_result.call_trace().unwrap(); - assert_eq!(&tx_result.hash, hash); + assert_eq!(tx_result.hash, meta.tx_hash); assert_eq!(*trace, expected_trace); } } diff --git a/core/lib/dal/src/consensus/conv.rs b/core/lib/dal/src/consensus/conv.rs new file mode 100644 index 00000000000..f0948adfd1d --- /dev/null +++ b/core/lib/dal/src/consensus/conv.rs @@ -0,0 +1,589 @@ +//! Protobuf conversion functions.
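+//!
+//! The `ProtoFmt`/`ProtoRepr` implementations below were moved out of `mod.rs`;
+//! `mod.rs` now only declares the plain data types (`BlockMetadata`, `GlobalConfig`,
+//! `AttestationStatus`, `Payload`) that these conversions operate on.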
+use anyhow::{anyhow, Context as _}; +use zksync_concurrency::net; +use zksync_consensus_roles::{attester, node}; +use zksync_protobuf::{read_optional_repr, read_required, required, ProtoFmt, ProtoRepr}; +use zksync_types::{ + abi, + commitment::{L1BatchCommitmentMode, PubdataParams}, + ethabi, + fee::Fee, + l1::{OpProcessingType, PriorityQueueType}, + l2::TransactionType, + parse_h160, parse_h256, + protocol_upgrade::ProtocolUpgradeTxCommonData, + transaction_request::PaymasterParams, + Execute, ExecuteTransactionCommon, InputData, L1BatchNumber, L1TxCommonData, L2TxCommonData, + Nonce, PriorityOpId, ProtocolVersionId, Transaction, H256, +}; +use zksync_utils::{h256_to_u256, u256_to_h256}; + +use super::*; + +impl ProtoFmt for BlockMetadata { + type Proto = proto::BlockMetadata; + fn read(r: &Self::Proto) -> anyhow::Result { + Ok(Self { + payload_hash: read_required(&r.payload_hash).context("payload_hash")?, + }) + } + fn build(&self) -> Self::Proto { + Self::Proto { + payload_hash: Some(self.payload_hash.build()), + } + } +} + +impl ProtoRepr for proto::NodeAddr { + type Type = (node::PublicKey, net::Host); + fn read(&self) -> anyhow::Result { + Ok(( + read_required(&self.key).context("key")?, + net::Host(required(&self.addr).context("addr")?.clone()), + )) + } + fn build(this: &Self::Type) -> Self { + Self { + key: Some(this.0.build()), + addr: Some(this.1 .0.clone()), + } + } +} + +impl ProtoFmt for GlobalConfig { + type Proto = proto::GlobalConfig; + + fn read(r: &Self::Proto) -> anyhow::Result { + Ok(Self { + genesis: read_required(&r.genesis).context("genesis")?, + registry_address: r + .registry_address + .as_ref() + .map(|a| parse_h160(a)) + .transpose() + .context("registry_address")?, + seed_peers: r + .seed_peers + .iter() + .enumerate() + .map(|(i, e)| e.read().context(i)) + .collect::>() + .context("seed_peers")?, + }) + } + + fn build(&self) -> Self::Proto { + Self::Proto { + genesis: Some(self.genesis.build()), + registry_address: self.registry_address.map(|a| a.as_bytes().to_vec()), + seed_peers: self + .seed_peers + .iter() + .map(|(k, v)| ProtoRepr::build(&(k.clone(), v.clone()))) + .collect(), + } + } +} +impl ProtoFmt for AttestationStatus { + type Proto = proto::AttestationStatus; + + fn read(r: &Self::Proto) -> anyhow::Result { + Ok(Self { + genesis: read_required(&r.genesis).context("genesis")?, + next_batch_to_attest: attester::BatchNumber( + *required(&r.next_batch_to_attest).context("next_batch_to_attest")?, + ), + }) + } + + fn build(&self) -> Self::Proto { + Self::Proto { + genesis: Some(self.genesis.build()), + next_batch_to_attest: Some(self.next_batch_to_attest.0), + } + } +} + +impl ProtoRepr for proto::PubdataParams { + type Type = PubdataParams; + + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + l2_da_validator_address: required(&self.l2_da_validator_address) + .and_then(|a| parse_h160(a)) + .context("l2_da_validator_address")?, + pubdata_type: required(&self.pubdata_type) + .and_then(|x| Ok(proto::L1BatchCommitDataGeneratorMode::try_from(*x)?)) + .context("pubdata_type")? 
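+                // `parse` converts the wire enum back into `L1BatchCommitmentMode`;
+                // its inverse, `L1BatchCommitDataGeneratorMode::new`, is used in `build` below.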
+ .parse(), + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + l2_da_validator_address: Some(this.l2_da_validator_address.as_bytes().into()), + pubdata_type: Some( + proto::L1BatchCommitDataGeneratorMode::new(&this.pubdata_type) as i32, + ), + } + } +} + +impl ProtoFmt for Payload { + type Proto = proto::Payload; + + fn read(r: &Self::Proto) -> anyhow::Result { + let protocol_version = required(&r.protocol_version) + .and_then(|x| Ok(ProtocolVersionId::try_from(u16::try_from(*x)?)?)) + .context("protocol_version")?; + let mut transactions = vec![]; + + match protocol_version { + v if v >= ProtocolVersionId::Version25 => { + anyhow::ensure!( + r.transactions.is_empty(), + "transactions should be empty in protocol_version {v}" + ); + for (i, tx) in r.transactions_v25.iter().enumerate() { + transactions.push( + tx.read() + .with_context(|| format!("transactions_v25[{i}]"))?, + ); + } + } + v => { + anyhow::ensure!( + r.transactions_v25.is_empty(), + "transactions_v25 should be empty in protocol_version {v}" + ); + for (i, tx) in r.transactions.iter().enumerate() { + transactions.push(tx.read().with_context(|| format!("transactions[{i}]"))?) + } + } + } + + let this = Self { + protocol_version, + hash: required(&r.hash) + .and_then(|h| parse_h256(h)) + .context("hash")?, + l1_batch_number: L1BatchNumber( + *required(&r.l1_batch_number).context("l1_batch_number")?, + ), + timestamp: *required(&r.timestamp).context("timestamp")?, + l1_gas_price: *required(&r.l1_gas_price).context("l1_gas_price")?, + l2_fair_gas_price: *required(&r.l2_fair_gas_price).context("l2_fair_gas_price")?, + fair_pubdata_price: r.fair_pubdata_price, + virtual_blocks: *required(&r.virtual_blocks).context("virtual_blocks")?, + operator_address: required(&r.operator_address) + .and_then(|a| parse_h160(a)) + .context("operator_address")?, + transactions, + last_in_batch: *required(&r.last_in_batch).context("last_in_batch")?, + pubdata_params: read_optional_repr(&r.pubdata_params) + .context("pubdata_params")? + .unwrap_or_default(), + }; + if this.protocol_version.is_pre_gateway() { + anyhow::ensure!( + this.pubdata_params == PubdataParams::default(), + "pubdata_params should have the default value in pre-gateway protocol_version" + ); + } + if this.pubdata_params == PubdataParams::default() { + anyhow::ensure!( + r.pubdata_params.is_none(), + "default pubdata_params should be encoded as None" + ); + } + Ok(this) + } + + fn build(&self) -> Self::Proto { + if self.protocol_version.is_pre_gateway() { + assert_eq!( + self.pubdata_params, PubdataParams::default(), + "BUG DETECTED: pubdata_params should have the default value in pre-gateway protocol_version" + ); + } + let mut x = Self::Proto { + protocol_version: Some((self.protocol_version as u16).into()), + hash: Some(self.hash.as_bytes().into()), + l1_batch_number: Some(self.l1_batch_number.0), + timestamp: Some(self.timestamp), + l1_gas_price: Some(self.l1_gas_price), + l2_fair_gas_price: Some(self.l2_fair_gas_price), + fair_pubdata_price: self.fair_pubdata_price, + virtual_blocks: Some(self.virtual_blocks), + operator_address: Some(self.operator_address.as_bytes().into()), + // Transactions are stored in execution order, therefore order is deterministic. 
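+            // Both fields start out empty; exactly one of them is populated in the
+            // protocol-version `match` right after this struct literal.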
+ transactions: vec![], + transactions_v25: vec![], + last_in_batch: Some(self.last_in_batch), + pubdata_params: if self.pubdata_params == PubdataParams::default() { + None + } else { + Some(ProtoRepr::build(&self.pubdata_params)) + }, + }; + match self.protocol_version { + v if v >= ProtocolVersionId::Version25 => { + x.transactions_v25 = self.transactions.iter().map(ProtoRepr::build).collect(); + } + _ => { + x.transactions = self.transactions.iter().map(ProtoRepr::build).collect(); + } + } + x + } +} + +impl ProtoRepr for proto::TransactionV25 { + type Type = Transaction; + + fn read(&self) -> anyhow::Result { + use proto::transaction_v25::T; + let tx = match required(&self.t)? { + T::L1(l1) => abi::Transaction::L1 { + tx: required(&l1.rlp) + .and_then(|x| { + let tokens = ethabi::decode(&[abi::L2CanonicalTransaction::schema()], x) + .context("ethabi::decode()")?; + // Unwrap is safe because `ethabi::decode` does the verification. + let tx = + abi::L2CanonicalTransaction::decode(tokens.into_iter().next().unwrap()) + .context("L2CanonicalTransaction::decode()")?; + Ok(tx) + }) + .context("rlp")? + .into(), + factory_deps: l1.factory_deps.clone(), + eth_block: 0, + }, + T::L2(l2) => abi::Transaction::L2(required(&l2.rlp).context("rlp")?.clone()), + }; + Transaction::from_abi(tx, true) + } + + fn build(tx: &Self::Type) -> Self { + let tx = abi::Transaction::try_from(tx.clone()).unwrap(); + use proto::transaction_v25::T; + Self { + t: Some(match tx { + abi::Transaction::L1 { + tx, factory_deps, .. + } => T::L1(proto::L1Transaction { + rlp: Some(ethabi::encode(&[tx.encode()])), + factory_deps, + }), + abi::Transaction::L2(tx) => T::L2(proto::L2Transaction { rlp: Some(tx) }), + }), + } + } +} + +impl ProtoRepr for proto::Transaction { + type Type = Transaction; + + fn read(&self) -> anyhow::Result { + let common_data = required(&self.common_data).context("common_data")?; + let execute = required(&self.execute).context("execute")?; + Ok(Self::Type { + common_data: match common_data { + proto::transaction::CommonData::L1(common_data) => { + anyhow::ensure!( + *required(&common_data.deadline_block) + .context("common_data.deadline_block")? + == 0 + ); + anyhow::ensure!( + required(&common_data.eth_hash) + .and_then(|x| parse_h256(x)) + .context("common_data.eth_hash")? + == H256::default() + ); + ExecuteTransactionCommon::L1(L1TxCommonData { + sender: required(&common_data.sender_address) + .and_then(|x| parse_h160(x)) + .context("common_data.sender_address")?, + serial_id: required(&common_data.serial_id) + .map(|x| PriorityOpId(*x)) + .context("common_data.serial_id")?, + layer_2_tip_fee: required(&common_data.layer_2_tip_fee) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("common_data.layer_2_tip_fee")?, + full_fee: required(&common_data.full_fee) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("common_data.full_fee")?, + max_fee_per_gas: required(&common_data.max_fee_per_gas) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("common_data.max_fee_per_gas")?, + gas_limit: required(&common_data.gas_limit) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("common_data.gas_limit")?, + gas_per_pubdata_limit: required(&common_data.gas_per_pubdata_limit) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("common_data.gas_per_pubdata_limit")?, + op_processing_type: required(&common_data.op_processing_type) + .and_then(|x| { + OpProcessingType::try_from(u8::try_from(*x)?) 
+ .map_err(|_| anyhow!("u8::try_from")) + }) + .context("common_data.op_processing_type")?, + priority_queue_type: required(&common_data.priority_queue_type) + .and_then(|x| { + PriorityQueueType::try_from(u8::try_from(*x)?) + .map_err(|_| anyhow!("u8::try_from")) + }) + .context("common_data.priority_queue_type")?, + eth_block: *required(&common_data.eth_block) + .context("common_data.eth_block")?, + canonical_tx_hash: required(&common_data.canonical_tx_hash) + .and_then(|x| parse_h256(x)) + .context("common_data.canonical_tx_hash")?, + to_mint: required(&common_data.to_mint) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("common_data.to_mint")?, + refund_recipient: required(&common_data.refund_recipient_address) + .and_then(|x| parse_h160(x)) + .context("common_data.refund_recipient_address")?, + }) + } + proto::transaction::CommonData::L2(common_data) => { + ExecuteTransactionCommon::L2(L2TxCommonData { + nonce: required(&common_data.nonce) + .map(|x| Nonce(*x)) + .context("common_data.nonce")?, + fee: Fee { + gas_limit: required(&common_data.gas_limit) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("common_data.gas_limit")?, + max_fee_per_gas: required(&common_data.max_fee_per_gas) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("common_data.max_fee_per_gas")?, + max_priority_fee_per_gas: required( + &common_data.max_priority_fee_per_gas, + ) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("common_data.max_priority_fee_per_gas")?, + gas_per_pubdata_limit: required(&common_data.gas_per_pubdata_limit) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("common_data.gas_per_pubdata_limit")?, + }, + initiator_address: required(&common_data.initiator_address) + .and_then(|x| parse_h160(x)) + .context("common_data.initiator_address")?, + signature: required(&common_data.signature) + .context("common_data.signature")? + .clone(), + transaction_type: required(&common_data.transaction_type) + .and_then(|x| Ok(TransactionType::try_from(*x)?)) + .context("common_data.transaction_type")?, + input: { + match &common_data.input { + None => None, + Some(input) => Some(InputData { + hash: required(&input.hash) + .and_then(|x| parse_h256(x)) + .context("common_data.input.hash")?, + data: required(&input.data) + .context("common_data.input.data")? + .clone(), + }), + } + }, + paymaster_params: { + let params = required(&common_data.paymaster_params)?; + PaymasterParams { + paymaster: required(¶ms.paymaster_address) + .and_then(|x| parse_h160(x)) + .context("common_data.paymaster_params.paymaster_address")?, + paymaster_input: required(¶ms.paymaster_input) + .context("common_data.paymaster_params.paymaster_input")? 
+ .clone(), + } + }, + }) + } + proto::transaction::CommonData::ProtocolUpgrade(common_data) => { + ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { + sender: required(&common_data.sender_address) + .and_then(|x| parse_h160(x)) + .context("common_data.sender_address")?, + upgrade_id: required(&common_data.upgrade_id) + .and_then(|x| Ok(ProtocolVersionId::try_from(u16::try_from(*x)?)?)) + .context("common_data.upgrade_id")?, + max_fee_per_gas: required(&common_data.max_fee_per_gas) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("common_data.max_fee_per_gas")?, + gas_limit: required(&common_data.gas_limit) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("common_data.gas_limit")?, + gas_per_pubdata_limit: required(&common_data.gas_per_pubdata_limit) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("common_data.gas_per_pubdata_limit")?, + eth_block: *required(&common_data.eth_block) + .context("common_data.eth_block")?, + canonical_tx_hash: required(&common_data.canonical_tx_hash) + .and_then(|x| parse_h256(x)) + .context("common_data.canonical_tx_hash")?, + to_mint: required(&common_data.to_mint) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("common_data.to_mint")?, + refund_recipient: required(&common_data.refund_recipient_address) + .and_then(|x| parse_h160(x)) + .context("common_data.refund_recipient_address")?, + }) + } + }, + execute: Execute { + contract_address: execute + .contract_address + .as_ref() + .and_then(|x| parse_h160(x).ok()), + calldata: required(&execute.calldata).context("calldata")?.clone(), + value: required(&execute.value) + .and_then(|x| parse_h256(x)) + .map(h256_to_u256) + .context("execute.value")?, + factory_deps: execute.factory_deps.clone(), + }, + received_timestamp_ms: 0, // This timestamp is local to the node + raw_bytes: self.raw_bytes.as_ref().map(|x| x.clone().into()), + }) + } + + fn build(this: &Self::Type) -> Self { + let common_data = match &this.common_data { + ExecuteTransactionCommon::L1(data) => { + proto::transaction::CommonData::L1(proto::L1TxCommonData { + sender_address: Some(data.sender.as_bytes().into()), + serial_id: Some(data.serial_id.0), + deadline_block: Some(0), + layer_2_tip_fee: Some(u256_to_h256(data.layer_2_tip_fee).as_bytes().into()), + full_fee: Some(u256_to_h256(data.full_fee).as_bytes().into()), + max_fee_per_gas: Some(u256_to_h256(data.max_fee_per_gas).as_bytes().into()), + gas_limit: Some(u256_to_h256(data.gas_limit).as_bytes().into()), + gas_per_pubdata_limit: Some( + u256_to_h256(data.gas_per_pubdata_limit).as_bytes().into(), + ), + op_processing_type: Some(data.op_processing_type as u32), + priority_queue_type: Some(data.priority_queue_type as u32), + eth_hash: Some(H256::default().as_bytes().into()), + eth_block: Some(data.eth_block), + canonical_tx_hash: Some(data.canonical_tx_hash.as_bytes().into()), + to_mint: Some(u256_to_h256(data.to_mint).as_bytes().into()), + refund_recipient_address: Some(data.refund_recipient.as_bytes().into()), + }) + } + ExecuteTransactionCommon::L2(data) => { + proto::transaction::CommonData::L2(proto::L2TxCommonData { + nonce: Some(data.nonce.0), + gas_limit: Some(u256_to_h256(data.fee.gas_limit).as_bytes().into()), + max_fee_per_gas: Some(u256_to_h256(data.fee.max_fee_per_gas).as_bytes().into()), + max_priority_fee_per_gas: Some( + u256_to_h256(data.fee.max_priority_fee_per_gas) + .as_bytes() + .into(), + ), + gas_per_pubdata_limit: Some( + u256_to_h256(data.fee.gas_per_pubdata_limit) + .as_bytes() + .into(), 
+ ), + initiator_address: Some(data.initiator_address.as_bytes().into()), + signature: Some(data.signature.clone()), + transaction_type: Some(data.transaction_type as u32), + input: data.input.as_ref().map(|input_data| proto::InputData { + data: Some(input_data.data.clone()), + hash: Some(input_data.hash.as_bytes().into()), + }), + paymaster_params: Some(proto::PaymasterParams { + paymaster_input: Some(data.paymaster_params.paymaster_input.clone()), + paymaster_address: Some(data.paymaster_params.paymaster.as_bytes().into()), + }), + }) + } + ExecuteTransactionCommon::ProtocolUpgrade(data) => { + proto::transaction::CommonData::ProtocolUpgrade( + proto::ProtocolUpgradeTxCommonData { + sender_address: Some(data.sender.as_bytes().into()), + upgrade_id: Some(data.upgrade_id as u32), + max_fee_per_gas: Some(u256_to_h256(data.max_fee_per_gas).as_bytes().into()), + gas_limit: Some(u256_to_h256(data.gas_limit).as_bytes().into()), + gas_per_pubdata_limit: Some( + u256_to_h256(data.gas_per_pubdata_limit).as_bytes().into(), + ), + eth_hash: Some(H256::default().as_bytes().into()), + eth_block: Some(data.eth_block), + canonical_tx_hash: Some(data.canonical_tx_hash.as_bytes().into()), + to_mint: Some(u256_to_h256(data.to_mint).as_bytes().into()), + refund_recipient_address: Some(data.refund_recipient.as_bytes().into()), + }, + ) + } + }; + let execute = proto::Execute { + contract_address: this.execute.contract_address.map(|x| x.as_bytes().into()), + calldata: Some(this.execute.calldata.clone()), + value: Some(u256_to_h256(this.execute.value).as_bytes().into()), + factory_deps: this.execute.factory_deps.clone(), + }; + Self { + common_data: Some(common_data), + execute: Some(execute), + raw_bytes: this.raw_bytes.as_ref().map(|inner| inner.0.clone()), + } + } +} + +impl ProtoRepr for proto::AttesterCommittee { + type Type = attester::Committee; + + fn read(&self) -> anyhow::Result { + let members: Vec<_> = self + .members + .iter() + .enumerate() + .map(|(i, m)| attester::WeightedAttester::read(m).context(i)) + .collect::>() + .context("members")?; + Self::Type::new(members) + } + + fn build(this: &Self::Type) -> Self { + Self { + members: this.iter().map(|x| x.build()).collect(), + } + } +} + +impl proto::L1BatchCommitDataGeneratorMode { + pub(crate) fn new(n: &L1BatchCommitmentMode) -> Self { + match n { + L1BatchCommitmentMode::Rollup => Self::Rollup, + L1BatchCommitmentMode::Validium => Self::Validium, + } + } + + pub(crate) fn parse(&self) -> L1BatchCommitmentMode { + match self { + Self::Rollup => L1BatchCommitmentMode::Rollup, + Self::Validium => L1BatchCommitmentMode::Validium, + } + } +} diff --git a/core/lib/dal/src/consensus/mod.rs b/core/lib/dal/src/consensus/mod.rs index 2a2df0adb45..96efc634835 100644 --- a/core/lib/dal/src/consensus/mod.rs +++ b/core/lib/dal/src/consensus/mod.rs @@ -1,31 +1,22 @@ -pub mod proto; - -#[cfg(test)] -mod testonly; -#[cfg(test)] -mod tests; - use std::collections::BTreeMap; -use anyhow::{anyhow, Context as _}; use zksync_concurrency::net; use zksync_consensus_roles::{attester, node, validator}; -use zksync_protobuf::{read_required, required, ProtoFmt, ProtoRepr}; use zksync_types::{ - abi, - commitment::{L1BatchCommitmentMode, PubdataParams}, - ethabi, - fee::Fee, - l1::{OpProcessingType, PriorityQueueType}, - l2::TransactionType, - protocol_upgrade::ProtocolUpgradeTxCommonData, - transaction_request::PaymasterParams, - Address, Execute, ExecuteTransactionCommon, InputData, L1BatchNumber, L1TxCommonData, - L2TxCommonData, Nonce, PriorityOpId, 
+    commitment::PubdataParams, ethabi, Address, L1BatchNumber, ProtocolVersionId, Transaction, H256,
 };
 
-use zksync_utils::{h256_to_u256, u256_to_h256};
-
-use crate::models::{parse_h160, parse_h256};
+mod conv;
+pub mod proto;
+#[cfg(test)]
+mod testonly;
+#[cfg(test)]
+mod tests;
+
+#[derive(Debug, PartialEq, Clone)]
+pub struct BlockMetadata {
+    pub payload_hash: validator::PayloadHash,
+}
 
 /// Global config of the consensus.
 #[derive(Debug, PartialEq, Clone)]
@@ -35,57 +26,6 @@ pub struct GlobalConfig {
     pub seed_peers: BTreeMap<node::PublicKey, net::Host>,
 }
 
-impl ProtoRepr for proto::NodeAddr {
-    type Type = (node::PublicKey, net::Host);
-    fn read(&self) -> anyhow::Result<Self::Type> {
-        Ok((
-            read_required(&self.key).context("key")?,
-            net::Host(required(&self.addr).context("addr")?.clone()),
-        ))
-    }
-    fn build(this: &Self::Type) -> Self {
-        Self {
-            key: Some(this.0.build()),
-            addr: Some(this.1 .0.clone()),
-        }
-    }
-}
-
-impl ProtoFmt for GlobalConfig {
-    type Proto = proto::GlobalConfig;
-
-    fn read(r: &Self::Proto) -> anyhow::Result<Self> {
-        Ok(Self {
-            genesis: read_required(&r.genesis).context("genesis")?,
-            registry_address: r
-                .registry_address
-                .as_ref()
-                .map(|a| parse_h160(a))
-                .transpose()
-                .context("registry_address")?,
-            seed_peers: r
-                .seed_peers
-                .iter()
-                .enumerate()
-                .map(|(i, e)| e.read().context(i))
-                .collect::<anyhow::Result<_>>()
-                .context("seed_peers")?,
-        })
-    }
-
-    fn build(&self) -> Self::Proto {
-        Self::Proto {
-            genesis: Some(self.genesis.build()),
-            registry_address: self.registry_address.map(|a| a.as_bytes().to_vec()),
-            seed_peers: self
-                .seed_peers
-                .iter()
-                .map(|(k, v)| ProtoRepr::build(&(k.clone(), v.clone())))
-                .collect(),
-        }
-    }
-}
-
 /// Global attestation status served by
 /// `attestationStatus` RPC.
 #[derive(Debug, PartialEq, Clone)]
@@ -94,42 +34,6 @@ pub struct AttestationStatus {
     pub next_batch_to_attest: attester::BatchNumber,
 }
 
-impl ProtoFmt for AttestationStatus {
-    type Proto = proto::AttestationStatus;
-
-    fn read(r: &Self::Proto) -> anyhow::Result<Self> {
-        Ok(Self {
-            genesis: read_required(&r.genesis).context("genesis")?,
-            next_batch_to_attest: attester::BatchNumber(
-                *required(&r.next_batch_to_attest).context("next_batch_to_attest")?,
-            ),
-        })
-    }
-
-    fn build(&self) -> Self::Proto {
-        Self::Proto {
-            genesis: Some(self.genesis.build()),
-            next_batch_to_attest: Some(self.next_batch_to_attest.0),
-        }
-    }
-}
-
-impl proto::L1BatchCommitDataGeneratorMode {
-    pub(crate) fn new(n: &L1BatchCommitmentMode) -> Self {
-        match n {
-            L1BatchCommitmentMode::Rollup => Self::Rollup,
-            L1BatchCommitmentMode::Validium => Self::Validium,
-        }
-    }
-
-    pub(crate) fn parse(&self) -> L1BatchCommitmentMode {
-        match self {
-            Self::Rollup => L1BatchCommitmentMode::Rollup,
-            Self::Validium => L1BatchCommitmentMode::Validium,
-        }
-    }
-}
-
 /// L2 block (= miniblock) payload.
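+/// Note (editorial, non-normative): this struct is what consensus agrees on;
+/// `Payload::encode()` below wraps its protobuf encoding into a `validator::Payload`.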
 #[derive(Debug, PartialEq)]
 pub struct Payload {
@@ -142,116 +46,9 @@ pub struct Payload {
     pub fair_pubdata_price: Option<u64>,
     pub virtual_blocks: u32,
     pub operator_address: Address,
-    pub pubdata_params: Option<PubdataParams>,
     pub transactions: Vec<Transaction>,
     pub last_in_batch: bool,
-}
-
-impl ProtoFmt for Payload {
-    type Proto = proto::Payload;
-
-    fn read(r: &Self::Proto) -> anyhow::Result<Self> {
-        let protocol_version = required(&r.protocol_version)
-            .and_then(|x| Ok(ProtocolVersionId::try_from(u16::try_from(*x)?)?))
-            .context("protocol_version")?;
-        let mut transactions = vec![];
-
-        match protocol_version {
-            v if v >= ProtocolVersionId::Version25 => {
-                anyhow::ensure!(
-                    r.transactions.is_empty(),
-                    "transactions should be empty in protocol_version {v}"
-                );
-                for (i, tx) in r.transactions_v25.iter().enumerate() {
-                    transactions.push(
-                        tx.read()
-                            .with_context(|| format!("transactions_v25[{i}]"))?,
-                    );
-                }
-            }
-            v => {
-                anyhow::ensure!(
-                    r.transactions_v25.is_empty(),
-                    "transactions_v25 should be empty in protocol_version {v}"
-                );
-                for (i, tx) in r.transactions.iter().enumerate() {
-                    transactions.push(tx.read().with_context(|| format!("transactions[{i}]"))?)
-                }
-            }
-        }
-
-        let pubdata_params = if let Some(pubdata_params) = &r.pubdata_params {
-            Some(PubdataParams {
-                l2_da_validator_address: required(&pubdata_params.l2_da_validator_address)
-                    .and_then(|a| parse_h160(a))
-                    .context("operator_address")?,
-                pubdata_type: required(&pubdata_params.pubdata_type)
-                    .and_then(|x| Ok(proto::L1BatchCommitDataGeneratorMode::try_from(*x)?))
-                    .context("l1_batch_commit_data_generator_mode")?
-                    .parse(),
-            })
-        } else {
-            None
-        };
-
-        Ok(Self {
-            protocol_version,
-            hash: required(&r.hash)
-                .and_then(|h| parse_h256(h))
-                .context("hash")?,
-            l1_batch_number: L1BatchNumber(
-                *required(&r.l1_batch_number).context("l1_batch_number")?,
-            ),
-            timestamp: *required(&r.timestamp).context("timestamp")?,
-            l1_gas_price: *required(&r.l1_gas_price).context("l1_gas_price")?,
-            l2_fair_gas_price: *required(&r.l2_fair_gas_price).context("l2_fair_gas_price")?,
-            fair_pubdata_price: r.fair_pubdata_price,
-            virtual_blocks: *required(&r.virtual_blocks).context("virtual_blocks")?,
-            operator_address: required(&r.operator_address)
-                .and_then(|a| parse_h160(a))
-                .context("operator_address")?,
-            transactions,
-            last_in_batch: *required(&r.last_in_batch).context("last_in_batch")?,
-            pubdata_params,
-        })
-    }
-
-    fn build(&self) -> Self::Proto {
-        let mut x = Self::Proto {
-            protocol_version: Some((self.protocol_version as u16).into()),
-            hash: Some(self.hash.as_bytes().into()),
-            l1_batch_number: Some(self.l1_batch_number.0),
-            timestamp: Some(self.timestamp),
-            l1_gas_price: Some(self.l1_gas_price),
-            l2_fair_gas_price: Some(self.l2_fair_gas_price),
-            fair_pubdata_price: self.fair_pubdata_price,
-            virtual_blocks: Some(self.virtual_blocks),
-            operator_address: Some(self.operator_address.as_bytes().into()),
-            // Transactions are stored in execution order, therefore order is deterministic.
- transactions: vec![], - transactions_v25: vec![], - last_in_batch: Some(self.last_in_batch), - pubdata_params: self - .pubdata_params - .map(|pubdata_params| proto::PubdataParams { - l2_da_validator_address: Some( - pubdata_params.l2_da_validator_address.as_bytes().into(), - ), - pubdata_type: Some(proto::L1BatchCommitDataGeneratorMode::new( - &pubdata_params.pubdata_type, - ) as i32), - }), - }; - match self.protocol_version { - v if v >= ProtocolVersionId::Version25 => { - x.transactions_v25 = self.transactions.iter().map(ProtoRepr::build).collect(); - } - _ => { - x.transactions = self.transactions.iter().map(ProtoRepr::build).collect(); - } - } - x - } + pub pubdata_params: PubdataParams, } impl Payload { @@ -263,337 +60,3 @@ impl Payload { validator::Payload(zksync_protobuf::encode(self)) } } - -impl ProtoRepr for proto::TransactionV25 { - type Type = Transaction; - - fn read(&self) -> anyhow::Result { - use proto::transaction_v25::T; - let tx = match required(&self.t)? { - T::L1(l1) => abi::Transaction::L1 { - tx: required(&l1.rlp) - .and_then(|x| { - let tokens = ethabi::decode(&[abi::L2CanonicalTransaction::schema()], x) - .context("ethabi::decode()")?; - // Unwrap is safe because `ethabi::decode` does the verification. - let tx = - abi::L2CanonicalTransaction::decode(tokens.into_iter().next().unwrap()) - .context("L2CanonicalTransaction::decode()")?; - Ok(tx) - }) - .context("rlp")? - .into(), - factory_deps: l1.factory_deps.clone(), - eth_block: 0, - }, - T::L2(l2) => abi::Transaction::L2(required(&l2.rlp).context("rlp")?.clone()), - }; - tx.try_into() - } - - fn build(tx: &Self::Type) -> Self { - let tx = abi::Transaction::try_from(tx.clone()).unwrap(); - use proto::transaction_v25::T; - Self { - t: Some(match tx { - abi::Transaction::L1 { - tx, factory_deps, .. - } => T::L1(proto::L1Transaction { - rlp: Some(ethabi::encode(&[tx.encode()])), - factory_deps, - }), - abi::Transaction::L2(tx) => T::L2(proto::L2Transaction { rlp: Some(tx) }), - }), - } - } -} - -impl ProtoRepr for proto::Transaction { - type Type = Transaction; - - fn read(&self) -> anyhow::Result { - let common_data = required(&self.common_data).context("common_data")?; - let execute = required(&self.execute).context("execute")?; - Ok(Self::Type { - common_data: match common_data { - proto::transaction::CommonData::L1(common_data) => { - anyhow::ensure!( - *required(&common_data.deadline_block) - .context("common_data.deadline_block")? - == 0 - ); - anyhow::ensure!( - required(&common_data.eth_hash) - .and_then(|x| parse_h256(x)) - .context("common_data.eth_hash")? 
- == H256::default() - ); - ExecuteTransactionCommon::L1(L1TxCommonData { - sender: required(&common_data.sender_address) - .and_then(|x| parse_h160(x)) - .context("common_data.sender_address")?, - serial_id: required(&common_data.serial_id) - .map(|x| PriorityOpId(*x)) - .context("common_data.serial_id")?, - layer_2_tip_fee: required(&common_data.layer_2_tip_fee) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("common_data.layer_2_tip_fee")?, - full_fee: required(&common_data.full_fee) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("common_data.full_fee")?, - max_fee_per_gas: required(&common_data.max_fee_per_gas) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("common_data.max_fee_per_gas")?, - gas_limit: required(&common_data.gas_limit) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("common_data.gas_limit")?, - gas_per_pubdata_limit: required(&common_data.gas_per_pubdata_limit) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("common_data.gas_per_pubdata_limit")?, - op_processing_type: required(&common_data.op_processing_type) - .and_then(|x| { - OpProcessingType::try_from(u8::try_from(*x)?) - .map_err(|_| anyhow!("u8::try_from")) - }) - .context("common_data.op_processing_type")?, - priority_queue_type: required(&common_data.priority_queue_type) - .and_then(|x| { - PriorityQueueType::try_from(u8::try_from(*x)?) - .map_err(|_| anyhow!("u8::try_from")) - }) - .context("common_data.priority_queue_type")?, - eth_block: *required(&common_data.eth_block) - .context("common_data.eth_block")?, - canonical_tx_hash: required(&common_data.canonical_tx_hash) - .and_then(|x| parse_h256(x)) - .context("common_data.canonical_tx_hash")?, - to_mint: required(&common_data.to_mint) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("common_data.to_mint")?, - refund_recipient: required(&common_data.refund_recipient_address) - .and_then(|x| parse_h160(x)) - .context("common_data.refund_recipient_address")?, - }) - } - proto::transaction::CommonData::L2(common_data) => { - ExecuteTransactionCommon::L2(L2TxCommonData { - nonce: required(&common_data.nonce) - .map(|x| Nonce(*x)) - .context("common_data.nonce")?, - fee: Fee { - gas_limit: required(&common_data.gas_limit) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("common_data.gas_limit")?, - max_fee_per_gas: required(&common_data.max_fee_per_gas) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("common_data.max_fee_per_gas")?, - max_priority_fee_per_gas: required( - &common_data.max_priority_fee_per_gas, - ) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("common_data.max_priority_fee_per_gas")?, - gas_per_pubdata_limit: required(&common_data.gas_per_pubdata_limit) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("common_data.gas_per_pubdata_limit")?, - }, - initiator_address: required(&common_data.initiator_address) - .and_then(|x| parse_h160(x)) - .context("common_data.initiator_address")?, - signature: required(&common_data.signature) - .context("common_data.signature")? - .clone(), - transaction_type: required(&common_data.transaction_type) - .and_then(|x| Ok(TransactionType::try_from(*x)?)) - .context("common_data.transaction_type")?, - input: { - match &common_data.input { - None => None, - Some(input) => Some(InputData { - hash: required(&input.hash) - .and_then(|x| parse_h256(x)) - .context("common_data.input.hash")?, - data: required(&input.data) - .context("common_data.input.data")? 
- .clone(), - }), - } - }, - paymaster_params: { - let params = required(&common_data.paymaster_params)?; - PaymasterParams { - paymaster: required(¶ms.paymaster_address) - .and_then(|x| parse_h160(x)) - .context("common_data.paymaster_params.paymaster_address")?, - paymaster_input: required(¶ms.paymaster_input) - .context("common_data.paymaster_params.paymaster_input")? - .clone(), - } - }, - }) - } - proto::transaction::CommonData::ProtocolUpgrade(common_data) => { - ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: required(&common_data.sender_address) - .and_then(|x| parse_h160(x)) - .context("common_data.sender_address")?, - upgrade_id: required(&common_data.upgrade_id) - .and_then(|x| Ok(ProtocolVersionId::try_from(u16::try_from(*x)?)?)) - .context("common_data.upgrade_id")?, - max_fee_per_gas: required(&common_data.max_fee_per_gas) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("common_data.max_fee_per_gas")?, - gas_limit: required(&common_data.gas_limit) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("common_data.gas_limit")?, - gas_per_pubdata_limit: required(&common_data.gas_per_pubdata_limit) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("common_data.gas_per_pubdata_limit")?, - eth_block: *required(&common_data.eth_block) - .context("common_data.eth_block")?, - canonical_tx_hash: required(&common_data.canonical_tx_hash) - .and_then(|x| parse_h256(x)) - .context("common_data.canonical_tx_hash")?, - to_mint: required(&common_data.to_mint) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("common_data.to_mint")?, - refund_recipient: required(&common_data.refund_recipient_address) - .and_then(|x| parse_h160(x)) - .context("common_data.refund_recipient_address")?, - }) - } - }, - execute: Execute { - contract_address: execute - .contract_address - .as_ref() - .and_then(|x| parse_h160(x).ok()), - calldata: required(&execute.calldata).context("calldata")?.clone(), - value: required(&execute.value) - .and_then(|x| parse_h256(x)) - .map(h256_to_u256) - .context("execute.value")?, - factory_deps: execute.factory_deps.clone(), - }, - received_timestamp_ms: 0, // This timestamp is local to the node - raw_bytes: self.raw_bytes.as_ref().map(|x| x.clone().into()), - }) - } - - fn build(this: &Self::Type) -> Self { - let common_data = match &this.common_data { - ExecuteTransactionCommon::L1(data) => { - proto::transaction::CommonData::L1(proto::L1TxCommonData { - sender_address: Some(data.sender.as_bytes().into()), - serial_id: Some(data.serial_id.0), - deadline_block: Some(0), - layer_2_tip_fee: Some(u256_to_h256(data.layer_2_tip_fee).as_bytes().into()), - full_fee: Some(u256_to_h256(data.full_fee).as_bytes().into()), - max_fee_per_gas: Some(u256_to_h256(data.max_fee_per_gas).as_bytes().into()), - gas_limit: Some(u256_to_h256(data.gas_limit).as_bytes().into()), - gas_per_pubdata_limit: Some( - u256_to_h256(data.gas_per_pubdata_limit).as_bytes().into(), - ), - op_processing_type: Some(data.op_processing_type as u32), - priority_queue_type: Some(data.priority_queue_type as u32), - eth_hash: Some(H256::default().as_bytes().into()), - eth_block: Some(data.eth_block), - canonical_tx_hash: Some(data.canonical_tx_hash.as_bytes().into()), - to_mint: Some(u256_to_h256(data.to_mint).as_bytes().into()), - refund_recipient_address: Some(data.refund_recipient.as_bytes().into()), - }) - } - ExecuteTransactionCommon::L2(data) => { - proto::transaction::CommonData::L2(proto::L2TxCommonData { - nonce: 
Some(data.nonce.0), - gas_limit: Some(u256_to_h256(data.fee.gas_limit).as_bytes().into()), - max_fee_per_gas: Some(u256_to_h256(data.fee.max_fee_per_gas).as_bytes().into()), - max_priority_fee_per_gas: Some( - u256_to_h256(data.fee.max_priority_fee_per_gas) - .as_bytes() - .into(), - ), - gas_per_pubdata_limit: Some( - u256_to_h256(data.fee.gas_per_pubdata_limit) - .as_bytes() - .into(), - ), - initiator_address: Some(data.initiator_address.as_bytes().into()), - signature: Some(data.signature.clone()), - transaction_type: Some(data.transaction_type as u32), - input: data.input.as_ref().map(|input_data| proto::InputData { - data: Some(input_data.data.clone()), - hash: Some(input_data.hash.as_bytes().into()), - }), - paymaster_params: Some(proto::PaymasterParams { - paymaster_input: Some(data.paymaster_params.paymaster_input.clone()), - paymaster_address: Some(data.paymaster_params.paymaster.as_bytes().into()), - }), - }) - } - ExecuteTransactionCommon::ProtocolUpgrade(data) => { - proto::transaction::CommonData::ProtocolUpgrade( - proto::ProtocolUpgradeTxCommonData { - sender_address: Some(data.sender.as_bytes().into()), - upgrade_id: Some(data.upgrade_id as u32), - max_fee_per_gas: Some(u256_to_h256(data.max_fee_per_gas).as_bytes().into()), - gas_limit: Some(u256_to_h256(data.gas_limit).as_bytes().into()), - gas_per_pubdata_limit: Some( - u256_to_h256(data.gas_per_pubdata_limit).as_bytes().into(), - ), - eth_hash: Some(H256::default().as_bytes().into()), - eth_block: Some(data.eth_block), - canonical_tx_hash: Some(data.canonical_tx_hash.as_bytes().into()), - to_mint: Some(u256_to_h256(data.to_mint).as_bytes().into()), - refund_recipient_address: Some(data.refund_recipient.as_bytes().into()), - }, - ) - } - }; - let execute = proto::Execute { - contract_address: this.execute.contract_address.map(|x| x.as_bytes().into()), - calldata: Some(this.execute.calldata.clone()), - value: Some(u256_to_h256(this.execute.value).as_bytes().into()), - factory_deps: this.execute.factory_deps.clone(), - }; - Self { - common_data: Some(common_data), - execute: Some(execute), - raw_bytes: this.raw_bytes.as_ref().map(|inner| inner.0.clone()), - } - } -} - -impl ProtoRepr for proto::AttesterCommittee { - type Type = attester::Committee; - - fn read(&self) -> anyhow::Result { - let members: Vec<_> = self - .members - .iter() - .enumerate() - .map(|(i, m)| attester::WeightedAttester::read(m).context(i)) - .collect::>() - .context("members")?; - Self::Type::new(members) - } - - fn build(this: &Self::Type) -> Self { - Self { - members: this.iter().map(|x| x.build()).collect(), - } - } -} diff --git a/core/lib/dal/src/consensus/proto/mod.proto b/core/lib/dal/src/consensus/proto/mod.proto index 6083ad02910..49a69e8a36e 100644 --- a/core/lib/dal/src/consensus/proto/mod.proto +++ b/core/lib/dal/src/consensus/proto/mod.proto @@ -6,9 +6,8 @@ import "zksync/roles/validator.proto"; import "zksync/roles/attester.proto"; import "zksync/roles/node.proto"; -enum L1BatchCommitDataGeneratorMode { - Rollup = 0; - Validium = 1; +message BlockMetadata { + optional roles.validator.PayloadHash payload_hash = 1; // required } message Payload { @@ -31,8 +30,8 @@ message Payload { } message PubdataParams { - optional bytes l2_da_validator_address = 1; // required; H160 - optional L1BatchCommitDataGeneratorMode pubdata_type = 2; // optional, default to rollup + optional bytes l2_da_validator_address = 1; // required; H160 + optional L1BatchCommitDataGeneratorMode pubdata_type = 2; // required } message L1Transaction { @@ -149,3 +148,8 @@ 
 message AttestationStatus {
   optional roles.validator.GenesisHash genesis = 1; // required
   optional uint64 next_batch_to_attest = 2; // required
 }
+
+enum L1BatchCommitDataGeneratorMode {
+  Rollup = 0;
+  Validium = 1;
+}
diff --git a/core/lib/dal/src/consensus/testonly.rs b/core/lib/dal/src/consensus/testonly.rs
index 904a4c563d2..13086323b17 100644
--- a/core/lib/dal/src/consensus/testonly.rs
+++ b/core/lib/dal/src/consensus/testonly.rs
@@ -1,11 +1,17 @@
-use rand::{
-    distributions::{Distribution, Standard},
-    Rng,
-};
+use rand::{distributions::Distribution, Rng};
+use zksync_consensus_utils::EncodeDist;
 
-use super::AttestationStatus;
+use super::*;
 
-impl Distribution<AttestationStatus> for Standard {
+impl Distribution<BlockMetadata> for EncodeDist {
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> BlockMetadata {
+        BlockMetadata {
+            payload_hash: rng.gen(),
+        }
+    }
+}
+
+impl Distribution<AttestationStatus> for EncodeDist {
     fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> AttestationStatus {
         AttestationStatus {
             genesis: rng.gen(),
@@ -13,3 +19,16 @@ impl Distribution<AttestationStatus> for Standard {
         }
     }
 }
+
+impl Distribution<GlobalConfig> for EncodeDist {
+    fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> GlobalConfig {
+        GlobalConfig {
+            genesis: rng.gen(),
+            registry_address: Some(rng.gen()),
+            seed_peers: self
+                .sample_range(rng)
+                .map(|_| (rng.gen(), self.sample(rng)))
+                .collect(),
+        }
+    }
+}
diff --git a/core/lib/dal/src/consensus/tests.rs b/core/lib/dal/src/consensus/tests.rs
index e50ff5b1cae..df6ee24bfa9 100644
--- a/core/lib/dal/src/consensus/tests.rs
+++ b/core/lib/dal/src/consensus/tests.rs
@@ -1,10 +1,10 @@
 use std::fmt::Debug;
 
 use rand::Rng;
-use zksync_concurrency::ctx;
+use zksync_concurrency::{ctx, testonly::abort_on_panic};
 use zksync_protobuf::{
     repr::{decode, encode},
-    testonly::{test_encode, test_encode_random},
+    testonly::{test_encode, test_encode_all_formats, FmtConv},
     ProtoRepr,
 };
 use zksync_test_account::Account;
@@ -14,7 +14,7 @@ use zksync_types::{
     Execute, ExecuteTransactionCommon, L1BatchNumber, ProtocolVersionId, Transaction,
 };
 
-use super::{proto, AttestationStatus, Payload};
+use super::*;
 use crate::tests::mock_protocol_upgrade_transaction;
 
 fn execute(rng: &mut impl Rng) -> Execute {
@@ -53,22 +53,29 @@ fn payload(rng: &mut impl Rng, protocol_version: ProtocolVersionId) -> Payload {
             })
             .collect(),
         last_in_batch: rng.gen(),
-        pubdata_params: Some(PubdataParams {
-            pubdata_type: match rng.gen_range(0..2) {
-                0 => L1BatchCommitmentMode::Rollup,
-                _ => L1BatchCommitmentMode::Validium,
-            },
-            l2_da_validator_address: rng.gen(),
-        }),
+        pubdata_params: if protocol_version.is_pre_gateway() {
+            PubdataParams::default()
+        } else {
+            PubdataParams {
+                pubdata_type: match rng.gen_range(0..2) {
+                    0 => L1BatchCommitmentMode::Rollup,
+                    _ => L1BatchCommitmentMode::Validium,
+                },
+                l2_da_validator_address: rng.gen(),
+            }
+        },
     }
 }
 
 /// Tests struct <-> proto struct conversions.
 #[test]
 fn test_encoding() {
+    abort_on_panic();
     let ctx = &ctx::test_root(&ctx::RealClock);
     let rng = &mut ctx.rng();
-    test_encode_random::<AttestationStatus>(rng);
+    test_encode_all_formats::<FmtConv<GlobalConfig>>(rng);
+    test_encode_all_formats::<FmtConv<AttestationStatus>>(rng);
+    test_encode_all_formats::<FmtConv<BlockMetadata>>(rng);
     encode_decode::<proto::TransactionV25>(l1_transaction(rng));
     encode_decode::<proto::TransactionV25>(l2_transaction(rng));
     encode_decode::<proto::Transaction>(l1_transaction(rng));
@@ -76,10 +83,15 @@ fn test_encoding() {
     encode_decode::<proto::Transaction>(
         mock_protocol_upgrade_transaction().into(),
     );
-    let p = payload(rng, ProtocolVersionId::Version24);
-    test_encode(rng, &p);
-    let p = payload(rng, ProtocolVersionId::Version25);
-    test_encode(rng, &p);
+    // Test encoding in the current and all the future versions.
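+    // (Editorial note.) `ProtocolVersionId::latest() as u16..` is an unbounded
+    // range; the loop below stops at the first id for which
+    // `ProtocolVersionId::try_from` fails, i.e. just past the last defined version.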
+ for v in ProtocolVersionId::latest() as u16.. { + let Ok(v) = ProtocolVersionId::try_from(v) else { + break; + }; + tracing::info!("version {v}"); + let p = payload(rng, v); + test_encode(rng, &p); + } } fn encode_decode(msg: P::Type) diff --git a/core/lib/dal/src/consensus_dal.rs b/core/lib/dal/src/consensus_dal/mod.rs similarity index 74% rename from core/lib/dal/src/consensus_dal.rs rename to core/lib/dal/src/consensus_dal/mod.rs index dd976f22086..a091421d857 100644 --- a/core/lib/dal/src/consensus_dal.rs +++ b/core/lib/dal/src/consensus_dal/mod.rs @@ -1,16 +1,63 @@ use anyhow::Context as _; +use zksync_consensus_crypto::keccak256::Keccak256; use zksync_consensus_roles::{attester, validator}; -use zksync_consensus_storage::{BlockStoreState, ReplicaState}; +use zksync_consensus_storage::{BlockStoreState, Last, ReplicaState}; use zksync_db_connection::{ connection::Connection, error::{DalError, DalResult, SqlxContext}, instrument::{InstrumentExt, Instrumented}, }; -use zksync_types::L2BlockNumber; +use zksync_l1_contract_interface::i_executor::structures::StoredBatchInfo; +use zksync_types::{L1BatchNumber, L2BlockNumber}; -pub use crate::consensus::{proto, AttestationStatus, GlobalConfig, Payload}; +pub use crate::consensus::{proto, AttestationStatus, BlockMetadata, GlobalConfig, Payload}; use crate::{Core, CoreDal}; +#[cfg(test)] +mod tests; + +/// Hash of the batch. +pub fn batch_hash(info: &StoredBatchInfo) -> attester::BatchHash { + attester::BatchHash(Keccak256::from_bytes(info.hash().0)) +} + +/// Verifies that the transition from `old` to `new` is admissible. +pub fn verify_config_transition(old: &GlobalConfig, new: &GlobalConfig) -> anyhow::Result<()> { + anyhow::ensure!( + old.genesis.chain_id == new.genesis.chain_id, + "changing chain_id is not allowed: old = {:?}, new = {:?}", + old.genesis.chain_id, + new.genesis.chain_id, + ); + // Note that it may happen that the fork number didn't change, + // in case the binary was updated to support more fields in genesis struct. + // In such a case, the old binary was not able to connect to the consensus network, + // because of the genesis hash mismatch. + // TODO: Perhaps it would be better to deny unknown fields in the genesis instead. + // It would require embedding the genesis either as a json string or protobuf bytes within + // the global config, so that the global config can be parsed with + // `deny_unknown_fields:false` while genesis would be parsed with + // `deny_unknown_fields:true`. + anyhow::ensure!( + old.genesis.fork_number <= new.genesis.fork_number, + "transition to a past fork is not allowed: old = {:?}, new = {:?}", + old.genesis.fork_number, + new.genesis.fork_number, + ); + new.genesis.verify().context("genesis.verify()")?; + // This is a temporary hack until the `consensus_genesis()` RPC is disabled. + if new + == (&GlobalConfig { + genesis: old.genesis.clone(), + registry_address: None, + seed_peers: [].into(), + }) + { + anyhow::bail!("new config is equal to truncated old config, which means that it was sourced from the wrong endpoint"); + } + Ok(()) +} + /// Storage access methods for `zksync_core::consensus` module. 
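+/// Note that [`batch_hash`] and [`verify_config_transition`] above are free
+/// functions: they operate on plain values and need no DB connection.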
 #[derive(Debug)]
 pub struct ConsensusDal<'a, 'c> {
@@ -22,8 +69,8 @@ pub struct ConsensusDal<'a, 'c> {
 pub enum InsertCertificateError {
     #[error("corresponding payload is missing")]
     MissingPayload,
-    #[error("certificate doesn't match the payload")]
-    PayloadMismatch,
+    #[error("certificate doesn't match the payload, payload = {0:?}")]
+    PayloadMismatch(Payload),
     #[error(transparent)]
     Dal(#[from] DalError),
     #[error(transparent)]
@@ -85,6 +132,8 @@ impl ConsensusDal<'_, '_> {
         if got == want {
             return Ok(());
         }
+        verify_config_transition(got, want)?;
+
         // If genesis didn't change, just update the config.
         if got.genesis == want.genesis {
             let s = zksync_protobuf::serde::Serialize;
@@ -103,30 +152,6 @@ impl ConsensusDal<'_, '_> {
             txn.commit().await?;
             return Ok(());
         }
-
-        // Verify the genesis change.
-        anyhow::ensure!(
-            got.genesis.chain_id == want.genesis.chain_id,
-            "changing chain_id is not allowed: old = {:?}, new = {:?}",
-            got.genesis.chain_id,
-            want.genesis.chain_id,
-        );
-        // Note that it may happen that the fork number didn't change,
-        // in case the binary was updated to support more fields in genesis struct.
-        // In such a case, the old binary was not able to connect to the consensus network,
-        // because of the genesis hash mismatch.
-        // TODO: Perhaps it would be better to deny unknown fields in the genesis instead.
-        // It would require embedding the genesis either as a json string or protobuf bytes within
-        // the global config, so that the global config can be parsed with
-        // `deny_unknown_fields:false` while genesis would be parsed with
-        // `deny_unknown_fields:true`.
-        anyhow::ensure!(
-            got.genesis.fork_number <= want.genesis.fork_number,
-            "transition to a past fork is not allowed: old = {:?}, new = {:?}",
-            got.genesis.fork_number,
-            want.genesis.fork_number,
-        );
-        want.genesis.verify().context("genesis.verify()")?;
     }
 
     // Reset the consensus state.
@@ -305,47 +330,63 @@ impl ConsensusDal<'_, '_> {
         Ok(next)
     }
 
-    /// Fetches the last consensus certificate.
+    /// Fetches the block store state.
+    /// The blocks that are available to consensus are either pre-genesis or
+    /// have a consensus certificate.
     /// Currently, certificates are NOT generated synchronously with L2 blocks,
-    /// so it might NOT be the certificate for the last L2 block.
-    pub async fn block_certificates_range(&mut self) -> anyhow::Result<BlockStoreState> {
-        // It cannot be older than genesis first block.
-        let mut start = self
+    /// so `BlockStoreState.last` might be different from the last block in storage.
+    pub async fn block_store_state(&mut self) -> anyhow::Result<BlockStoreState> {
+        let first = self.first_block().await.context("first_block()")?;
+        let cfg = self
             .global_config()
-            .await?
-            .context("genesis()")?
-            .genesis
-            .first_block;
-        start = start.max(self.first_block().await.context("first_block()")?);
-        let row = sqlx::query!(
+            .await
+            .context("global_config()")?
+            .context("global config is missing")?;
+
+        // If there is a cert in storage, then the block range visible to consensus
+        // is [first block, block of last cert].
+        if let Some(row) = sqlx::query!(
             r#"
             SELECT
                 certificate
             FROM
                 miniblocks_consensus
-            WHERE
-                number >= $1
             ORDER BY
                 number DESC
             LIMIT
                 1
            "#,
-            i64::try_from(start.0)?,
         )
        .instrument("block_certificate_range")
        .report_latency()
        .fetch_optional(self.storage)
-        .await?;
-        Ok(BlockStoreState {
-            first: start,
-            last: row
-                .map(|row| {
+        .await?
+        {
+            return Ok(BlockStoreState {
+                first,
+                last: Some(Last::Final(
                    zksync_protobuf::serde::Deserialize {
                        deny_unknown_fields: true,
                    }
-                    .proto_fmt(row.certificate)
-                })
-                .transpose()?,
+                    .proto_fmt(row.certificate)?,
+                )),
+            });
+        }
+
+        // Otherwise it is [first block, min(genesis.first_block - 1, last block)].
+        let next = self
+            .next_block()
+            .await
+            .context("next_block()")?
+            .min(cfg.genesis.first_block);
+        Ok(BlockStoreState {
+            first,
+            // unwrap is ok, because `next > first >= 0`.
+            last: if next > first {
+                Some(Last::PreGenesis(next.prev().unwrap()))
+            } else {
+                None
+            },
        })
    }
 
@@ -461,6 +502,19 @@ impl ConsensusDal<'_, '_> {
             .next())
     }
 
+    /// Fetches L2 block metadata for the given block number.
+    pub async fn block_metadata(
+        &mut self,
+        n: validator::BlockNumber,
+    ) -> anyhow::Result<Option<BlockMetadata>> {
+        let Some(b) = self.block_payload(n).await.context("block_payload()")? else {
+            return Ok(None);
+        };
+        Ok(Some(BlockMetadata {
+            payload_hash: b.encode().hash(),
+        }))
+    }
+
     /// Inserts a certificate for the L2 block `cert.header().number`.
     /// Fails if certificate doesn't match the stored block.
     pub async fn insert_block_certificate(
@@ -474,7 +528,7 @@ impl ConsensusDal<'_, '_> {
             .await?
             .ok_or(E::MissingPayload)?;
         if header.payload != want_payload.encode().hash() {
-            return Err(E::PayloadMismatch);
+            return Err(E::PayloadMismatch(want_payload));
         }
         sqlx::query!(
             r#"
@@ -558,11 +612,25 @@ impl ConsensusDal<'_, '_> {
         ))
     }
 
+    /// Fetches the L1 batch info for the given number.
+    pub async fn batch_info(
+        &mut self,
+        number: attester::BatchNumber,
+    ) -> anyhow::Result<Option<StoredBatchInfo>> {
+        let n = L1BatchNumber(number.0.try_into().context("overflow")?);
+        Ok(self
+            .storage
+            .blocks_dal()
+            .get_l1_batch_metadata(n)
+            .await
+            .context("get_l1_batch_metadata()")?
+            .map(|x| StoredBatchInfo::from(&x)))
+    }
+
     /// Inserts a certificate for the L1 batch.
     /// Noop if a certificate for the same L1 batch is already present.
     /// Verification against previously stored attester committee is performed.
-    /// Batch hash is not verified - it cannot be performed due to circular dependency on
-    /// `zksync_l1_contract_interface`.
+    /// Batch hash verification is performed.
     pub async fn insert_batch_certificate(
         &mut self,
         cert: &attester::BatchQC,
@@ -577,6 +645,14 @@ impl ConsensusDal<'_, '_> {
             .await
             .context("attester_committee()")?
             .context("attester committee is missing")?;
+        let hash = batch_hash(
+            &self
+                .batch_info(cert.message.number)
+                .await
+                .context("batch()")?
+ .context("batch is missing")?, + ); + anyhow::ensure!(cert.message.hash == hash, "hash mismatch"); cert.verify(cfg.genesis.hash(), &committee) .context("cert.verify()")?; sqlx::query!( @@ -645,6 +721,8 @@ impl ConsensusDal<'_, '_> { (MAX(number) + 1) FROM l1_batches + WHERE + is_sealed ), ( SELECT @@ -711,158 +789,3 @@ impl ConsensusDal<'_, '_> { })) } } - -#[cfg(test)] -mod tests { - use rand::Rng as _; - use zksync_consensus_roles::{attester, validator}; - use zksync_consensus_storage::ReplicaState; - use zksync_types::ProtocolVersion; - - use super::GlobalConfig; - use crate::{ - tests::{create_l1_batch_header, create_l2_block_header}, - ConnectionPool, Core, CoreDal, - }; - - #[tokio::test] - async fn replica_state_read_write() { - let rng = &mut rand::thread_rng(); - let pool = ConnectionPool::::test_pool().await; - let mut conn = pool.connection().await.unwrap(); - assert_eq!(None, conn.consensus_dal().global_config().await.unwrap()); - for n in 0..3 { - let setup = validator::testonly::Setup::new(rng, 3); - let mut genesis = (*setup.genesis).clone(); - genesis.fork_number = validator::ForkNumber(n); - let cfg = GlobalConfig { - genesis: genesis.with_hash(), - registry_address: Some(rng.gen()), - seed_peers: [].into(), // TODO: rng.gen() for Host - }; - conn.consensus_dal() - .try_update_global_config(&cfg) - .await - .unwrap(); - assert_eq!( - cfg, - conn.consensus_dal().global_config().await.unwrap().unwrap() - ); - assert_eq!( - ReplicaState::default(), - conn.consensus_dal().replica_state().await.unwrap() - ); - for _ in 0..5 { - let want: ReplicaState = rng.gen(); - conn.consensus_dal().set_replica_state(&want).await.unwrap(); - assert_eq!( - cfg, - conn.consensus_dal().global_config().await.unwrap().unwrap() - ); - assert_eq!(want, conn.consensus_dal().replica_state().await.unwrap()); - } - } - } - - #[tokio::test] - async fn test_batch_certificate() { - let rng = &mut rand::thread_rng(); - let setup = validator::testonly::Setup::new(rng, 3); - let pool = ConnectionPool::::test_pool().await; - let mut conn = pool.connection().await.unwrap(); - let cfg = GlobalConfig { - genesis: setup.genesis.clone(), - registry_address: Some(rng.gen()), - seed_peers: [].into(), - }; - conn.consensus_dal() - .try_update_global_config(&cfg) - .await - .unwrap(); - - let mut make_cert = |number: attester::BatchNumber| { - let m = attester::Batch { - genesis: setup.genesis.hash(), - hash: rng.gen(), - number, - }; - let mut sigs = attester::MultiSig::default(); - for k in &setup.attester_keys { - sigs.add(k.public(), k.sign_msg(m.clone()).sig); - } - attester::BatchQC { - message: m, - signatures: sigs, - } - }; - - // Required for inserting l2 blocks - conn.protocol_versions_dal() - .save_protocol_version_with_tx(&ProtocolVersion::default()) - .await - .unwrap(); - - // Insert some mock L2 blocks and L1 batches - let mut block_number = 0; - let mut batch_number = 0; - for _ in 0..3 { - for _ in 0..3 { - block_number += 1; - let l2_block = create_l2_block_header(block_number); - conn.blocks_dal().insert_l2_block(&l2_block).await.unwrap(); - } - batch_number += 1; - let l1_batch = create_l1_batch_header(batch_number); - conn.blocks_dal() - .insert_mock_l1_batch(&l1_batch) - .await - .unwrap(); - conn.blocks_dal() - .mark_l2_blocks_as_executed_in_l1_batch(l1_batch.number) - .await - .unwrap(); - } - - let n = attester::BatchNumber(batch_number.into()); - - // Insert a batch certificate for the last L1 batch. 
-        let want = make_cert(n);
-        conn.consensus_dal()
-            .upsert_attester_committee(n, setup.genesis.attesters.as_ref().unwrap())
-            .await
-            .unwrap();
-        conn.consensus_dal()
-            .insert_batch_certificate(&want)
-            .await
-            .unwrap();
-
-        // Reinserting a cert should fail.
-        assert!(conn
-            .consensus_dal()
-            .insert_batch_certificate(&make_cert(n))
-            .await
-            .is_err());
-
-        // Retrieve the latest certificate.
-        let got_n = conn
-            .consensus_dal()
-            .last_batch_certificate_number()
-            .await
-            .unwrap()
-            .unwrap();
-        let got = conn
-            .consensus_dal()
-            .batch_certificate(got_n)
-            .await
-            .unwrap()
-            .unwrap();
-        assert_eq!(got, want);
-
-        // Try insert batch certificate for non-existing batch
-        assert!(conn
-            .consensus_dal()
-            .insert_batch_certificate(&make_cert(n.next()))
-            .await
-            .is_err());
-    }
-}
diff --git a/core/lib/dal/src/consensus_dal/tests.rs b/core/lib/dal/src/consensus_dal/tests.rs
new file mode 100644
index 00000000000..694abc8508b
--- /dev/null
+++ b/core/lib/dal/src/consensus_dal/tests.rs
@@ -0,0 +1,189 @@
+use rand::Rng as _;
+use zksync_consensus_roles::{attester, validator};
+use zksync_consensus_storage::ReplicaState;
+use zksync_types::{
+    block::L1BatchTreeData,
+    commitment::{L1BatchCommitmentArtifacts, L1BatchCommitmentHash},
+    ProtocolVersion,
+};
+
+use super::*;
+use crate::{
+    tests::{create_l1_batch_header, create_l2_block_header},
+    ConnectionPool, Core, CoreDal,
+};
+
+#[tokio::test]
+async fn replica_state_read_write() {
+    let rng = &mut rand::thread_rng();
+    let pool = ConnectionPool::<Core>::test_pool().await;
+    let mut conn = pool.connection().await.unwrap();
+    assert_eq!(None, conn.consensus_dal().global_config().await.unwrap());
+    for n in 0..3 {
+        let setup = validator::testonly::Setup::new(rng, 3);
+        let mut genesis = (*setup.genesis).clone();
+        genesis.fork_number = validator::ForkNumber(n);
+        let cfg = GlobalConfig {
+            genesis: genesis.with_hash(),
+            registry_address: Some(rng.gen()),
+            seed_peers: [].into(), // TODO: rng.gen() for Host
+        };
+        conn.consensus_dal()
+            .try_update_global_config(&cfg)
+            .await
+            .unwrap();
+        assert_eq!(
+            cfg,
+            conn.consensus_dal().global_config().await.unwrap().unwrap()
+        );
+        assert_eq!(
+            ReplicaState::default(),
+            conn.consensus_dal().replica_state().await.unwrap()
+        );
+        for _ in 0..5 {
+            let want: ReplicaState = rng.gen();
+            conn.consensus_dal().set_replica_state(&want).await.unwrap();
+            assert_eq!(
+                cfg,
+                conn.consensus_dal().global_config().await.unwrap().unwrap()
+            );
+            assert_eq!(want, conn.consensus_dal().replica_state().await.unwrap());
+        }
+    }
+}
+
+#[tokio::test]
+async fn test_batch_certificate() {
+    let rng = &mut rand::thread_rng();
+    let setup = validator::testonly::Setup::new(rng, 3);
+    let pool = ConnectionPool::<Core>::test_pool().await;
+    let mut conn = pool.connection().await.unwrap();
+    let cfg = GlobalConfig {
+        genesis: setup.genesis.clone(),
+        registry_address: Some(rng.gen()),
+        seed_peers: [].into(),
+    };
+    conn.consensus_dal()
+        .try_update_global_config(&cfg)
+        .await
+        .unwrap();
+
+    let make_cert = |number: attester::BatchNumber, hash: attester::BatchHash| {
+        let m = attester::Batch {
+            genesis: setup.genesis.hash(),
+            hash,
+            number,
+        };
+        let mut sigs = attester::MultiSig::default();
+        for k in &setup.attester_keys {
+            sigs.add(k.public(), k.sign_msg(m.clone()).sig);
+        }
+        attester::BatchQC {
+            message: m,
+            signatures: sigs,
+        }
+    };
+
+    // Required for inserting L2 blocks
+    conn.protocol_versions_dal()
+        .save_protocol_version_with_tx(&ProtocolVersion::default())
+        .await
+        .unwrap();
+
+    // Insert some mock L2 blocks and L1 batches
+    let mut block_number = 0;
+    let mut batch_number = 0;
+    for _ in 0..3 {
+        for _ in 0..3 {
+            block_number += 1;
+            let l2_block = create_l2_block_header(block_number);
+            conn.blocks_dal().insert_l2_block(&l2_block).await.unwrap();
+        }
+        batch_number += 1;
+        let l1_batch = create_l1_batch_header(batch_number);
+        conn.blocks_dal()
+            .insert_mock_l1_batch(&l1_batch)
+            .await
+            .unwrap();
+        conn.blocks_dal()
+            .save_l1_batch_tree_data(
+                l1_batch.number,
+                &L1BatchTreeData {
+                    hash: rng.gen(),
+                    rollup_last_leaf_index: rng.gen(),
+                },
+            )
+            .await
+            .unwrap();
+        conn.blocks_dal()
+            .save_l1_batch_commitment_artifacts(
+                l1_batch.number,
+                &L1BatchCommitmentArtifacts {
+                    commitment_hash: L1BatchCommitmentHash {
+                        pass_through_data: rng.gen(),
+                        aux_output: rng.gen(),
+                        meta_parameters: rng.gen(),
+                        commitment: rng.gen(),
+                    },
+                    l2_l1_merkle_root: rng.gen(),
+                    compressed_state_diffs: None,
+                    compressed_initial_writes: None,
+                    compressed_repeated_writes: None,
+                    zkporter_is_available: false,
+                    aux_commitments: None,
+                    aggregation_root: rng.gen(),
+                    local_root: rng.gen(),
+                    state_diff_hash: rng.gen(),
+                },
+            )
+            .await
+            .unwrap();
+        conn.blocks_dal()
+            .mark_l2_blocks_as_executed_in_l1_batch(l1_batch.number)
+            .await
+            .unwrap();
+    }
+
+    let n = attester::BatchNumber(batch_number.into());
+
+    // Insert a batch certificate for the last L1 batch.
+    let hash = batch_hash(&conn.consensus_dal().batch_info(n).await.unwrap().unwrap());
+    let want = make_cert(n, hash);
+    conn.consensus_dal()
+        .upsert_attester_committee(n, setup.genesis.attesters.as_ref().unwrap())
+        .await
+        .unwrap();
+    conn.consensus_dal()
+        .insert_batch_certificate(&want)
+        .await
+        .unwrap();
+
+    // Reinserting a cert should fail.
+    assert!(conn
+        .consensus_dal()
+        .insert_batch_certificate(&make_cert(n, hash))
+        .await
+        .is_err());
+
+    // Retrieve the latest certificate.
+    let got_n = conn
+        .consensus_dal()
+        .last_batch_certificate_number()
+        .await
+        .unwrap()
+        .unwrap();
+    let got = conn
+        .consensus_dal()
+        .batch_certificate(got_n)
+        .await
+        .unwrap()
+        .unwrap();
+    assert_eq!(got, want);
+
+    // Try inserting a batch certificate for a non-existing batch.
+    assert!(conn
+        .consensus_dal()
+        .insert_batch_certificate(&make_cert(n.next(), rng.gen()))
+        .await
+        .is_err());
+}
diff --git a/core/lib/dal/src/factory_deps_dal.rs b/core/lib/dal/src/factory_deps_dal.rs
index 36dfaa1a466..857e2973ae3 100644
--- a/core/lib/dal/src/factory_deps_dal.rs
+++ b/core/lib/dal/src/factory_deps_dal.rs
@@ -94,6 +94,7 @@ impl FactoryDepsDal<'_, '_> {
         &mut self,
         bootloader_hash: H256,
         default_aa_hash: H256,
+        evm_emulator_hash: Option<H256>,
     ) -> anyhow::Result<BaseSystemContracts> {
         let bootloader_bytecode = self
             .get_sealed_factory_dep(bootloader_hash)
@@ -115,9 +116,26 @@ impl FactoryDepsDal<'_, '_> {
             code: bytes_to_be_words(default_aa_bytecode),
             hash: default_aa_hash,
         };
+
+        let evm_emulator_code = if let Some(evm_emulator_hash) = evm_emulator_hash {
+            let evm_emulator_bytecode = self
+                .get_sealed_factory_dep(evm_emulator_hash)
+                .await
+                .context("failed loading EVM emulator code")?
+                .with_context(|| format!("EVM emulator code with hash {evm_emulator_hash:?} should be present in the database"))?;
+
+            Some(SystemContractCode {
+                code: bytes_to_be_words(evm_emulator_bytecode),
+                hash: evm_emulator_hash,
+            })
+        } else {
+            None
+        };
+
         Ok(BaseSystemContracts {
             bootloader: bootloader_code,
             default_aa: default_aa_code,
+            evm_emulator: evm_emulator_code,
         })
     }
diff --git a/core/lib/dal/src/lib.rs b/core/lib/dal/src/lib.rs
index a57ebd9e48a..20b428adec4 100644
--- a/core/lib/dal/src/lib.rs
+++ b/core/lib/dal/src/lib.rs
@@ -23,8 +23,7 @@ use crate::{
     snapshots_dal::SnapshotsDal, storage_logs_dal::StorageLogsDal,
     storage_logs_dedup_dal::StorageLogsDedupDal, storage_web3_dal::StorageWeb3Dal,
     sync_dal::SyncDal, system_dal::SystemDal, tee_proof_generation_dal::TeeProofGenerationDal,
-    tee_verifier_input_producer_dal::TeeVerifierInputProducerDal, tokens_dal::TokensDal,
-    tokens_web3_dal::TokensWeb3Dal, transactions_dal::TransactionsDal,
+    tokens_dal::TokensDal, tokens_web3_dal::TokensWeb3Dal, transactions_dal::TransactionsDal,
     transactions_web3_dal::TransactionsWeb3Dal, vm_runner_dal::VmRunnerDal,
 };
 
@@ -56,7 +55,6 @@ pub mod storage_web3_dal;
 pub mod sync_dal;
 pub mod system_dal;
 pub mod tee_proof_generation_dal;
-pub mod tee_verifier_input_producer_dal;
 pub mod tokens_dal;
 pub mod tokens_web3_dal;
 pub mod transactions_dal;
@@ -81,8 +79,6 @@ where
 
     fn transactions_web3_dal(&mut self) -> TransactionsWeb3Dal<'_, 'a>;
 
-    fn tee_verifier_input_producer_dal(&mut self) -> TeeVerifierInputProducerDal<'_, 'a>;
-
     fn blocks_dal(&mut self) -> BlocksDal<'_, 'a>;
 
     fn blocks_web3_dal(&mut self) -> BlocksWeb3Dal<'_, 'a>;
@@ -155,10 +151,6 @@ impl<'a> CoreDal<'a> for Connection<'a, Core> {
         TransactionsWeb3Dal { storage: self }
     }
 
-    fn tee_verifier_input_producer_dal(&mut self) -> TeeVerifierInputProducerDal<'_, 'a> {
-        TeeVerifierInputProducerDal { storage: self }
-    }
-
     fn blocks_dal(&mut self) -> BlocksDal<'_, 'a> {
         BlocksDal { storage: self }
     }
diff --git a/core/lib/dal/src/models/mod.rs b/core/lib/dal/src/models/mod.rs
index 479649f8509..12e41ac780a 100644
--- a/core/lib/dal/src/models/mod.rs
+++ b/core/lib/dal/src/models/mod.rs
@@ -1,7 +1,6 @@
 pub mod storage_block;
 
-use anyhow::Context as _;
 use zksync_db_connection::error::SqlxContext;
-use zksync_types::{ProtocolVersionId, H160, H256};
+use zksync_types::ProtocolVersionId;
 
 mod call;
 pub mod storage_base_token_ratio;
@@ -19,18 +18,6 @@ pub mod storage_verification_request;
 #[cfg(test)]
 mod tests;
 
-pub(crate) fn parse_h256(bytes: &[u8]) -> anyhow::Result<H256> {
-    Ok(<[u8; 32]>::try_from(bytes).context("invalid size")?.into())
-}
-
-fn parse_h256_opt(bytes: Option<&[u8]>) -> anyhow::Result<H256> {
-    parse_h256(bytes.context("missing data")?)
-}
-
-pub(crate) fn parse_h160(bytes: &[u8]) -> anyhow::Result<H160> {
-    Ok(<[u8; 20]>::try_from(bytes).context("invalid size")?.into())
-}
-
 pub(crate) fn parse_protocol_version(raw: i32) -> sqlx::Result<ProtocolVersionId> {
     u16::try_from(raw)
         .decode_column("protocol_version")?
diff --git a/core/lib/dal/src/models/storage_block.rs b/core/lib/dal/src/models/storage_block.rs
index 3d9264ddd9e..159ed71cc3e 100644
--- a/core/lib/dal/src/models/storage_block.rs
+++ b/core/lib/dal/src/models/storage_block.rs
@@ -6,7 +6,7 @@ use thiserror::Error;
 use zksync_contracts::BaseSystemContractsHashes;
 use zksync_types::{
     api,
-    block::{L1BatchHeader, L2BlockHeader},
+    block::{L1BatchHeader, L2BlockHeader, UnsealedL1BatchHeader},
     commitment::{L1BatchCommitmentMode, L1BatchMetaParameters, L1BatchMetadata, PubdataParams},
     fee_model::{BatchFeeInput, L1PeggedBatchFeeModelInput, PubdataIndependentBatchFeeModelInput},
     l2_to_l1_log::{L2ToL1Log, SystemL2ToL1Log, UserL2ToL1Log},
@@ -44,6 +44,7 @@ pub(crate) struct StorageL1BatchHeader {
     pub used_contract_hashes: serde_json::Value,
     pub bootloader_code_hash: Option<Vec<u8>>,
     pub default_aa_code_hash: Option<Vec<u8>>,
+    pub evm_emulator_code_hash: Option<Vec<u8>>,
     pub protocol_version: Option<i32>,
 
     // `system_logs` are introduced as part of boojum and will be absent in all batches generated prior to boojum.
@@ -52,6 +53,7 @@ pub(crate) struct StorageL1BatchHeader {
     // will be exactly 7 (or 8 in the event of a protocol upgrade) system logs.
     pub system_logs: Vec<Vec<u8>>,
     pub pubdata_input: Option<Vec<u8>>,
+    pub fee_address: Vec<u8>,
 }
 
 impl StorageL1BatchHeader {
@@ -82,12 +84,14 @@ impl StorageL1BatchHeader {
             base_system_contracts_hashes: convert_base_system_contracts_hashes(
                 self.bootloader_code_hash,
                 self.default_aa_code_hash,
+                self.evm_emulator_code_hash,
             ),
             system_logs: system_logs.into_iter().map(SystemL2ToL1Log).collect(),
             protocol_version: self
                 .protocol_version
                 .map(|v| (v as u16).try_into().unwrap()),
             pubdata_input: self.pubdata_input,
+            fee_address: Address::from_slice(&self.fee_address),
         }
     }
 }
@@ -103,6 +107,7 @@ fn convert_l2_to_l1_logs(raw_logs: Vec<Vec<u8>>) -> Vec<UserL2ToL1Log> {
 fn convert_base_system_contracts_hashes(
     bootloader_code_hash: Option<Vec<u8>>,
     default_aa_code_hash: Option<Vec<u8>>,
+    evm_emulator_code_hash: Option<Vec<u8>>,
 ) -> BaseSystemContractsHashes {
     BaseSystemContractsHashes {
         bootloader: bootloader_code_hash
@@ -111,6 +116,7 @@ fn convert_base_system_contracts_hashes(
         default_aa: default_aa_code_hash
             .map(|hash| H256::from_slice(&hash))
             .expect("should not be none"),
+        evm_emulator: evm_emulator_code_hash.map(|hash| H256::from_slice(&hash)),
     }
 }
 
@@ -134,15 +140,13 @@ pub(crate) struct StorageL1Batch {
     pub zkporter_is_available: Option<bool>,
     pub bootloader_code_hash: Option<Vec<u8>>,
     pub default_aa_code_hash: Option<Vec<u8>>,
+    pub evm_emulator_code_hash: Option<Vec<u8>>,
     pub l2_to_l1_messages: Vec<Vec<u8>>,
     pub l2_l1_merkle_root: Option<Vec<u8>>,
     pub compressed_initial_writes: Option<Vec<u8>>,
     pub compressed_repeated_writes: Option<Vec<u8>>,
-    pub aggregation_root: Option<Vec<u8>>,
-    pub local_root: Option<Vec<u8>>,
-
     pub used_contract_hashes: serde_json::Value,
     pub system_logs: Vec<Vec<u8>>,
     pub compressed_state_diffs: Option<Vec<u8>>,
@@ -150,6 +154,9 @@ pub(crate) struct StorageL1Batch {
     pub events_queue_commitment: Option<Vec<u8>>,
     pub bootloader_initial_content_commitment: Option<Vec<u8>>,
     pub pubdata_input: Option<Vec<u8>>,
+    pub fee_address: Vec<u8>,
+    pub aggregation_root: Option<Vec<u8>>,
+    pub local_root: Option<Vec<u8>>,
     pub state_diff_hash: Option<Vec<u8>>,
     pub inclusion_data: Option<Vec<u8>>,
 }
@@ -182,12 +189,14 @@ impl StorageL1Batch {
             base_system_contracts_hashes: convert_base_system_contracts_hashes(
                 self.bootloader_code_hash,
                 self.default_aa_code_hash,
+                self.evm_emulator_code_hash,
             ),
             system_logs: system_logs.into_iter().map(SystemL2ToL1Log).collect(),
             protocol_version: self
                 .protocol_version
                 .map(|v| (v as u16).try_into().unwrap()),
             pubdata_input: self.pubdata_input,
+            fee_address: Address::from_slice(&self.fee_address),
         }
     }
 }
 
@@ -245,6 +254,10 @@ impl TryFrom<StorageL1Batch> for L1BatchMetadata {
                     .default_aa_code_hash
                     .ok_or(L1BatchMetadataError::Incomplete("default_aa_code_hash"))?,
             ),
+            evm_emulator_code_hash: batch
+                .evm_emulator_code_hash
+                .as_deref()
+                .map(H256::from_slice),
             protocol_version: batch
                 .protocol_version
                 .map(|v| (v as u16).try_into().unwrap()),
@@ -262,6 +275,38 @@ impl TryFrom<StorageL1Batch> for L1BatchMetadata {
     }
 }
 
+/// Partial projection of the columns corresponding to an unsealed [`L1BatchHeader`].
+#[derive(Debug, Clone)]
+pub(crate) struct UnsealedStorageL1Batch {
+    pub number: i64,
+    pub timestamp: i64,
+    pub protocol_version: Option<i32>,
+    pub fee_address: Vec<u8>,
+    pub l1_gas_price: i64,
+    pub l2_fair_gas_price: i64,
+    pub fair_pubdata_price: Option<i64>,
+}
+
+impl From<UnsealedStorageL1Batch> for UnsealedL1BatchHeader {
+    fn from(batch: UnsealedStorageL1Batch) -> Self {
+        let protocol_version: Option<ProtocolVersionId> = batch
+            .protocol_version
+            .map(|v| (v as u16).try_into().unwrap());
+        Self {
+            number: L1BatchNumber(batch.number as u32),
+            timestamp: batch.timestamp as u64,
+            protocol_version,
+            fee_address: Address::from_slice(&batch.fee_address),
+            fee_input: BatchFeeInput::for_protocol_version(
+                protocol_version.unwrap_or_else(ProtocolVersionId::last_potentially_undefined),
+                batch.l2_fair_gas_price as u64,
+                batch.fair_pubdata_price.map(|p| p as u64),
+                batch.l1_gas_price as u64,
+            ),
+        }
+    }
+}
+
 #[derive(Debug, Clone, sqlx::FromRow)]
 pub(crate) struct StorageBlockDetails {
     pub number: i64,
@@ -284,6 +329,7 @@ pub(crate) struct StorageBlockDetails {
     pub fair_pubdata_price: Option<i64>,
     pub bootloader_code_hash: Option<Vec<u8>>,
     pub default_aa_code_hash: Option<Vec<u8>>,
+    pub evm_emulator_code_hash: Option<Vec<u8>>,
     pub fee_account_address: Vec<u8>,
     pub protocol_version: Option<i32>,
 }
@@ -329,6 +375,7 @@ impl From<StorageBlockDetails> for api::BlockDetails {
             base_system_contracts_hashes: convert_base_system_contracts_hashes(
                 details.bootloader_code_hash,
                 details.default_aa_code_hash,
+                details.evm_emulator_code_hash,
             ),
         };
         api::BlockDetails {
@@ -361,6 +408,7 @@ pub(crate) struct StorageL1BatchDetails {
     pub fair_pubdata_price: Option<i64>,
     pub bootloader_code_hash: Option<Vec<u8>>,
     pub default_aa_code_hash: Option<Vec<u8>>,
+    pub evm_emulator_code_hash: Option<Vec<u8>>,
 }
 
 impl From<StorageL1BatchDetails> for api::L1BatchDetails {
@@ -404,6 +452,7 @@ impl From<StorageL1BatchDetails> for api::L1BatchDetails {
             base_system_contracts_hashes: convert_base_system_contracts_hashes(
                 details.bootloader_code_hash,
                 details.default_aa_code_hash,
+                details.evm_emulator_code_hash,
             ),
         };
         api::L1BatchDetails {
@@ -427,8 +476,7 @@ pub(crate) struct StorageL2BlockHeader {
     // L2 gas price assumed in the corresponding batch
     pub bootloader_code_hash: Option<Vec<u8>>,
     pub default_aa_code_hash: Option<Vec<u8>>,
-    pub l2_da_validator_address: Vec<u8>,
-    pub pubdata_type: String,
+    pub evm_emulator_code_hash: Option<Vec<u8>>,
     pub protocol_version: Option<i32>,
 
     pub fair_pubdata_price: Option<i64>,
@@ -445,6 +493,8 @@ pub(crate) struct StorageL2BlockHeader {
     /// This value should bound the maximal amount of gas that can be spent by transactions in the miniblock.
     pub gas_limit: Option<i64>,
     pub logs_bloom: Option<Vec<u8>>,
+    pub l2_da_validator_address: Vec<u8>,
+    pub pubdata_type: String,
 }
 
 impl From<StorageL2BlockHeader> for L2BlockHeader {
@@ -482,11 +532,8 @@ impl From<StorageL2BlockHeader> for L2BlockHeader {
             base_system_contracts_hashes: convert_base_system_contracts_hashes(
                 row.bootloader_code_hash,
                 row.default_aa_code_hash,
+                row.evm_emulator_code_hash,
             ),
-            pubdata_params: PubdataParams {
-                l2_da_validator_address: Address::from_slice(&row.l2_da_validator_address),
-                pubdata_type: L1BatchCommitmentMode::from_str(&row.pubdata_type).unwrap(),
-            },
             gas_per_pubdata_limit: row.gas_per_pubdata_limit as u64,
             protocol_version,
             virtual_blocks: row.virtual_blocks as u32,
@@ -495,6 +542,10 @@ impl From<StorageL2BlockHeader> for L2BlockHeader {
                 .logs_bloom
                 .map(|b| Bloom::from_slice(&b))
                 .unwrap_or_default(),
+            pubdata_params: PubdataParams {
+                l2_da_validator_address: Address::from_slice(&row.l2_da_validator_address),
+                pubdata_type: L1BatchCommitmentMode::from_str(&row.pubdata_type).unwrap(),
+            },
         }
     }
 }
diff --git a/core/lib/dal/src/models/storage_protocol_version.rs b/core/lib/dal/src/models/storage_protocol_version.rs
index e53bf7b9d0a..a833236a7b6 100644
--- a/core/lib/dal/src/models/storage_protocol_version.rs
+++ b/core/lib/dal/src/models/storage_protocol_version.rs
@@ -16,6 +16,7 @@ pub struct StorageProtocolVersion {
     pub snark_wrapper_vk_hash: Vec<u8>,
     pub bootloader_code_hash: Vec<u8>,
     pub default_account_code_hash: Vec<u8>,
+    pub evm_emulator_code_hash: Option<Vec<u8>>,
 }
 
 pub(crate) fn protocol_version_from_storage(
@@ -34,6 +35,10 @@ pub(crate) fn protocol_version_from_storage(
         base_system_contracts_hashes: BaseSystemContractsHashes {
             bootloader: H256::from_slice(&storage_version.bootloader_code_hash),
             default_aa: H256::from_slice(&storage_version.default_account_code_hash),
+            evm_emulator: storage_version
+                .evm_emulator_code_hash
+                .as_deref()
+                .map(H256::from_slice),
         },
         tx,
     }
@@ -45,6 +50,7 @@ pub struct StorageApiProtocolVersion {
     pub timestamp: i64,
     pub bootloader_code_hash: Vec<u8>,
     pub default_account_code_hash: Vec<u8>,
+    pub evm_emulator_code_hash: Option<Vec<u8>>,
     pub upgrade_tx_hash: Option<Vec<u8>>,
 }
 
@@ -60,6 +66,10 @@ impl From<StorageApiProtocolVersion> for api::ProtocolVersion {
             storage_protocol_version.timestamp as u64,
             H256::from_slice(&storage_protocol_version.bootloader_code_hash),
             H256::from_slice(&storage_protocol_version.default_account_code_hash),
+            storage_protocol_version
+                .evm_emulator_code_hash
+                .as_deref()
+                .map(H256::from_slice),
             l2_system_upgrade_tx_hash,
         )
     }
diff --git a/core/lib/dal/src/models/storage_sync.rs b/core/lib/dal/src/models/storage_sync.rs
index 7bb3c228748..3f80f52c56e 100644
--- a/core/lib/dal/src/models/storage_sync.rs
+++ b/core/lib/dal/src/models/storage_sync.rs
@@ -5,13 +5,11 @@ use zksync_db_connection::error::SqlxContext;
 use zksync_types::{
     api::en,
     commitment::{L1BatchCommitmentMode, PubdataParams},
-    Address, L1BatchNumber, L2BlockNumber, ProtocolVersionId, Transaction, H256,
+    parse_h160, parse_h256, parse_h256_opt, Address, L1BatchNumber, L2BlockNumber,
+    ProtocolVersionId, Transaction, H256,
 };
 
-use crate::{
-    consensus_dal::Payload,
-    models::{parse_h160, parse_h256, parse_h256_opt, parse_protocol_version},
-};
+use crate::{consensus_dal::Payload, models::parse_protocol_version};
 
 #[derive(Debug, Clone, sqlx::FromRow)]
 pub(crate) struct StorageSyncBlock {
@@ -26,6 +24,7 @@ pub(crate) struct StorageSyncBlock {
     pub fair_pubdata_price: Option<i64>,
     pub bootloader_code_hash: Option<Vec<u8>>,
     pub default_aa_code_hash: Option<Vec<u8>>,
+    pub evm_emulator_code_hash: Option<Vec<u8>>,
     pub fee_account_address: Vec<u8>,
     pub protocol_version: i32,
     pub virtual_blocks: i64,
@@ -82,12 +81,12 @@ impl TryFrom<StorageSyncBlock> for SyncBlock {
                     .decode_column("bootloader_code_hash")?,
                 default_aa: parse_h256_opt(block.default_aa_code_hash.as_deref())
                     .decode_column("default_aa_code_hash")?,
-            },
-            pubdata_params: PubdataParams {
-                pubdata_type: L1BatchCommitmentMode::from_str(&block.pubdata_type)
-                    .expect("Invalid pubdata type"),
-                l2_da_validator_address: parse_h160(&block.l2_da_validator_address)
-                    .decode_column("l2_da_validator_address")?,
+                evm_emulator: block
+                    .evm_emulator_code_hash
+                    .as_deref()
+                    .map(parse_h256)
+                    .transpose()
+                    .decode_column("evm_emulator_code_hash")?,
             },
             fee_account_address: parse_h160(&block.fee_account_address)
                 .decode_column("fee_account_address")?,
@@ -97,6 +96,12 @@ impl TryFrom<StorageSyncBlock> for SyncBlock {
                 .decode_column("virtual_blocks")?,
             hash: parse_h256(&block.hash).decode_column("hash")?,
             protocol_version: parse_protocol_version(block.protocol_version)?,
+            pubdata_params: PubdataParams {
+                pubdata_type: L1BatchCommitmentMode::from_str(&block.pubdata_type)
+                    .decode_column("Invalid pubdata type")?,
+                l2_da_validator_address: parse_h160(&block.l2_da_validator_address)
+                    .decode_column("l2_da_validator_address")?,
+            },
         })
     }
 }
@@ -134,7 +139,7 @@ impl SyncBlock {
             operator_address: self.fee_account_address,
             transactions,
             last_in_batch: self.last_in_batch,
-            pubdata_params: Some(self.pubdata_params),
+            pubdata_params: self.pubdata_params,
         }
     }
 }
diff --git a/core/lib/dal/src/models/storage_transaction.rs b/core/lib/dal/src/models/storage_transaction.rs
index bb219ee1d61..78daaebb335 100644
--- a/core/lib/dal/src/models/storage_transaction.rs
+++ b/core/lib/dal/src/models/storage_transaction.rs
@@ -352,6 +352,16 @@ impl From<StorageTransactionReceipt> for TransactionReceipt {
             .index_in_block
             .map_or_else(Default::default, U64::from);
 
+        // For better compatibility with various clients, we never return `None` recipient address.
+        let to = storage_receipt
+            .transfer_to
+            .or(storage_receipt.execute_contract_address)
+            .and_then(|addr| {
+                serde_json::from_value::<Option<Address>>(addr)
+                    .expect("invalid address value in the database")
+            })
+            .unwrap_or_else(Address::zero);
+
         let block_hash = H256::from_slice(&storage_receipt.block_hash);
         TransactionReceipt {
             transaction_hash: H256::from_slice(&storage_receipt.tx_hash),
@@ -361,15 +371,7 @@
             l1_batch_tx_index: storage_receipt.l1_batch_tx_index.map(U64::from),
             l1_batch_number: storage_receipt.l1_batch_number.map(U64::from),
             from: H160::from_slice(&storage_receipt.initiator_address),
-            to: storage_receipt
-                .transfer_to
-                .or(storage_receipt.execute_contract_address)
-                .map(|addr| {
-                    serde_json::from_value::<Address>(addr)
-                        .expect("invalid address value in the database")
-                })
-                // For better compatibility with various clients, we never return null.
-                .or_else(|| Some(Address::default())),
+            to: Some(to),
             cumulative_gas_used: Default::default(), // TODO: Should be actually calculated (SMA-1183).
             gas_used: {
                 let refunded_gas: U256 = storage_receipt.refunded_gas.into();
@@ -508,6 +510,10 @@ impl StorageApiTransaction {
             .signature
             .and_then(|signature| PackedEthSignature::deserialize_packed(&signature).ok());
 
+        let to = serde_json::from_value(self.execute_contract_address)
+            .ok()
+            .unwrap_or_default();
+
         // For legacy and EIP-2930 transactions it is gas price willing to be paid by the sender in wei.
         // For other transactions it should be the effective gas price if transaction is included in block,
         // otherwise this value should be set equal to the max fee per gas.
@@ -528,7 +534,7 @@
             block_number: self.block_number.map(|number| U64::from(number as u64)),
             transaction_index: self.index_in_block.map(|idx| U64::from(idx as u64)),
             from: Some(Address::from_slice(&self.initiator_address)),
-            to: Some(serde_json::from_value(self.execute_contract_address).unwrap()),
+            to,
             value: bigdecimal_to_u256(self.value),
             gas_price: Some(bigdecimal_to_u256(gas_price)),
             gas: bigdecimal_to_u256(self.gas_limit.unwrap_or_else(BigDecimal::zero)),
diff --git a/core/lib/dal/src/protocol_versions_dal.rs b/core/lib/dal/src/protocol_versions_dal.rs
index 72ae811ce76..fcc756e3006 100644
--- a/core/lib/dal/src/protocol_versions_dal.rs
+++ b/core/lib/dal/src/protocol_versions_dal.rs
@@ -45,17 +45,22 @@ impl ProtocolVersionsDal<'_, '_> {
                 timestamp,
                 bootloader_code_hash,
                 default_account_code_hash,
+                evm_emulator_code_hash,
                 upgrade_tx_hash,
                 created_at
             )
             VALUES
-            ($1, $2, $3, $4, $5, NOW())
+            ($1, $2, $3, $4, $5, $6, NOW())
             ON CONFLICT DO NOTHING
             "#,
             version.minor as i32,
             timestamp as i64,
             base_system_contracts_hashes.bootloader.as_bytes(),
             base_system_contracts_hashes.default_aa.as_bytes(),
+            base_system_contracts_hashes
+                .evm_emulator
+                .as_ref()
+                .map(H256::as_bytes),
             tx_hash.as_ref().map(H256::as_bytes),
         )
         .instrument("save_protocol_version#minor")
@@ -185,6 +190,43 @@ impl ProtocolVersionsDal<'_, '_> {
         ProtocolVersionId::try_from(row.id as u16).map_err(|err| sqlx::Error::Decode(err.into()))
     }
 
+    /// Returns base system contracts' hashes. Prefer `load_base_system_contracts_by_version_id` if
+    /// you also want to load the contracts themselves AND expect the contracts to be in the DB
+    /// already.
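The receipt hunk above replaces a `null`-producing `to` field with an unconditional address. A standalone sketch of the same fallback chain (the free-standing helper is illustrative, not the DAL's API; `Address` is assumed to come from `zksync_basic_types`):

```rust
use serde_json::{json, Value};
use zksync_basic_types::Address;

/// Mirrors the receipt logic: prefer `transfer_to`, fall back to
/// `execute_contract_address`, and finally to the zero address so clients
/// never see a `null` recipient.
fn recipient(transfer_to: Option<Value>, execute_contract_address: Option<Value>) -> Address {
    transfer_to
        .or(execute_contract_address)
        .and_then(|addr| {
            serde_json::from_value::<Option<Address>>(addr)
                .expect("invalid address value in the database")
        })
        .unwrap_or_else(Address::zero)
}

fn main() {
    // EVM-style deployments store a `null` recipient, which now maps to zero.
    assert_eq!(recipient(None, Some(json!(null))), Address::zero());
    let addr: Address = "0x1f9840a85d5af5bf1d1762f925bdaddc4201f984".parse().unwrap();
    assert_eq!(recipient(Some(json!(addr)), None), addr);
}
```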
+ pub async fn get_base_system_contract_hashes_by_version_id( + &mut self, + version_id: u16, + ) -> anyhow::Result> { + let row = sqlx::query!( + r#" + SELECT + bootloader_code_hash, + default_account_code_hash, + evm_emulator_code_hash + FROM + protocol_versions + WHERE + id = $1 + "#, + i32::from(version_id) + ) + .instrument("get_base_system_contract_hashes_by_version_id") + .with_arg("version_id", &version_id) + .fetch_optional(self.storage) + .await + .context("cannot fetch system contract hashes")?; + + Ok(if let Some(row) = row { + Some(BaseSystemContractsHashes { + bootloader: H256::from_slice(&row.bootloader_code_hash), + default_aa: H256::from_slice(&row.default_account_code_hash), + evm_emulator: row.evm_emulator_code_hash.as_deref().map(H256::from_slice), + }) + } else { + None + }) + } + pub async fn load_base_system_contracts_by_version_id( &mut self, version_id: u16, @@ -193,7 +235,8 @@ impl ProtocolVersionsDal<'_, '_> { r#" SELECT bootloader_code_hash, - default_account_code_hash + default_account_code_hash, + evm_emulator_code_hash FROM protocol_versions WHERE @@ -201,7 +244,9 @@ impl ProtocolVersionsDal<'_, '_> { "#, i32::from(version_id) ) - .fetch_optional(self.storage.conn()) + .instrument("load_base_system_contracts_by_version_id") + .with_arg("version_id", &version_id) + .fetch_optional(self.storage) .await .context("cannot fetch system contract hashes")?; @@ -212,6 +257,7 @@ impl ProtocolVersionsDal<'_, '_> { .get_base_system_contracts( H256::from_slice(&row.bootloader_code_hash), H256::from_slice(&row.default_account_code_hash), + row.evm_emulator_code_hash.as_deref().map(H256::from_slice), ) .await?; Some(contracts) @@ -232,6 +278,7 @@ impl ProtocolVersionsDal<'_, '_> { protocol_versions.timestamp, protocol_versions.bootloader_code_hash, protocol_versions.default_account_code_hash, + protocol_versions.evm_emulator_code_hash, protocol_patches.patch, protocol_patches.snark_wrapper_vk_hash FROM @@ -373,6 +420,8 @@ impl ProtocolVersionsDal<'_, '_> { protocol_version FROM l1_batches + WHERE + is_sealed ORDER BY number DESC LIMIT diff --git a/core/lib/dal/src/protocol_versions_web3_dal.rs b/core/lib/dal/src/protocol_versions_web3_dal.rs index a3a7a162c3d..adc3957f872 100644 --- a/core/lib/dal/src/protocol_versions_web3_dal.rs +++ b/core/lib/dal/src/protocol_versions_web3_dal.rs @@ -21,6 +21,7 @@ impl ProtocolVersionsWeb3Dal<'_, '_> { timestamp, bootloader_code_hash, default_account_code_hash, + evm_emulator_code_hash, upgrade_tx_hash FROM protocol_versions diff --git a/core/lib/dal/src/storage_web3_dal.rs b/core/lib/dal/src/storage_web3_dal.rs index f3a20ac39fa..10d2cfe6152 100644 --- a/core/lib/dal/src/storage_web3_dal.rs +++ b/core/lib/dal/src/storage_web3_dal.rs @@ -15,6 +15,13 @@ use zksync_utils::h256_to_u256; use crate::{models::storage_block::ResolvedL1BatchForL2Block, Core, CoreDal}; +/// Raw bytecode information returned by [`StorageWeb3Dal::get_contract_code_unchecked()`]. 
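A usage sketch for the new hash getter (assuming the `ConnectionPool`/`CoreDal` accessors used by the DAL tests elsewhere in this diff):

```rust
use zksync_dal::{ConnectionPool, Core, CoreDal};

// Fetches only the base system contract hashes for a protocol version,
// which now include the optional EVM emulator hash.
async fn print_base_contract_hashes(
    pool: &ConnectionPool<Core>,
    version_id: u16,
) -> anyhow::Result<()> {
    let mut conn = pool.connection().await?;
    match conn
        .protocol_versions_dal()
        .get_base_system_contract_hashes_by_version_id(version_id)
        .await?
    {
        Some(hashes) => println!(
            "bootloader: {:?}, default AA: {:?}, EVM emulator: {:?}",
            hashes.bootloader, hashes.default_aa, hashes.evm_emulator
        ),
        None => println!("unknown protocol version {version_id}"),
    }
    Ok(())
}
```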
+#[derive(Debug)] +pub struct RawBytecode { + pub bytecode_hash: H256, + pub bytecode: Vec, +} + #[derive(Debug)] pub struct StorageWeb3Dal<'a, 'c> { pub(crate) storage: &'a mut Connection<'c, Core>, @@ -178,6 +185,8 @@ impl StorageWeb3Dal<'_, '_> { MAX(number) + 1 FROM l1_batches + WHERE + is_sealed ), ( SELECT @@ -232,16 +241,17 @@ impl StorageWeb3Dal<'_, '_> { &mut self, address: Address, block_number: L2BlockNumber, - ) -> DalResult>> { + ) -> DalResult> { let hashed_key = get_code_key(&address).hashed_key(); let row = sqlx::query!( r#" SELECT + bytecode_hash, bytecode FROM ( SELECT - * + value FROM storage_logs WHERE @@ -252,7 +262,7 @@ impl StorageWeb3Dal<'_, '_> { storage_logs.operation_number DESC LIMIT 1 - ) t + ) deploy_log JOIN factory_deps ON value = factory_deps.bytecode_hash WHERE value != $3 @@ -266,7 +276,11 @@ impl StorageWeb3Dal<'_, '_> { .with_arg("block_number", &block_number) .fetch_optional(self.storage) .await?; - Ok(row.map(|row| row.bytecode)) + + Ok(row.map(|row| RawBytecode { + bytecode_hash: H256::from_slice(&row.bytecode_hash), + bytecode: row.bytecode, + })) } /// Given bytecode hash, returns bytecode and L2 block number at which it was inserted. diff --git a/core/lib/dal/src/sync_dal.rs b/core/lib/dal/src/sync_dal.rs index c4043b6641e..55e6543c028 100644 --- a/core/lib/dal/src/sync_dal.rs +++ b/core/lib/dal/src/sync_dal.rs @@ -35,6 +35,8 @@ impl SyncDal<'_, '_> { (MAX(number) + 1) FROM l1_batches + WHERE + is_sealed ), ( SELECT @@ -50,6 +52,7 @@ impl SyncDal<'_, '_> { miniblocks.fair_pubdata_price, miniblocks.bootloader_code_hash, miniblocks.default_aa_code_hash, + miniblocks.evm_emulator_code_hash, miniblocks.virtual_blocks, miniblocks.hash, miniblocks.protocol_version AS "protocol_version!", diff --git a/core/lib/dal/src/tee_proof_generation_dal.rs b/core/lib/dal/src/tee_proof_generation_dal.rs index db56b9d0e3e..755d0276910 100644 --- a/core/lib/dal/src/tee_proof_generation_dal.rs +++ b/core/lib/dal/src/tee_proof_generation_dal.rs @@ -10,10 +10,7 @@ use zksync_db_connection::{ }; use zksync_types::{tee_types::TeeType, L1BatchNumber}; -use crate::{ - models::storage_tee_proof::StorageTeeProof, - tee_verifier_input_producer_dal::TeeVerifierInputProducerJobStatus, Core, -}; +use crate::{models::storage_tee_proof::StorageTeeProof, Core}; #[derive(Debug)] pub struct TeeProofGenerationDal<'a, 'c> { @@ -35,65 +32,76 @@ impl TeeProofGenerationDal<'_, '_> { &mut self, tee_type: TeeType, processing_timeout: Duration, - min_batch_number: Option, + min_batch_number: L1BatchNumber, ) -> DalResult> { let processing_timeout = pg_interval_from_duration(processing_timeout); - let min_batch_number = min_batch_number.map_or(0, |num| i64::from(num.0)); - let query = sqlx::query!( + let min_batch_number = i64::from(min_batch_number.0); + sqlx::query!( r#" - UPDATE tee_proof_generation_details - SET - status = $1, - updated_at = NOW(), - prover_taken_at = NOW() - WHERE - tee_type = $2 - AND l1_batch_number = ( - SELECT - proofs.l1_batch_number - FROM - tee_proof_generation_details AS proofs - JOIN - tee_verifier_input_producer_jobs AS inputs - ON proofs.l1_batch_number = inputs.l1_batch_number - WHERE - inputs.status = $3 - AND ( - proofs.status = $4 + WITH upsert AS ( + SELECT + p.l1_batch_number + FROM + proof_generation_details p + LEFT JOIN + tee_proof_generation_details tee + ON + p.l1_batch_number = tee.l1_batch_number + AND tee.tee_type = $1 + WHERE + ( + p.l1_batch_number >= $5 + AND p.vm_run_data_blob_url IS NOT NULL + AND p.proof_gen_data_blob_url IS NOT NULL + ) + 
AND ( + tee.l1_batch_number IS NULL + OR ( + tee.status = $3 OR ( - proofs.status = $1 - AND proofs.prover_taken_at < NOW() - $5::INTERVAL + tee.status = $2 + AND tee.prover_taken_at < NOW() - $4::INTERVAL ) ) - AND proofs.l1_batch_number >= $6 - ORDER BY - l1_batch_number ASC - LIMIT - 1 - FOR UPDATE - SKIP LOCKED - ) + ) + FETCH FIRST ROW ONLY + ) + + INSERT INTO + tee_proof_generation_details ( + l1_batch_number, tee_type, status, created_at, updated_at, prover_taken_at + ) + SELECT + l1_batch_number, + $1, + $2, + NOW(), + NOW(), + NOW() + FROM + upsert + ON CONFLICT (l1_batch_number, tee_type) DO + UPDATE + SET + status = $2, + updated_at = NOW(), + prover_taken_at = NOW() RETURNING - tee_proof_generation_details.l1_batch_number + l1_batch_number "#, - TeeProofGenerationJobStatus::PickedByProver.to_string(), tee_type.to_string(), - TeeVerifierInputProducerJobStatus::Successful as TeeVerifierInputProducerJobStatus, + TeeProofGenerationJobStatus::PickedByProver.to_string(), TeeProofGenerationJobStatus::Unpicked.to_string(), processing_timeout, min_batch_number - ); - - let batch_number = Instrumented::new("lock_batch_for_proving") - .with_arg("tee_type", &tee_type) - .with_arg("processing_timeout", &processing_timeout) - .with_arg("l1_batch_number", &min_batch_number) - .with(query) - .fetch_optional(self.storage) - .await? - .map(|row| L1BatchNumber(row.l1_batch_number as u32)); - - Ok(batch_number) + ) + .instrument("lock_batch_for_proving") + .with_arg("tee_type", &tee_type) + .with_arg("processing_timeout", &processing_timeout) + .with_arg("l1_batch_number", &min_batch_number) + .fetch_optional(self.storage) + .await + .map(|record| record.map(|record| L1BatchNumber(record.l1_batch_number as u32))) } pub async fn unlock_batch( @@ -176,38 +184,6 @@ impl TeeProofGenerationDal<'_, '_> { Ok(()) } - pub async fn insert_tee_proof_generation_job( - &mut self, - batch_number: L1BatchNumber, - tee_type: TeeType, - ) -> DalResult<()> { - let batch_number = i64::from(batch_number.0); - let query = sqlx::query!( - r#" - INSERT INTO - tee_proof_generation_details ( - l1_batch_number, tee_type, status, created_at, updated_at - ) - VALUES - ($1, $2, $3, NOW(), NOW()) - ON CONFLICT (l1_batch_number, tee_type) DO NOTHING - "#, - batch_number, - tee_type.to_string(), - TeeProofGenerationJobStatus::Unpicked.to_string(), - ); - let instrumentation = Instrumented::new("insert_tee_proof_generation_job") - .with_arg("l1_batch_number", &batch_number) - .with_arg("tee_type", &tee_type); - instrumentation - .clone() - .with(query) - .execute(self.storage) - .await?; - - Ok(()) - } - pub async fn save_attestation(&mut self, pubkey: &[u8], attestation: &[u8]) -> DalResult<()> { let query = sqlx::query!( r#" @@ -271,6 +247,40 @@ impl TeeProofGenerationDal<'_, '_> { Ok(proofs) } + /// For testing purposes only. 
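A prover-side usage sketch for the rewritten locking query above: a single round trip either inserts a row for a never-attempted batch or re-locks one whose `prover_taken_at` has timed out (pool setup assumed as in the DAL tests):

```rust
use std::time::Duration;

use zksync_dal::{ConnectionPool, Core, CoreDal};
use zksync_types::{tee_types::TeeType, L1BatchNumber};

// Polls for the next TEE proving job; `None` means nothing is eligible yet.
async fn next_tee_job(pool: &ConnectionPool<Core>) -> anyhow::Result<Option<L1BatchNumber>> {
    let mut conn = pool.connection().await?;
    let locked = conn
        .tee_proof_generation_dal()
        .lock_batch_for_proving(
            TeeType::Sgx,
            Duration::from_secs(600), // re-lock batches stuck longer than this
            L1BatchNumber(1),         // minimum batch number to consider
        )
        .await?;
    Ok(locked)
}
```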
+ pub async fn insert_tee_proof_generation_job( + &mut self, + batch_number: L1BatchNumber, + tee_type: TeeType, + ) -> DalResult<()> { + let batch_number = i64::from(batch_number.0); + let query = sqlx::query!( + r#" + INSERT INTO + tee_proof_generation_details ( + l1_batch_number, tee_type, status, created_at, updated_at + ) + VALUES + ($1, $2, $3, NOW(), NOW()) + ON CONFLICT (l1_batch_number, tee_type) DO NOTHING + "#, + batch_number, + tee_type.to_string(), + TeeProofGenerationJobStatus::Unpicked.to_string(), + ); + let instrumentation = Instrumented::new("insert_tee_proof_generation_job") + .with_arg("l1_batch_number", &batch_number) + .with_arg("tee_type", &tee_type); + instrumentation + .clone() + .with(query) + .execute(self.storage) + .await?; + + Ok(()) + } + + /// For testing purposes only. pub async fn get_oldest_unpicked_batch(&mut self) -> DalResult> { let query = sqlx::query!( r#" @@ -278,18 +288,13 @@ impl TeeProofGenerationDal<'_, '_> { proofs.l1_batch_number FROM tee_proof_generation_details AS proofs - JOIN - tee_verifier_input_producer_jobs AS inputs - ON proofs.l1_batch_number = inputs.l1_batch_number WHERE - inputs.status = $1 - AND proofs.status = $2 + proofs.status = $1 ORDER BY proofs.l1_batch_number ASC LIMIT 1 "#, - TeeVerifierInputProducerJobStatus::Successful as TeeVerifierInputProducerJobStatus, TeeProofGenerationJobStatus::Unpicked.to_string(), ); let batch_number = Instrumented::new("get_oldest_unpicked_batch") diff --git a/core/lib/dal/src/tee_verifier_input_producer_dal.rs b/core/lib/dal/src/tee_verifier_input_producer_dal.rs deleted file mode 100644 index dddb451a2d7..00000000000 --- a/core/lib/dal/src/tee_verifier_input_producer_dal.rs +++ /dev/null @@ -1,234 +0,0 @@ -use std::time::{Duration, Instant}; - -use sqlx::postgres::types::PgInterval; -use zksync_db_connection::{ - connection::Connection, - error::DalResult, - instrument::InstrumentExt, - utils::{duration_to_naive_time, pg_interval_from_duration}, -}; -use zksync_types::L1BatchNumber; - -use crate::Core; - -#[derive(Debug)] -pub struct TeeVerifierInputProducerDal<'a, 'c> { - pub(crate) storage: &'a mut Connection<'c, Core>, -} - -/// The amount of attempts to process a job before giving up. -pub const JOB_MAX_ATTEMPT: i16 = 5; - -/// Time to wait for job to be processed -const JOB_PROCESSING_TIMEOUT: PgInterval = pg_interval_from_duration(Duration::from_secs(10 * 60)); - -/// Status of a job that the producer will work on. - -#[derive(Debug, sqlx::Type)] -#[sqlx(type_name = "tee_verifier_input_producer_job_status")] -pub enum TeeVerifierInputProducerJobStatus { - /// When the job is queued. Metadata calculator creates the job and marks it as queued. - Queued, - /// The job is not going to be processed. This state is designed for manual operations on DB. - /// It is expected to be used if some jobs should be skipped like: - /// - testing purposes (want to check a specific L1 Batch, I can mark everything before it skipped) - /// - trim down costs on some environments (if I've done breaking changes, - /// makes no sense to wait for everything to be processed, I can just skip them and save resources) - ManuallySkipped, - /// Currently being processed by one of the jobs. Transitory state, will transition to either - /// [`TeeVerifierInputProducerStatus::Successful`] or [`TeeVerifierInputProducerStatus::Failed`]. - InProgress, - /// The final (happy case) state we expect all jobs to end up. After the run is complete, - /// the job uploaded it's inputs, it lands in successful. 
- Successful, - /// The job failed for reasons. It will be marked as such and the error persisted in DB. - /// If it failed less than MAX_ATTEMPTs, the job will be retried, - /// otherwise it will stay in this state as final state. - Failed, -} - -impl TeeVerifierInputProducerDal<'_, '_> { - pub async fn create_tee_verifier_input_producer_job( - &mut self, - l1_batch_number: L1BatchNumber, - ) -> DalResult<()> { - sqlx::query!( - r#" - INSERT INTO - tee_verifier_input_producer_jobs ( - l1_batch_number, status, created_at, updated_at - ) - VALUES - ($1, $2, NOW(), NOW()) - ON CONFLICT (l1_batch_number) DO NOTHING - "#, - i64::from(l1_batch_number.0), - TeeVerifierInputProducerJobStatus::Queued as TeeVerifierInputProducerJobStatus, - ) - .instrument("create_tee_verifier_input_producer_job") - .with_arg("l1_batch_number", &l1_batch_number) - .report_latency() - .execute(self.storage) - .await?; - - Ok(()) - } - - pub async fn get_next_tee_verifier_input_producer_job( - &mut self, - ) -> DalResult> { - let l1_batch_number = sqlx::query!( - r#" - UPDATE tee_verifier_input_producer_jobs - SET - status = $1, - attempts = attempts + 1, - updated_at = NOW(), - processing_started_at = NOW() - WHERE - l1_batch_number = ( - SELECT - l1_batch_number - FROM - tee_verifier_input_producer_jobs - WHERE - status = $2 - OR ( - status = $1 - AND processing_started_at < NOW() - $4::INTERVAL - ) - OR ( - status = $3 - AND attempts < $5 - ) - ORDER BY - l1_batch_number ASC - LIMIT - 1 - FOR UPDATE - SKIP LOCKED - ) - RETURNING - tee_verifier_input_producer_jobs.l1_batch_number - "#, - TeeVerifierInputProducerJobStatus::InProgress as TeeVerifierInputProducerJobStatus, - TeeVerifierInputProducerJobStatus::Queued as TeeVerifierInputProducerJobStatus, - TeeVerifierInputProducerJobStatus::Failed as TeeVerifierInputProducerJobStatus, - &JOB_PROCESSING_TIMEOUT, - JOB_MAX_ATTEMPT, - ) - .instrument("get_next_tee_verifier_input_producer_job") - .report_latency() - .fetch_optional(self.storage) - .await? - .map(|job| L1BatchNumber(job.l1_batch_number as u32)); - - Ok(l1_batch_number) - } - - pub async fn get_tee_verifier_input_producer_job_attempts( - &mut self, - l1_batch_number: L1BatchNumber, - ) -> DalResult> { - let attempts = sqlx::query!( - r#" - SELECT - attempts - FROM - tee_verifier_input_producer_jobs - WHERE - l1_batch_number = $1 - "#, - i64::from(l1_batch_number.0), - ) - .instrument("get_tee_verifier_input_producer_job_attempts") - .with_arg("l1_batch_number", &l1_batch_number) - .fetch_optional(self.storage) - .await? 
- .map(|job| job.attempts as u32); - - Ok(attempts) - } - - pub async fn mark_job_as_successful( - &mut self, - l1_batch_number: L1BatchNumber, - started_at: Instant, - object_path: &str, - ) -> DalResult<()> { - sqlx::query!( - r#" - UPDATE tee_verifier_input_producer_jobs - SET - status = $1, - updated_at = NOW(), - time_taken = $3, - input_blob_url = $4 - WHERE - l1_batch_number = $2 - "#, - TeeVerifierInputProducerJobStatus::Successful as TeeVerifierInputProducerJobStatus, - i64::from(l1_batch_number.0), - duration_to_naive_time(started_at.elapsed()), - object_path, - ) - .instrument("mark_job_as_successful") - .with_arg("l1_batch_number", &l1_batch_number) - .report_latency() - .execute(self.storage) - .await?; - - Ok(()) - } - - pub async fn mark_job_as_failed( - &mut self, - l1_batch_number: L1BatchNumber, - started_at: Instant, - error: String, - ) -> DalResult> { - let attempts = sqlx::query!( - r#" - UPDATE tee_verifier_input_producer_jobs - SET - status = $1, - updated_at = NOW(), - time_taken = $3, - error = $4 - WHERE - l1_batch_number = $2 - AND status != $5 - RETURNING - tee_verifier_input_producer_jobs.attempts - "#, - TeeVerifierInputProducerJobStatus::Failed as TeeVerifierInputProducerJobStatus, - i64::from(l1_batch_number.0), - duration_to_naive_time(started_at.elapsed()), - error, - TeeVerifierInputProducerJobStatus::Successful as TeeVerifierInputProducerJobStatus, - ) - .instrument("mark_job_as_failed") - .with_arg("l1_batch_number", &l1_batch_number) - .report_latency() - .fetch_optional(self.storage) - .await? - .map(|job| job.attempts as u32); - - Ok(attempts) - } -} - -/// These functions should only be used for tests. -impl TeeVerifierInputProducerDal<'_, '_> { - pub async fn delete_all_jobs(&mut self) -> DalResult<()> { - sqlx::query!( - r#" - DELETE FROM tee_verifier_input_producer_jobs - "# - ) - .instrument("delete_all_tee_verifier_jobs") - .execute(self.storage) - .await?; - Ok(()) - } -} diff --git a/core/lib/dal/src/tests/mod.rs b/core/lib/dal/src/tests/mod.rs index f628638eeb6..baa2ee58485 100644 --- a/core/lib/dal/src/tests/mod.rs +++ b/core/lib/dal/src/tests/mod.rs @@ -50,12 +50,13 @@ pub(crate) fn create_l2_block_header(number: u32) -> L2BlockHeader { batch_fee_input: BatchFeeInput::l1_pegged(100, 100), base_system_contracts_hashes: BaseSystemContractsHashes::default(), protocol_version: Some(protocol_version), - pubdata_params: PubdataParams::default(), virtual_blocks: 1, gas_limit: 0, logs_bloom: Default::default(), + pubdata_params: PubdataParams::default(), } } + pub(crate) fn create_l1_batch_header(number: u32) -> L1BatchHeader { L1BatchHeader::new( L1BatchNumber(number), @@ -63,6 +64,7 @@ pub(crate) fn create_l1_batch_header(number: u32) -> L1BatchHeader { BaseSystemContractsHashes { bootloader: H256::repeat_byte(1), default_aa: H256::repeat_byte(42), + evm_emulator: Some(H256::repeat_byte(43)), }, ProtocolVersionId::latest(), ) diff --git a/core/lib/dal/src/transactions_dal.rs b/core/lib/dal/src/transactions_dal.rs index b571231bf9c..6a5d0d92b07 100644 --- a/core/lib/dal/src/transactions_dal.rs +++ b/core/lib/dal/src/transactions_dal.rs @@ -10,9 +10,10 @@ use zksync_db_connection::{ utils::pg_interval_from_duration, }; use zksync_types::{ - block::L2BlockExecutionData, l1::L1Tx, l2::L2Tx, protocol_upgrade::ProtocolUpgradeTx, Address, - ExecuteTransactionCommon, L1BatchNumber, L1BlockNumber, L2BlockNumber, PriorityOpId, - ProtocolVersionId, Transaction, H256, PROTOCOL_UPGRADE_TX_TYPE, U256, + block::L2BlockExecutionData, 
debug_flat_call::CallTraceMeta, l1::L1Tx, l2::L2Tx, + protocol_upgrade::ProtocolUpgradeTx, Address, ExecuteTransactionCommon, L1BatchNumber, + L1BlockNumber, L2BlockNumber, PriorityOpId, ProtocolVersionId, Transaction, H256, + PROTOCOL_UPGRADE_TX_TYPE, U256, }; use zksync_utils::u256_to_big_decimal; use zksync_vm_interface::{ @@ -2189,12 +2190,17 @@ impl TransactionsDal<'_, '_> { Ok(data) } - pub async fn get_call_trace(&mut self, tx_hash: H256) -> DalResult> { + pub async fn get_call_trace( + &mut self, + tx_hash: H256, + ) -> DalResult> { let row = sqlx::query!( r#" SELECT protocol_version, - index_in_block + index_in_block, + miniblocks.number AS "miniblock_number!", + miniblocks.hash AS "miniblocks_hash!" FROM transactions INNER JOIN miniblocks ON transactions.miniblock_number = miniblocks.number @@ -2235,7 +2241,12 @@ impl TransactionsDal<'_, '_> { .map(|call_trace| { ( parse_call_trace(&call_trace.call_trace, protocol_version), - row.index_in_block.unwrap_or_default() as usize, + CallTraceMeta { + index_in_block: row.index_in_block.unwrap_or_default() as usize, + tx_hash, + block_number: row.miniblock_number as u32, + block_hash: H256::from_slice(&row.miniblocks_hash), + }, ) })) } diff --git a/core/lib/dal/src/transactions_web3_dal.rs b/core/lib/dal/src/transactions_web3_dal.rs index dcf5f25f104..c2209bb9c93 100644 --- a/core/lib/dal/src/transactions_web3_dal.rs +++ b/core/lib/dal/src/transactions_web3_dal.rs @@ -607,6 +607,39 @@ mod tests { ); } + #[tokio::test] + async fn getting_evm_deployment_tx() { + let connection_pool = ConnectionPool::::test_pool().await; + let mut conn = connection_pool.connection().await.unwrap(); + conn.protocol_versions_dal() + .save_protocol_version_with_tx(&ProtocolVersion::default()) + .await + .unwrap(); + let mut tx = mock_l2_transaction(); + tx.execute.contract_address = None; + let tx_hash = tx.hash(); + prepare_transactions(&mut conn, vec![tx.clone()]).await; + + let fetched_tx = conn + .transactions_dal() + .get_tx_by_hash(tx_hash) + .await + .unwrap() + .expect("no transaction"); + let mut fetched_tx = L2Tx::try_from(fetched_tx).unwrap(); + assert_eq!(fetched_tx.execute.contract_address, None); + fetched_tx.raw_bytes = tx.raw_bytes.clone(); + assert_eq!(fetched_tx, tx); + + let web3_tx = conn + .transactions_web3_dal() + .get_transaction_by_position(L2BlockNumber(1), 0, L2ChainId::from(270)) + .await; + let web3_tx = web3_tx.unwrap().expect("no transaction"); + assert_eq!(web3_tx.hash, tx_hash); + assert_eq!(web3_tx.to, None); + } + #[tokio::test] async fn getting_receipts() { let connection_pool = ConnectionPool::::test_pool().await; @@ -621,7 +654,7 @@ mod tests { let tx2 = mock_l2_transaction(); let tx2_hash = tx2.hash(); - prepare_transactions(&mut conn, vec![tx1.clone(), tx2.clone()]).await; + prepare_transactions(&mut conn, vec![tx1, tx2]).await; let mut receipts = conn .transactions_web3_dal() @@ -636,6 +669,31 @@ mod tests { assert_eq!(receipts[1].transaction_hash, tx2_hash); } + #[tokio::test] + async fn getting_receipt_for_evm_deployment_tx() { + let connection_pool = ConnectionPool::::test_pool().await; + let mut conn = connection_pool.connection().await.unwrap(); + conn.protocol_versions_dal() + .save_protocol_version_with_tx(&ProtocolVersion::default()) + .await + .unwrap(); + + let mut tx = mock_l2_transaction(); + let tx_hash = tx.hash(); + tx.execute.contract_address = None; + prepare_transactions(&mut conn, vec![tx]).await; + + let receipts = conn + .transactions_web3_dal() + .get_transaction_receipts(&[tx_hash]) + .await + 
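A sketch of a `debug`-namespace caller consuming the new `CallTraceMeta` (field names as introduced in this diff; pool setup assumed):

```rust
use zksync_dal::{ConnectionPool, Core, CoreDal};
use zksync_types::H256;

// `get_call_trace` now returns the block number and hash alongside the trace,
// so API responses no longer need a second lookup.
async fn print_trace_location(pool: &ConnectionPool<Core>, tx_hash: H256) -> anyhow::Result<()> {
    let mut conn = pool.connection().await?;
    if let Some((_call, meta)) = conn.transactions_dal().get_call_trace(tx_hash).await? {
        println!(
            "tx {:?} is at index {} in block #{} ({:?})",
            meta.tx_hash, meta.index_in_block, meta.block_number, meta.block_hash
        );
    }
    Ok(())
}
```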
.unwrap(); + assert_eq!(receipts.len(), 1); + let receipt = receipts.into_iter().next().unwrap(); + assert_eq!(receipt.transaction_hash, tx_hash); + assert_eq!(receipt.to, Some(Address::zero())); + } + #[tokio::test] async fn getting_l2_block_transactions() { let connection_pool = ConnectionPool::::test_pool().await; diff --git a/core/lib/dal/src/vm_runner_dal.rs b/core/lib/dal/src/vm_runner_dal.rs index b12b0218680..df0d3e86b88 100644 --- a/core/lib/dal/src/vm_runner_dal.rs +++ b/core/lib/dal/src/vm_runner_dal.rs @@ -42,6 +42,8 @@ impl VmRunnerDal<'_, '_> { MAX(number) AS "last_batch" FROM l1_batches + WHERE + is_sealed ), processed_batches AS ( @@ -205,6 +207,8 @@ impl VmRunnerDal<'_, '_> { MAX(number) AS "last_batch" FROM l1_batches + WHERE + is_sealed ), processed_batches AS ( diff --git a/core/lib/env_config/src/api.rs b/core/lib/env_config/src/api.rs index 980e238879b..0ea24ebf00d 100644 --- a/core/lib/env_config/src/api.rs +++ b/core/lib/env_config/src/api.rs @@ -76,6 +76,7 @@ mod tests { factory_deps_cache_size_mb: Some(128), initial_writes_cache_size_mb: Some(32), latest_values_cache_size_mb: Some(256), + latest_values_max_block_lag: Some(NonZeroU32::new(50).unwrap()), fee_history_limit: Some(100), max_batch_request_size: Some(200), max_response_body_size_mb: Some(10), @@ -137,6 +138,7 @@ mod tests { API_WEB3_JSON_RPC_FACTORY_DEPS_CACHE_SIZE_MB=128 API_WEB3_JSON_RPC_INITIAL_WRITES_CACHE_SIZE_MB=32 API_WEB3_JSON_RPC_LATEST_VALUES_CACHE_SIZE_MB=256 + API_WEB3_JSON_RPC_LATEST_VALUES_MAX_BLOCK_LAG=50 API_WEB3_JSON_RPC_FEE_HISTORY_LIMIT=100 API_WEB3_JSON_RPC_MAX_BATCH_REQUEST_SIZE=200 API_WEB3_JSON_RPC_WEBSOCKET_REQUESTS_PER_MINUTE_LIMIT=10 diff --git a/core/lib/env_config/src/chain.rs b/core/lib/env_config/src/chain.rs index a25c593bd88..a125f331496 100644 --- a/core/lib/env_config/src/chain.rs +++ b/core/lib/env_config/src/chain.rs @@ -102,6 +102,7 @@ mod tests { default_aa_hash: Some(hash( "0x0100055b041eb28aff6e3a6e0f37c31fd053fc9ef142683b05e5f0aee6934066", )), + evm_emulator_hash: None, l1_batch_commit_data_generator_mode, max_circuits_per_batch: 24100, protective_reads_persistence_enabled: true, diff --git a/core/lib/env_config/src/contracts.rs b/core/lib/env_config/src/contracts.rs index a386adad1df..250cfe8f002 100644 --- a/core/lib/env_config/src/contracts.rs +++ b/core/lib/env_config/src/contracts.rs @@ -128,9 +128,9 @@ CONTRACTS_BASE_TOKEN_ADDR="0x0000000000000000000000000000000000000001" CONTRACTS_USER_FACING_BRIDGEHUB_PROXY_ADDR="0x35ea7f92f4c5f433efe15284e99c040110cf6297" CONTRACTS_USER_FACING_DIAMOND_PROXY_ADDR="0xF00B988a98Ca742e7958DeF9F7823b5908715f4a CONTRACTS_L2_NATIVE_TOKEN_VAULT_PROXY_ADDR="0xfc073319977e314f251eae6ae6be76b0b3baeecf" -CONTRACTS_L2_DA_VALIDATOR_ADDR="0xed6fa5c14e7550b4caf2aa2818d24c69cbc347ff" CONTRACTS_CHAIN_ADMIN_ADDR="0xdd6fa5c14e7550b4caf2aa2818d24c69cbc347ff" CONTRACTS_SETTLEMENT_LAYER="0" +CONTRACTS_L2_DA_VALIDATOR_ADDR="0xed6fa5c14e7550b4caf2aa2818d24c69cbc347ff" "#; lock.set_env(config); diff --git a/core/lib/env_config/src/da_client.rs b/core/lib/env_config/src/da_client.rs index 0fc3ad216f8..1043786fc1e 100644 --- a/core/lib/env_config/src/da_client.rs +++ b/core/lib/env_config/src/da_client.rs @@ -2,19 +2,34 @@ use std::env; use zksync_config::configs::{ da_client::{ - avail::AvailSecrets, DAClientConfig, AVAIL_CLIENT_CONFIG_NAME, - OBJECT_STORE_CLIENT_CONFIG_NAME, + avail::{ + AvailClientConfig, AvailSecrets, AVAIL_FULL_CLIENT_NAME, AVAIL_GAS_RELAY_CLIENT_NAME, + }, + DAClientConfig, AVAIL_CLIENT_CONFIG_NAME, 
OBJECT_STORE_CLIENT_CONFIG_NAME, }, secrets::DataAvailabilitySecrets, + AvailConfig, }; use crate::{envy_load, FromEnv}; impl FromEnv for DAClientConfig { fn from_env() -> anyhow::Result { - let client_tag = std::env::var("DA_CLIENT")?; + let client_tag = env::var("DA_CLIENT")?; let config = match client_tag.as_str() { - AVAIL_CLIENT_CONFIG_NAME => Self::Avail(envy_load("da_avail_config", "DA_")?), + AVAIL_CLIENT_CONFIG_NAME => Self::Avail(AvailConfig { + bridge_api_url: env::var("DA_BRIDGE_API_URL").ok().unwrap(), + timeout: env::var("DA_TIMEOUT")?.parse()?, + config: match env::var("DA_AVAIL_CLIENT_TYPE")?.as_str() { + AVAIL_FULL_CLIENT_NAME => { + AvailClientConfig::FullClient(envy_load("da_avail_full_client", "DA_")?) + } + AVAIL_GAS_RELAY_CLIENT_NAME => { + AvailClientConfig::GasRelay(envy_load("da_avail_gas_relay", "DA_")?) + } + _ => anyhow::bail!("Unknown Avail DA client type"), + }, + }), OBJECT_STORE_CLIENT_CONFIG_NAME => { Self::ObjectStore(envy_load("da_object_store", "DA_")?) } @@ -30,11 +45,21 @@ impl FromEnv for DataAvailabilitySecrets { let client_tag = std::env::var("DA_CLIENT")?; let secrets = match client_tag.as_str() { AVAIL_CLIENT_CONFIG_NAME => { - let seed_phrase = env::var("DA_SECRETS_SEED_PHRASE") - .ok() - .map(|s| s.parse()) - .transpose()?; - Self::Avail(AvailSecrets { seed_phrase }) + let seed_phrase: Option = + env::var("DA_SECRETS_SEED_PHRASE") + .ok() + .map(|s| s.parse().unwrap()); + let gas_relay_api_key: Option = + env::var("DA_SECRETS_GAS_RELAY_API_KEY") + .ok() + .map(|s| s.parse().unwrap()); + if seed_phrase.is_none() && gas_relay_api_key.is_none() { + anyhow::bail!("No secrets provided for Avail DA client"); + } + Self::Avail(AvailSecrets { + seed_phrase, + gas_relay_api_key, + }) } _ => anyhow::bail!("Unknown DA client name: {}", client_tag), }; @@ -47,7 +72,10 @@ impl FromEnv for DataAvailabilitySecrets { mod tests { use zksync_config::{ configs::{ - da_client::{DAClientConfig, DAClientConfig::ObjectStore}, + da_client::{ + avail::{AvailClientConfig, AvailDefaultConfig}, + DAClientConfig::{self, ObjectStore}, + }, object_store::ObjectStoreMode::GCS, }, AvailConfig, ObjectStoreConfig, @@ -91,14 +119,14 @@ mod tests { bridge_api_url: &str, app_id: u32, timeout: usize, - max_retries: usize, ) -> DAClientConfig { DAClientConfig::Avail(AvailConfig { - api_node_url: api_node_url.to_string(), bridge_api_url: bridge_api_url.to_string(), - app_id, timeout, - max_retries, + config: AvailClientConfig::FullClient(AvailDefaultConfig { + api_node_url: api_node_url.to_string(), + app_id, + }), }) } @@ -107,11 +135,13 @@ mod tests { let mut lock = MUTEX.lock(); let config = r#" DA_CLIENT="Avail" - DA_API_NODE_URL="localhost:12345" + DA_AVAIL_CLIENT_TYPE="FullClient" + DA_BRIDGE_API_URL="localhost:54321" - DA_APP_ID="1" DA_TIMEOUT="2" - DA_MAX_RETRIES="3" + + DA_API_NODE_URL="localhost:12345" + DA_APP_ID="1" "#; lock.set_env(config); @@ -124,7 +154,6 @@ mod tests { "localhost:54321", "1".parse::().unwrap(), "2".parse::().unwrap(), - "3".parse::().unwrap(), ) ); } @@ -139,15 +168,18 @@ mod tests { lock.set_env(config); - let actual = match DataAvailabilitySecrets::from_env().unwrap() { - DataAvailabilitySecrets::Avail(avail) => avail.seed_phrase, + let (actual_seed, actual_key) = match DataAvailabilitySecrets::from_env().unwrap() { + DataAvailabilitySecrets::Avail(avail) => (avail.seed_phrase, avail.gas_relay_api_key), }; assert_eq!( - actual.unwrap(), - "bottom drive obey lake curtain smoke basket hold race lonely fit walk" - .parse() - .unwrap() + 
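A load sketch for the new two-level dispatch, using the env-variable names from the test above (setting process-wide env vars like this is only reasonable in tests or one-off tools):

```rust
use zksync_config::configs::da_client::DAClientConfig;
use zksync_env_config::FromEnv;

// `DA_CLIENT` selects the Avail client; `DA_AVAIL_CLIENT_TYPE` then picks
// between the full client and the gas relay.
fn load_avail_full_client() -> anyhow::Result<DAClientConfig> {
    std::env::set_var("DA_CLIENT", "Avail");
    std::env::set_var("DA_AVAIL_CLIENT_TYPE", "FullClient");
    std::env::set_var("DA_BRIDGE_API_URL", "localhost:54321");
    std::env::set_var("DA_TIMEOUT", "2");
    std::env::set_var("DA_API_NODE_URL", "localhost:12345");
    std::env::set_var("DA_APP_ID", "1");
    DAClientConfig::from_env()
}
```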
(actual_seed.unwrap(), actual_key), + ( + "bottom drive obey lake curtain smoke basket hold race lonely fit walk" + .parse() + .unwrap(), + None + ) ); } } diff --git a/core/lib/env_config/src/eth_sender.rs b/core/lib/env_config/src/eth_sender.rs index 7e9c4cc16ec..00b937fd725 100644 --- a/core/lib/env_config/src/eth_sender.rs +++ b/core/lib/env_config/src/eth_sender.rs @@ -44,7 +44,8 @@ impl FromEnv for GasAdjusterConfig { #[cfg(test)] mod tests { - use zksync_config::configs::eth_sender::{ProofSendingMode, PubdataSendingMode}; + use zksync_basic_types::pubdata_da::PubdataSendingMode; + use zksync_config::configs::eth_sender::ProofSendingMode; use super::*; use crate::test_utils::{hash, EnvMutex}; @@ -76,6 +77,7 @@ mod tests { tx_aggregation_paused: false, ignore_db_nonce: None, priority_tree_start_index: None, + time_in_mempool_in_l1_blocks_cap: 2000, }), gas_adjuster: Some(GasAdjusterConfig { default_priority_fee_per_gas: 20000000000, @@ -136,6 +138,7 @@ mod tests { ETH_SENDER_SENDER_TIMESTAMP_CRITERIA_MAX_ALLOWED_LAG="30" ETH_SENDER_SENDER_MAX_AGGREGATED_TX_GAS="4000000" ETH_SENDER_SENDER_MAX_ETH_TX_DATA_SIZE="120000" + ETH_SENDER_SENDER_TIME_IN_MEMPOOL_IN_L1_BLOCKS_CAP="2000" ETH_SENDER_SENDER_L1_BATCH_MIN_AGE_BEFORE_EXECUTE_SECONDS="1000" ETH_SENDER_SENDER_MAX_ACCEPTABLE_PRIORITY_FEE_IN_GWEI="100000000000" ETH_SENDER_SENDER_PUBDATA_SENDING_MODE="Calldata" diff --git a/core/lib/env_config/src/genesis.rs b/core/lib/env_config/src/genesis.rs index bf30fd4cc33..55c79eceb50 100644 --- a/core/lib/env_config/src/genesis.rs +++ b/core/lib/env_config/src/genesis.rs @@ -68,6 +68,7 @@ impl FromEnv for GenesisConfig { genesis_commitment: contracts_config.genesis_batch_commitment, bootloader_hash: state_keeper.bootloader_hash, default_aa_hash: state_keeper.default_aa_hash, + evm_emulator_hash: state_keeper.evm_emulator_hash, // TODO(EVM-676): for now, the settlement layer is always the same as the L1 network l1_chain_id: L1ChainId(network_config.network.chain_id().0), sl_chain_id: Some(network_config.network.chain_id()), diff --git a/core/lib/env_config/src/proof_data_handler.rs b/core/lib/env_config/src/proof_data_handler.rs index f69aa1d6dc5..b5bfda4544e 100644 --- a/core/lib/env_config/src/proof_data_handler.rs +++ b/core/lib/env_config/src/proof_data_handler.rs @@ -4,12 +4,18 @@ use crate::{envy_load, FromEnv}; impl FromEnv for ProofDataHandlerConfig { fn from_env() -> anyhow::Result { - envy_load("proof_data_handler", "PROOF_DATA_HANDLER_") + Ok(Self { + tee_config: envy_load("proof_data_handler.tee", "PROOF_DATA_HANDLER_")?, + ..envy_load("proof_data_handler", "PROOF_DATA_HANDLER_")? 
+ }) } } #[cfg(test)] mod tests { + use zksync_basic_types::L1BatchNumber; + use zksync_config::configs::TeeConfig; + use super::*; use crate::test_utils::EnvMutex; @@ -19,7 +25,10 @@ mod tests { ProofDataHandlerConfig { http_port: 3320, proof_generation_timeout_in_secs: 18000, - tee_support: true, + tee_config: TeeConfig { + tee_support: true, + first_tee_processed_batch: L1BatchNumber(1337), + }, } } @@ -29,6 +38,7 @@ mod tests { PROOF_DATA_HANDLER_PROOF_GENERATION_TIMEOUT_IN_SECS="18000" PROOF_DATA_HANDLER_HTTP_PORT="3320" PROOF_DATA_HANDLER_TEE_SUPPORT="true" + PROOF_DATA_HANDLER_FIRST_TEE_PROCESSED_BATCH="1337" "#; let mut lock = MUTEX.lock(); lock.set_env(config); diff --git a/core/lib/eth_client/src/clients/http/query.rs b/core/lib/eth_client/src/clients/http/query.rs index 4048b471862..150bc8cbd54 100644 --- a/core/lib/eth_client/src/clients/http/query.rs +++ b/core/lib/eth_client/src/clients/http/query.rs @@ -368,6 +368,7 @@ where .into_iter() .take(chunk_size) .zip(fee_history.base_fee_per_blob_gas) + .take(chunk_size) { let fees = BaseFees { base_fee_per_gas: cast_to_u64(base, "base_fee_per_gas")?, @@ -424,16 +425,12 @@ where let chunk_end = (chunk_start + FEE_HISTORY_MAX_REQUEST_CHUNK).min(upto_block); let chunk_size = chunk_end - chunk_start + 1; - let fee_history = EthNamespaceClient::fee_history( - client, - U64::from(chunk_size), - zksync_types::api::BlockNumber::from(chunk_end), - None, - ) - .rpc_context("fee_history") - .with_arg("chunk_size", &chunk_size) - .with_arg("block", &chunk_end) - .await?; + let fee_history = client + .fee_history(U64::from(chunk_size).into(), chunk_end.into(), None) + .rpc_context("fee_history") + .with_arg("chunk_size", &chunk_size) + .with_arg("block", &chunk_end) + .await?; if fee_history.inner.oldest_block != web3::BlockNumber::Number(chunk_start.into()) { let oldest_block = match fee_history.inner.oldest_block { diff --git a/core/lib/eth_client/src/types.rs b/core/lib/eth_client/src/types.rs index 59fb1cdeddc..dd332351afb 100644 --- a/core/lib/eth_client/src/types.rs +++ b/core/lib/eth_client/src/types.rs @@ -320,7 +320,7 @@ pub struct FailureInfo { #[cfg(test)] mod tests { - use zksync_eth_signer::{EthereumSigner, PrivateKeySigner, TransactionParameters}; + use zksync_eth_signer::{PrivateKeySigner, TransactionParameters}; use zksync_types::{ eth_sender::{EthTxBlobSidecarV1, SidecarBlobV1}, web3, K256PrivateKey, EIP_4844_TX_TYPE, H256, U256, U64, @@ -384,10 +384,7 @@ mod tests { .as_ref(), )]), }; - let raw_tx = signer - .sign_transaction(raw_transaction.clone()) - .await - .unwrap(); + let raw_tx = signer.sign_transaction(raw_transaction.clone()); let hash = web3::keccak256(&raw_tx).into(); // Transaction generated with https://github.com/inphi/blob-utils with @@ -493,10 +490,7 @@ mod tests { blob_versioned_hashes: Some(vec![versioned_hash_1, versioned_hash_2]), }; - let raw_tx = signer - .sign_transaction(raw_transaction.clone()) - .await - .unwrap(); + let raw_tx = signer.sign_transaction(raw_transaction); let hash = web3::keccak256(&raw_tx).into(); // Transaction generated with https://github.com/inphi/blob-utils with diff --git a/core/lib/eth_signer/Cargo.toml b/core/lib/eth_signer/Cargo.toml index f760134e09b..92bb47824f3 100644 --- a/core/lib/eth_signer/Cargo.toml +++ b/core/lib/eth_signer/Cargo.toml @@ -11,10 +11,9 @@ keywords.workspace = true categories.workspace = true [dependencies] -zksync_types.workspace = true +zksync_basic_types.workspace = true +zksync_crypto_primitives.workspace = true + +async-trait.workspace = true 
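A dependency-free sketch of the fee-pairing logic from the `query.rs` hunk above: both sides are capped at `chunk_size`, so a provider returning surplus entries (for example, the extra trailing base fee that `eth_feeHistory` reports) cannot inflate the chunk:

```rust
// Pairs base fees with blob base fees, truncated to the requested chunk.
fn pair_fees(base: Vec<u64>, blob: Vec<u64>, chunk_size: usize) -> Vec<(u64, u64)> {
    base.into_iter()
        .take(chunk_size)
        .zip(blob)
        .take(chunk_size)
        .collect()
}

fn main() {
    // Three entries come back, but only the requested two are kept.
    assert_eq!(
        pair_fees(vec![10, 11, 12], vec![1, 2, 3], 2),
        vec![(10, 1), (11, 2)]
    );
}
```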
rlp.workspace = true thiserror.workspace = true -async-trait.workspace = true - -[dev-dependencies] -tokio = { workspace = true, features = ["full"] } diff --git a/core/lib/eth_signer/src/error.rs b/core/lib/eth_signer/src/error.rs deleted file mode 100644 index 8b137891791..00000000000 --- a/core/lib/eth_signer/src/error.rs +++ /dev/null @@ -1 +0,0 @@ - diff --git a/core/lib/eth_signer/src/lib.rs b/core/lib/eth_signer/src/lib.rs index 3a92d47b062..8b6025eb15d 100644 --- a/core/lib/eth_signer/src/lib.rs +++ b/core/lib/eth_signer/src/lib.rs @@ -1,5 +1,6 @@ use async_trait::async_trait; -use zksync_types::{Address, EIP712TypedStructure, Eip712Domain, PackedEthSignature}; +use zksync_basic_types::Address; +use zksync_crypto_primitives::{EIP712TypedStructure, Eip712Domain, PackedEthSignature}; pub use crate::{pk_signer::PrivateKeySigner, raw_ethereum_tx::TransactionParameters}; diff --git a/core/lib/eth_signer/src/pk_signer.rs b/core/lib/eth_signer/src/pk_signer.rs index 47b0e110991..0f55425a0d5 100644 --- a/core/lib/eth_signer/src/pk_signer.rs +++ b/core/lib/eth_signer/src/pk_signer.rs @@ -1,5 +1,7 @@ -use zksync_types::{ - Address, EIP712TypedStructure, Eip712Domain, K256PrivateKey, PackedEthSignature, +use async_trait::async_trait; +use zksync_basic_types::Address; +use zksync_crypto_primitives::{ + EIP712TypedStructure, Eip712Domain, K256PrivateKey, PackedEthSignature, }; use crate::{ @@ -12,22 +14,20 @@ pub struct PrivateKeySigner { private_key: K256PrivateKey, } +// We define inherent methods duplicating `EthereumSigner` ones because they are sync and (other than `sign_typed_data`) infallible. impl PrivateKeySigner { pub fn new(private_key: K256PrivateKey) -> Self { Self { private_key } } -} -#[async_trait::async_trait] -impl EthereumSigner for PrivateKeySigner { - /// Get Ethereum address that matches the private key. - async fn get_address(&self) -> Result { - Ok(self.private_key.address()) + /// Gets an Ethereum address that matches this private key. + pub fn address(&self) -> Address { + self.private_key.address() } /// Signs typed struct using Ethereum private key by EIP-712 signature standard. /// Result of this function is the equivalent of RPC calling `eth_signTypedData`. - async fn sign_typed_data( + pub fn sign_typed_data( &self, domain: &Eip712Domain, typed_struct: &S, @@ -39,16 +39,11 @@ impl EthereumSigner for PrivateKeySigner { } /// Signs and returns the RLP-encoded transaction. 
- async fn sign_transaction( - &self, - raw_tx: TransactionParameters, - ) -> Result, SignerError> { + pub fn sign_transaction(&self, raw_tx: TransactionParameters) -> Vec { // According to the code in web3 // We should use `max_fee_per_gas` as `gas_price` if we use EIP1559 let gas_price = raw_tx.max_fee_per_gas; - let max_priority_fee_per_gas = raw_tx.max_priority_fee_per_gas; - let tx = Transaction { to: raw_tx.to, nonce: raw_tx.nonce, @@ -62,21 +57,42 @@ impl EthereumSigner for PrivateKeySigner { max_fee_per_blob_gas: raw_tx.max_fee_per_blob_gas, blob_versioned_hashes: raw_tx.blob_versioned_hashes, }; - let signed = tx.sign(&self.private_key, raw_tx.chain_id); - Ok(signed.raw_transaction.0) + signed.raw_transaction.0 + } +} + +#[async_trait] +impl EthereumSigner for PrivateKeySigner { + async fn get_address(&self) -> Result { + Ok(self.address()) + } + + async fn sign_typed_data( + &self, + domain: &Eip712Domain, + typed_struct: &S, + ) -> Result { + self.sign_typed_data(domain, typed_struct) + } + + async fn sign_transaction( + &self, + raw_tx: TransactionParameters, + ) -> Result, SignerError> { + Ok(self.sign_transaction(raw_tx)) } } #[cfg(test)] mod test { - use zksync_types::{K256PrivateKey, H160, H256, U256, U64}; + use zksync_basic_types::{H160, H256, U256, U64}; + use zksync_crypto_primitives::K256PrivateKey; - use super::PrivateKeySigner; - use crate::{raw_ethereum_tx::TransactionParameters, EthereumSigner}; + use super::*; - #[tokio::test] - async fn test_generating_signed_raw_transaction() { + #[test] + fn test_generating_signed_raw_transaction() { let private_key = K256PrivateKey::from_bytes(H256::from([5; 32])).unwrap(); let signer = PrivateKeySigner::new(private_key); let raw_transaction = TransactionParameters { @@ -94,10 +110,7 @@ mod test { blob_versioned_hashes: None, max_fee_per_blob_gas: None, }; - let raw_tx = signer - .sign_transaction(raw_transaction.clone()) - .await - .unwrap(); + let raw_tx = signer.sign_transaction(raw_transaction); assert_ne!(raw_tx.len(), 1); // pre-calculated signature with right algorithm implementation let precalculated_raw_tx: Vec = vec![ diff --git a/core/lib/eth_signer/src/raw_ethereum_tx.rs b/core/lib/eth_signer/src/raw_ethereum_tx.rs index 9479b5bd9d7..bea64305b47 100644 --- a/core/lib/eth_signer/src/raw_ethereum_tx.rs +++ b/core/lib/eth_signer/src/raw_ethereum_tx.rs @@ -10,11 +10,11 @@ //! 
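A usage sketch for the now-synchronous inherent API (the key material is a throwaway test constant, as in the unit test above):

```rust
use zksync_basic_types::H256;
use zksync_crypto_primitives::K256PrivateKey;
use zksync_eth_signer::PrivateKeySigner;

// Address derivation is a plain synchronous call now, so no async runtime or
// error handling is needed for the common path.
fn main() {
    let key = K256PrivateKey::from_bytes(H256::from([5; 32])).unwrap();
    let signer = PrivateKeySigner::new(key);
    println!("signer address: {:?}", signer.address());
}
```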
Link to @Deniallugo's PR to web3: https://github.com/tomusdrw/rust-web3/pull/630 use rlp::RlpStream; -use zksync_types::{ - ethabi::Address, +use zksync_basic_types::{ web3::{keccak256, AccessList, Signature, SignedTransaction}, - K256PrivateKey, H256, U256, U64, + Address, H256, U256, U64, }; +use zksync_crypto_primitives::K256PrivateKey; const LEGACY_TX_ID: u64 = 0; const ACCESSLISTS_TX_ID: u64 = 1; diff --git a/core/lib/external_price_api/Cargo.toml b/core/lib/external_price_api/Cargo.toml index 3eee675b4e6..1e849f60006 100644 --- a/core/lib/external_price_api/Cargo.toml +++ b/core/lib/external_price_api/Cargo.toml @@ -20,8 +20,12 @@ serde.workspace = true reqwest = { workspace = true, features = ["json"] } fraction.workspace = true rand.workspace = true +tracing.workspace = true zksync_config.workspace = true zksync_types.workspace = true tokio.workspace = true + +[dev-dependencies] httpmock.workspace = true +serde_json.workspace = true diff --git a/core/lib/external_price_api/src/cmc_api.rs b/core/lib/external_price_api/src/cmc_api.rs new file mode 100644 index 00000000000..05cb5e4d728 --- /dev/null +++ b/core/lib/external_price_api/src/cmc_api.rs @@ -0,0 +1,357 @@ +use std::{collections::HashMap, str::FromStr}; + +use async_trait::async_trait; +use chrono::Utc; +use serde::Deserialize; +use tokio::sync::RwLock; +use url::Url; +use zksync_config::configs::ExternalPriceApiClientConfig; +use zksync_types::{base_token_ratio::BaseTokenAPIRatio, Address}; + +use crate::{address_to_string, utils::get_fraction, PriceAPIClient}; + +const AUTH_HEADER: &str = "x-cmc_pro_api_key"; +const DEFAULT_API_URL: &str = "https://pro-api.coinmarketcap.com"; +const ALLOW_TOKENS_ONLY_ON_PLATFORM_ID: i32 = 1; // 1 = Ethereum +const REQUEST_QUOTE_IN_CURRENCY_ID: &str = "1027"; // 1027 = ETH + +#[derive(Debug)] +pub struct CmcPriceApiClient { + base_url: Url, + client: reqwest::Client, + cache_token_id_by_address: RwLock>, +} + +impl CmcPriceApiClient { + pub fn new(config: ExternalPriceApiClientConfig) -> Self { + let client = if let Some(api_key) = &config.api_key { + use reqwest::header::{HeaderMap, HeaderName, HeaderValue}; + + let default_headers = HeaderMap::from_iter([( + HeaderName::from_static(AUTH_HEADER), + HeaderValue::from_str(api_key).expect("Failed to create header value"), + )]); + + reqwest::Client::builder().default_headers(default_headers) + } else { + reqwest::Client::builder() + } + .timeout(config.client_timeout()) + .build() + .expect("Failed to build reqwest client"); + + let base_url = config.base_url.unwrap_or(DEFAULT_API_URL.to_string()); + let base_url = Url::parse(&base_url).expect("Failed to parse CoinMarketCap API URL"); + + Self { + base_url, + client, + cache_token_id_by_address: RwLock::default(), + } + } + + fn get(&self, path: &str) -> reqwest::RequestBuilder { + self.client + .get(self.base_url.join(path).expect("Failed to join URL path")) + } + + async fn get_token_id(&self, address: Address) -> anyhow::Result { + if let Some(x) = self.cache_token_id_by_address.read().await.get(&address) { + return Ok(*x); + } + + let response = self.get("/v1/cryptocurrency/map").send().await?; + let status = response.status(); + if !status.is_success() { + return Err(anyhow::anyhow!( + "Http error while fetching token id. 
Status: {status}, token: {address}, msg: {}", + response.text().await.unwrap_or_default(), + )); + } + + let parsed = response.json::().await?; + for token_info in parsed.data { + if let Some(platform) = token_info.platform { + if platform.id == ALLOW_TOKENS_ONLY_ON_PLATFORM_ID + && Address::from_str(&platform.token_address).is_ok_and(|a| a == address) + { + if token_info.is_active != 1 { + tracing::warn!( + "CoinMarketCap API reports token {} ({}) on platform {} ({}) is not active", + address_to_string(&address), + token_info.name, + platform.id, + platform.name, + ); + } + + self.cache_token_id_by_address + .write() + .await + .insert(address, token_info.id); + return Ok(token_info.id); + } + } + } + + Err(anyhow::anyhow!("Token ID not found for address {address}")) + } + + async fn get_token_price_by_address(&self, address: Address) -> anyhow::Result { + let id = self.get_token_id(address).await?; + self.get_token_price_by_id(id).await + } + + async fn get_token_price_by_id(&self, id: i32) -> anyhow::Result { + let response = self + .get("/v2/cryptocurrency/quotes/latest") + .query(&[("id", id)]) + .query(&[("convert_id", REQUEST_QUOTE_IN_CURRENCY_ID)]) + .send() + .await?; + + let status = response.status(); + if !status.is_success() { + return Err(anyhow::anyhow!( + "Http error while fetching token price. Status: {status}, token: {id}, msg: {}", + response.text().await.unwrap_or_default(), + )); + } + + response + .json::() + .await? + .data + .get(&id) + .and_then(|data| data.quote.get(REQUEST_QUOTE_IN_CURRENCY_ID)) + .map(|mq| mq.price) + .ok_or_else(|| anyhow::anyhow!("Price not found for token: {id}")) + } +} + +#[derive(Debug, Deserialize)] +struct V2CryptocurrencyQuotesLatestResponse { + data: HashMap, +} + +#[derive(Debug, Deserialize)] +struct CryptocurrencyQuoteObject { + quote: HashMap, +} + +#[derive(Debug, Deserialize)] +struct MarketQuote { + price: f64, +} + +#[derive(Debug, Deserialize)] +struct V1CryptocurrencyMapResponse { + data: Vec, +} + +#[derive(Debug, Deserialize)] +struct CryptocurrencyObject { + id: i32, + name: String, + is_active: u8, + platform: Option, +} + +#[derive(Debug, Deserialize)] +struct CryptocurrencyPlatform { + id: i32, + name: String, + token_address: String, +} + +#[async_trait] +impl PriceAPIClient for CmcPriceApiClient { + async fn fetch_ratio(&self, token_address: Address) -> anyhow::Result { + let base_token_in_eth = self.get_token_price_by_address(token_address).await?; + let (term_ether, term_base_token) = get_fraction(base_token_in_eth)?; + + return Ok(BaseTokenAPIRatio { + numerator: term_base_token, + denominator: term_ether, + ratio_timestamp: Utc::now(), + }); + } +} + +#[cfg(test)] +mod tests { + use httpmock::prelude::*; + use serde_json::json; + + use super::*; + use crate::tests::*; + + fn make_client(server: &MockServer, api_key: Option) -> Box { + Box::new(CmcPriceApiClient::new(ExternalPriceApiClientConfig { + source: "coinmarketcap".to_string(), + base_url: Some(server.base_url()), + api_key, + client_timeout_ms: 5000, + forced: None, + })) + } + + fn make_mock_server() -> MockServer { + let mock_server = MockServer::start(); + // cryptocurrency map + mock_server.mock(|when, then| { + when.method(GET) + .header_exists(AUTH_HEADER) + .path("/v1/cryptocurrency/map"); + then.status(200) + .header("content-type", "application/json") + .json_body(json!({ + "status": { + "timestamp": "2024-09-25T11:29:38.440Z", + "error_code": 0, + "error_message": null, + "elapsed": 351, + "credit_count": 1, + "notice": null + }, + "data": [ + { 
+ "id": 7083, + "rank": 26, + "name": "Uniswap", + "symbol": "UNI", + "slug": "uniswap", + "is_active": 1, + "first_historical_data": "2020-09-17T01:10:00.000Z", + "last_historical_data": "2024-09-25T11:25:00.000Z", + "platform": { + "id": 1, + "name": "Ethereum", + "symbol": "ETH", + "slug": "ethereum", + "token_address": "0x1f9840a85d5af5bf1d1762f925bdaddc4201f984" + } + } + ] + })); + }); + + // cryptocurrency quote + mock_server.mock(|when, then| { + // TODO: check for api authentication header + when.method(GET) + .header_exists(AUTH_HEADER) + .path("/v2/cryptocurrency/quotes/latest") + .query_param("id", "7083") // Uniswap + .query_param("convert_id", "1027"); // Ether + then.status(200) + .header("content-type", "application/json") + .json_body(json!({ + "status": { + "timestamp": "2024-10-02T14:15:07.189Z", + "error_code": 0, + "error_message": null, + "elapsed": 39, + "credit_count": 1, + "notice": null + }, + "data": { + "7083": { + "id": 7083, + "name": "Uniswap", + "symbol": "UNI", + "slug": "uniswap", + "date_added": "2020-09-17T00:00:00.000Z", + "tags": [], + "max_supply": null, + "circulating_supply": 600294743.71, + "total_supply": 1000000000, + "platform": { + "id": 1027, + "name": "Ethereum", + "symbol": "ETH", + "slug": "ethereum", + "token_address": "0x1f9840a85d5af5bf1d1762f925bdaddc4201f984" + }, + "is_active": 1, + "infinite_supply": false, + "cmc_rank": 22, + "is_fiat": 0, + "last_updated": "2024-10-02T14:13:00.000Z", + "quote": { + "1027": { + "price": 0.0028306661720164175, + "last_updated": "2024-10-02T14:12:00.000Z" + } + } + } + } + })); + }); + + mock_server + } + + #[tokio::test] + async fn mock_happy() { + let server = make_mock_server(); + let client = make_client( + &server, + Some("00000000-0000-0000-0000-000000000000".to_string()), + ); + + let token_address: Address = TEST_TOKEN_ADDRESS.parse().unwrap(); + + let api_price = client.fetch_ratio(token_address).await.unwrap(); + + const REPORTED_PRICE: f64 = 1_f64 / 0.0028306661720164175_f64; + const EPSILON: f64 = 0.000001_f64 * REPORTED_PRICE; + + assert!((approximate_value(&api_price) - REPORTED_PRICE).abs() < EPSILON); + } + + #[tokio::test] + #[should_panic = "Request did not match any route or mock"] + async fn mock_fail_no_api_key() { + let server = make_mock_server(); + let client = make_client(&server, None); + + let token_address: Address = TEST_TOKEN_ADDRESS.parse().unwrap(); + + client.fetch_ratio(token_address).await.unwrap(); + } + + #[tokio::test] + #[should_panic = "Token ID not found for address"] + async fn mock_fail_not_found() { + let server = make_mock_server(); + let client = make_client( + &server, + Some("00000000-0000-0000-0000-000000000000".to_string()), + ); + + let token_address: Address = Address::random(); + + client.fetch_ratio(token_address).await.unwrap(); + } + + #[tokio::test] + #[ignore = "run manually (accesses network); specify CoinMarketCap API key in env var CMC_API_KEY"] + async fn real_cmc_tether() { + let client = CmcPriceApiClient::new(ExternalPriceApiClientConfig { + api_key: Some(std::env::var("CMC_API_KEY").unwrap()), + base_url: None, + client_timeout_ms: 5000, + source: "coinmarketcap".to_string(), + forced: None, + }); + + let tether: Address = "0xdac17f958d2ee523a2206206994597c13d831ec7" + .parse() + .unwrap(); + + let r = client.get_token_price_by_address(tether).await.unwrap(); + + println!("{r}"); + } +} diff --git a/core/lib/external_price_api/src/lib.rs b/core/lib/external_price_api/src/lib.rs index 7a068f9b1cb..01fc433802b 100644 --- 
a/core/lib/external_price_api/src/lib.rs +++ b/core/lib/external_price_api/src/lib.rs @@ -1,3 +1,4 @@ +pub mod cmc_api; pub mod coingecko_api; pub mod forced_price_client; #[cfg(test)] diff --git a/core/lib/external_price_api/src/tests.rs b/core/lib/external_price_api/src/tests.rs index bb2af866cf5..fd6a8b9928f 100644 --- a/core/lib/external_price_api/src/tests.rs +++ b/core/lib/external_price_api/src/tests.rs @@ -2,13 +2,13 @@ use std::str::FromStr; use chrono::Utc; use httpmock::MockServer; -use zksync_types::Address; +use zksync_types::{base_token_ratio::BaseTokenAPIRatio, Address}; use crate::PriceAPIClient; const TIME_TOLERANCE_MS: i64 = 100; /// Uniswap (UNI) -const TEST_TOKEN_ADDRESS: &str = "0x1f9840a85d5af5bf1d1762f925bdaddc4201f984"; +pub const TEST_TOKEN_ADDRESS: &str = "0x1f9840a85d5af5bf1d1762f925bdaddc4201f984"; /// 1UNI = 0.00269ETH const TEST_TOKEN_PRICE_ETH: f64 = 0.00269; /// 1ETH = 371.74UNI; When converting gas price from ETH to UNI @@ -16,6 +16,10 @@ const TEST_TOKEN_PRICE_ETH: f64 = 0.00269; const TEST_BASE_PRICE: f64 = 371.74; const PRICE_FLOAT_COMPARE_TOLERANCE: f64 = 0.1; +pub(crate) fn approximate_value(api_price: &BaseTokenAPIRatio) -> f64 { + api_price.numerator.get() as f64 / api_price.denominator.get() as f64 +} + pub(crate) struct SetupResult { pub(crate) client: Box, } diff --git a/core/lib/l1_contract_interface/Cargo.toml b/core/lib/l1_contract_interface/Cargo.toml index 4a190f82efc..f0e734e0668 100644 --- a/core/lib/l1_contract_interface/Cargo.toml +++ b/core/lib/l1_contract_interface/Cargo.toml @@ -20,12 +20,14 @@ crypto_codegen.workspace = true # Used to calculate the kzg commitment and proofs kzg.workspace = true +anyhow.workspace = true sha2.workspace = true sha3.workspace = true hex.workspace = true once_cell.workspace = true [dev-dependencies] +rand.workspace = true serde.workspace = true serde_json.workspace = true serde_with = { workspace = true, features = ["base64", "hex"] } diff --git a/core/lib/l1_contract_interface/src/i_executor/methods/commit_batches.rs b/core/lib/l1_contract_interface/src/i_executor/methods/commit_batches.rs index a805876ca40..5a05cb0ffa5 100644 --- a/core/lib/l1_contract_interface/src/i_executor/methods/commit_batches.rs +++ b/core/lib/l1_contract_interface/src/i_executor/methods/commit_batches.rs @@ -1,7 +1,7 @@ use zksync_types::{ commitment::{L1BatchCommitmentMode, L1BatchWithMetadata}, ethabi::{encode, Token}, - pubdata_da::PubdataDA, + pubdata_da::PubdataSendingMode, }; use crate::{ @@ -14,7 +14,7 @@ use crate::{ pub struct CommitBatches<'a> { pub last_committed_l1_batch: &'a L1BatchWithMetadata, pub l1_batches: &'a [L1BatchWithMetadata], - pub pubdata_da: PubdataDA, + pub pubdata_da: PubdataSendingMode, pub mode: L1BatchCommitmentMode, } diff --git a/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs b/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs index f9dcdaaed10..0240acba350 100644 --- a/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs +++ b/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs @@ -6,7 +6,7 @@ use zksync_types::{ L1BatchWithMetadata, }, ethabi::{ParamType, Token}, - pubdata_da::PubdataDA, + pubdata_da::PubdataSendingMode, web3::{contract::Error as ContractError, keccak256}, ProtocolVersionId, H256, U256, }; @@ -26,14 +26,14 @@ const PUBDATA_SOURCE_CUSTOM_PRE_GATEWAY: u8 = 2; pub struct CommitBatchInfo<'a> { mode: L1BatchCommitmentMode, l1_batch_with_metadata: &'a L1BatchWithMetadata, - 
pubdata_da: PubdataDA, + pubdata_da: PubdataSendingMode, } impl<'a> CommitBatchInfo<'a> { pub fn new( mode: L1BatchCommitmentMode, l1_batch_with_metadata: &'a L1BatchWithMetadata, - pubdata_da: PubdataDA, + pubdata_da: PubdataSendingMode, ) -> Self { Self { mode, @@ -221,22 +221,22 @@ impl Tokenizable for CommitBatchInfo<'_> { // Here we're not pushing any pubdata on purpose; no pubdata is sent in Validium mode. ( L1BatchCommitmentMode::Validium, - PubdataDA::Calldata | PubdataDA::RelayedL2Calldata, + PubdataSendingMode::Calldata | PubdataSendingMode::RelayedL2Calldata, ) => { vec![PUBDATA_SOURCE_CALLDATA] } - (L1BatchCommitmentMode::Validium, PubdataDA::Blobs) => { + (L1BatchCommitmentMode::Validium, PubdataSendingMode::Blobs) => { vec![PUBDATA_SOURCE_BLOBS] } - (L1BatchCommitmentMode::Rollup, PubdataDA::Custom) => { + (L1BatchCommitmentMode::Rollup, PubdataSendingMode::Custom) => { panic!("Custom pubdata DA is incompatible with Rollup mode") } - (L1BatchCommitmentMode::Validium, PubdataDA::Custom) => { + (L1BatchCommitmentMode::Validium, PubdataSendingMode::Custom) => { vec![PUBDATA_SOURCE_CUSTOM_PRE_GATEWAY] } ( L1BatchCommitmentMode::Rollup, - PubdataDA::Calldata | PubdataDA::RelayedL2Calldata, + PubdataSendingMode::Calldata | PubdataSendingMode::RelayedL2Calldata, ) => { // We compute and add the blob commitment to the pubdata payload so that we can verify the proof // even if we are not using blobs. @@ -247,7 +247,7 @@ impl Tokenizable for CommitBatchInfo<'_> { .chain(blob_commitment) .collect() } - (L1BatchCommitmentMode::Rollup, PubdataDA::Blobs) => { + (L1BatchCommitmentMode::Rollup, PubdataSendingMode::Blobs) => { let pubdata = self.pubdata_input(); let pubdata_commitments = pubdata.chunks(ZK_SYNC_BYTES_PER_BLOB).flat_map(|blob| { @@ -267,7 +267,7 @@ impl Tokenizable for CommitBatchInfo<'_> { .expect("Failed to get state_diff_hash from metadata"); tokens.push(Token::Bytes(match (self.mode, self.pubdata_da) { // Validiums with custom DA need the inclusion data to be part of operator_da_input - (L1BatchCommitmentMode::Validium, PubdataDA::Custom) => { + (L1BatchCommitmentMode::Validium, PubdataSendingMode::Custom) => { let mut operator_da_input: Vec = state_diff_hash.0.into(); operator_da_input.extend( @@ -284,14 +284,16 @@ impl Tokenizable for CommitBatchInfo<'_> { // Here we're not pushing any pubdata on purpose; no pubdata is sent in Validium mode. 
( L1BatchCommitmentMode::Validium, - PubdataDA::Calldata | PubdataDA::RelayedL2Calldata | PubdataDA::Blobs, + PubdataSendingMode::Calldata + | PubdataSendingMode::RelayedL2Calldata + | PubdataSendingMode::Blobs, ) => state_diff_hash.0.into(), - (L1BatchCommitmentMode::Rollup, PubdataDA::Custom) => { + (L1BatchCommitmentMode::Rollup, PubdataSendingMode::Custom) => { panic!("Custom pubdata DA is incompatible with Rollup mode") } ( L1BatchCommitmentMode::Rollup, - PubdataDA::Calldata | PubdataDA::RelayedL2Calldata, + PubdataSendingMode::Calldata | PubdataSendingMode::RelayedL2Calldata, ) => { let pubdata = self.pubdata_input(); @@ -308,7 +310,7 @@ impl Tokenizable for CommitBatchInfo<'_> { .chain(blob_commitment) .collect() } - (L1BatchCommitmentMode::Rollup, PubdataDA::Blobs) => { + (L1BatchCommitmentMode::Rollup, PubdataSendingMode::Blobs) => { let pubdata = self.pubdata_input(); let header = diff --git a/core/lib/l1_contract_interface/src/i_executor/structures/mod.rs b/core/lib/l1_contract_interface/src/i_executor/structures/mod.rs index b6d2eefac30..b71d0938049 100644 --- a/core/lib/l1_contract_interface/src/i_executor/structures/mod.rs +++ b/core/lib/l1_contract_interface/src/i_executor/structures/mod.rs @@ -4,4 +4,7 @@ mod commit_batch_info; mod stored_batch_info; pub const SUPPORTED_ENCODING_VERSION: u8 = 0; +#[cfg(test)] +mod tests; + pub use self::{commit_batch_info::CommitBatchInfo, stored_batch_info::StoredBatchInfo}; diff --git a/core/lib/l1_contract_interface/src/i_executor/structures/stored_batch_info.rs b/core/lib/l1_contract_interface/src/i_executor/structures/stored_batch_info.rs index 18b28f34c29..5ac40bce66e 100644 --- a/core/lib/l1_contract_interface/src/i_executor/structures/stored_batch_info.rs +++ b/core/lib/l1_contract_interface/src/i_executor/structures/stored_batch_info.rs @@ -1,7 +1,8 @@ +use anyhow::Context as _; use zksync_types::{ commitment::L1BatchWithMetadata, ethabi::{self, ParamType, Token}, - web3, + parse_h256, web3, web3::contract::Error as ContractError, H256, U256, }; @@ -9,7 +10,7 @@ use zksync_types::{ use crate::Tokenizable; /// `StoredBatchInfo` from `IExecutor.sol`. -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] pub struct StoredBatchInfo { pub batch_number: u64, pub batch_hash: H256, @@ -22,11 +23,22 @@ pub struct StoredBatchInfo { } impl StoredBatchInfo { + /// ABI-encodes the struct. + pub fn encode(&self) -> Vec<u8> { + ethabi::encode(&[self.clone().into_token()]) + } + + /// Decodes the struct from its ABI encoding. + pub fn decode(encoded: &[u8]) -> anyhow::Result<Self> { + let [token] = ethabi::decode_whole(&[Self::schema()], encoded)? + .try_into() + .unwrap(); + Ok(Self::from_token(token)?) + } + /// `_hashStoredBatchInfo` from `Executor.sol`. pub fn hash(&self) -> H256 { - H256(web3::keccak256(&ethabi::encode(&[self - .clone() - .into_token()]))) + H256(web3::keccak256(&self.encode())) } pub fn schema() -> ParamType { @@ -59,11 +71,42 @@ impl From<&L1BatchWithMetadata> for StoredBatchInfo { } impl Tokenizable for StoredBatchInfo { - fn from_token(_token: Token) -> Result<Self, ContractError> { - // Currently there is no need to decode this struct. - // We still want to implement `Tokenizable` trait for it, so that *once* it's needed - // the implementation is provided here and not in some other inconsistent way.
- Err(ContractError::Other("Not implemented".into())) + fn from_token(token: Token) -> Result<Self, ContractError> { + (|| { + let [ + Token::Uint(batch_number), + Token::FixedBytes(batch_hash), + Token::Uint(index_repeated_storage_changes), + Token::Uint(number_of_layer1_txs), + Token::FixedBytes(priority_operations_hash), + Token::FixedBytes(l2_logs_tree_root), + Token::Uint(timestamp), + Token::FixedBytes(commitment), + ] : [Token; 8] = token + .into_tuple().context("not a tuple")? + .try_into().ok().context("bad length")? + else { anyhow::bail!("bad format") }; + Ok(Self { + batch_number: batch_number + .try_into() + .ok() + .context("overflow") + .context("batch_number")?, + batch_hash: parse_h256(&batch_hash).context("batch_hash")?, + index_repeated_storage_changes: index_repeated_storage_changes + .try_into() + .ok() + .context("overflow") + .context("index_repeated_storage_changes")?, + number_of_layer1_txs, + priority_operations_hash: parse_h256(&priority_operations_hash) + .context("priority_operations_hash")?, + l2_logs_tree_root: parse_h256(&l2_logs_tree_root).context("l2_logs_tree_root")?, + timestamp, + commitment: parse_h256(&commitment).context("commitment")?, + }) + })() + .map_err(|err| ContractError::InvalidOutputType(format!("{err:#}"))) } fn into_token(self) -> Token { diff --git a/core/lib/l1_contract_interface/src/i_executor/structures/tests.rs b/core/lib/l1_contract_interface/src/i_executor/structures/tests.rs new file mode 100644 index 00000000000..0cb8caffb34 --- /dev/null +++ b/core/lib/l1_contract_interface/src/i_executor/structures/tests.rs @@ -0,0 +1,32 @@ +use rand::{ + distributions::{Distribution, Standard}, + Rng, +}; + +use super::*; + +impl Distribution<StoredBatchInfo> for Standard { + fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> StoredBatchInfo { + StoredBatchInfo { + batch_number: rng.gen(), + batch_hash: rng.gen(), + index_repeated_storage_changes: rng.gen(), + number_of_layer1_txs: rng.gen::<u64>().into(), + priority_operations_hash: rng.gen(), + l2_logs_tree_root: rng.gen(), + timestamp: rng.gen::<u64>().into(), + commitment: rng.gen(), + } + } +} + +/// Test checking encoding and decoding of `StoredBatchInfo`. +#[test] +fn test_encoding() { + let rng = &mut rand::thread_rng(); + for _ in 0..10 { + let want: StoredBatchInfo = rng.gen(); + let got = StoredBatchInfo::decode(&want.encode()).unwrap(); + assert_eq!(want, got); + } +} diff --git a/core/lib/l1_contract_interface/src/multicall3/mod.rs b/core/lib/l1_contract_interface/src/multicall3/mod.rs index 7d922668f94..52df37e0430 100644 --- a/core/lib/l1_contract_interface/src/multicall3/mod.rs +++ b/core/lib/l1_contract_interface/src/multicall3/mod.rs @@ -7,6 +7,7 @@ use zksync_types::{ }; /// Multicall3 contract aggregate method input vector struct.
+#[derive(Debug)] pub struct Multicall3Call { pub target: Address, pub allow_failure: bool, @@ -21,6 +22,7 @@ impl Tokenizable for Multicall3Call { self.calldata.into_token(), ]) } + fn from_token(token: Token) -> Result<Self, ContractError> { let Token::Tuple(mut result_token) = token else { return Err(error(&[token], "Multicall3Call")); diff --git a/core/lib/mempool/src/mempool_store.rs b/core/lib/mempool/src/mempool_store.rs index 334a4783a76..f6f9b72f9b6 100644 --- a/core/lib/mempool/src/mempool_store.rs +++ b/core/lib/mempool/src/mempool_store.rs @@ -1,4 +1,4 @@ -use std::collections::{hash_map, BTreeSet, HashMap, HashSet}; +use std::collections::{hash_map, BTreeSet, HashMap}; use zksync_types::{ l1::L1Tx, l2::L2Tx, Address, ExecuteTransactionCommon, Nonce, PriorityOpId, Transaction, @@ -221,22 +221,57 @@ impl MempoolStore { } fn gc(&mut self) -> Vec<Address>
{ - if self.size >= self.capacity { - let index: HashSet<_> = self + if self.size > self.capacity { + let mut transactions = std::mem::take(&mut self.l2_transactions_per_account); + // Walk the priority queue from highest to lowest score, pulling each account's txs out of the map. + let mut possibly_kept: Vec<_> = self .l2_priority_queue .iter() - .map(|pointer| pointer.account) + .rev() + .filter_map(|pointer| { + transactions + .remove(&pointer.account) + .map(|txs| (pointer.account, txs)) + }) .collect(); - let transactions = std::mem::take(&mut self.l2_transactions_per_account); - let (kept, drained) = transactions + + let mut sum = 0; + let mut number_of_accounts_kept = 0; + for (_, txs) in &possibly_kept { + sum += txs.len(); + if sum <= self.capacity as usize { + number_of_accounts_kept += 1; + } else { + break; + } + } + if number_of_accounts_kept == 0 && !possibly_kept.is_empty() { + tracing::warn!("mempool capacity is too low to handle txs from a single account, consider increasing capacity"); + // Keep at least one entry, otherwise mempool won't return any new L2 tx to process. + number_of_accounts_kept = 1; + } + let (kept, drained) = { + let mut drained: Vec<_> = transactions.into_keys().collect(); + let also_drained = possibly_kept + .split_off(number_of_accounts_kept) + .into_iter() + .map(|(address, _)| address); + drained.extend(also_drained); + + (possibly_kept, drained) + }; + + let l2_priority_queue = std::mem::take(&mut self.l2_priority_queue); + self.l2_priority_queue = l2_priority_queue .into_iter() - .partition(|(address, _)| index.contains(address)); - self.l2_transactions_per_account = kept; + .rev() + .take(number_of_accounts_kept) + .collect(); + self.l2_transactions_per_account = kept.into_iter().collect(); self.size = self .l2_transactions_per_account .iter() - .fold(0, |agg, (_, tnxs)| agg + tnxs.len() as u64); - return drained.into_keys().collect(); + .fold(0, |agg, (_, txs)| agg + txs.len() as u64); + return drained; } vec![] } diff --git a/core/lib/mempool/src/tests.rs b/core/lib/mempool/src/tests.rs index 96ef600984f..b84ab7d5765 100644 --- a/core/lib/mempool/src/tests.rs +++ b/core/lib/mempool/src/tests.rs @@ -321,32 +321,26 @@ fn stashed_accounts() { #[test] fn mempool_capacity() { - let mut mempool = MempoolStore::new(PriorityOpId(0), 5); + let mut mempool = MempoolStore::new(PriorityOpId(0), 4); let account0 = Address::random(); let account1 = Address::random(); let account2 = Address::random(); + let account3 = Address::random(); let transactions = vec![ gen_l2_tx(account0, Nonce(0)), gen_l2_tx(account0, Nonce(1)), gen_l2_tx(account0, Nonce(2)), - gen_l2_tx(account1, Nonce(1)), - gen_l2_tx(account2, Nonce(1)), + gen_l2_tx_with_timestamp(account1, Nonce(0), unix_timestamp_ms() + 1), + gen_l2_tx_with_timestamp(account2, Nonce(0), unix_timestamp_ms() + 2), + gen_l2_tx(account3, Nonce(1)), ]; mempool.insert(transactions, HashMap::new()); - // the mempool is full. Accounts with non-sequential nonces got stashed + // Mempool is full. Accounts with non-sequential nonces and some accounts with the lowest score should be purged. assert_eq!( HashSet::<_>::from_iter(mempool.get_mempool_info().purged_accounts), - HashSet::<_>::from_iter(vec![account1, account2]), - ); - // verify that existing good-to-go transactions and new ones got picked - mempool.insert( - vec![gen_l2_tx_with_timestamp( - account1, - Nonce(0), - unix_timestamp_ms() + 1, - )], - HashMap::new(), + HashSet::from([account2, account3]), ); + // verify that good-to-go transactions are kept.
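+ // With capacity 4, the kept txs are account0's three sequential txs plus account1's tx (the next-best score); they drain in score order below, after which the mempool is empty.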
for _ in 0..3 { assert_eq!( mempool @@ -363,6 +357,34 @@ fn mempool_capacity() { .initiator_account(), account1 ); + assert!(!mempool.has_next(&L2TxFilter::default())); +} + +#[test] +fn mempool_does_not_purge_all_accounts() { + let mut mempool = MempoolStore::new(PriorityOpId(0), 1); + let account0 = Address::random(); + let account1 = Address::random(); + let transactions = vec![ + gen_l2_tx(account0, Nonce(0)), + gen_l2_tx(account0, Nonce(1)), + gen_l2_tx(account1, Nonce(1)), + ]; + mempool.insert(transactions, HashMap::new()); + // Mempool is full. Account 1 has tx with non-sequential nonce so it should be purged. + // Txs from account 0 have sequential nonces but their number is greater than capacity; they should be kept. + assert_eq!(mempool.get_mempool_info().purged_accounts, vec![account1]); + // verify that good-to-go transactions are kept. + for _ in 0..2 { + assert_eq!( + mempool + .next_transaction(&L2TxFilter::default()) + .unwrap() + .initiator_account(), + account0 + ); + } + assert!(!mempool.has_next(&L2TxFilter::default())); } fn gen_l2_tx(address: Address, nonce: Nonce) -> Transaction { diff --git a/core/lib/merkle_tree/src/domain.rs b/core/lib/merkle_tree/src/domain.rs index a4d577fc3ba..bb69bda209c 100644 --- a/core/lib/merkle_tree/src/domain.rs +++ b/core/lib/merkle_tree/src/domain.rs @@ -9,10 +9,11 @@ use crate::{ consistency::ConsistencyError, storage::{PatchSet, Patched, RocksDBWrapper}, types::{ - Key, Root, TreeEntry, TreeEntryWithProof, TreeInstruction, TreeLogEntry, ValueHash, - TREE_DEPTH, + Key, NodeKey, RawNode, Root, TreeEntry, TreeEntryWithProof, TreeInstruction, TreeLogEntry, + ValueHash, TREE_DEPTH, }, BlockOutput, HashTree, MerkleTree, MerkleTreePruner, MerkleTreePrunerHandle, NoVersionError, + PruneDatabase, }; impl TreeInstruction { @@ -444,6 +445,28 @@ impl ZkSyncTreeReader { self.0.entries_with_proofs(version, keys) } + /// Returns raw nodes for the specified `keys`. + pub fn raw_nodes(&self, keys: &[NodeKey]) -> Vec> { + let raw_nodes = self.0.db.raw_nodes(keys).into_iter(); + raw_nodes + .zip(keys) + .map(|(slice, key)| { + let slice = slice?; + Some(if key.is_empty() { + RawNode::deserialize_root(&slice) + } else { + RawNode::deserialize(&slice) + }) + }) + .collect() + } + + /// Returns raw stale keys obsoleted in the specified version of the tree. + pub fn raw_stale_keys(&self, l1_batch_number: L1BatchNumber) -> Vec { + let version = u64::from(l1_batch_number.0); + self.0.db.stale_keys(version) + } + /// Verifies consistency of the tree at the specified L1 batch number. /// /// # Errors diff --git a/core/lib/merkle_tree/src/errors.rs b/core/lib/merkle_tree/src/errors.rs index b8130717f93..c187ce4977b 100644 --- a/core/lib/merkle_tree/src/errors.rs +++ b/core/lib/merkle_tree/src/errors.rs @@ -22,6 +22,8 @@ pub enum DeserializeErrorKind { /// Bit mask specifying a child kind in an internal tree node is invalid. #[error("invalid bit mask specifying a child kind in an internal tree node")] InvalidChildKind, + #[error("data left after deserialization")] + Leftovers, /// Missing required tag in the tree manifest. 
#[error("missing required tag `{0}` in tree manifest")] diff --git a/core/lib/merkle_tree/src/lib.rs b/core/lib/merkle_tree/src/lib.rs index 6f9da59cf0e..824f23eaf52 100644 --- a/core/lib/merkle_tree/src/lib.rs +++ b/core/lib/merkle_tree/src/lib.rs @@ -82,7 +82,7 @@ mod utils; pub mod unstable { pub use crate::{ errors::DeserializeError, - types::{Manifest, Node, NodeKey, ProfiledTreeOperation, Root}, + types::{Manifest, Node, NodeKey, ProfiledTreeOperation, RawNode, Root}, }; } diff --git a/core/lib/merkle_tree/src/storage/rocksdb.rs b/core/lib/merkle_tree/src/storage/rocksdb.rs index 711ccaa6137..22335c82940 100644 --- a/core/lib/merkle_tree/src/storage/rocksdb.rs +++ b/core/lib/merkle_tree/src/storage/rocksdb.rs @@ -53,6 +53,23 @@ impl NamedColumnFamily for MerkleTreeColumnFamily { type LocalProfiledOperation = RefCell>>; +/// Unifies keys that can be used to load raw data from RocksDB. +pub(crate) trait ToDbKey: Sync { + fn to_db_key(&self) -> Vec; +} + +impl ToDbKey for NodeKey { + fn to_db_key(&self) -> Vec { + NodeKey::to_db_key(*self) + } +} + +impl ToDbKey for (NodeKey, bool) { + fn to_db_key(&self) -> Vec { + NodeKey::to_db_key(self.0) + } +} + /// Main [`Database`] implementation wrapping a [`RocksDB`] reference. /// /// # Cloning @@ -112,7 +129,7 @@ impl RocksDBWrapper { .expect("Failed reading from RocksDB") } - fn raw_nodes(&self, keys: &NodeKeys) -> Vec>> { + pub(crate) fn raw_nodes(&self, keys: &[T]) -> Vec>> { // Propagate the currently profiled operation to rayon threads used in the parallel iterator below. let profiled_operation = self .profiled_operation @@ -126,7 +143,7 @@ impl RocksDBWrapper { let _guard = profiled_operation .as_ref() .and_then(ProfiledOperation::start_profiling); - let keys = chunk.iter().map(|(key, _)| key.to_db_key()); + let keys = chunk.iter().map(ToDbKey::to_db_key); let results = self.db.multi_get_cf(MerkleTreeColumnFamily::Tree, keys); results .into_iter() @@ -144,9 +161,9 @@ impl RocksDBWrapper { // If we didn't succeed with the patch set, or the key version is old, // access the underlying storage. 
let node = if is_leaf { - LeafNode::deserialize(raw_node).map(Node::Leaf) + LeafNode::deserialize(raw_node, false).map(Node::Leaf) } else { - InternalNode::deserialize(raw_node).map(Node::Internal) + InternalNode::deserialize(raw_node, false).map(Node::Internal) }; node.map_err(|err| { err.with_context(if is_leaf { @@ -187,7 +204,7 @@ impl Database for RocksDBWrapper { let Some(raw_root) = self.raw_node(&NodeKey::empty(version).to_db_key()) else { return Ok(None); }; - Root::deserialize(&raw_root) + Root::deserialize(&raw_root, false) .map(Some) .map_err(|err| err.with_context(ErrorContext::Root(version))) } diff --git a/core/lib/merkle_tree/src/storage/serialization.rs b/core/lib/merkle_tree/src/storage/serialization.rs index f21fece94e0..d0c573fd817 100644 --- a/core/lib/merkle_tree/src/storage/serialization.rs +++ b/core/lib/merkle_tree/src/storage/serialization.rs @@ -5,7 +5,7 @@ use std::{collections::HashMap, str}; use crate::{ errors::{DeserializeError, DeserializeErrorKind, ErrorContext}, types::{ - ChildRef, InternalNode, Key, LeafNode, Manifest, Node, Root, TreeTags, ValueHash, + ChildRef, InternalNode, Key, LeafNode, Manifest, Node, RawNode, Root, TreeTags, ValueHash, HASH_SIZE, KEY_SIZE, }, }; @@ -15,7 +15,7 @@ use crate::{ const LEB128_SIZE_ESTIMATE: usize = 3; impl LeafNode { - pub(super) fn deserialize(bytes: &[u8]) -> Result { + pub(super) fn deserialize(bytes: &[u8], strict: bool) -> Result { if bytes.len() < KEY_SIZE + HASH_SIZE { return Err(DeserializeErrorKind::UnexpectedEof.into()); } @@ -26,6 +26,10 @@ impl LeafNode { let leaf_index = leb128::read::unsigned(&mut bytes).map_err(|err| { DeserializeErrorKind::Leb128(err).with_context(ErrorContext::LeafIndex) })?; + if strict && !bytes.is_empty() { + return Err(DeserializeErrorKind::Leftovers.into()); + } + Ok(Self { full_key, value_hash, @@ -105,7 +109,7 @@ impl ChildRef { } impl InternalNode { - pub(super) fn deserialize(bytes: &[u8]) -> Result { + pub(super) fn deserialize(bytes: &[u8], strict: bool) -> Result { if bytes.len() < 4 { let err = DeserializeErrorKind::UnexpectedEof; return Err(err.with_context(ErrorContext::ChildrenMask)); @@ -134,6 +138,9 @@ impl InternalNode { } bitmap >>= 2; } + if strict && !bytes.is_empty() { + return Err(DeserializeErrorKind::Leftovers.into()); + } Ok(this) } @@ -161,8 +168,36 @@ impl InternalNode { } } +impl RawNode { + pub(crate) fn deserialize(bytes: &[u8]) -> Self { + Self { + raw: bytes.to_vec(), + leaf: LeafNode::deserialize(bytes, true).ok(), + internal: InternalNode::deserialize(bytes, true).ok(), + } + } + + pub(crate) fn deserialize_root(bytes: &[u8]) -> Self { + let root = Root::deserialize(bytes, true).ok(); + let node = root.and_then(|root| match root { + Root::Empty => None, + Root::Filled { node, .. } => Some(node), + }); + let (leaf, internal) = match node { + None => (None, None), + Some(Node::Leaf(leaf)) => (Some(leaf), None), + Some(Node::Internal(node)) => (None, Some(node)), + }; + Self { + raw: bytes.to_vec(), + leaf, + internal, + } + } +} + impl Root { - pub(super) fn deserialize(mut bytes: &[u8]) -> Result { + pub(super) fn deserialize(mut bytes: &[u8], strict: bool) -> Result { let leaf_count = leb128::read::unsigned(&mut bytes).map_err(|err| { DeserializeErrorKind::Leb128(err).with_context(ErrorContext::LeafCount) })?; @@ -172,11 +207,11 @@ impl Root { // Try both the leaf and internal node serialization; in some cases, a single leaf // may still be persisted as an internal node. 
Since serialization of an internal node with a single child // is always shorter than that of a leaf, the order (first leaf, then internal node) is chosen intentionally. - LeafNode::deserialize(bytes) + LeafNode::deserialize(bytes, strict) .map(Node::Leaf) - .or_else(|_| InternalNode::deserialize(bytes).map(Node::Internal))? + .or_else(|_| InternalNode::deserialize(bytes, strict).map(Node::Internal))? } - _ => Node::Internal(InternalNode::deserialize(bytes)?), + _ => Node::Internal(InternalNode::deserialize(bytes, strict)?), }; Ok(Self::new(leaf_count, node)) } @@ -440,7 +475,7 @@ mod tests { assert_eq!(buffer[64], 42); // leaf index assert_eq!(buffer.len(), 65); - let leaf_copy = LeafNode::deserialize(&buffer).unwrap(); + let leaf_copy = LeafNode::deserialize(&buffer, true).unwrap(); assert_eq!(leaf_copy, leaf); } @@ -471,7 +506,7 @@ let child_count = bitmap.count_ones(); assert_eq!(child_count, 2); - let node_copy = InternalNode::deserialize(&buffer).unwrap(); + let node_copy = InternalNode::deserialize(&buffer, true).unwrap(); assert_eq!(node_copy, node); } @@ -482,7 +517,7 @@ root.serialize(&mut buffer); assert_eq!(buffer, [0]); - let root_copy = Root::deserialize(&buffer).unwrap(); + let root_copy = Root::deserialize(&buffer, true).unwrap(); assert_eq!(root_copy, root); } @@ -494,7 +529,7 @@ root.serialize(&mut buffer); assert_eq!(buffer[0], 1); - let root_copy = Root::deserialize(&buffer).unwrap(); + let root_copy = Root::deserialize(&buffer, true).unwrap(); assert_eq!(root_copy, root); } @@ -506,7 +541,7 @@ root.serialize(&mut buffer); assert_eq!(buffer[0], 2); - let root_copy = Root::deserialize(&buffer).unwrap(); + let root_copy = Root::deserialize(&buffer, true).unwrap(); assert_eq!(root_copy, root); } } diff --git a/core/lib/merkle_tree/src/types/internal.rs b/core/lib/merkle_tree/src/types/internal.rs index 399f6c840a3..2db075d9221 100644 --- a/core/lib/merkle_tree/src/types/internal.rs +++ b/core/lib/merkle_tree/src/types/internal.rs @@ -2,7 +2,9 @@ //! some of these types are declared as public and can be even exported using the `unstable` module. //! Still, logically these types are private, so adding them to new public APIs etc. is a logical error. -use std::{collections::HashMap, fmt, num::NonZeroU64}; +use std::{collections::HashMap, fmt, num::NonZeroU64, str::FromStr}; + +use anyhow::Context; use crate::{ hasher::{HashTree, InternalNodeCache}, @@ -276,6 +278,34 @@ impl fmt::Debug for Nibbles { } } +impl FromStr for Nibbles { + type Err = anyhow::Error; + + fn from_str(s: &str) -> Result<Self, Self::Err> { + anyhow::ensure!(s.len() <= KEY_SIZE * 2, "too many nibbles"); + let mut bytes = NibblesBytes::default(); + for (i, byte) in s.bytes().enumerate() { + let nibble = match byte { + b'0'..=b'9' => byte - b'0', + b'A'..=b'F' => byte - b'A' + 10, + b'a'..=b'f' => byte - b'a' + 10, + _ => anyhow::bail!("unexpected nibble: {byte:?}"), + }; + + assert!(nibble < 16); + if i % 2 == 0 { + bytes[i / 2] = nibble * 16; + } else { + bytes[i / 2] += nibble; + } + } + Ok(Self { + nibble_count: s.len(), + bytes, + }) + } +} + /// Versioned key in a radix-16 Merkle tree.
#[derive(Clone, Copy, PartialEq, Eq, Hash)] pub struct NodeKey { @@ -283,12 +313,31 @@ pub struct NodeKey { pub(crate) nibbles: Nibbles, } -impl fmt::Debug for NodeKey { +impl fmt::Display for NodeKey { fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { write!(formatter, "{}:{}", self.version, self.nibbles) } } +impl fmt::Debug for NodeKey { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(self, formatter) + } +} + +impl FromStr for NodeKey { + type Err = anyhow::Error; + + fn from_str(s: &str) -> Result { + let (version, nibbles) = s + .split_once(':') + .context("node key does not contain `:` delimiter")?; + let version = version.parse().context("invalid key version")?; + let nibbles = nibbles.parse().context("invalid nibbles")?; + Ok(Self { version, nibbles }) + } +} + impl NodeKey { pub(crate) const fn empty(version: u64) -> Self { Self { @@ -331,19 +380,13 @@ impl NodeKey { } } -impl fmt::Display for NodeKey { - fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(formatter, "{}:{}", self.version, self.nibbles) - } -} - /// Leaf node of the tree. #[derive(Debug, Clone, Copy)] #[cfg_attr(test, derive(PartialEq, Eq))] pub struct LeafNode { - pub(crate) full_key: Key, - pub(crate) value_hash: ValueHash, - pub(crate) leaf_index: u64, + pub full_key: Key, + pub value_hash: ValueHash, + pub leaf_index: u64, } impl LeafNode { @@ -364,7 +407,7 @@ impl LeafNode { /// Reference to a child in an [`InternalNode`]. #[derive(Debug, Clone, Copy)] #[cfg_attr(test, derive(PartialEq, Eq))] -pub(crate) struct ChildRef { +pub struct ChildRef { pub hash: ValueHash, pub version: u64, pub is_leaf: bool, @@ -449,7 +492,7 @@ impl InternalNode { self.cache.get_or_insert(cache) } - pub(crate) fn children(&self) -> impl Iterator + '_ { + pub fn children(&self) -> impl Iterator + '_ { self.children.iter() } @@ -510,6 +553,17 @@ impl From for Node { } } +/// Raw node fetched from a database. +#[derive(Debug)] +pub struct RawNode { + /// Bytes for a serialized node. + pub raw: Vec, + /// Leaf if a node can be deserialized into it. + pub leaf: Option, + /// Internal node if a node can be deserialized into it. + pub internal: Option, +} + /// Root node of the tree. Besides a [`Node`], contains the general information about the tree /// (e.g., the number of leaves). 
#[derive(Debug, Clone)] @@ -614,15 +668,23 @@ mod tests { fn nibbles_and_node_key_display() { let nibbles = Nibbles::new(&TEST_KEY, 5); assert_eq!(nibbles.to_string(), "deadb"); + let restored: Nibbles = nibbles.to_string().parse().unwrap(); + assert_eq!(restored, nibbles); let nibbles = Nibbles::new(&TEST_KEY, 6); assert_eq!(nibbles.to_string(), "deadbe"); + let restored: Nibbles = nibbles.to_string().parse().unwrap(); + assert_eq!(restored, nibbles); let nibbles = Nibbles::new(&TEST_KEY, 9); assert_eq!(nibbles.to_string(), "deadbeef0"); + let restored: Nibbles = nibbles.to_string().parse().unwrap(); + assert_eq!(restored, nibbles); let node_key = nibbles.with_version(3); assert_eq!(node_key.to_string(), "3:deadbeef0"); + let restored: NodeKey = node_key.to_string().parse().unwrap(); + assert_eq!(restored, node_key); } #[test] diff --git a/core/lib/merkle_tree/src/types/mod.rs b/core/lib/merkle_tree/src/types/mod.rs index 807ae023876..63db4b318b2 100644 --- a/core/lib/merkle_tree/src/types/mod.rs +++ b/core/lib/merkle_tree/src/types/mod.rs @@ -6,7 +6,7 @@ pub(crate) use self::internal::{ ChildRef, Nibbles, NibblesBytes, StaleNodeKey, TreeTags, HASH_SIZE, KEY_SIZE, TREE_DEPTH, }; pub use self::internal::{ - InternalNode, LeafNode, Manifest, Node, NodeKey, ProfiledTreeOperation, Root, + InternalNode, LeafNode, Manifest, Node, NodeKey, ProfiledTreeOperation, RawNode, Root, }; mod internal; diff --git a/core/lib/merkle_tree/tests/integration/domain.rs b/core/lib/merkle_tree/tests/integration/domain.rs index abd3dbbcd3f..fa7ec4cfde3 100644 --- a/core/lib/merkle_tree/tests/integration/domain.rs +++ b/core/lib/merkle_tree/tests/integration/domain.rs @@ -68,6 +68,31 @@ fn basic_workflow() { tree.verify_consistency(L1BatchNumber(0)).unwrap(); assert_eq!(tree.root_hash(), expected_root_hash); assert_eq!(tree.next_l1_batch_number(), L1BatchNumber(1)); + + let keys = ["0:", "0:0"].map(|key| key.parse().unwrap()); + let raw_nodes = tree.reader().raw_nodes(&keys); + assert_eq!(raw_nodes.len(), 2); + let raw_root = raw_nodes[0].as_ref().unwrap(); + assert!(!raw_root.raw.is_empty()); + assert!(raw_root.internal.is_some()); + assert!(raw_root.leaf.is_none()); + + let raw_node = raw_nodes[1].as_ref().unwrap(); + assert!(!raw_node.raw.is_empty()); + assert!(raw_node.leaf.is_none()); + let raw_node = raw_node.internal.as_ref().unwrap(); + + let (nibble, _) = raw_node + .children() + .find(|(_, child_ref)| child_ref.is_leaf) + .unwrap(); + let leaf_key = format!("0:0{nibble:x}").parse().unwrap(); + let raw_nodes = tree.reader().raw_nodes(&[leaf_key]); + assert_eq!(raw_nodes.len(), 1); + let raw_leaf = raw_nodes.into_iter().next().unwrap().expect("no leaf"); + assert!(!raw_leaf.raw.is_empty()); + assert!(raw_leaf.leaf.is_some()); + assert!(raw_leaf.internal.is_none()); } #[test] diff --git a/core/lib/multivm/Cargo.toml b/core/lib/multivm/Cargo.toml index 67a00d064ad..eb770bf9b57 100644 --- a/core/lib/multivm/Cargo.toml +++ b/core/lib/multivm/Cargo.toml @@ -28,8 +28,8 @@ zksync_types.workspace = true zksync_contracts.workspace = true zksync_utils.workspace = true zksync_system_constants.workspace = true -zksync_mini_merkle_tree.workspace = true zksync_vm_interface.workspace = true +zksync_mini_merkle_tree.workspace = true anyhow.workspace = true hex.workspace = true @@ -38,10 +38,11 @@ once_cell.workspace = true thiserror.workspace = true tracing.workspace = true vise.workspace = true +ethabi.workspace = true [dev-dependencies] assert_matches.workspace = true -tokio = { workspace = true, features = ["time"] } 
+pretty_assertions.workspace = true +test-casing.workspace = true zksync_test_account.workspace = true -ethabi.workspace = true zksync_eth_signer.workspace = true diff --git a/core/lib/multivm/README.md b/core/lib/multivm/README.md index 5e2af426ae5..34883db5990 100644 --- a/core/lib/multivm/README.md +++ b/core/lib/multivm/README.md @@ -4,3 +4,17 @@ This crate represents a wrapper over several versions of VM that have been used glue code that allows switching the VM version based on the externally provided marker while preserving the public interface. This crate exists to enable the external node to process breaking upgrades and re-execute all the transactions from the genesis block. + +## Developer guidelines + +### Adding tests + +If you want to add unit tests for the VM wrapper, consider the following: + +- Whenever possible, make tests reusable; declare test logic in the [`testonly`](src/versions/testonly/mod.rs) module, + and then instantiate tests using this logic for the supported VM versions. If necessary, extend the tested VM trait so + that test logic can be defined in a generic way. See the `testonly` module docs for more detailed guidelines; a schematic sketch of the pattern follows below. +- If you define a generic test, don't forget to add its instantiations for all supported VMs (`vm_latest`, `vm_fast` and + `shadow`). `shadow` tests allow checking VM divergences for free! +- Do not use an RNG where it can be avoided (e.g., for test contract addresses). +- Avoid using zero / default values in cases where they can be treated specially by the tested code. diff --git a/core/lib/multivm/src/glue/types/vm/vm_block_result.rs b/core/lib/multivm/src/glue/types/vm/vm_block_result.rs index ce928e652d7..50bb19938fe 100644 --- a/core/lib/multivm/src/glue/types/vm/vm_block_result.rs +++ b/core/lib/multivm/src/glue/types/vm/vm_block_result.rs @@ -47,6 +47,7 @@ impl GlueFrom for crate::interface::Fi circuit_statistic: Default::default(), }, refunds: Refunds::default(), + new_known_factory_deps: None, }, final_execution_state: CurrentExecutionState { events: value.full_result.events, @@ -103,6 +104,7 @@ impl GlueFrom for crate::interface::Fi circuit_statistic: Default::default(), }, refunds: Refunds::default(), + new_known_factory_deps: None, }, final_execution_state: CurrentExecutionState { events: value.full_result.events, @@ -158,6 +160,7 @@ impl GlueFrom for crate::interface: circuit_statistic: Default::default(), }, refunds: Refunds::default(), + new_known_factory_deps: None, }, final_execution_state: CurrentExecutionState { events: value.full_result.events, @@ -227,6 +230,7 @@ impl GlueFrom circuit_statistic: Default::default(), }, refunds: Refunds::default(), + new_known_factory_deps: None, } } } @@ -259,6 +263,7 @@ impl GlueFrom circuit_statistic: Default::default(), }, refunds: Refunds::default(), + new_known_factory_deps: None, } } } @@ -307,6 +312,7 @@ impl GlueFrom circuit_statistic: Default::default(), }, refunds: Refunds::default(), + new_known_factory_deps: None, } } } diff --git a/core/lib/multivm/src/glue/types/vm/vm_partial_execution_result.rs b/core/lib/multivm/src/glue/types/vm/vm_partial_execution_result.rs index 3cb61b461a4..4c4cffcc687 100644 --- a/core/lib/multivm/src/glue/types/vm/vm_partial_execution_result.rs +++ b/core/lib/multivm/src/glue/types/vm/vm_partial_execution_result.rs @@ -22,6 +22,7 @@ impl GlueFrom gas_refunded: 0, operator_suggested_refund: 0, }, + new_known_factory_deps: None, } } } @@ -48,6 +49,7 @@ impl GlueFrom gas_refunded: 0, operator_suggested_refund: 0, }, + new_known_factory_deps: None, } } }
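To make the `testonly` guideline in the README above concrete, here is a self-contained sketch of the pattern (the trait, names, and test body are illustrative stand-ins, not the crate's actual API):

// Test logic is declared once against an abstraction over VM versions...
trait TestedVm {
    fn execute_simple_tx(&mut self) -> u64; // returns gas used (stand-in)
}

fn test_simple_tx_uses_gas<VM: TestedVm + Default>() {
    let mut vm = VM::default();
    // Shared assertions run unchanged for every VM version.
    assert!(vm.execute_simple_tx() > 0);
}

// ...and instantiated once per supported VM (`vm_latest`, `vm_fast`, `shadow`).
#[derive(Default)]
struct LatestVm;

impl TestedVm for LatestVm {
    fn execute_simple_tx(&mut self) -> u64 {
        21_000 // dummy value for the sketch
    }
}

#[test]
fn simple_tx_uses_gas() {
    test_simple_tx_uses_gas::<LatestVm>();
}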
@@ -74,6 +76,7 @@ impl GlueFrom gas_refunded: 0, operator_suggested_refund: 0, }, + new_known_factory_deps: None, } } } diff --git a/core/lib/multivm/src/glue/types/vm/vm_tx_execution_result.rs b/core/lib/multivm/src/glue/types/vm/vm_tx_execution_result.rs index 2dc680ba77d..8978d4348ed 100644 --- a/core/lib/multivm/src/glue/types/vm/vm_tx_execution_result.rs +++ b/core/lib/multivm/src/glue/types/vm/vm_tx_execution_result.rs @@ -66,12 +66,14 @@ impl GlueFrom VmExecutionResultAndLogs { result: ExecutionResult::Halt { reason: halt }, logs: Default::default(), statistics: Default::default(), refunds: Default::default(), + new_known_factory_deps: None, }, } } @@ -100,12 +102,14 @@ impl logs: Default::default(), statistics: Default::default(), refunds: Default::default(), + new_known_factory_deps: None, }, TxRevertReason::Halt(halt) => VmExecutionResultAndLogs { result: ExecutionResult::Halt { reason: halt }, logs: Default::default(), statistics: Default::default(), refunds: Default::default(), + new_known_factory_deps: None, }, } } @@ -129,6 +133,7 @@ impl GlueFrom { unreachable!("Halt is the only revert reason for VM 5") diff --git a/core/lib/multivm/src/lib.rs b/core/lib/multivm/src/lib.rs index e171a78e179..1cba2c0fb92 100644 --- a/core/lib/multivm/src/lib.rs +++ b/core/lib/multivm/src/lib.rs @@ -16,10 +16,11 @@ pub use crate::{ vm_1_3_2, vm_1_4_1, vm_1_4_2, vm_boojum_integration, vm_fast, vm_latest, vm_m5, vm_m6, vm_refunds_enhancement, vm_virtual_blocks, }, - vm_instance::{FastVmInstance, LegacyVmInstance}, + vm_instance::{is_supported_by_fast_vm, FastVmInstance, LegacyVmInstance}, }; mod glue; +pub mod pubdata_builders; pub mod tracers; pub mod utils; mod versions; diff --git a/core/lib/multivm/src/pubdata_builders/mod.rs b/core/lib/multivm/src/pubdata_builders/mod.rs new file mode 100644 index 00000000000..c52c4c70c86 --- /dev/null +++ b/core/lib/multivm/src/pubdata_builders/mod.rs @@ -0,0 +1,24 @@ +use std::rc::Rc; + +pub use rollup::RollupPubdataBuilder; +pub use validium::ValidiumPubdataBuilder; +use zksync_types::commitment::{L1BatchCommitmentMode, PubdataParams}; + +use crate::interface::pubdata::PubdataBuilder; + +mod rollup; +#[cfg(test)] +mod tests; +mod utils; +mod validium; + +pub fn pubdata_params_to_builder(params: PubdataParams) -> Rc { + match params.pubdata_type { + L1BatchCommitmentMode::Rollup => { + Rc::new(RollupPubdataBuilder::new(params.l2_da_validator_address)) + } + L1BatchCommitmentMode::Validium => { + Rc::new(ValidiumPubdataBuilder::new(params.l2_da_validator_address)) + } + } +} diff --git a/core/lib/multivm/src/pubdata_builders/rollup.rs b/core/lib/multivm/src/pubdata_builders/rollup.rs new file mode 100644 index 00000000000..4a818dfe231 --- /dev/null +++ b/core/lib/multivm/src/pubdata_builders/rollup.rs @@ -0,0 +1,128 @@ +use zksync_types::{ + ethabi, + ethabi::{ParamType, Token}, + l2_to_l1_log::l2_to_l1_logs_tree_size, + writes::compress_state_diffs, + Address, ProtocolVersionId, +}; + +use super::utils::{ + build_chained_bytecode_hash, build_chained_log_hash, build_chained_message_hash, + build_logs_root, encode_user_logs, +}; +use crate::interface::pubdata::{PubdataBuilder, PubdataInput}; + +#[derive(Debug, Clone, Copy)] +pub struct RollupPubdataBuilder { + pub l2_da_validator: Address, +} + +impl RollupPubdataBuilder { + pub fn new(l2_da_validator: Address) -> Self { + Self { l2_da_validator } + } +} + +impl PubdataBuilder for RollupPubdataBuilder { + fn l2_da_validator(&self) -> Address { + self.l2_da_validator + } + + fn l1_messenger_operator_input( + 
&self, + input: &PubdataInput, + protocol_version: ProtocolVersionId, + ) -> Vec { + if protocol_version.is_pre_gateway() { + let mut operator_input = vec![]; + extend_from_pubdata_input(&mut operator_input, input); + + // Extend with uncompressed state diffs. + operator_input.extend((input.state_diffs.len() as u32).to_be_bytes()); + for state_diff in &input.state_diffs { + operator_input.extend(state_diff.encode_padded()); + } + + operator_input + } else { + let mut pubdata = vec![]; + extend_from_pubdata_input(&mut pubdata, input); + + // Extend with uncompressed state diffs. + pubdata.extend((input.state_diffs.len() as u32).to_be_bytes()); + for state_diff in &input.state_diffs { + pubdata.extend(state_diff.encode_padded()); + } + + let chained_log_hash = build_chained_log_hash(&input.user_logs); + let log_root_hash = + build_logs_root(&input.user_logs, l2_to_l1_logs_tree_size(protocol_version)); + let chained_msg_hash = build_chained_message_hash(&input.l2_to_l1_messages); + let chained_bytecodes_hash = build_chained_bytecode_hash(&input.published_bytecodes); + + let l2_da_header = vec![ + Token::FixedBytes(chained_log_hash), + Token::FixedBytes(log_root_hash), + Token::FixedBytes(chained_msg_hash), + Token::FixedBytes(chained_bytecodes_hash), + Token::Bytes(pubdata), + ]; + + // Selector of `IL2DAValidator::validatePubdata`. + let func_selector = ethabi::short_signature( + "validatePubdata", + &[ + ParamType::FixedBytes(32), + ParamType::FixedBytes(32), + ParamType::FixedBytes(32), + ParamType::FixedBytes(32), + ParamType::Bytes, + ], + ); + + [func_selector.to_vec(), ethabi::encode(&l2_da_header)].concat() + } + } + + fn settlement_layer_pubdata( + &self, + input: &PubdataInput, + _protocol_version: ProtocolVersionId, + ) -> Vec { + let mut pubdata = vec![]; + extend_from_pubdata_input(&mut pubdata, input); + + pubdata + } +} + +fn extend_from_pubdata_input(buffer: &mut Vec, pubdata_input: &PubdataInput) { + let PubdataInput { + user_logs, + l2_to_l1_messages, + published_bytecodes, + state_diffs, + } = pubdata_input; + + // Adding user L2->L1 logs. + buffer.extend(encode_user_logs(user_logs)); + + // Encoding L2->L1 messages + // Format: `[(numberOfMessages as u32) || (messages[1].len() as u32) || messages[1] || ... || (messages[n].len() as u32) || messages[n]]` + buffer.extend((l2_to_l1_messages.len() as u32).to_be_bytes()); + for message in l2_to_l1_messages { + buffer.extend((message.len() as u32).to_be_bytes()); + buffer.extend(message); + } + // Encoding bytecodes + // Format: `[(numberOfBytecodes as u32) || (bytecodes[1].len() as u32) || bytecodes[1] || ... 
|| (bytecodes[n].len() as u32) || bytecodes[n]]` + buffer.extend((published_bytecodes.len() as u32).to_be_bytes()); + for bytecode in published_bytecodes { + buffer.extend((bytecode.len() as u32).to_be_bytes()); + buffer.extend(bytecode); + } + // Encoding state diffs + // Format: `[size of compressed state diffs u32 || compressed state diffs || (# state diffs: initial + repeated) as u32 || sorted state diffs by <address, key>]` + let state_diffs_compressed = compress_state_diffs(state_diffs.clone()); + buffer.extend(state_diffs_compressed); +} diff --git a/core/lib/multivm/src/pubdata_builders/tests.rs b/core/lib/multivm/src/pubdata_builders/tests.rs new file mode 100644 index 00000000000..bc24b8e4734 --- /dev/null +++ b/core/lib/multivm/src/pubdata_builders/tests.rs @@ -0,0 +1,123 @@ +use zksync_types::{ + writes::StateDiffRecord, Address, ProtocolVersionId, ACCOUNT_CODE_STORAGE_ADDRESS, + BOOTLOADER_ADDRESS, +}; +use zksync_utils::u256_to_h256; + +use super::{rollup::RollupPubdataBuilder, validium::ValidiumPubdataBuilder}; +use crate::interface::pubdata::{L1MessengerL2ToL1Log, PubdataBuilder, PubdataInput}; + +fn mock_input() -> PubdataInput { + // Just using some constant addresses for tests + let addr1 = BOOTLOADER_ADDRESS; + let addr2 = ACCOUNT_CODE_STORAGE_ADDRESS; + + let user_logs = vec![L1MessengerL2ToL1Log { + l2_shard_id: 0, + is_service: false, + tx_number_in_block: 0, + sender: addr1, + key: 1.into(), + value: 128.into(), + }]; + + let l2_to_l1_messages = vec![hex::decode("deadbeef").unwrap()]; + + let published_bytecodes = vec![hex::decode("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb").unwrap()]; + + // To cover more cases, we have two state diffs: + // one with an enumeration index present (and so a repeated write) and one without it.
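+ // (A nonzero `enumeration_index` means the slot already has a leaf index in the tree, i.e. a repeated write; `enumeration_index == 0` marks an initial write, which is published with its full derived key.)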
+ let state_diffs = vec![ + StateDiffRecord { + address: addr2, + key: 155.into(), + derived_key: u256_to_h256(125.into()).0, + enumeration_index: 12, + initial_value: 11.into(), + final_value: 12.into(), + }, + StateDiffRecord { + address: addr2, + key: 156.into(), + derived_key: u256_to_h256(126.into()).0, + enumeration_index: 0, + initial_value: 0.into(), + final_value: 14.into(), + }, + ]; + + PubdataInput { + user_logs, + l2_to_l1_messages, + published_bytecodes, + state_diffs, + } +} + +#[test] +fn test_rollup_pubdata_building() { + let input = mock_input(); + + let rollup_pubdata_builder = RollupPubdataBuilder::new(Address::zero()); + + let actual = + rollup_pubdata_builder.l1_messenger_operator_input(&input, ProtocolVersionId::Version24); + let expected = "00000001000000000000000000000000000000000000000000008001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000100000004deadbeef0000000100000060bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb0100002a040001000000000000000000000000000000000000000000000000000000000000007e090e0000000c0901000000020000000000000000000000000000000000008002000000000000000000000000000000000000000000000000000000000000009b000000000000000000000000000000000000000000000000000000000000007d000000000000000c000000000000000000000000000000000000000000000000000000000000000b000000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008002000000000000000000000000000000000000000000000000000000000000009c000000000000000000000000000000000000000000000000000000000000007e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"; + assert_eq!( + &hex::encode(actual), + expected, + "mismatch for `l1_messenger_operator_input` (pre gateway)" + ); + + let actual = + rollup_pubdata_builder.settlement_layer_pubdata(&input, ProtocolVersionId::Version24); + let expected = "00000001000000000000000000000000000000000000000000008001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000100000004deadbeef0000000100000060bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb0100002a040001000000000000000000000000000000000000000000000000000000000000007e090e0000000c0901"; + assert_eq!( + &hex::encode(actual), + expected, + "mismatch for `settlement_layer_pubdata` (pre gateway)" + ); + + let actual = + rollup_pubdata_builder.l1_messenger_operator_input(&input, ProtocolVersionId::Version27); + let expected = 
"89f9a07233e608561d90f7c4e7bcea24d718e425a6bd6c8eefb48a334366143694c75fae278944d856d68e33bbd32937cb3a1ea35cbf7d6eeeb1150f500dd0d64d0efe420d6dafe5897eab2fc27b2e47af303397ed285ace146d836d042717b0a3dc4b28a603a33b28ce1d5c52c593a46a15a99f1afa1c1d92715284288958fd54a93de700000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000032300000001000000000000000000000000000000000000000000008001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000100000004deadbeef0000000100000060bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb0100002a040001000000000000000000000000000000000000000000000000000000000000007e090e0000000c0901000000020000000000000000000000000000000000008002000000000000000000000000000000000000000000000000000000000000009b000000000000000000000000000000000000000000000000000000000000007d000000000000000c000000000000000000000000000000000000000000000000000000000000000b000000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008002000000000000000000000000000000000000000000000000000000000000009c000000000000000000000000000000000000000000000000000000000000007e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"; + assert_eq!( + &hex::encode(actual), + expected, + "mismatch for `l1_messenger_operator_input` (post gateway)" + ); + + let actual = + rollup_pubdata_builder.settlement_layer_pubdata(&input, ProtocolVersionId::Version27); + let expected = "00000001000000000000000000000000000000000000000000008001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000100000004deadbeef0000000100000060bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaabbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb0100002a040001000000000000000000000000000000000000000000000000000000000000007e090e0000000c0901"; + assert_eq!( + &hex::encode(actual), + expected, + "mismatch for `settlement_layer_pubdata` (post gateway)" + ); +} + +#[test] +fn test_validium_pubdata_building() { + let input = mock_input(); + + let validium_pubdata_builder = ValidiumPubdataBuilder::new(Address::zero()); + + let actual = + validium_pubdata_builder.l1_messenger_operator_input(&input, ProtocolVersionId::Version27); + let expected = 
"89f9a07233e608561d90f7c4e7bcea24d718e425a6bd6c8eefb48a334366143694c75fae278944d856d68e33bbd32937cb3a1ea35cbf7d6eeeb1150f500dd0d64d0efe420d6dafe5897eab2fc27b2e47af303397ed285ace146d836d042717b0a3dc4b28a603a33b28ce1d5c52c593a46a15a99f1afa1c1d92715284288958fd54a93de700000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000005c000000010000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000008000000000"; + assert_eq!( + &hex::encode(actual), + expected, + "mismatch for `l1_messenger_operator_input`" + ); + + let actual = + validium_pubdata_builder.settlement_layer_pubdata(&input, ProtocolVersionId::Version27); + let expected = "fa96e2436e6fb4d668f5a06681a7c53fcb199b2747ee624ee52a13e85aac5f1e"; + assert_eq!( + &hex::encode(actual), + expected, + "mismatch for `settlement_layer_pubdata`" + ); +} diff --git a/core/lib/multivm/src/pubdata_builders/utils.rs b/core/lib/multivm/src/pubdata_builders/utils.rs new file mode 100644 index 00000000000..57361a674fb --- /dev/null +++ b/core/lib/multivm/src/pubdata_builders/utils.rs @@ -0,0 +1,70 @@ +use zksync_mini_merkle_tree::MiniMerkleTree; +use zksync_types::web3::keccak256; +use zksync_utils::bytecode::hash_bytecode; + +use crate::interface::pubdata::L1MessengerL2ToL1Log; + +pub(crate) fn build_chained_log_hash(user_logs: &[L1MessengerL2ToL1Log]) -> Vec { + let mut chained_log_hash = vec![0u8; 32]; + + for log in user_logs { + let log_bytes = log.packed_encoding(); + let hash = keccak256(&log_bytes); + + chained_log_hash = keccak256(&[chained_log_hash, hash.to_vec()].concat()).to_vec(); + } + + chained_log_hash +} + +pub(crate) fn build_logs_root( + user_logs: &[L1MessengerL2ToL1Log], + l2_to_l1_logs_tree_size: usize, +) -> Vec { + let logs = user_logs.iter().map(|log| { + let encoded = log.packed_encoding(); + let mut slice = [0u8; 88]; + slice.copy_from_slice(&encoded); + slice + }); + MiniMerkleTree::new(logs, Some(l2_to_l1_logs_tree_size)) + .merkle_root() + .as_bytes() + .to_vec() +} + +pub(crate) fn build_chained_message_hash(l2_to_l1_messages: &[Vec]) -> Vec { + let mut chained_msg_hash = vec![0u8; 32]; + + for msg in l2_to_l1_messages { + let hash = keccak256(msg); + + chained_msg_hash = keccak256(&[chained_msg_hash, hash.to_vec()].concat()).to_vec(); + } + + chained_msg_hash +} + +pub(crate) fn build_chained_bytecode_hash(published_bytecodes: &[Vec]) -> Vec { + let mut chained_bytecode_hash = vec![0u8; 32]; + + for bytecode in published_bytecodes { + let hash = hash_bytecode(bytecode).to_fixed_bytes(); + + chained_bytecode_hash = + keccak256(&[chained_bytecode_hash, hash.to_vec()].concat()).to_vec(); + } + + chained_bytecode_hash +} + +pub(crate) fn encode_user_logs(user_logs: &[L1MessengerL2ToL1Log]) -> Vec { + // Encoding user L2->L1 logs. + // Format: `[(numberOfL2ToL1Logs as u32) || l2tol1logs[1] || ... 
|| l2tol1logs[n]]` + let mut result = vec![]; + result.extend((user_logs.len() as u32).to_be_bytes()); + for l2tol1log in user_logs { + result.extend(l2tol1log.packed_encoding()); + } + result +} diff --git a/core/lib/multivm/src/pubdata_builders/validium.rs b/core/lib/multivm/src/pubdata_builders/validium.rs new file mode 100644 index 00000000000..a9156e970aa --- /dev/null +++ b/core/lib/multivm/src/pubdata_builders/validium.rs @@ -0,0 +1,93 @@ +use zksync_types::{ + ethabi, + ethabi::{ParamType, Token}, + l2_to_l1_log::l2_to_l1_logs_tree_size, + web3::keccak256, + Address, ProtocolVersionId, +}; + +use super::utils::{ + build_chained_bytecode_hash, build_chained_log_hash, build_chained_message_hash, + build_logs_root, encode_user_logs, +}; +use crate::interface::pubdata::{PubdataBuilder, PubdataInput}; + +#[derive(Debug, Clone, Copy)] +pub struct ValidiumPubdataBuilder { + pub l2_da_validator: Address, +} + +impl ValidiumPubdataBuilder { + pub fn new(l2_da_validator: Address) -> Self { + Self { l2_da_validator } + } +} + +impl PubdataBuilder for ValidiumPubdataBuilder { + fn l2_da_validator(&self) -> Address { + self.l2_da_validator + } + + fn l1_messenger_operator_input( + &self, + input: &PubdataInput, + protocol_version: ProtocolVersionId, + ) -> Vec { + assert!( + !protocol_version.is_pre_gateway(), + "ValidiumPubdataBuilder must not be called for pre gateway" + ); + + let mut pubdata = vec![]; + pubdata.extend(encode_user_logs(&input.user_logs)); + + let chained_log_hash = build_chained_log_hash(&input.user_logs); + let log_root_hash = + build_logs_root(&input.user_logs, l2_to_l1_logs_tree_size(protocol_version)); + let chained_msg_hash = build_chained_message_hash(&input.l2_to_l1_messages); + let chained_bytecodes_hash = build_chained_bytecode_hash(&input.published_bytecodes); + + let l2_da_header = vec![ + Token::FixedBytes(chained_log_hash), + Token::FixedBytes(log_root_hash), + Token::FixedBytes(chained_msg_hash), + Token::FixedBytes(chained_bytecodes_hash), + Token::Bytes(pubdata), + ]; + + // Selector of `IL2DAValidator::validatePubdata`. 
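+ // (`ethabi::short_signature` yields the 4-byte selector: the first four bytes of keccak256 of the canonical signature `validatePubdata(bytes32,bytes32,bytes32,bytes32,bytes)`.)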
+ let func_selector = ethabi::short_signature( + "validatePubdata", + &[ + ParamType::FixedBytes(32), + ParamType::FixedBytes(32), + ParamType::FixedBytes(32), + ParamType::FixedBytes(32), + ParamType::Bytes, + ], + ); + + [func_selector.to_vec(), ethabi::encode(&l2_da_header)] + .concat() + .to_vec() + } + + fn settlement_layer_pubdata( + &self, + input: &PubdataInput, + protocol_version: ProtocolVersionId, + ) -> Vec { + assert!( + !protocol_version.is_pre_gateway(), + "ValidiumPubdataBuilder must not be called for pre gateway" + ); + + let state_diffs_packed = input + .state_diffs + .iter() + .flat_map(|diff| diff.encode_padded()) + .collect::>(); + + keccak256(&state_diffs_packed).to_vec() + } +} diff --git a/core/lib/multivm/src/tracers/validator/mod.rs b/core/lib/multivm/src/tracers/validator/mod.rs index a1573f24c66..057551a9efe 100644 --- a/core/lib/multivm/src/tracers/validator/mod.rs +++ b/core/lib/multivm/src/tracers/validator/mod.rs @@ -1,4 +1,8 @@ -use std::{collections::HashSet, marker::PhantomData, sync::Arc}; +use std::{ + collections::{BTreeSet, HashSet}, + marker::PhantomData, + sync::Arc, +}; use once_cell::sync::OnceCell; use zksync_system_constants::{ @@ -8,7 +12,7 @@ use zksync_system_constants::{ use zksync_types::{ vm::VmVersion, web3::keccak256, AccountTreeId, Address, StorageKey, H256, U256, }; -use zksync_utils::{be_bytes_to_safe_address, u256_to_account_address, u256_to_h256}; +use zksync_utils::{address_to_u256, be_bytes_to_safe_address, u256_to_h256}; use self::types::{NewTrustedValidationItems, ValidationTracerMode}; use crate::{ @@ -32,7 +36,7 @@ mod vm_virtual_blocks; #[derive(Debug, Clone)] pub struct ValidationTracer { validation_mode: ValidationTracerMode, - auxilary_allowed_slots: HashSet, + auxilary_allowed_slots: BTreeSet, user_address: Address, #[allow(dead_code)] @@ -51,6 +55,8 @@ pub struct ValidationTracer { type ValidationRoundResult = Result; impl ValidationTracer { + const MAX_ALLOWED_SLOT_OFFSET: u32 = 127; + pub fn new( params: ValidationParams, vm_version: VmVersion, @@ -131,9 +137,15 @@ impl ValidationTracer { } // The user is allowed to touch its own slots or slots semantically related to him. 
+ let from = u256_to_h256(key.saturating_sub(Self::MAX_ALLOWED_SLOT_OFFSET.into())); + let to = u256_to_h256(key); let valid_users_slot = address == self.user_address - || u256_to_account_address(&key) == self.user_address - || self.auxilary_allowed_slots.contains(&u256_to_h256(key)); + || key == address_to_u256(&self.user_address) + || self + .auxilary_allowed_slots + .range(from..=to) + .next() + .is_some(); if valid_users_slot { return true; } diff --git a/core/lib/multivm/src/utils/events.rs b/core/lib/multivm/src/utils/events.rs index 9720cb77914..d84651989e7 100644 --- a/core/lib/multivm/src/utils/events.rs +++ b/core/lib/multivm/src/utils/events.rs @@ -1,59 +1,10 @@ use zksync_system_constants::L1_MESSENGER_ADDRESS; use zksync_types::{ ethabi::{self, Token}, - l2_to_l1_log::L2ToL1Log, - Address, H256, U256, + H256, U256, }; -use zksync_utils::{u256_to_bytes_be, u256_to_h256}; -use crate::interface::VmEvent; - -/// Corresponds to the following solidity event: -/// ```solidity -/// struct L2ToL1Log { -/// uint8 l2ShardId; -/// bool isService; -/// uint16 txNumberInBlock; -/// address sender; -/// bytes32 key; -/// bytes32 value; -/// } -/// ``` -#[derive(Debug, Default, Clone, PartialEq)] -pub(crate) struct L1MessengerL2ToL1Log { - pub l2_shard_id: u8, - pub is_service: bool, - pub tx_number_in_block: u16, - pub sender: Address, - pub key: U256, - pub value: U256, -} - -impl L1MessengerL2ToL1Log { - pub fn packed_encoding(&self) -> Vec { - let mut res: Vec = vec![]; - res.push(self.l2_shard_id); - res.push(self.is_service as u8); - res.extend_from_slice(&self.tx_number_in_block.to_be_bytes()); - res.extend_from_slice(self.sender.as_bytes()); - res.extend(u256_to_bytes_be(&self.key)); - res.extend(u256_to_bytes_be(&self.value)); - res - } -} - -impl From for L2ToL1Log { - fn from(log: L1MessengerL2ToL1Log) -> Self { - L2ToL1Log { - shard_id: log.l2_shard_id, - is_service: log.is_service, - tx_number_in_block: log.tx_number_in_block, - sender: log.sender, - key: u256_to_h256(log.key), - value: u256_to_h256(log.value), - } - } -} +use crate::interface::{pubdata::L1MessengerL2ToL1Log, VmEvent}; #[derive(Debug, PartialEq)] pub(crate) struct L1MessengerBytecodePublicationRequest { @@ -142,7 +93,8 @@ mod tests { use zksync_system_constants::{ BOOTLOADER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, L2_BASE_TOKEN_ADDRESS, }; - use zksync_types::L1BatchNumber; + use zksync_types::{Address, L1BatchNumber}; + use zksync_utils::u256_to_h256; use super::*; diff --git a/core/lib/multivm/src/utils/mod.rs b/core/lib/multivm/src/utils/mod.rs index 44ed004adc2..a55adb16c85 100644 --- a/core/lib/multivm/src/utils/mod.rs +++ b/core/lib/multivm/src/utils/mod.rs @@ -248,7 +248,7 @@ pub fn get_bootloader_encoding_space(version: VmVersion) -> u32 { ) } VmVersion::VmGateway => crate::vm_latest::constants::get_bootloader_tx_encoding_space( - crate::vm_latest::MultiVMSubversion::IncreasedBootloaderMemory, + crate::vm_latest::MultiVMSubversion::Gateway, ), } } @@ -397,11 +397,14 @@ pub fn get_used_bootloader_memory_bytes(version: VmVersion) -> usize { crate::vm_latest::MultiVMSubversion::SmallBootloaderMemory, ) } - VmVersion::Vm1_5_0IncreasedBootloaderMemory | VmVersion::VmGateway => { + VmVersion::Vm1_5_0IncreasedBootloaderMemory => { crate::vm_latest::constants::get_used_bootloader_memory_bytes( crate::vm_latest::MultiVMSubversion::IncreasedBootloaderMemory, ) } + VmVersion::VmGateway => crate::vm_latest::constants::get_used_bootloader_memory_bytes( + crate::vm_latest::MultiVMSubversion::Gateway, + ), } } @@ -430,11 
             crate::vm_latest::MultiVMSubversion::SmallBootloaderMemory,
         )
         }
-        VmVersion::Vm1_5_0IncreasedBootloaderMemory | VmVersion::VmGateway => {
+        VmVersion::Vm1_5_0IncreasedBootloaderMemory => {
             crate::vm_latest::constants::get_used_bootloader_memory_bytes(
                 crate::vm_latest::MultiVMSubversion::IncreasedBootloaderMemory,
             )
         }
+        VmVersion::VmGateway => crate::vm_latest::constants::get_used_bootloader_memory_bytes(
+            crate::vm_latest::MultiVMSubversion::Gateway,
+        ),
     }
 }
diff --git a/core/lib/multivm/src/versions/README.md b/core/lib/multivm/src/versions/README.md
deleted file mode 100644
index 01c57509197..00000000000
--- a/core/lib/multivm/src/versions/README.md
+++ /dev/null
@@ -1,17 +0,0 @@
-# MultiVM dependencies
-
-This folder contains the old versions of the VM we have used in the past. The `multivm` crate uses them to dynamically
-switch the version we use to be able to sync from the genesis. This is a temporary measure until a "native" solution is
-implemented (i.e., the `vm` crate would itself know the changes between versions, and thus we will have only the
-functional diff between versions, not several fully-fledged VMs).
-
-## Versions
-
-| Name                   | Protocol versions | Description                                                            |
-| ---------------------- | ----------------- | ---------------------------------------------------------------------- |
-| vm_m5                  | 0 - 3             | Release for the testnet launch                                         |
-| vm_m6                  | 4 - 6             | Release for the mainnet launch                                         |
-| vm_1_3_2               | 7 - 12            | Release 1.3.2 of the crypto circuits                                   |
-| vm_virtual_blocks      | 13 - 15           | Adding virtual blocks to help with block number / timestamp migration  |
-| vm_refunds_enhancement | 16 - 17           | Fixing issue related to refunds in VM                                  |
-| vm_boojum_integration  | 18 -              | New Proving system (boojum), vm version 1.4.0                          |
diff --git a/core/lib/multivm/src/versions/mod.rs b/core/lib/multivm/src/versions/mod.rs
index bcb246cece4..b6523b3d474 100644
--- a/core/lib/multivm/src/versions/mod.rs
+++ b/core/lib/multivm/src/versions/mod.rs
@@ -1,8 +1,8 @@
+#[cfg(test)]
+mod shadow;
 mod shared;
 #[cfg(test)]
 mod testonly;
-#[cfg(test)]
-mod tests;
 pub mod vm_1_3_2;
 pub mod vm_1_4_1;
 pub mod vm_1_4_2;
diff --git a/core/lib/multivm/src/versions/tests.rs b/core/lib/multivm/src/versions/shadow/mod.rs
similarity index 95%
rename from core/lib/multivm/src/versions/tests.rs
rename to core/lib/multivm/src/versions/shadow/mod.rs
index ea009b450c8..350caafabe1 100644
--- a/core/lib/multivm/src/versions/tests.rs
+++ b/core/lib/multivm/src/versions/shadow/mod.rs
@@ -22,14 +22,16 @@ use crate::{
     },
     utils::get_max_gas_per_pubdata_byte,
     versions::testonly::{
-        default_l1_batch, default_system_env, make_account_rich, ContractToDeploy,
+        default_l1_batch, default_system_env, make_address_rich, ContractToDeploy,
     },
-    vm_fast,
-    vm_latest::{self, HistoryEnabled},
+    vm_latest,
+    vm_latest::HistoryEnabled,
 };

+mod tests;
+
 type ReferenceVm<S = InMemoryStorage> = vm_latest::Vm<StorageView<S>, HistoryEnabled>;

-// type ShadowedFastVm<S = InMemoryStorage> = crate::vm_instance::ShadowedFastVm<S>;
+type ShadowedFastVm<S = InMemoryStorage> = crate::vm_instance::ShadowedFastVm<S>;

 fn hash_block(block_env: L2BlockEnv, tx_hashes: &[H256]) -> H256 {
     let mut hasher = L2BlockHasher::new(
@@ -70,8 +72,8 @@ impl Harness {
     fn new(l1_batch_env: &L1BatchEnv) -> Self {
         Self {
-            alice: Account::random(),
-            bob: Account::random(),
+            alice: Account::from_seed(0),
+            bob: Account::from_seed(1),
             storage_contract: ContractToDeploy::new(
                 read_bytecode(Self::STORAGE_CONTRACT_PATH),
                 Self::STORAGE_CONTRACT_ADDRESS,
             ),
@@ -82,8 +84,8 @@ impl Harness {
     }
     fn setup_storage(&self, storage: &mut InMemoryStorage) {
-        make_account_rich(storage, &self.alice);
-        make_account_rich(storage, &self.bob);
+        make_address_rich(storage, self.alice.address);
+        make_address_rich(storage, self.bob.address);

         self.storage_contract.insert(storage);
         let storage_contract_key = StorageKey::new(
@@ -196,7 +198,6 @@ impl Harness {
         assert!(!exec_result.result.is_failed(), "{:#?}", exec_result);

         self.new_block(vm, &[deploy_tx.tx.hash(), load_test_tx.hash()]);
-        vm.finish_batch();
     }
 }

@@ -221,10 +222,10 @@ fn sanity_check_harness() {
     sanity_check_vm::<ReferenceVm>();
 }

-#[test]
-fn sanity_check_harness_on_new_vm() {
-    sanity_check_vm::<vm_fast::Vm<_>>();
-}
+// #[test]
+// fn sanity_check_harness_on_new_vm() {
+//     sanity_check_vm::<vm_fast::Vm<_>>();
+// }

 #[test]
 fn sanity_check_shadow_vm() {
diff --git a/core/lib/multivm/src/versions/shadow/tests.rs b/core/lib/multivm/src/versions/shadow/tests.rs
new file mode 100644
index 00000000000..78fd29809a9
--- /dev/null
+++ b/core/lib/multivm/src/versions/shadow/tests.rs
@@ -0,0 +1,427 @@
+//! Unit tests from the `testonly` test suite.
+
+use std::{collections::HashSet, rc::Rc};
+
+use zksync_types::{writes::StateDiffRecord, StorageKey, Transaction, H256, U256};
+use zksync_vm_interface::pubdata::PubdataBuilder;
+
+use super::ShadowedFastVm;
+use crate::{
+    interface::{
+        utils::{ShadowMut, ShadowRef},
+        CurrentExecutionState, L2BlockEnv, VmExecutionResultAndLogs,
+    },
+    versions::testonly::TestedVm,
+};
+
+impl TestedVm for ShadowedFastVm {
+    type StateDump = ();
+
+    fn dump_state(&self) -> Self::StateDump {
+        // Do nothing
+    }
+
+    fn gas_remaining(&mut self) -> u32 {
+        self.get_mut("gas_remaining", |r| match r {
+            ShadowMut::Main(vm) => vm.gas_remaining(),
+            ShadowMut::Shadow(vm) => vm.gas_remaining(),
+        })
+    }
+
+    fn get_current_execution_state(&self) -> CurrentExecutionState {
+        self.get_custom("current_execution_state", |r| match r {
+            ShadowRef::Main(vm) => vm.get_current_execution_state(),
+            ShadowRef::Shadow(vm) => vm.get_current_execution_state(),
+        })
+    }
+
+    fn decommitted_hashes(&self) -> HashSet<U256> {
+        self.get("decommitted_hashes", |r| match r {
+            ShadowRef::Main(vm) => vm.decommitted_hashes(),
+            ShadowRef::Shadow(vm) => TestedVm::decommitted_hashes(vm),
+        })
+    }
+
+    fn finish_batch_with_state_diffs(
+        &mut self,
+        diffs: Vec<StateDiffRecord>,
+        pubdata_builder: Rc<dyn PubdataBuilder>,
+    ) -> VmExecutionResultAndLogs {
+        self.get_custom_mut("finish_batch_with_state_diffs", |r| match r {
+            ShadowMut::Main(vm) => {
+                vm.finish_batch_with_state_diffs(diffs.clone(), pubdata_builder.clone())
+            }
+            ShadowMut::Shadow(vm) => {
+                vm.finish_batch_with_state_diffs(diffs.clone(), pubdata_builder.clone())
+            }
+        })
+    }
+
+    fn finish_batch_without_pubdata(&mut self) -> VmExecutionResultAndLogs {
+        self.get_custom_mut("finish_batch_without_pubdata", |r| match r {
+            ShadowMut::Main(vm) => vm.finish_batch_without_pubdata(),
+            ShadowMut::Shadow(vm) => vm.finish_batch_without_pubdata(),
+        })
+    }
+
+    fn insert_bytecodes(&mut self, bytecodes: &[&[u8]]) {
+        self.get_mut("insert_bytecodes", |r| match r {
+            ShadowMut::Main(vm) => vm.insert_bytecodes(bytecodes),
+            ShadowMut::Shadow(vm) => TestedVm::insert_bytecodes(vm, bytecodes),
+        });
+    }
+
+    fn known_bytecode_hashes(&self) -> HashSet<U256> {
+        self.get("known_bytecode_hashes", |r| match r {
+            ShadowRef::Main(vm) => vm.known_bytecode_hashes(),
+            ShadowRef::Shadow(vm) => vm.known_bytecode_hashes(),
+        })
+    }
+
+    fn manually_decommit(&mut self, code_hash: H256) -> bool {
+        self.get_mut("manually_decommit", |r| match r {
+            ShadowMut::Main(vm) => vm.manually_decommit(code_hash),
+            ShadowMut::Shadow(vm) => vm.manually_decommit(code_hash),
+        })
+    }
+
+    fn verify_required_bootloader_heap(&self, cells: &[(u32, U256)]) {
+        self.get("verify_required_bootloader_heap", |r| match r {
+            ShadowRef::Main(vm) => vm.verify_required_bootloader_heap(cells),
+            ShadowRef::Shadow(vm) => vm.verify_required_bootloader_heap(cells),
+        });
+    }
+
+    fn write_to_bootloader_heap(&mut self, cells: &[(usize, U256)]) {
+        self.get_mut("write_to_bootloader_heap", |r| match r {
+            ShadowMut::Main(vm) => vm.write_to_bootloader_heap(cells),
+            ShadowMut::Shadow(vm) => TestedVm::write_to_bootloader_heap(vm, cells),
+        });
+    }
+
+    fn read_storage(&mut self, key: StorageKey) -> U256 {
+        self.get_mut("read_storage", |r| match r {
+            ShadowMut::Main(vm) => vm.read_storage(key),
+            ShadowMut::Shadow(vm) => vm.read_storage(key),
+        })
+    }
+
+    fn last_l2_block_hash(&self) -> H256 {
+        self.get("last_l2_block_hash", |r| match r {
+            ShadowRef::Main(vm) => vm.last_l2_block_hash(),
+            ShadowRef::Shadow(vm) => vm.last_l2_block_hash(),
+        })
+    }
+
+    fn push_l2_block_unchecked(&mut self, block: L2BlockEnv) {
+        self.get_mut("push_l2_block_unchecked", |r| match r {
+            ShadowMut::Main(vm) => vm.push_l2_block_unchecked(block),
+            ShadowMut::Shadow(vm) => vm.push_l2_block_unchecked(block),
+        });
+    }
+
+    fn push_transaction_with_refund(&mut self, tx: Transaction, refund: u64) {
+        self.get_mut("push_transaction_with_refund", |r| match r {
+            ShadowMut::Main(vm) => vm.push_transaction_with_refund(tx.clone(), refund),
+            ShadowMut::Shadow(vm) => vm.push_transaction_with_refund(tx.clone(), refund),
+        });
+    }
+}
+
+// mod block_tip {
+//     use crate::versions::testonly::block_tip::*;
+//
+//     #[test]
+//     fn dry_run_upper_bound() {
+//         test_dry_run_upper_bound::<ShadowedFastVm>();
+//     }
+// }
+
+// mod bootloader {
+//     use crate::versions::testonly::bootloader::*;
+//
+//     #[test]
+//     fn dummy_bootloader() {
+//         test_dummy_bootloader::<ShadowedFastVm>();
+//     }
+//
+//     #[test]
+//     fn bootloader_out_of_gas() {
+//         test_bootloader_out_of_gas::<ShadowedFastVm>();
+//     }
+// }
+
+// mod bytecode_publishing {
+//     use crate::versions::testonly::bytecode_publishing::*;
+//
+//     #[test]
+//     fn bytecode_publishing() {
+//         test_bytecode_publishing::<ShadowedFastVm>();
+//     }
+// }
+
+// mod circuits {
+//     use crate::versions::testonly::circuits::*;
+//
+//     #[test]
+//     fn circuits() {
+//         test_circuits::<ShadowedFastVm>();
+//     }
+// }
+
+// mod code_oracle {
+//     use crate::versions::testonly::code_oracle::*;
+//
+//     #[test]
+//     fn code_oracle() {
+//         test_code_oracle::<ShadowedFastVm>();
+//     }
+//
+//     #[test]
+//     fn code_oracle_big_bytecode() {
+//         test_code_oracle_big_bytecode::<ShadowedFastVm>();
+//     }
+//
+//     #[test]
+//     fn refunds_in_code_oracle() {
+//         test_refunds_in_code_oracle::<ShadowedFastVm>();
+//     }
+// }
+
+// mod default_aa {
+//     use crate::versions::testonly::default_aa::*;
+//
+//     #[test]
+//     fn default_aa_interaction() {
+//         test_default_aa_interaction::<ShadowedFastVm>();
+//     }
+// }
+
+// mod gas_limit {
+//     use crate::versions::testonly::gas_limit::*;
+//
+//     #[test]
+//     fn tx_gas_limit_offset() {
+//         test_tx_gas_limit_offset::<ShadowedFastVm>();
+//     }
+// }
+
+// mod get_used_contracts {
+//     use crate::versions::testonly::get_used_contracts::*;
+//
+//     #[test]
+//     fn get_used_contracts() {
+//         test_get_used_contracts::<ShadowedFastVm>();
+//     }
+//
+//     #[test]
+//     fn get_used_contracts_with_far_call() {
+//         test_get_used_contracts_with_far_call::<ShadowedFastVm>();
+//     }
+//
+//     #[test]
+//     fn get_used_contracts_with_out_of_gas_far_call() {
+//         test_get_used_contracts_with_out_of_gas_far_call::<ShadowedFastVm>();
+//     }
+// }
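+
+// The `get`/`get_mut` calls in the `TestedVm` impl above illustrate the shadow-execution
+// pattern: every operation runs on both the main (reference) VM and the shadow (fast) VM,
+// and the labelled results are compared so any divergence fails loudly. A minimal
+// standalone sketch of the comparison step (names here are illustrative, not the actual
+// interface API):
+//
+// fn shadow_eq<T: PartialEq + std::fmt::Debug>(label: &str, main: T, shadow: T) -> T {
+//     assert_eq!(main, shadow, "shadow VM diverged from the main VM at `{label}`");
+//     main
+// }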
+// mod is_write_initial {
+//     use crate::versions::testonly::is_write_initial::*;
+//
+//     #[test]
+//     fn is_write_initial_behaviour() {
+//         test_is_write_initial_behaviour::<ShadowedFastVm>();
+//     }
+// }
+
+// mod l1_tx_execution {
+//     use crate::versions::testonly::l1_tx_execution::*;
+//
+//     #[test]
+//     fn l1_tx_execution() {
+//         test_l1_tx_execution::<ShadowedFastVm>();
+//     }
+//
+//     #[test]
+//     fn l1_tx_execution_high_gas_limit() {
+//         test_l1_tx_execution_high_gas_limit::<ShadowedFastVm>();
+//     }
+// }
+
+// mod l2_blocks {
+//     use crate::versions::testonly::l2_blocks::*;
+//
+//     #[test]
+//     fn l2_block_initialization_timestamp() {
+//         test_l2_block_initialization_timestamp::<ShadowedFastVm>();
+//     }
+//
+//     #[test]
+//     fn l2_block_initialization_number_non_zero() {
+//         test_l2_block_initialization_number_non_zero::<ShadowedFastVm>();
+//     }
+//
+//     #[test]
+//     fn l2_block_same_l2_block() {
+//         test_l2_block_same_l2_block::<ShadowedFastVm>();
+//     }
+//
+//     #[test]
+//     fn l2_block_new_l2_block() {
+//         test_l2_block_new_l2_block::<ShadowedFastVm>();
+//     }
+//
+//     #[test]
+//     fn l2_block_first_in_batch() {
+//         test_l2_block_first_in_batch::<ShadowedFastVm>();
+//     }
+// }
+
+// mod nonce_holder {
+//     use crate::versions::testonly::nonce_holder::*;
+//
+//     #[test]
+//     fn nonce_holder() {
+//         test_nonce_holder::<ShadowedFastVm>();
+//     }
+// }
+
+// mod precompiles {
+//     use crate::versions::testonly::precompiles::*;
+//
+//     #[test]
+//     fn keccak() {
+//         test_keccak::<ShadowedFastVm>();
+//     }
+//
+//     #[test]
+//     fn sha256() {
+//         test_sha256::<ShadowedFastVm>();
+//     }
+//
+//     #[test]
+//     fn ecrecover() {
+//         test_ecrecover::<ShadowedFastVm>();
+//     }
+// }
+
+// mod refunds {
+//     use crate::versions::testonly::refunds::*;
+//
+//     #[test]
+//     fn predetermined_refunded_gas() {
+//         test_predetermined_refunded_gas::<ShadowedFastVm>();
+//     }
+//
+//     #[test]
+//     fn negative_pubdata_for_transaction() {
+//         test_negative_pubdata_for_transaction::<ShadowedFastVm>();
+//     }
+// }
+
+// mod require_eip712 {
+//     use crate::versions::testonly::require_eip712::*;
+//
+//     #[test]
+//     fn require_eip712() {
+//         test_require_eip712::<ShadowedFastVm>();
+//     }
+// }
+
+// mod rollbacks {
+//     use crate::versions::testonly::rollbacks::*;
+//
+//     #[test]
+//     fn vm_rollbacks() {
+//         test_vm_rollbacks::<ShadowedFastVm>();
+//     }
+//
+//     #[test]
+//     fn vm_loadnext_rollbacks() {
+//         test_vm_loadnext_rollbacks::<ShadowedFastVm>();
+//     }
+//
+//     #[test]
+//     fn rollback_in_call_mode() {
+//         test_rollback_in_call_mode::<ShadowedFastVm>();
+//     }
+// }
+
+// mod secp256r1 {
+//     use crate::versions::testonly::secp256r1::*;
+//
+//     #[test]
+//     fn secp256r1() {
+//         test_secp256r1::<ShadowedFastVm>();
+//     }
+// }
+
+// mod simple_execution {
+//     use crate::versions::testonly::simple_execution::*;
+//
+//     #[test]
+//     fn estimate_fee() {
+//         test_estimate_fee::<ShadowedFastVm>();
+//     }
+//
+//     #[test]
+//     fn simple_execute() {
+//         test_simple_execute::<ShadowedFastVm>();
+//     }
+// }
+
+// mod storage {
+//     use crate::versions::testonly::storage::*;
+//
+//     #[test]
+//     fn storage_behavior() {
+//         test_storage_behavior::<ShadowedFastVm>();
+//     }
+//
+//     #[test]
+//     fn transient_storage_behavior() {
+//         test_transient_storage_behavior::<ShadowedFastVm>();
+//     }
+// }
+
+// mod tracing_execution_error {
+//     use crate::versions::testonly::tracing_execution_error::*;
+//
+//     #[test]
+//     fn tracing_of_execution_errors() {
+//         test_tracing_of_execution_errors::<ShadowedFastVm>();
+//     }
+// }
+
+// mod transfer {
+//     use crate::versions::testonly::transfer::*;
+//
+//     #[test]
+//     fn send_and_transfer() {
+//         test_send_and_transfer::<ShadowedFastVm>();
+//     }
+//
+//     #[test]
+//     fn reentrancy_protection_send_and_transfer() {
+//         test_reentrancy_protection_send_and_transfer::<ShadowedFastVm>();
+//     }
+// }
+
+// mod upgrade {
+//     use crate::versions::testonly::upgrade::*;
+//
+//     #[test]
+//     fn protocol_upgrade_is_first() {
+//         test_protocol_upgrade_is_first::<ShadowedFastVm>();
+//     }
+//
+//     #[test]
+//     fn force_deploy_upgrade() {
+//         test_force_deploy_upgrade::<ShadowedFastVm>();
+//     }
+//
+//     #[test]
+//     fn complex_upgrader() {
+//         test_complex_upgrader::<ShadowedFastVm>();
+//     }
+// }
diff --git a/core/lib/multivm/src/versions/testonly.rs b/core/lib/multivm/src/versions/testonly.rs
deleted file mode 100644
index adfdbd0b327..00000000000
--- a/core/lib/multivm/src/versions/testonly.rs
+++ /dev/null
@@ -1,96 +0,0 @@
-use zksync_contracts::BaseSystemContracts;
-use zksync_test_account::Account;
-use zksync_types::{
-    block::L2BlockHasher, fee_model::BatchFeeInput, get_code_key, get_is_account_key,
-    helpers::unix_timestamp_ms, utils::storage_key_for_eth_balance, Address, L1BatchNumber,
-    L2BlockNumber, L2ChainId, ProtocolVersionId, U256,
-};
-use zksync_utils::{bytecode::hash_bytecode, u256_to_h256};
-
-use crate::{
-    interface::{storage::InMemoryStorage, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode},
-    vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT,
-};
-
-pub(super) fn default_system_env() -> SystemEnv {
-    SystemEnv {
-        zk_porter_available: false,
-        version: ProtocolVersionId::latest(),
-        base_system_smart_contracts: BaseSystemContracts::playground(),
-        bootloader_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT,
-        execution_mode: TxExecutionMode::VerifyExecute,
-        default_validation_computational_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT,
-        chain_id: L2ChainId::from(270),
-        pubdata_params: Default::default(),
-    }
-}
-
-pub(super) fn default_l1_batch(number: L1BatchNumber) -> L1BatchEnv {
-    let timestamp = unix_timestamp_ms();
-    L1BatchEnv {
-        previous_batch_hash: None,
-        number,
-        timestamp,
-        fee_input: BatchFeeInput::l1_pegged(
-            50_000_000_000, // 50 gwei
-            250_000_000,    // 0.25 gwei
-        ),
-        fee_account: Address::random(),
-        enforced_base_fee: None,
-        first_l2_block: L2BlockEnv {
-            number: 1,
-            timestamp,
-            prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)),
-            max_virtual_blocks_to_create: 100,
-        },
-    }
-}
-
-pub(super) fn make_account_rich(storage: &mut InMemoryStorage, account: &Account) {
-    let key = storage_key_for_eth_balance(&account.address);
-    storage.set_value(key, u256_to_h256(U256::from(10_u64.pow(19))));
-}
-
-#[derive(Debug, Clone)]
-pub(super) struct ContractToDeploy {
-    bytecode: Vec<u8>,
-    address: Address,
-    is_account: bool,
-}
-
-impl ContractToDeploy {
-    pub fn new(bytecode: Vec<u8>, address: Address) -> Self {
-        Self {
-            bytecode,
-            address,
-            is_account: false,
-        }
-    }
-
-    // FIXME: restore this method if needed in the main branch
-    // pub fn account(bytecode: Vec<u8>, address: Address) -> Self {
-    //     Self {
-    //         bytecode,
-    //         address,
-    //         is_account: true,
-    //     }
-    // }
-
-    pub fn insert(&self, storage: &mut InMemoryStorage) {
-        let deployer_code_key = get_code_key(&self.address);
-        storage.set_value(deployer_code_key, hash_bytecode(&self.bytecode));
-        if self.is_account {
-            let is_account_key = get_is_account_key(&self.address);
-            storage.set_value(is_account_key, u256_to_h256(1_u32.into()));
-        }
-        storage.store_factory_dep(hash_bytecode(&self.bytecode), self.bytecode.clone());
-    }
-
-    // FIXME: restore this method if needed in the main branch
-    // /// Inserts the contracts into the test environment, bypassing the deployer system contract.
-    // pub fn insert_all(contracts: &[Self], storage: &mut InMemoryStorage) {
-    //     for contract in contracts {
-    //         contract.insert(storage);
-    //     }
-    // }
-}
diff --git a/core/lib/multivm/src/versions/testonly/block_tip.rs b/core/lib/multivm/src/versions/testonly/block_tip.rs
new file mode 100644
index 00000000000..220653308a7
--- /dev/null
+++ b/core/lib/multivm/src/versions/testonly/block_tip.rs
@@ -0,0 +1,390 @@
+use ethabi::Token;
+use itertools::Itertools;
+use zksync_contracts::load_sys_contract;
+use zksync_system_constants::{
+    CONTRACT_FORCE_DEPLOYER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS,
+};
+use zksync_types::{
+    commitment::SerializeCommitment, fee_model::BatchFeeInput, get_code_key,
+    l2_to_l1_log::L2ToL1Log, writes::StateDiffRecord, Address, Execute, H256, U256,
+};
+use zksync_utils::{bytecode::hash_bytecode, u256_to_h256};
+
+use super::{
+    default_pubdata_builder, get_complex_upgrade_abi, get_empty_storage, read_complex_upgrade,
+    tester::{TestedVm, VmTesterBuilder},
+};
+use crate::{
+    interface::{InspectExecutionMode, L1BatchEnv, TxExecutionMode, VmInterfaceExt},
+    versions::testonly::default_l1_batch,
+    vm_latest::constants::{
+        BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD,
+        BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD, BOOTLOADER_BATCH_TIP_OVERHEAD,
+        MAX_VM_PUBDATA_PER_BATCH,
+    },
+};
+
+#[derive(Debug, Clone, Default)]
+struct L1MessengerTestData {
+    l2_to_l1_logs: usize,
+    messages: Vec<Vec<u8>>,
+    bytecodes: Vec<Vec<u8>>,
+    state_diffs: Vec<StateDiffRecord>,
+}
+
+struct MimicCallInfo {
+    to: Address,
+    who_to_mimic: Address,
+    data: Vec<u8>,
+}
+
+const CALLS_PER_TX: usize = 1_000;
+
+fn populate_mimic_calls(data: L1MessengerTestData) -> Vec<Vec<u8>> {
+    let complex_upgrade = get_complex_upgrade_abi();
+    let l1_messenger = load_sys_contract("L1Messenger");
+
+    let logs_mimic_calls = (0..data.l2_to_l1_logs).map(|i| MimicCallInfo {
+        to: L1_MESSENGER_ADDRESS,
+        who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS,
+        data: l1_messenger
+            .function("sendL2ToL1Log")
+            .unwrap()
+            .encode_input(&[
+                Token::Bool(false),
+                Token::FixedBytes(H256::from_low_u64_be(2 * i as u64).0.to_vec()),
+                Token::FixedBytes(H256::from_low_u64_be(2 * i as u64 + 1).0.to_vec()),
+            ])
+            .unwrap(),
+    });
+    let messages_mimic_calls = data.messages.iter().map(|message| MimicCallInfo {
+        to: L1_MESSENGER_ADDRESS,
+        who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS,
+        data: l1_messenger
+            .function("sendToL1")
+            .unwrap()
+            .encode_input(&[Token::Bytes(message.clone())])
+            .unwrap(),
+    });
+    let bytecodes_mimic_calls = data.bytecodes.iter().map(|bytecode| MimicCallInfo {
+        to: L1_MESSENGER_ADDRESS,
+        who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS,
+        data: l1_messenger
+            .function("requestBytecodeL1Publication")
+            .unwrap()
+            .encode_input(&[Token::FixedBytes(hash_bytecode(bytecode).0.to_vec())])
+            .unwrap(),
+    });
+
+    let encoded_calls = logs_mimic_calls
+        .chain(messages_mimic_calls)
+        .chain(bytecodes_mimic_calls)
+        .map(|call| {
+            Token::Tuple(vec![
+                Token::Address(call.to),
+                Token::Address(call.who_to_mimic),
+                Token::Bytes(call.data),
+            ])
+        })
+        .chunks(CALLS_PER_TX)
+        .into_iter()
+        .map(|chunk| {
+            complex_upgrade
+                .function("mimicCalls")
+                .unwrap()
+                .encode_input(&[Token::Array(chunk.collect_vec())])
+                .unwrap()
+        })
+        .collect_vec();
+
+    encoded_calls
+}
+
+struct TestStatistics {
+    pub max_used_gas: u32,
+    pub circuit_statistics: u64,
+    pub execution_metrics_size: u64,
+}
+
+struct StatisticsTagged {
+    pub statistics: TestStatistics,
+    pub tag: String,
+}
+
+fn execute_test<VM: TestedVm>(test_data: L1MessengerTestData) -> TestStatistics {
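+    // Measures what the batch tip costs for the given pubdata workload: the mimic-call
+    // transactions are executed first, and the gas spent by `finish_batch_with_state_diffs`
+    // is then derived from `gas_remaining()` snapshots taken around it.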
+    let mut storage = get_empty_storage();
+    let complex_upgrade_code = read_complex_upgrade();
+
+    // For this test we'll just put the bytecode onto the force deployer address
+    storage.set_value(
+        get_code_key(&CONTRACT_FORCE_DEPLOYER_ADDRESS),
+        hash_bytecode(&complex_upgrade_code),
+    );
+    storage.store_factory_dep(hash_bytecode(&complex_upgrade_code), complex_upgrade_code);
+
+    // We are measuring computational cost, so pubdata prices don't matter; high prices
+    // would only artificially dilute the gas limit.
+    let batch_env = L1BatchEnv {
+        fee_input: BatchFeeInput::pubdata_independent(100_000, 100_000, 100_000),
+        ..default_l1_batch(zksync_types::L1BatchNumber(1))
+    };
+
+    let mut vm = VmTesterBuilder::new()
+        .with_storage(storage)
+        .with_execution_mode(TxExecutionMode::VerifyExecute)
+        .with_rich_accounts(1)
+        .with_l1_batch_env(batch_env)
+        .build::<VM>();
+
+    let bytecodes: Vec<_> = test_data.bytecodes.iter().map(Vec::as_slice).collect();
+    vm.vm.insert_bytecodes(&bytecodes);
+
+    let txs_data = populate_mimic_calls(test_data.clone());
+    let account = &mut vm.rich_accounts[0];
+
+    for (i, data) in txs_data.into_iter().enumerate() {
+        let tx = account.get_l2_tx_for_execute(
+            Execute {
+                contract_address: Some(CONTRACT_FORCE_DEPLOYER_ADDRESS),
+                calldata: data,
+                value: U256::zero(),
+                factory_deps: vec![],
+            },
+            None,
+        );
+
+        vm.vm.push_transaction(tx);
+
+        let result = vm.vm.execute(InspectExecutionMode::OneTx);
+        assert!(
+            !result.result.is_failed(),
+            "Transaction {i} wasn't successful for input: {:#?}",
+            test_data
+        );
+    }
+
+    // Now we count how much ergs were spent at the end of the batch.
+    // It is assumed that the top level frame is the bootloader.
+    let gas_before = vm.vm.gas_remaining();
+    let result = vm
+        .vm
+        .finish_batch_with_state_diffs(test_data.state_diffs.clone(), default_pubdata_builder());
+    assert!(
+        !result.result.is_failed(),
+        "Batch wasn't successful for input: {test_data:?}"
+    );
+    let gas_after = vm.vm.gas_remaining();
+    assert_eq!((gas_before - gas_after) as u64, result.statistics.gas_used);
+
+    TestStatistics {
+        max_used_gas: gas_before - gas_after,
+        circuit_statistics: result.statistics.circuit_statistic.total() as u64,
+        execution_metrics_size: result.get_execution_metrics(None).size() as u64,
+    }
+}
+
+fn generate_state_diffs(
+    repeated_writes: bool,
+    small_diff: bool,
+    number_of_state_diffs: usize,
+) -> Vec<StateDiffRecord> {
+    (0..number_of_state_diffs)
+        .map(|i| {
+            let address = Address::from_low_u64_be(i as u64);
+            let key = U256::from(i);
+            let enumeration_index = if repeated_writes { i + 1 } else { 0 };
+
+            let (initial_value, final_value) = if small_diff {
+                // As small as it gets, one byte to denote zeroing out the value
+                (U256::from(1), U256::from(0))
+            } else {
+                // As large as it gets
+                (U256::from(0), U256::from(2).pow(255.into()))
+            };
+
+            StateDiffRecord {
+                address,
+                key,
+                derived_key: u256_to_h256(i.into()).0,
+                enumeration_index: enumeration_index as u64,
+                initial_value,
+                final_value,
+            }
+        })
+        .collect()
+}
+
+// A valid zkEVM bytecode has an odd number of 32-byte words.
+fn get_valid_bytecode_length(length: usize) -> usize {
+    // Firstly ensure that the length is divisible by 32
+    let length_padded_to_32 = if length % 32 == 0 {
+        length
+    } else {
+        length + 32 - (length % 32)
+    };
+
+    // Then we ensure that the number returned by division by 32 is odd
+    if length_padded_to_32 % 64 == 0 {
+        length_padded_to_32 + 32
+    } else {
+        length_padded_to_32
+    }
+}
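+
+// Worked examples for the padding rule above: a 100-byte input pads to 128 bytes
+// (4 words, even count), so it bumps to 160 bytes (5 words, odd); a 96-byte input
+// (3 words) is already valid and stays as-is.
+//
+// assert_eq!(get_valid_bytecode_length(100), 160);
+// assert_eq!(get_valid_bytecode_length(96), 96);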
+
+pub(crate) fn test_dry_run_upper_bound<VM: TestedVm>() {
+    // Some of the pubdata is consumed by constant fields (such as length of messages, number of logs, etc.).
+    // While this leaves some room for error, at the end of the test we require that the `BOOTLOADER_BATCH_TIP_OVERHEAD`
+    // is sufficient with a very large margin, so it is okay to ignore 1% of possible pubdata.
+    const MAX_EFFECTIVE_PUBDATA_PER_BATCH: usize =
+        (MAX_VM_PUBDATA_PER_BATCH as f64 * 0.99) as usize;
+
+    // We are re-using the `ComplexUpgrade` contract as it already has the `mimicCall` functionality.
+    // To get the upper bound, we'll try to do the following:
+    // 1. Max number of logs.
+    // 2. Lots of small L2->L1 messages / one large L2->L1 message.
+    // 3. Lots of small bytecodes / one large bytecode.
+    // 4. Lots of storage slot updates.
+
+    let statistics = vec![
+        // max logs
+        StatisticsTagged {
+            statistics: execute_test::<VM>(L1MessengerTestData {
+                l2_to_l1_logs: MAX_EFFECTIVE_PUBDATA_PER_BATCH / L2ToL1Log::SERIALIZED_SIZE,
+                ..Default::default()
+            }),
+            tag: "max_logs".to_string(),
+        },
+        // max messages
+        StatisticsTagged {
+            statistics: execute_test::<VM>(L1MessengerTestData {
+                // Each L2->L1 message is accompanied by a Log + its length, which is a 4 byte number,
+                // so the max number of pubdata is bound by it
+                messages: vec![
+                    vec![0; 0];
+                    MAX_EFFECTIVE_PUBDATA_PER_BATCH / (L2ToL1Log::SERIALIZED_SIZE + 4)
+                ],
+                ..Default::default()
+            }),
+            tag: "max_messages".to_string(),
+        },
+        // long message
+        StatisticsTagged {
+            statistics: execute_test::<VM>(L1MessengerTestData {
+                // Each L2->L1 message is accompanied by a Log, so the max number of pubdata is bound by it
+                messages: vec![vec![0; MAX_EFFECTIVE_PUBDATA_PER_BATCH]; 1],
+                ..Default::default()
+            }),
+            tag: "long_message".to_string(),
+        },
+        // max bytecodes
+        StatisticsTagged {
+            statistics: execute_test::<VM>(L1MessengerTestData {
+                // Each bytecode must be at least 32 bytes long.
+                // Each uncompressed bytecode is accompanied by its length, which is a 4 byte number
+                bytecodes: vec![vec![0; 32]; MAX_EFFECTIVE_PUBDATA_PER_BATCH / (32 + 4)],
+                ..Default::default()
+            }),
+            tag: "max_bytecodes".to_string(),
+        },
+        // long bytecode
+        StatisticsTagged {
+            statistics: execute_test::<VM>(L1MessengerTestData {
+                bytecodes: vec![
+                    vec![0; get_valid_bytecode_length(MAX_EFFECTIVE_PUBDATA_PER_BATCH)];
+                    1
+                ],
+                ..Default::default()
+            }),
+            tag: "long_bytecode".to_string(),
+        },
+        // lots of small repeated writes
+        StatisticsTagged {
+            statistics: execute_test::<VM>(L1MessengerTestData {
+                // In theory each state diff can require only 5 bytes to be published (enum index + 4 bytes for the key)
+                state_diffs: generate_state_diffs(true, true, MAX_EFFECTIVE_PUBDATA_PER_BATCH / 5),
+                ..Default::default()
+            }),
+            tag: "small_repeated_writes".to_string(),
+        },
+        // lots of big repeated writes
+        StatisticsTagged {
+            statistics: execute_test::<VM>(L1MessengerTestData {
+                // Each big repeated write will approximately require 4 bytes for key + 1 byte for encoding type + 32 bytes for value
+                state_diffs: generate_state_diffs(
+                    true,
+                    false,
+                    MAX_EFFECTIVE_PUBDATA_PER_BATCH / 37,
+                ),
+                ..Default::default()
+            }),
+            tag: "big_repeated_writes".to_string(),
+        },
+        // lots of small initial writes
+        StatisticsTagged {
+            statistics: execute_test::<VM>(L1MessengerTestData {
+                // Each small initial write will take at least 32 bytes for derived key + 1 byte encoding zeroing out
+                state_diffs: generate_state_diffs(
+                    false,
+                    true,
+                    MAX_EFFECTIVE_PUBDATA_PER_BATCH / 33,
+                ),
+                ..Default::default()
+            }),
+            tag: "small_initial_writes".to_string(),
+        },
+        // lots of large initial writes
+        StatisticsTagged {
+            statistics: execute_test::<VM>(L1MessengerTestData {
+                // Each big write will take at least 32 bytes for derived key + 1 byte for encoding type + 32 bytes for value
+                state_diffs: generate_state_diffs(
+                    false,
+                    false,
+                    MAX_EFFECTIVE_PUBDATA_PER_BATCH / 65,
+                ),
+                ..Default::default()
+            }),
+            tag: "big_initial_writes".to_string(),
+        },
+    ];
+
+    // We require at least 1.5x headroom for the batch tip: each measured maximum, multiplied
+    // by 3/2, must still fit into the corresponding overhead constant.
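+    // Example: if the worst scenario below used 80_000_000 gas, the assertion
+    // `80_000_000 * 3 / 2 <= BOOTLOADER_BATCH_TIP_OVERHEAD` would require the
+    // constant to be at least 120_000_000.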
+    let max_used_gas = statistics
+        .iter()
+        .map(|s| (s.statistics.max_used_gas, s.tag.clone()))
+        .max()
+        .unwrap();
+    assert!(
+        max_used_gas.0 * 3 / 2 <= BOOTLOADER_BATCH_TIP_OVERHEAD,
+        "BOOTLOADER_BATCH_TIP_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_OVERHEAD = {}",
+        max_used_gas.1,
+        max_used_gas.0,
+        BOOTLOADER_BATCH_TIP_OVERHEAD
+    );
+
+    let circuit_statistics = statistics
+        .iter()
+        .map(|s| (s.statistics.circuit_statistics, s.tag.clone()))
+        .max()
+        .unwrap();
+    assert!(
+        circuit_statistics.0 * 3 / 2 <= BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD as u64,
+        "BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD = {}",
+        circuit_statistics.1,
+        circuit_statistics.0,
+        BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD
+    );
+
+    let execution_metrics_size = statistics
+        .iter()
+        .map(|s| (s.statistics.execution_metrics_size, s.tag.clone()))
+        .max()
+        .unwrap();
+    assert!(
+        execution_metrics_size.0 * 3 / 2 <= BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD as u64,
+        "BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD = {}",
+        execution_metrics_size.1,
+        execution_metrics_size.0,
+        BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD
+    );
+}
diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/bootloader.rs b/core/lib/multivm/src/versions/testonly/bootloader.rs
similarity index 53%
rename from core/lib/multivm/src/versions/vm_1_4_2/tests/bootloader.rs
rename to core/lib/multivm/src/versions/testonly/bootloader.rs
index 8d69d05c444..4b9b63252d6 100644
--- a/core/lib/multivm/src/versions/vm_1_4_2/tests/bootloader.rs
+++ b/core/lib/multivm/src/versions/testonly/bootloader.rs
@@ -1,50 +1,39 @@
+use assert_matches::assert_matches;
 use zksync_types::U256;

-use crate::{
-    interface::{ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterface},
-    vm_1_4_2::{
-        constants::BOOTLOADER_HEAP_PAGE,
-        tests::{
-            tester::VmTesterBuilder,
-            utils::{get_bootloader, verify_required_memory, BASE_SYSTEM_CONTRACTS},
-        },
-    },
-};
-
-#[test]
-fn test_dummy_bootloader() {
+use super::{get_bootloader, tester::VmTesterBuilder, TestedVm, BASE_SYSTEM_CONTRACTS};
+use crate::interface::{ExecutionResult, Halt, TxExecutionMode};
+
+pub(crate) fn test_dummy_bootloader<VM: TestedVm>() {
     let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone();
     base_system_contracts.bootloader = get_bootloader("dummy");

-    let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled)
+    let mut vm = VmTesterBuilder::new()
         .with_empty_in_memory_storage()
         .with_base_system_smart_contracts(base_system_contracts)
         .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .build();
+        .build::<VM>();

-    let result = vm.vm.execute(VmExecutionMode::Batch);
+    let result = vm.vm.finish_batch_without_pubdata();
     assert!(!result.result.is_failed());

     let correct_first_cell = U256::from_str_radix("123123123", 16).unwrap();
-    verify_required_memory(
-        &vm.vm.state,
-        vec![(correct_first_cell, BOOTLOADER_HEAP_PAGE, 0)],
-    );
+    vm.vm
+        .verify_required_bootloader_heap(&[(0, correct_first_cell)]);
 }

-#[test]
-fn test_bootloader_out_of_gas() {
+pub(crate) fn test_bootloader_out_of_gas<VM: TestedVm>() {
     let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone();
     base_system_contracts.bootloader = get_bootloader("dummy");

-    let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled)
+    let mut vm = VmTesterBuilder::new()
         .with_empty_in_memory_storage()
         .with_base_system_smart_contracts(base_system_contracts)
         .with_bootloader_gas_limit(10)
         .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .build();
+        .build::<VM>();

-    let res = vm.vm.execute(VmExecutionMode::Batch);
+    let res = vm.vm.finish_batch_without_pubdata();

     assert_matches!(
         res.result,
diff --git a/core/lib/multivm/src/versions/testonly/bytecode_publishing.rs b/core/lib/multivm/src/versions/testonly/bytecode_publishing.rs
new file mode 100644
index 00000000000..9da005b995d
--- /dev/null
+++ b/core/lib/multivm/src/versions/testonly/bytecode_publishing.rs
@@ -0,0 +1,44 @@
+use zksync_test_account::TxType;
+
+use super::{default_pubdata_builder, read_test_contract, tester::VmTesterBuilder, TestedVm};
+use crate::{
+    interface::{InspectExecutionMode, TxExecutionMode, VmEvent, VmInterfaceExt},
+    utils::bytecode,
+};
+
+pub(crate) fn test_bytecode_publishing<VM: TestedVm>() {
+    // In this test, we aim to ensure that the contents of the compressed bytecodes
+    // are included as part of the L2->L1 long messages
+    let mut vm = VmTesterBuilder::new()
+        .with_empty_in_memory_storage()
+        .with_execution_mode(TxExecutionMode::VerifyExecute)
+        .with_rich_accounts(1)
+        .build::<VM>();
+
+    let counter = read_test_contract();
+    let account = &mut vm.rich_accounts[0];
+
+    let compressed_bytecode = bytecode::compress(counter.clone()).unwrap().compressed;
+
+    let tx = account.get_deploy_tx(&counter, None, TxType::L2).tx;
+    assert_eq!(tx.execute.factory_deps.len(), 1); // The deployed bytecode is the only dependency
+    let push_result = vm.vm.push_transaction(tx);
+    assert_eq!(push_result.compressed_bytecodes.len(), 1);
+    assert_eq!(push_result.compressed_bytecodes[0].original, counter);
+    assert_eq!(
+        push_result.compressed_bytecodes[0].compressed,
+        compressed_bytecode
+    );
+
+    let result = vm.vm.execute(InspectExecutionMode::OneTx);
+    assert!(!result.result.is_failed(), "Transaction wasn't successful");
+
+    vm.vm.finish_batch(default_pubdata_builder());
+
+    let state = vm.vm.get_current_execution_state();
+    let long_messages = VmEvent::extract_long_l2_to_l1_messages(&state.events);
+    assert!(
+        long_messages.contains(&compressed_bytecode),
+        "Bytecode not published"
+    );
+}
diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/circuits.rs b/core/lib/multivm/src/versions/testonly/circuits.rs
similarity index 61%
rename from core/lib/multivm/src/versions/vm_1_4_2/tests/circuits.rs
rename to core/lib/multivm/src/versions/testonly/circuits.rs
index 7d0dfd1ed0e..de987a8912d 100644
--- a/core/lib/multivm/src/versions/vm_1_4_2/tests/circuits.rs
+++ b/core/lib/multivm/src/versions/testonly/circuits.rs
@@ -1,39 +1,41 @@
 use zksync_types::{Address, Execute, U256};

+use super::tester::VmTesterBuilder;
 use crate::{
-    interface::{TxExecutionMode, VmExecutionMode, VmInterface},
-    vm_1_4_2::{constants::BLOCK_GAS_LIMIT, tests::tester::VmTesterBuilder},
+    interface::{InspectExecutionMode, TxExecutionMode, VmInterfaceExt},
+    versions::testonly::TestedVm,
+    vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT,
 };

-// Checks that estimated number of circuits for simple transfer doesn't differ much
-// from hardcoded expected value.
-#[test]
-fn test_circuits() {
+/// Checks that estimated number of circuits for simple transfer doesn't differ much
+/// from hardcoded expected value.
+pub(crate) fn test_circuits<VM: TestedVm>() {
-    let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled)
+    let mut vm = VmTesterBuilder::new()
         .with_empty_in_memory_storage()
-        .with_random_rich_accounts(1)
-        .with_deployer()
-        .with_bootloader_gas_limit(BLOCK_GAS_LIMIT)
+        .with_rich_accounts(1)
+        .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT)
         .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .build();
+        .build::<VM>();

     let account = &mut vm.rich_accounts[0];
     let tx = account.get_l2_tx_for_execute(
         Execute {
-            contract_address: Address::random(),
+            contract_address: Some(Address::repeat_byte(1)),
             calldata: Vec::new(),
             value: U256::from(1u8),
-            factory_deps: None,
+            factory_deps: vec![],
         },
         None,
     );
     vm.vm.push_transaction(tx);
-    let res = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx);
+    let res = vm.vm.execute(InspectExecutionMode::OneTx);
+    assert!(!res.result.is_failed(), "{res:#?}");

     let s = res.statistics.circuit_statistic;
     // Check `circuit_statistic`.
-    const EXPECTED: [f32; 11] = [
-        1.1979, 0.1390, 1.5455, 0.0031, 1.0573, 0.00059, 0.003438, 0.00077, 0.1195, 0.1429, 0.0,
+    const EXPECTED: [f32; 13] = [
+        1.34935, 0.15026, 1.66666, 0.00315, 1.0594, 0.00058, 0.00348, 0.00076, 0.11945, 0.14285,
+        0.0, 0.0, 0.0,
     ];
     let actual = [
         (s.main_vm, "main_vm"),
@@ -47,6 +49,8 @@ fn test_circuits() {
         (s.keccak256, "keccak256"),
         (s.ecrecover, "ecrecover"),
         (s.sha256, "sha256"),
+        (s.secp256k1_verify, "secp256k1_verify"),
+        (s.transient_storage_checker, "transient_storage_checker"),
     ];
     for ((actual, name), expected) in actual.iter().zip(EXPECTED) {
         if expected == 0.0 {
diff --git a/core/lib/multivm/src/versions/testonly/code_oracle.rs b/core/lib/multivm/src/versions/testonly/code_oracle.rs
new file mode 100644
index 00000000000..767a294f44a
--- /dev/null
+++ b/core/lib/multivm/src/versions/testonly/code_oracle.rs
@@ -0,0 +1,242 @@
+use ethabi::Token;
+use zksync_types::{
+    get_known_code_key, web3::keccak256, Address, Execute, StorageLogWithPreviousValue, U256,
+};
+use zksync_utils::{bytecode::hash_bytecode, h256_to_u256, u256_to_h256};
+
+use super::{
+    get_empty_storage, load_precompiles_contract, read_precompiles_contract, read_test_contract,
+    tester::VmTesterBuilder, TestedVm,
+};
+use crate::{
+    interface::{InspectExecutionMode, TxExecutionMode, VmInterfaceExt},
+    versions::testonly::ContractToDeploy,
+};
+
+fn generate_large_bytecode() -> Vec<u8> {
+    // This is the maximal possible size of a zkEVM bytecode
+    vec![2u8; ((1 << 16) - 1) * 32]
+}
+
+pub(crate) fn test_code_oracle<VM: TestedVm>() {
+    let precompiles_contract_address = Address::repeat_byte(1);
+    let precompile_contract_bytecode = read_precompiles_contract();
+
+    // Filling the zkevm bytecode
+    let normal_zkevm_bytecode = read_test_contract();
+    let normal_zkevm_bytecode_hash = hash_bytecode(&normal_zkevm_bytecode);
+    let normal_zkevm_bytecode_keccak_hash = keccak256(&normal_zkevm_bytecode);
+    let mut storage = get_empty_storage();
+    storage.set_value(
+        get_known_code_key(&normal_zkevm_bytecode_hash),
+        u256_to_h256(U256::one()),
+    );
+
+    // In this test, we aim to check that the code oracle can decommit a bytecode that has been
+    // marked as known ahead of time.
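+    // Note: setting the slot under `get_known_code_key` to 1 is what marks the bytecode as
+    // "known" to the system, which is a precondition for the code oracle to decommit it.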
+    let mut vm = VmTesterBuilder::new()
+        .with_empty_in_memory_storage()
+        .with_execution_mode(TxExecutionMode::VerifyExecute)
+        .with_rich_accounts(1)
+        .with_custom_contracts(vec![ContractToDeploy::new(
+            precompile_contract_bytecode,
+            precompiles_contract_address,
+        )])
+        .with_storage(storage)
+        .build::<VM>();
+
+    let precompile_contract = load_precompiles_contract();
+    let call_code_oracle_function = precompile_contract.function("callCodeOracle").unwrap();
+
+    vm.vm.insert_bytecodes(&[normal_zkevm_bytecode.as_slice()]);
+    let account = &mut vm.rich_accounts[0];
+
+    // Firstly, let's ensure that the contract works.
+    let tx1 = account.get_l2_tx_for_execute(
+        Execute {
+            contract_address: Some(precompiles_contract_address),
+            calldata: call_code_oracle_function
+                .encode_input(&[
+                    Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()),
+                    Token::FixedBytes(normal_zkevm_bytecode_keccak_hash.to_vec()),
+                ])
+                .unwrap(),
+            value: U256::zero(),
+            factory_deps: vec![],
+        },
+        None,
+    );
+
+    vm.vm.push_transaction(tx1);
+    let result = vm.vm.execute(InspectExecutionMode::OneTx);
+    assert!(
+        !result.result.is_failed(),
+        "Transaction wasn't successful: {result:#?}"
+    );
+
+    // Now, we ask for the same bytecode. We use it to partially check whether the memory page with
+    // the decommitted bytecode gets erased (it shouldn't).
+    let tx2 = account.get_l2_tx_for_execute(
+        Execute {
+            contract_address: Some(precompiles_contract_address),
+            calldata: call_code_oracle_function
+                .encode_input(&[
+                    Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()),
+                    Token::FixedBytes(normal_zkevm_bytecode_keccak_hash.to_vec()),
+                ])
+                .unwrap(),
+            value: U256::zero(),
+            factory_deps: vec![],
+        },
+        None,
+    );
+    vm.vm.push_transaction(tx2);
+    let result = vm.vm.execute(InspectExecutionMode::OneTx);
+    assert!(
+        !result.result.is_failed(),
+        "Transaction wasn't successful: {result:#?}"
+    );
+}
+
+fn find_code_oracle_cost_log(
+    precompiles_contract_address: Address,
+    logs: &[StorageLogWithPreviousValue],
+) -> &StorageLogWithPreviousValue {
+    logs.iter()
+        .find(|log| {
+            *log.log.key.address() == precompiles_contract_address && log.log.key.key().is_zero()
+        })
+        .expect("no code oracle cost log")
+}
+
+pub(crate) fn test_code_oracle_big_bytecode<VM: TestedVm>() {
+    let precompiles_contract_address = Address::repeat_byte(1);
+    let precompile_contract_bytecode = read_precompiles_contract();
+
+    let big_zkevm_bytecode = generate_large_bytecode();
+    let big_zkevm_bytecode_hash = hash_bytecode(&big_zkevm_bytecode);
+    let big_zkevm_bytecode_keccak_hash = keccak256(&big_zkevm_bytecode);
+
+    let mut storage = get_empty_storage();
+    storage.set_value(
+        get_known_code_key(&big_zkevm_bytecode_hash),
+        u256_to_h256(U256::one()),
+    );
+
+    // Same flow as above, but with the largest bytecode the zkEVM supports.
+    let mut vm = VmTesterBuilder::new()
+        .with_empty_in_memory_storage()
+        .with_execution_mode(TxExecutionMode::VerifyExecute)
+        .with_rich_accounts(1)
+        .with_custom_contracts(vec![ContractToDeploy::new(
+            precompile_contract_bytecode,
+            precompiles_contract_address,
+        )])
+        .with_storage(storage)
+        .build::<VM>();
+
+    let precompile_contract = load_precompiles_contract();
+    let call_code_oracle_function = precompile_contract.function("callCodeOracle").unwrap();
+
+    vm.vm.insert_bytecodes(&[big_zkevm_bytecode.as_slice()]);
+
+    let account = &mut vm.rich_accounts[0];
+
+    // Firstly, let's ensure that the contract works.
+    let tx1 = account.get_l2_tx_for_execute(
+        Execute {
+            contract_address: Some(precompiles_contract_address),
+            calldata: call_code_oracle_function
+                .encode_input(&[
+                    Token::FixedBytes(big_zkevm_bytecode_hash.0.to_vec()),
+                    Token::FixedBytes(big_zkevm_bytecode_keccak_hash.to_vec()),
+                ])
+                .unwrap(),
+            value: U256::zero(),
+            factory_deps: vec![],
+        },
+        None,
+    );
+
+    vm.vm.push_transaction(tx1);
+    let result = vm.vm.execute(InspectExecutionMode::OneTx);
+    assert!(
+        !result.result.is_failed(),
+        "Transaction wasn't successful: {result:#?}"
+    );
+}
+
+pub(crate) fn test_refunds_in_code_oracle<VM: TestedVm>() {
+    let precompiles_contract_address = Address::repeat_byte(1);
+    let precompile_contract_bytecode = read_precompiles_contract();
+
+    let normal_zkevm_bytecode = read_test_contract();
+    let normal_zkevm_bytecode_hash = hash_bytecode(&normal_zkevm_bytecode);
+    let normal_zkevm_bytecode_keccak_hash = keccak256(&normal_zkevm_bytecode);
+    let mut storage = get_empty_storage();
+    storage.set_value(
+        get_known_code_key(&normal_zkevm_bytecode_hash),
+        u256_to_h256(U256::one()),
+    );
+
+    let precompile_contract = load_precompiles_contract();
+    let call_code_oracle_function = precompile_contract.function("callCodeOracle").unwrap();
+
+    // Execute code oracle twice with identical VM state that only differs in that the queried bytecode
+    // is already decommitted the second time. The second call must consume less gas (`decommit` doesn't charge additional gas
+    // for already decommitted codes).
+    let mut oracle_costs = vec![];
+    for decommit in [false, true] {
+        let mut vm = VmTesterBuilder::new()
+            .with_execution_mode(TxExecutionMode::VerifyExecute)
+            .with_rich_accounts(1)
+            .with_custom_contracts(vec![ContractToDeploy::new(
+                precompile_contract_bytecode.clone(),
+                precompiles_contract_address,
+            )])
+            .with_storage(storage.clone())
+            .build::<VM>();
+
+        vm.vm.insert_bytecodes(&[normal_zkevm_bytecode.as_slice()]);
+
+        let account = &mut vm.rich_accounts[0];
+        if decommit {
+            let is_fresh = vm.vm.manually_decommit(normal_zkevm_bytecode_hash);
+            assert!(is_fresh);
+        }
+
+        let tx = account.get_l2_tx_for_execute(
+            Execute {
+                contract_address: Some(precompiles_contract_address),
+                calldata: call_code_oracle_function
+                    .encode_input(&[
+                        Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()),
+                        Token::FixedBytes(normal_zkevm_bytecode_keccak_hash.to_vec()),
+                    ])
+                    .unwrap(),
+                value: U256::zero(),
+                factory_deps: vec![],
+            },
+            None,
+        );
+
+        vm.vm.push_transaction(tx);
+        let result = vm.vm.execute(InspectExecutionMode::OneTx);
+        assert!(
+            !result.result.is_failed(),
+            "Transaction wasn't successful: {result:#?}"
+        );
+        let log =
+            find_code_oracle_cost_log(precompiles_contract_address, &result.logs.storage_logs);
+        oracle_costs.push(log.log.value);
+    }
+
+    // The refund is equal to `gasCost` parameter passed to the `decommit` opcode, which is defined as `4 * contract_length_in_words`
+    // in `CodeOracle.yul`.
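+    // Example: for a 2048-byte (64-word) bytecode, the second call should be cheaper by
+    // exactly 4 * 64 = 256 ergs under this formula.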
+    let code_oracle_refund = h256_to_u256(oracle_costs[0]) - h256_to_u256(oracle_costs[1]);
+    assert_eq!(
+        code_oracle_refund,
+        (4 * (normal_zkevm_bytecode.len() / 32)).into()
+    );
+}
diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/default_aa.rs b/core/lib/multivm/src/versions/testonly/default_aa.rs
similarity index 50%
rename from core/lib/multivm/src/versions/vm_1_4_2/tests/default_aa.rs
rename to core/lib/multivm/src/versions/testonly/default_aa.rs
index b0717a57c56..b3fc5b635de 100644
--- a/core/lib/multivm/src/versions/vm_1_4_2/tests/default_aa.rs
+++ b/core/lib/multivm/src/versions/testonly/default_aa.rs
@@ -1,31 +1,26 @@
-use zksync_system_constants::L2_ETH_TOKEN_ADDRESS;
+use zksync_test_account::{DeployContractsTx, TxType};
 use zksync_types::{
     get_code_key, get_known_code_key, get_nonce_key,
     system_contracts::{DEPLOYMENT_NONCE_INCREMENT, TX_NONCE_INCREMENT},
-    AccountTreeId, U256,
+    utils::storage_key_for_eth_balance,
+    U256,
 };
-use zksync_utils::u256_to_h256;
+use zksync_utils::h256_to_u256;

+use super::{default_pubdata_builder, read_test_contract, tester::VmTesterBuilder, TestedVm};
 use crate::{
-    interface::{TxExecutionMode, VmExecutionMode, VmInterface},
-    vm_1_4_2::{
-        tests::{
-            tester::{DeployContractsTx, TxType, VmTesterBuilder},
-            utils::{get_balance, read_test_contract, verify_required_storage},
-        },
-        utils::fee::get_batch_base_fee,
-    },
+    interface::{InspectExecutionMode, TxExecutionMode, VmInterfaceExt},
+    vm_latest::utils::fee::get_batch_base_fee,
 };

-#[test]
-fn test_default_aa_interaction() {
+pub(crate) fn test_default_aa_interaction<VM: TestedVm>() {
     // In this test, we aim to test whether a simple account interaction (without any fee logic)
     // will work. The account will try to deploy a simple contract from integration tests.
-    let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled)
+    let mut vm = VmTesterBuilder::new()
         .with_empty_in_memory_storage()
         .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_random_rich_accounts(1)
-        .build();
+        .with_rich_accounts(1)
+        .build::<VM>();

     let counter = read_test_contract();
     let account = &mut vm.rich_accounts[0];
@@ -34,13 +29,18 @@ fn test_default_aa_interaction() {
         bytecode_hash,
         address,
     } = account.get_deploy_tx(&counter, None, TxType::L2);
-    let maximal_fee = tx.gas_limit() * get_batch_base_fee(&vm.vm.batch_env);
+    let maximal_fee = tx.gas_limit() * get_batch_base_fee(&vm.l1_batch_env);

     vm.vm.push_transaction(tx);
-    let result = vm.vm.execute(VmExecutionMode::OneTx);
+    let result = vm.vm.execute(InspectExecutionMode::OneTx);
     assert!(!result.result.is_failed(), "Transaction wasn't successful");

-    vm.vm.execute(VmExecutionMode::Batch);
+    let batch_result = vm.vm.finish_batch(default_pubdata_builder());
+    assert!(
+        !batch_result.block_tip_execution_result.result.is_failed(),
+        "Batch tip execution wasn't successful"
+    );
+
     vm.vm.get_current_execution_state();

     // Both deployment and ordinary nonce should be incremented by one.
@@ -53,25 +53,16 @@ fn test_default_aa_interaction() {

     // The contract should be deployed successfully.
     let account_code_key = get_code_key(&address);
-    let expected_slots = vec![
-        (u256_to_h256(expected_nonce), account_nonce_key),
-        (u256_to_h256(U256::from(1u32)), known_codes_key),
-        (bytecode_hash, account_code_key),
-    ];
-
-    verify_required_storage(&vm.vm.state, expected_slots);
-
+    let operator_balance_key = storage_key_for_eth_balance(&vm.l1_batch_env.fee_account);
     let expected_fee = maximal_fee
         - U256::from(result.refunds.gas_refunded)
-            * U256::from(get_batch_base_fee(&vm.vm.batch_env));
-    let operator_balance = get_balance(
-        AccountTreeId::new(L2_ETH_TOKEN_ADDRESS),
-        &vm.fee_account,
-        vm.vm.state.storage.storage.get_ptr(),
-    );
+            * U256::from(get_batch_base_fee(&vm.l1_batch_env));

-    assert_eq!(
-        operator_balance, expected_fee,
-        "Operator did not receive his fee"
-    );
+    let expected_slots = [
+        (account_nonce_key, expected_nonce),
+        (known_codes_key, 1.into()),
+        (account_code_key, h256_to_u256(bytecode_hash)),
+        (operator_balance_key, expected_fee),
+    ];
+    vm.vm.verify_required_storage(&expected_slots);
 }
diff --git a/core/lib/multivm/src/versions/testonly/gas_limit.rs b/core/lib/multivm/src/versions/testonly/gas_limit.rs
new file mode 100644
index 00000000000..5e31eb2b159
--- /dev/null
+++ b/core/lib/multivm/src/versions/testonly/gas_limit.rs
@@ -0,0 +1,34 @@
+use zksync_test_account::Account;
+use zksync_types::{fee::Fee, Execute};
+
+use super::{tester::VmTesterBuilder, TestedVm};
+use crate::{
+    interface::TxExecutionMode,
+    vm_latest::constants::{TX_DESCRIPTION_OFFSET, TX_GAS_LIMIT_OFFSET},
+};
+
+/// Checks that `TX_GAS_LIMIT_OFFSET` constant is correct.
+pub(crate) fn test_tx_gas_limit_offset<VM: TestedVm>() {
+    let mut vm = VmTesterBuilder::new()
+        .with_empty_in_memory_storage()
+        .with_execution_mode(TxExecutionMode::VerifyExecute)
+        .with_rich_accounts(1)
+        .build::<VM>();
+
+    let gas_limit = 9999.into();
+    let tx = vm.rich_accounts[0].get_l2_tx_for_execute(
+        Execute {
+            contract_address: Some(Default::default()),
+            ..Default::default()
+        },
+        Some(Fee {
+            gas_limit,
+            ..Account::default_fee()
+        }),
+    );
+
+    vm.vm.push_transaction(tx);
+
+    let slot = (TX_DESCRIPTION_OFFSET + TX_GAS_LIMIT_OFFSET) as u32;
+    vm.vm.verify_required_bootloader_heap(&[(slot, gas_limit)]);
+}
diff --git a/core/lib/multivm/src/versions/testonly/get_used_contracts.rs b/core/lib/multivm/src/versions/testonly/get_used_contracts.rs
new file mode 100644
index 00000000000..9d0908807e2
--- /dev/null
+++ b/core/lib/multivm/src/versions/testonly/get_used_contracts.rs
@@ -0,0 +1,219 @@
+use std::iter;
+
+use assert_matches::assert_matches;
+use ethabi::Token;
+use zk_evm_1_3_1::zkevm_opcode_defs::decoding::{EncodingModeProduction, VmEncodingMode};
+use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS;
+use zksync_test_account::{Account, TxType};
+use zksync_types::{AccountTreeId, Address, Execute, StorageKey, H256, U256};
+use zksync_utils::{bytecode::hash_bytecode, h256_to_u256};
+
+use super::{
+    read_proxy_counter_contract, read_test_contract,
+    tester::{VmTester, VmTesterBuilder},
+    TestedVm,
+};
+use crate::{
+    interface::{
+        ExecutionResult, InspectExecutionMode, TxExecutionMode, VmExecutionResultAndLogs,
+        VmInterfaceExt,
+    },
+    versions::testonly::ContractToDeploy,
+};
+
+pub(crate) fn test_get_used_contracts<VM: TestedVm>() {
+    let mut vm = VmTesterBuilder::new()
+        .with_empty_in_memory_storage()
+        .with_execution_mode(TxExecutionMode::VerifyExecute)
+        .with_rich_accounts(1)
+        .build::<VM>();
+
+    assert!(vm.vm.known_bytecode_hashes().is_empty());
+
+    // Create, push, and execute a transaction with non-empty factory deps and a
+    // success status, to check that `get_decommitted_hashes()` updates.
+    let contract_code = read_test_contract();
+    let account = &mut vm.rich_accounts[0];
+    let tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 0 });
+    vm.vm.push_transaction(tx.tx.clone());
+    let result = vm.vm.execute(InspectExecutionMode::OneTx);
+    assert!(!result.result.is_failed());
+
+    assert!(vm
+        .vm
+        .decommitted_hashes()
+        .contains(&h256_to_u256(tx.bytecode_hash)));
+
+    // Note: `Default_AA` will be in the list of used contracts if L2 tx is used
+    assert_eq!(vm.vm.decommitted_hashes(), vm.vm.known_bytecode_hashes());
+
+    // Create, push, and execute another transaction with non-empty factory deps that fails
+    // (`known_bytecodes` will be updated, but we expect `get_decommitted_hashes()` to not be updated).
+    let calldata = [1, 2, 3];
+    let big_calldata: Vec<u8> = calldata
+        .iter()
+        .cycle()
+        .take(calldata.len() * 1024)
+        .cloned()
+        .collect();
+    let account2 = Account::from_seed(u32::MAX);
+    assert_ne!(account2.address, account.address);
+    let tx2 = account2.get_l1_tx(
+        Execute {
+            contract_address: Some(CONTRACT_DEPLOYER_ADDRESS),
+            calldata: big_calldata,
+            value: Default::default(),
+            factory_deps: vec![vec![1; 32]],
+        },
+        1,
+    );
+
+    vm.vm.push_transaction(tx2.clone());
+
+    let res2 = vm.vm.execute(InspectExecutionMode::OneTx);
+
+    assert!(res2.result.is_failed());
+
+    for factory_dep in tx2.execute.factory_deps {
+        let hash = hash_bytecode(&factory_dep);
+        let hash_to_u256 = h256_to_u256(hash);
+        assert!(vm.vm.known_bytecode_hashes().contains(&hash_to_u256));
+        assert!(!vm.vm.decommitted_hashes().contains(&hash_to_u256));
+    }
+}
+
+/// Counter test contract bytecode inflated by appending lots of `NOP` opcodes at the end. This leads to non-trivial
+/// decommitment cost (>10,000 gas).
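+/// Padding with `NOP` encodings keeps the bytecode executable while inflating its length,
+/// so the decommitment cost becomes large enough to observe in gas readings.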
+fn inflated_counter_bytecode() -> Vec<u8> {
+    let mut counter_bytecode = read_test_contract();
+    counter_bytecode.extend(
+        iter::repeat(EncodingModeProduction::nop_encoding().to_be_bytes())
+            .take(10_000)
+            .flatten(),
+    );
+    counter_bytecode
+}
+
+#[derive(Debug)]
+struct ProxyCounterData {
+    proxy_counter_address: Address,
+    counter_bytecode_hash: U256,
+}
+
+fn execute_proxy_counter<VM: TestedVm>(
+    gas: u32,
+) -> (VmTester<VM>, ProxyCounterData, VmExecutionResultAndLogs) {
+    let counter_bytecode = inflated_counter_bytecode();
+    let counter_bytecode_hash = h256_to_u256(hash_bytecode(&counter_bytecode));
+    let counter_address = Address::repeat_byte(0x23);
+
+    let mut vm = VmTesterBuilder::new()
+        .with_empty_in_memory_storage()
+        .with_custom_contracts(vec![ContractToDeploy::new(
+            counter_bytecode,
+            counter_address,
+        )])
+        .with_execution_mode(TxExecutionMode::VerifyExecute)
+        .with_rich_accounts(1)
+        .build::<VM>();
+
+    let (proxy_counter_bytecode, proxy_counter_abi) = read_proxy_counter_contract();
+    let account = &mut vm.rich_accounts[0];
+    let deploy_tx = account.get_deploy_tx(
+        &proxy_counter_bytecode,
+        Some(&[Token::Address(counter_address)]),
+        TxType::L2,
+    );
+    let (compression_result, exec_result) = vm
+        .vm
+        .execute_transaction_with_bytecode_compression(deploy_tx.tx, true);
+    compression_result.unwrap();
+    assert!(!exec_result.result.is_failed(), "{exec_result:#?}");
+
+    let decommitted_hashes = vm.vm.decommitted_hashes();
+    assert!(
+        !decommitted_hashes.contains(&counter_bytecode_hash),
+        "{decommitted_hashes:?}"
+    );
+
+    let increment = proxy_counter_abi.function("increment").unwrap();
+    let increment_tx = account.get_l2_tx_for_execute(
+        Execute {
+            contract_address: Some(deploy_tx.address),
+            calldata: increment
+                .encode_input(&[Token::Uint(1.into()), Token::Uint(gas.into())])
+                .unwrap(),
+            value: 0.into(),
+            factory_deps: vec![],
+        },
+        None,
+    );
+    let (compression_result, exec_result) = vm
+        .vm
+        .execute_transaction_with_bytecode_compression(increment_tx, true);
+    compression_result.unwrap();
+    let data = ProxyCounterData {
+        proxy_counter_address: deploy_tx.address,
+        counter_bytecode_hash,
+    };
+    (vm, data, exec_result)
+}
+
+pub(crate) fn test_get_used_contracts_with_far_call<VM: TestedVm>() {
+    let (vm, data, exec_result) = execute_proxy_counter::<VM>(100_000);
+    assert!(!exec_result.result.is_failed(), "{exec_result:#?}");
+    let decommitted_hashes = vm.vm.decommitted_hashes();
+    assert!(
+        decommitted_hashes.contains(&data.counter_bytecode_hash),
+        "{decommitted_hashes:?}"
+    );
+}
+
+pub(crate) fn test_get_used_contracts_with_out_of_gas_far_call<VM: TestedVm>() {
+    let (mut vm, data, exec_result) = execute_proxy_counter::<VM>(10_000);
+    assert_matches!(exec_result.result, ExecutionResult::Revert { .. });
+    let decommitted_hashes = vm.vm.decommitted_hashes();
+    assert!(
+        decommitted_hashes.contains(&data.counter_bytecode_hash),
+        "{decommitted_hashes:?}"
+    );
+
+    // Execute another transaction with a successful far call and check that it's still charged for decommitment.
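+    // The proxy contract records the measured far-call cost in its storage slot 1; the
+    // assertions below read it back from the storage logs via `proxy_counter_cost_key`.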
+    let account = &mut vm.rich_accounts[0];
+    let (_, proxy_counter_abi) = read_proxy_counter_contract();
+    let increment = proxy_counter_abi.function("increment").unwrap();
+    let increment_tx = account.get_l2_tx_for_execute(
+        Execute {
+            contract_address: Some(data.proxy_counter_address),
+            calldata: increment
+                .encode_input(&[Token::Uint(1.into()), Token::Uint(u64::MAX.into())])
+                .unwrap(),
+            value: 0.into(),
+            factory_deps: vec![],
+        },
+        None,
+    );
+    let (compression_result, exec_result) = vm
+        .vm
+        .execute_transaction_with_bytecode_compression(increment_tx, true);
+    compression_result.unwrap();
+    assert!(!exec_result.result.is_failed(), "{exec_result:#?}");
+
+    let proxy_counter_cost_key = StorageKey::new(
+        AccountTreeId::new(data.proxy_counter_address),
+        H256::from_low_u64_be(1),
+    );
+    let far_call_cost_log = exec_result
+        .logs
+        .storage_logs
+        .iter()
+        .find(|log| log.log.key == proxy_counter_cost_key)
+        .expect("no cost log");
+    assert!(
+        far_call_cost_log.previous_value.is_zero(),
+        "{far_call_cost_log:?}"
+    );
+    let far_call_cost = h256_to_u256(far_call_cost_log.log.value);
+    assert!(far_call_cost > 10_000.into(), "{far_call_cost}");
+}
diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/is_write_initial.rs b/core/lib/multivm/src/versions/testonly/is_write_initial.rs
similarity index 65%
rename from core/lib/multivm/src/versions/vm_1_4_2/tests/is_write_initial.rs
rename to core/lib/multivm/src/versions/testonly/is_write_initial.rs
index 7da250ef7a9..cac9be17363 100644
--- a/core/lib/multivm/src/versions/vm_1_4_2/tests/is_write_initial.rs
+++ b/core/lib/multivm/src/versions/testonly/is_write_initial.rs
@@ -1,26 +1,21 @@
-use crate::interface::storage::ReadStorage;
+use zksync_test_account::TxType;
 use zksync_types::get_nonce_key;

-use crate::{
-    interface::{TxExecutionMode, VmExecutionMode, VmInterface},
-    vm_1_4_2::tests::{
-        tester::{Account, TxType, VmTesterBuilder},
-        utils::read_test_contract,
-    },
+use super::{read_test_contract, tester::VmTesterBuilder, TestedVm};
+use crate::interface::{
+    storage::ReadStorage, InspectExecutionMode, TxExecutionMode, VmInterfaceExt,
 };

-#[test]
-fn test_is_write_initial_behaviour() {
+pub(crate) fn test_is_write_initial_behaviour<VM: TestedVm>() {
     // In this test, we check the result of `is_write_initial` at different stages.
     // The main idea is to check that the `is_write_initial` storage uses the correct cache for
     // initial writes and doesn't mix it up with the repeated writes within one batch execution.
-
-    let mut account = Account::random();
-    let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryDisabled)
+    let mut vm = VmTesterBuilder::new()
         .with_empty_in_memory_storage()
         .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_rich_accounts(vec![account.clone()])
-        .build();
+        .with_rich_accounts(1)
+        .build::<VM>();

+    let account = &mut vm.rich_accounts[0];
     let nonce_key = get_nonce_key(&account.address);
     // Check that the next write to the nonce key will be initial.
@@ -34,7 +29,7 @@ fn test_is_write_initial_behaviour() {
     let tx = account.get_deploy_tx(&contract_code, None, TxType::L2).tx;
     vm.vm.push_transaction(tx);
-    vm.vm.execute(VmExecutionMode::OneTx);
+    vm.vm.execute(InspectExecutionMode::OneTx);

     // Check that `is_write_initial` still returns true for the nonce key.
    assert!(vm
diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/testonly/l1_tx_execution.rs
similarity index 71%
rename from core/lib/multivm/src/versions/vm_1_4_2/tests/l1_tx_execution.rs
rename to core/lib/multivm/src/versions/testonly/l1_tx_execution.rs
index 40915cf931c..e98a8385f02 100644
--- a/core/lib/multivm/src/versions/vm_1_4_2/tests/l1_tx_execution.rs
+++ b/core/lib/multivm/src/versions/testonly/l1_tx_execution.rs
@@ -1,61 +1,57 @@
 use ethabi::Token;
 use zksync_contracts::l1_messenger_contract;
 use zksync_system_constants::{BOOTLOADER_ADDRESS, L1_MESSENGER_ADDRESS};
+use zksync_test_account::TxType;
 use zksync_types::{
     get_code_key, get_known_code_key,
     l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log},
-    storage_writes_deduplicator::StorageWritesDeduplicator,
     Execute, ExecuteTransactionCommon, U256,
 };
-use zksync_utils::u256_to_h256;
+use zksync_utils::{h256_to_u256, u256_to_h256};

+use super::{read_test_contract, tester::VmTesterBuilder, TestedVm, BASE_SYSTEM_CONTRACTS};
 use crate::{
-    interface::{TxExecutionMode, VmExecutionMode, VmInterface},
-    vm_1_4_2::{
-        tests::{
-            tester::{TxType, VmTesterBuilder},
-            utils::{read_test_contract, verify_required_storage, BASE_SYSTEM_CONTRACTS},
-        },
-        types::internals::TransactionData,
-    },
+    interface::{InspectExecutionMode, TxExecutionMode, VmInterfaceExt},
+    utils::StorageWritesDeduplicator,
 };

-#[test]
-fn test_l1_tx_execution() {
+pub(crate) fn test_l1_tx_execution<VM: TestedVm>() {
    // In this test, we try to execute a contract deployment from L1.
    // Here, instead of marking the code hash via bootloader means, we will be
    // using L1->L2 communication, the same way it would likely be done in priority mode.

-    // There are always at least 7 initial writes here, because we pay fees from l1:
+    // There are always at least 9 initial writes here, because we pay fees from L1:
     // - `totalSupply` of ETH token
     // - balance of the refund recipient
     // - balance of the bootloader
     // - `tx_rolling` hash
+    // - `gasPerPubdataByte`
+    // - `basePubdataSpent`
     // - rolling hash of L2->L1 logs
     // - transaction number in block counter
     // - L2->L1 log counter in `L1Messenger`

-    // TODO(PLA-537): right now we are using 4 slots instead of 7 due to 0 fee for transaction.
-    let basic_initial_writes = 4;
+    // TODO(PLA-537): right now we are using 5 slots instead of 9 due to 0 fee for transaction.
+    let basic_initial_writes = 5;

-    let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled)
+    let mut vm = VmTesterBuilder::new()
         .with_empty_in_memory_storage()
         .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone())
         .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_random_rich_accounts(1)
-        .build();
+        .with_rich_accounts(1)
+        .build::<VM>();

     let contract_code = read_test_contract();
     let account = &mut vm.rich_accounts[0];
     let deploy_tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 1 });
-    let tx_data: TransactionData = deploy_tx.tx.clone().into();
+    let tx_hash = deploy_tx.tx.hash();

     let required_l2_to_l1_logs: Vec<_> = vec![L2ToL1Log {
         shard_id: 0,
         is_service: true,
         tx_number_in_block: 0,
         sender: BOOTLOADER_ADDRESS,
-        key: tx_data.tx_hash(0.into()),
+        key: tx_hash,
         value: u256_to_h256(U256::from(1u32)),
     }]
     .into_iter()
@@ -64,7 +60,7 @@ fn test_l1_tx_execution() {

     vm.vm.push_transaction(deploy_tx.tx.clone());

-    let res = vm.vm.execute(VmExecutionMode::OneTx);
+    let res = vm.vm.execute(InspectExecutionMode::OneTx);

     // The code hash of the deployed contract should be marked as republished.
     let known_codes_key = get_known_code_key(&deploy_tx.bytecode_hash);
@@ -72,14 +68,12 @@ fn test_l1_tx_execution() {

     // The contract should be deployed successfully.
     let account_code_key = get_code_key(&deploy_tx.address);
-    let expected_slots = vec![
-        (u256_to_h256(U256::from(1u32)), known_codes_key),
-        (deploy_tx.bytecode_hash, account_code_key),
-    ];
     assert!(!res.result.is_failed());

-    verify_required_storage(&vm.vm.state, expected_slots);
-
+    vm.vm.verify_required_storage(&[
+        (known_codes_key, U256::from(1)),
+        (account_code_key, h256_to_u256(deploy_tx.bytecode_hash)),
+    ]);
     assert_eq!(res.logs.user_l2_to_l1_logs, required_l2_to_l1_logs);

     let tx = account.get_test_contract_transaction(
@@ -90,12 +84,12 @@ fn test_l1_tx_execution() {
         TxType::L1 { serial_id: 0 },
     );
     vm.vm.push_transaction(tx);
-    let res = vm.vm.execute(VmExecutionMode::OneTx);
+    let res = vm.vm.execute(InspectExecutionMode::OneTx);
     let storage_logs = res.logs.storage_logs;
     let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs);

     // Tx panicked
-    assert_eq!(res.initial_storage_writes - basic_initial_writes, 0);
+    assert_eq!(res.initial_storage_writes, basic_initial_writes);

     let tx = account.get_test_contract_transaction(
         deploy_tx.address,
@@ -105,10 +99,10 @@ fn test_l1_tx_execution() {
         TxType::L1 { serial_id: 0 },
     );
     vm.vm.push_transaction(tx.clone());
-    let res = vm.vm.execute(VmExecutionMode::OneTx);
+    let res = vm.vm.execute(InspectExecutionMode::OneTx);
     let storage_logs = res.logs.storage_logs;
     let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs);
-    // We changed one slot inside contract
+    // We changed one slot inside the contract.
     assert_eq!(res.initial_storage_writes - basic_initial_writes, 1);

     // No repeated writes
@@ -116,10 +110,11 @@ fn test_l1_tx_execution() {
     assert_eq!(res.repeated_storage_writes, 0);

     vm.vm.push_transaction(tx);
-    let storage_logs = vm.vm.execute(VmExecutionMode::OneTx).logs.storage_logs;
+    let storage_logs = vm.vm.execute(InspectExecutionMode::OneTx).logs.storage_logs;
     let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs);
-    // We do the same storage write, it will be deduplicated, so still 4 initial write and 0 repeated
-    assert_eq!(res.initial_storage_writes - basic_initial_writes, 1);
+    // We do the same storage write; it is deduplicated, so there is still one initial write on top of the basic ones and 0 repeated writes.
+    // But now the base pubdata spent has changed too.
+    assert_eq!(res.initial_storage_writes, basic_initial_writes + 1);
     assert_eq!(res.repeated_storage_writes, repeated_writes);

     let tx = account.get_test_contract_transaction(
@@ -130,27 +125,26 @@
         TxType::L1 { serial_id: 1 },
     );
     vm.vm.push_transaction(tx);
-    let result = vm.vm.execute(VmExecutionMode::OneTx);
+    let result = vm.vm.execute(InspectExecutionMode::OneTx);
     // The method is not payable, so the tx should fail.
     assert!(result.result.is_failed(), "The transaction should fail");

     let res = StorageWritesDeduplicator::apply_on_empty_state(&result.logs.storage_logs);
-    // There are only basic initial writes
-    assert_eq!(res.initial_storage_writes - basic_initial_writes, 2);
+    assert_eq!(res.initial_storage_writes, basic_initial_writes + 1);
+    assert_eq!(res.repeated_storage_writes, 1);
 }

-#[test]
-fn test_l1_tx_execution_high_gas_limit() {
+pub(crate) fn test_l1_tx_execution_high_gas_limit<VM: TestedVm>() {
     // In this test, we try to execute an L1->L2 transaction with a high gas limit.
    // Usually, priority transactions with a dangerously high gas limit shouldn't even pass the checks on L1;
     // however, they might pass during the transition period to the new fee model, so we check that we can safely process those.

-    let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled)
+    let mut vm = VmTesterBuilder::new()
         .with_empty_in_memory_storage()
         .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone())
         .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_random_rich_accounts(1)
-        .build();
+        .with_rich_accounts(1)
+        .build::<VM>();

     let account = &mut vm.rich_accounts[0];
@@ -167,7 +161,7 @@ fn test_l1_tx_execution_high_gas_limit() {
         Execute {
             contract_address: Some(L1_MESSENGER_ADDRESS),
             value: 0.into(),
-            factory_deps: None,
+            factory_deps: vec![],
             calldata,
         },
         0,
@@ -182,7 +176,7 @@ fn test_l1_tx_execution_high_gas_limit() {

     vm.vm.push_transaction(tx);

-    let res = vm.vm.execute(VmExecutionMode::OneTx);
+    let res = vm.vm.execute(InspectExecutionMode::OneTx);
     assert!(res.result.is_failed(), "The transaction should've failed");
 }
diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/l2_blocks.rs b/core/lib/multivm/src/versions/testonly/l2_blocks.rs
similarity index 62%
rename from core/lib/multivm/src/versions/vm_1_4_1/tests/l2_blocks.rs
rename to core/lib/multivm/src/versions/testonly/l2_blocks.rs
index 073d9ce5800..947d8b5859f 100644
--- a/core/lib/multivm/src/versions/vm_1_4_1/tests/l2_blocks.rs
+++ b/core/lib/multivm/src/versions/testonly/l2_blocks.rs
@@ -3,110 +3,104 @@
 //! The description for each of the tests can be found in the corresponding `.yul` file.
 //!
-use zk_evm_1_4_1::aux_structures::Timestamp;
-use crate::interface::storage::WriteStorage;
+use assert_matches::assert_matches;
 use zksync_system_constants::REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE;
 use zksync_types::{
-    block::{pack_block_info, MiniblockHasher},
-    AccountTreeId, Execute, ExecuteTransactionCommon, L1BatchNumber, L1TxCommonData,
-    MiniblockNumber, ProtocolVersionId, StorageKey, Transaction, H160, H256,
-    SYSTEM_CONTEXT_ADDRESS, SYSTEM_CONTEXT_BLOCK_INFO_POSITION,
-    SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION,
-    U256,
+    block::{pack_block_info, L2BlockHasher},
+    AccountTreeId, Address, Execute, ExecuteTransactionCommon, L1BatchNumber, L1TxCommonData,
+    L2BlockNumber, ProtocolVersionId, StorageKey, Transaction, H256, SYSTEM_CONTEXT_ADDRESS,
+    SYSTEM_CONTEXT_BLOCK_INFO_POSITION, SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION,
+    SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, U256,
 };
 use zksync_utils::{h256_to_u256, u256_to_h256};

+use super::{default_l1_batch, get_empty_storage, tester::VmTesterBuilder, TestedVm};
 use crate::{
-    interface::{ExecutionResult, Halt, L2BlockEnv, TxExecutionMode, VmExecutionMode, VmInterface},
-    vm_1_4_1::{
-        constants::{
-            BOOTLOADER_HEAP_PAGE, TX_OPERATOR_L2_BLOCK_INFO_OFFSET,
-            TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO,
-        },
-        tests::tester::{default_l1_batch, VmTesterBuilder},
+    interface::{
+        storage::StorageView, ExecutionResult, Halt, InspectExecutionMode, L2BlockEnv,
+        TxExecutionMode, VmInterfaceExt,
+    },
+    vm_latest::{
+        constants::{TX_OPERATOR_L2_BLOCK_INFO_OFFSET, TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO},
         utils::l2_blocks::get_l2_block_hash_key,
-        HistoryEnabled, Vm,
     },
-    HistoryMode,
 };

 fn get_l1_noop() -> Transaction {
     Transaction {
         common_data: ExecuteTransactionCommon::L1(L1TxCommonData {
-            sender: H160::random(),
+            sender: Address::repeat_byte(1),
             gas_limit: U256::from(2000000u32),
             gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(),
             ..Default::default()
         }),
         execute: Execute {
-            contract_address: H160::zero(),
+            contract_address: Some(Address::repeat_byte(0xc0)),
             calldata: vec![],
             value: U256::zero(),
-            factory_deps: None,
+            factory_deps: vec![],
         },
         received_timestamp_ms: 0,
         raw_bytes: None,
     }
 }

-#[test]
-fn test_l2_block_initialization_timestamp() {
+pub(crate) fn test_l2_block_initialization_timestamp<VM: TestedVm>() {
     // This test checks that the L2 block initialization works correctly.
-    // Here we check that that the first block must have timestamp that is greater or equal to the timestamp
+    // Here we check that the first block must have a timestamp that is greater or equal to the timestamp
     // of the current batch.

-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
+    let mut vm = VmTesterBuilder::new()
         .with_empty_in_memory_storage()
         .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_random_rich_accounts(1)
-        .build();
+        .with_rich_accounts(1)
+        .build::<VM>();

-    // Override the timestamp of the current miniblock to be 0.
+    // Override the timestamp of the current L2 block to be 0.
+    vm.vm.push_l2_block_unchecked(L2BlockEnv {
         number: 1,
         timestamp: 0,
-        prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)),
+        prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)),
         max_virtual_blocks_to_create: 1,
     });
     let l1_tx = get_l1_noop();

     vm.vm.push_transaction(l1_tx);
-    let res = vm.vm.execute(VmExecutionMode::OneTx);
+    let res = vm.vm.execute(InspectExecutionMode::OneTx);

-    assert_eq!(
+    assert_matches!(
         res.result,
-        ExecutionResult::Halt {reason: Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())}
+        ExecutionResult::Halt { reason: Halt::FailedToSetL2Block(msg) }
+            if msg.contains("timestamp")
     );
 }

-#[test]
-fn test_l2_block_initialization_number_non_zero() {
+pub(crate) fn test_l2_block_initialization_number_non_zero<VM: TestedVm>() {
     // This test checks that the L2 block initialization works correctly.
-    // Here we check that the first miniblock number can not be zero.
+    // Here we check that the first L2 block number cannot be zero.

     let l1_batch = default_l1_batch(L1BatchNumber(1));
     let first_l2_block = L2BlockEnv {
         number: 0,
         timestamp: l1_batch.timestamp,
-        prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)),
+        prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)),
         max_virtual_blocks_to_create: 1,
     };

-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
+    let mut vm = VmTesterBuilder::new()
         .with_empty_in_memory_storage()
         .with_execution_mode(TxExecutionMode::VerifyExecute)
         .with_l1_batch_env(l1_batch)
-        .with_random_rich_accounts(1)
-        .build();
+        .with_rich_accounts(1)
+        .build::<VM>();

     let l1_tx = get_l1_noop();
     vm.vm.push_transaction(l1_tx);
-    let timestamp = Timestamp(vm.vm.state.local_state.timestamp);
-    set_manual_l2_block_info(&mut vm.vm, 0, first_l2_block, timestamp);
+    set_manual_l2_block_info(&mut vm.vm, 0, first_l2_block);

-    let res = vm.vm.execute(VmExecutionMode::OneTx);
+    let res = vm.vm.execute(InspectExecutionMode::OneTx);

     assert_eq!(
         res.result,
@@ -118,26 +112,26 @@ fn test_l2_block_initialization_number_non_zero() {
     );
 }

-fn test_same_l2_block(
+fn test_same_l2_block<VM: TestedVm>(
     expected_error: Option<Halt>,
     override_timestamp: Option<u64>,
     override_prev_block_hash: Option<H256>,
 ) {
     let mut l1_batch = default_l1_batch(L1BatchNumber(1));
     l1_batch.timestamp = 1;
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
+    let mut vm = VmTesterBuilder::new()
         .with_empty_in_memory_storage()
         .with_execution_mode(TxExecutionMode::VerifyExecute)
         .with_l1_batch_env(l1_batch)
-        .with_random_rich_accounts(1)
-        .build();
+        .with_rich_accounts(1)
+        .build::<VM>();

     let l1_tx = get_l1_noop();
     vm.vm.push_transaction(l1_tx.clone());
-    let res = vm.vm.execute(VmExecutionMode::OneTx);
+    let res = vm.vm.execute(InspectExecutionMode::OneTx);
     assert!(!res.result.is_failed());

-    let mut current_l2_block = vm.vm.batch_env.first_l2_block;
+    let mut current_l2_block = vm.l1_batch_env.first_l2_block;

     if let Some(timestamp) = override_timestamp {
         current_l2_block.timestamp = timestamp;
@@ -151,10 +145,9 @@ fn test_same_l2_block(
     }

     vm.vm.push_transaction(l1_tx);
-    let timestamp = Timestamp(vm.vm.state.local_state.timestamp);
-    set_manual_l2_block_info(&mut vm.vm, 1, current_l2_block, timestamp);
+    set_manual_l2_block_info(&mut vm.vm, 1, current_l2_block);

-    let result = vm.vm.execute(VmExecutionMode::OneTx);
+    let result = vm.vm.execute(InspectExecutionMode::OneTx);

     if let Some(err) = expected_error {
         assert_eq!(result.result, ExecutionResult::Halt { reason: err });
@@ -163,12 +156,11 @@
     }
 }

-#[test]
-fn test_l2_block_same_l2_block() {
+pub(crate) fn test_l2_block_same_l2_block<VM: TestedVm>() {
     // This test aims to test the case when there are multiple transactions inside the same L2 block.

     // Case 1: Incorrect timestamp
-    test_same_l2_block(
+    test_same_l2_block::<VM>(
         Some(Halt::FailedToSetL2Block(
             "The timestamp of the same L2 block must be same".to_string(),
         )),
@@ -177,7 +169,7 @@ fn test_l2_block_same_l2_block() {
     );

     // Case 2: Incorrect previous block hash
-    test_same_l2_block(
+    test_same_l2_block::<VM>(
         Some(Halt::FailedToSetL2Block(
             "The previous hash of the same L2 block must be same".to_string(),
         )),
@@ -186,10 +178,10 @@ fn test_l2_block_same_l2_block() {
     );

     // Case 3: Correct continuation of the same L2 block
-    test_same_l2_block(None, None, None);
+    test_same_l2_block::<VM>(None, None, None);
 }

-fn test_new_l2_block(
+fn test_new_l2_block<VM: TestedVm>(
     first_l2_block: L2BlockEnv,
     overriden_second_block_number: Option<u32>,
     overriden_second_block_timestamp: Option<u64>,
@@ -200,23 +192,23 @@ fn test_new_l2_block(
     l1_batch.timestamp = 1;
     l1_batch.first_l2_block = first_l2_block;

-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
+    let mut vm = VmTesterBuilder::new()
         .with_empty_in_memory_storage()
         .with_l1_batch_env(l1_batch)
         .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_random_rich_accounts(1)
-        .build();
+        .with_rich_accounts(1)
+        .build::<VM>();

     let l1_tx = get_l1_noop();

     // Firstly we execute the first transaction
     vm.vm.push_transaction(l1_tx.clone());
-    vm.vm.execute(VmExecutionMode::OneTx);
+    vm.vm.execute(InspectExecutionMode::OneTx);

-    let mut second_l2_block = vm.vm.batch_env.first_l2_block;
+    let mut second_l2_block = vm.l1_batch_env.first_l2_block;
     second_l2_block.number += 1;
     second_l2_block.timestamp += 1;
-    second_l2_block.prev_block_hash = vm.vm.bootloader_state.last_l2_block().get_hash();
+    second_l2_block.prev_block_hash = vm.vm.last_l2_block_hash();

     if let Some(block_number) = overriden_second_block_number {
         second_l2_block.number = block_number;
@@ -228,11 +220,10 @@ fn test_new_l2_block(
         second_l2_block.prev_block_hash = prev_block_hash;
     }

-    vm.vm.bootloader_state.push_l2_block(second_l2_block);
-
+    vm.vm.push_l2_block_unchecked(second_l2_block);
     vm.vm.push_transaction(l1_tx);

-    let result = vm.vm.execute(VmExecutionMode::OneTx);
+    let result = vm.vm.execute(InspectExecutionMode::OneTx);
     if let Some(err) = expected_error {
         assert_eq!(result.result, ExecutionResult::Halt { reason: err });
     } else {
@@ -240,19 +231,18 @@ fn test_new_l2_block(
     }
 }

-#[test]
-fn test_l2_block_new_l2_block() {
+pub(crate) fn test_l2_block_new_l2_block<VM: TestedVm>() {
     // This test aims to cover a potential issue

     let correct_first_block = L2BlockEnv {
         number: 1,
         timestamp: 1,
-        prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)),
+        prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)),
         max_virtual_blocks_to_create: 1,
     };

     // Case 1: Block number increasing by more than 1
-    test_new_l2_block(
+    test_new_l2_block::<VM>(
         correct_first_block,
         Some(3),
         None,
@@ -263,7 +253,7 @@ fn test_l2_block_new_l2_block() {
     );

     // Case 2: Timestamp not increasing
-    test_new_l2_block(
+    test_new_l2_block::<VM>(
         correct_first_block,
         None,
         Some(1),
@@ -272,7 +262,7 @@ fn test_l2_block_new_l2_block() {
     );

     // Case 3: Incorrect previous block hash
-    test_new_l2_block(
+    test_new_l2_block::<VM>(
         correct_first_block,
         None,
         None,
@@ -283,11 +273,11 @@ fn test_l2_block_new_l2_block() {
     );

     // Case 4: Correct new block
-    test_new_l2_block(correct_first_block, None, None, None, None);
+    test_new_l2_block::<VM>(correct_first_block, None, None, None, None);
 }

 #[allow(clippy::too_many_arguments)]
-fn test_first_in_batch(
+fn test_first_in_batch<VM: TestedVm>(
     miniblock_timestamp: u64,
     miniblock_number: u32,
     pending_txs_hash: H256,
@@ -301,16 +291,15 @@
     l1_batch.number += 1;
     l1_batch.timestamp = new_batch_timestamp;

-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
+    let mut vm = VmTesterBuilder::new()
         .with_empty_in_memory_storage()
         .with_l1_batch_env(l1_batch)
         .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_random_rich_accounts(1)
-        .build();
+        .with_rich_accounts(1)
+        .build::<VM>();
     let l1_tx = get_l1_noop();

     // Setting the values provided.
-    let storage_ptr = vm.vm.state.storage.storage.get_ptr();
     let miniblock_info_slot = StorageKey::new(
         AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS),
         SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION,
@@ -325,42 +314,43 @@
     );
     let prev_block_hash_position = get_l2_block_hash_key(miniblock_number - 1);

-    storage_ptr.borrow_mut().set_value(
+    let mut storage = get_empty_storage();
+    storage.set_value(
         miniblock_info_slot,
         u256_to_h256(pack_block_info(
             miniblock_number as u64,
             miniblock_timestamp,
         )),
     );
-    storage_ptr
-        .borrow_mut()
-        .set_value(pending_txs_hash_slot, pending_txs_hash);
-    storage_ptr.borrow_mut().set_value(
+    storage.set_value(pending_txs_hash_slot, pending_txs_hash);
+    storage.set_value(
         batch_info_slot,
         u256_to_h256(pack_block_info(batch_number as u64, batch_timestamp)),
     );
-    storage_ptr.borrow_mut().set_value(
+    storage.set_value(
         prev_block_hash_position,
-        MiniblockHasher::legacy_hash(MiniblockNumber(miniblock_number - 1)),
+        L2BlockHasher::legacy_hash(L2BlockNumber(miniblock_number - 1)),
     );
+    // Replace the storage entirely. It's not enough to write to the underlying storage (since read values are already cached
+    // in the storage view).
+    *vm.storage.borrow_mut() = StorageView::new(storage);

     // In order to skip checks from the Rust side of the VM, we firstly use some definitely correct L2 block info.
     // And then override it with the user-provided value

-    let last_l2_block = vm.vm.bootloader_state.last_l2_block();
+    let last_l2_block = vm.l1_batch_env.first_l2_block;
     let new_l2_block = L2BlockEnv {
         number: last_l2_block.number + 1,
         timestamp: last_l2_block.timestamp + 1,
-        prev_block_hash: last_l2_block.get_hash(),
+        prev_block_hash: vm.vm.last_l2_block_hash(),
         max_virtual_blocks_to_create: last_l2_block.max_virtual_blocks_to_create,
     };

-    vm.vm.bootloader_state.push_l2_block(new_l2_block);
+    vm.vm.push_l2_block_unchecked(new_l2_block);
     vm.vm.push_transaction(l1_tx);
-    let timestamp = Timestamp(vm.vm.state.local_state.timestamp);
-    set_manual_l2_block_info(&mut vm.vm, 0, proposed_block, timestamp);
+    set_manual_l2_block_info(&mut vm.vm, 0, proposed_block);

-    let result = vm.vm.execute(VmExecutionMode::OneTx);
+    let result = vm.vm.execute(InspectExecutionMode::OneTx);
     if let Some(err) = expected_error {
         assert_eq!(result.result, ExecutionResult::Halt { reason: err });
     } else {
@@ -368,12 +358,11 @@ fn test_first_in_batch(
     }
 }

-#[test]
-fn test_l2_block_first_in_batch() {
-    let prev_block_hash = MiniblockHasher::legacy_hash(MiniblockNumber(0));
-    let prev_block_hash = MiniblockHasher::new(MiniblockNumber(1), 1, prev_block_hash)
+pub(crate) fn test_l2_block_first_in_batch<VM: TestedVm>() {
+    let prev_block_hash = L2BlockHasher::legacy_hash(L2BlockNumber(0));
+    let prev_block_hash = L2BlockHasher::new(L2BlockNumber(1), 1, prev_block_hash)
         .finalize(ProtocolVersionId::latest());
-    test_first_in_batch(
+    test_first_in_batch::<VM>(
         1,
         1,
         H256::zero(),
@@ -389,10 +378,10 @@ fn test_l2_block_first_in_batch() {
         None,
     );

-    let prev_block_hash = MiniblockHasher::legacy_hash(MiniblockNumber(0));
-    let prev_block_hash = MiniblockHasher::new(MiniblockNumber(1), 8, prev_block_hash)
+    let prev_block_hash = L2BlockHasher::legacy_hash(L2BlockNumber(0));
+    let prev_block_hash = L2BlockHasher::new(L2BlockNumber(1), 8, prev_block_hash)
         .finalize(ProtocolVersionId::latest());
-    test_first_in_batch(
+    test_first_in_batch::<VM>(
         8,
         1,
         H256::zero(),
@@ -409,29 +398,19 @@ fn test_l2_block_first_in_batch() {
     );
 }

-fn set_manual_l2_block_info<S: WriteStorage, H: HistoryMode>(
-    vm: &mut Vm<S, H>,
-    tx_number: usize,
-    block_info: L2BlockEnv,
-    timestamp: Timestamp,
-) {
+fn set_manual_l2_block_info(vm: &mut impl TestedVm, tx_number: usize, block_info: L2BlockEnv) {
     let fictive_miniblock_position =
         TX_OPERATOR_L2_BLOCK_INFO_OFFSET + TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO * tx_number;
-
-    vm.state.memory.populate_page(
-        BOOTLOADER_HEAP_PAGE as usize,
-        vec![
-            (fictive_miniblock_position, block_info.number.into()),
-            (fictive_miniblock_position + 1, block_info.timestamp.into()),
-            (
-                fictive_miniblock_position + 2,
-                h256_to_u256(block_info.prev_block_hash),
-            ),
-            (
-                fictive_miniblock_position + 3,
-                block_info.max_virtual_blocks_to_create.into(),
-            ),
-        ],
-        timestamp,
-    )
+    vm.write_to_bootloader_heap(&[
+        (fictive_miniblock_position, block_info.number.into()),
+        (fictive_miniblock_position + 1, block_info.timestamp.into()),
+        (
+            fictive_miniblock_position + 2,
+            h256_to_u256(block_info.prev_block_hash),
+        ),
+        (
+            fictive_miniblock_position + 3,
+            block_info.max_virtual_blocks_to_create.into(),
+        ),
+    ])
 }
diff --git a/core/lib/multivm/src/versions/testonly/mod.rs b/core/lib/multivm/src/versions/testonly/mod.rs
new file mode 100644
index 00000000000..eece1d475bb
--- /dev/null
+++ b/core/lib/multivm/src/versions/testonly/mod.rs
@@ -0,0 +1,243 @@
+//! Reusable tests and tooling for low-level VM testing.
+//!
+//! # How it works
+//!
+//! - [`TestedVm`] defines test-specific VM extensions. It's currently implemented for the latest legacy VM
+//!   (`vm_latest`) and the fast VM (`vm_fast`).
+//! - Submodules of this module define test functions generic over `TestedVm`. Specific VM versions implement `TestedVm`
+//!   and can create tests based on these test functions with a minimal amount of boilerplate code.
+//! - Tests use [`VmTester`] built using [`VmTesterBuilder`] to create a VM instance. This allows setting up storage for the VM,
+//!   custom [`SystemEnv`] / [`L1BatchEnv`], deployed contracts, pre-funded accounts etc.
+
+use std::{collections::HashSet, rc::Rc};
+
+use ethabi::Contract;
+use once_cell::sync::Lazy;
+use zksync_contracts::{
+    load_contract, read_bootloader_code, read_bytecode, read_zbin_bytecode, BaseSystemContracts,
+    SystemContractCode,
+};
+use zksync_types::{
+    block::L2BlockHasher, fee_model::BatchFeeInput, get_code_key, get_is_account_key,
+    utils::storage_key_for_eth_balance, Address, L1BatchNumber, L2BlockNumber, L2ChainId,
+    ProtocolVersionId, U256,
+};
+use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256};
+use zksync_vm_interface::{
+    pubdata::PubdataBuilder, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode,
+};
+
+pub(super) use self::tester::{TestedVm, VmTester, VmTesterBuilder};
+use crate::{
+    interface::storage::InMemoryStorage, pubdata_builders::RollupPubdataBuilder,
+    vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT,
+};
+
+pub(super) mod block_tip;
+pub(super) mod bootloader;
+pub(super) mod bytecode_publishing;
+pub(super) mod circuits;
+pub(super) mod code_oracle;
+pub(super) mod default_aa;
+pub(super) mod gas_limit;
+pub(super) mod get_used_contracts;
+pub(super) mod is_write_initial;
+pub(super) mod l1_tx_execution;
+pub(super) mod l2_blocks;
+pub(super) mod nonce_holder;
+pub(super) mod precompiles;
+pub(super) mod refunds;
+pub(super) mod require_eip712;
+pub(super) mod rollbacks;
+pub(super) mod secp256r1;
+pub(super) mod simple_execution;
+pub(super) mod storage;
+mod tester;
+pub(super) mod tracing_execution_error;
+pub(super) mod transfer;
+pub(super) mod upgrade;
+
+static BASE_SYSTEM_CONTRACTS: Lazy<BaseSystemContracts> =
+    Lazy::new(BaseSystemContracts::load_from_disk);
+
+fn get_empty_storage() -> InMemoryStorage {
+    InMemoryStorage::with_system_contracts(hash_bytecode)
+}
+
+pub(crate) fn read_test_contract() -> Vec<u8> {
+    read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json")
+}
+
+fn get_complex_upgrade_abi() -> Contract {
+    load_contract(
+        "etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json"
+    )
+}
+
+fn read_complex_upgrade() -> Vec<u8> {
+    read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json")
+}
+
+fn read_precompiles_contract() -> Vec<u8> {
+    read_bytecode(
+        "etc/contracts-test-data/artifacts-zk/contracts/precompiles/precompiles.sol/Precompiles.json",
+    )
+}
+
+fn load_precompiles_contract() -> Contract {
+    load_contract(
+        "etc/contracts-test-data/artifacts-zk/contracts/precompiles/precompiles.sol/Precompiles.json",
+    )
+}
+
+fn read_proxy_counter_contract() -> (Vec<u8>, Contract) {
+    const PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/counter/proxy_counter.sol/ProxyCounter.json";
+    (read_bytecode(PATH), load_contract(PATH))
+}
+
+fn read_nonce_holder_tester() -> Vec<u8> {
+    read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/custom-account/nonce-holder-test.sol/NonceHolderTest.json")
+}
+
+fn read_expensive_contract() -> (Vec<u8>, Contract) {
+    const PATH: &str =
+        "etc/contracts-test-data/artifacts-zk/contracts/expensive/expensive.sol/Expensive.json";
+    (read_bytecode(PATH), load_contract(PATH))
+}
+
+fn read_many_owners_custom_account_contract() -> (Vec<u8>, Contract) {
+    let path = "etc/contracts-test-data/artifacts-zk/contracts/custom-account/many-owners-custom-account.sol/ManyOwnersCustomAccount.json";
+    (read_bytecode(path), load_contract(path))
+}
+
+fn read_error_contract() -> Vec<u8> {
+    read_bytecode(
+        "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json",
+    )
+}
+
+pub(crate) fn read_max_depth_contract() -> Vec<u8> {
+    read_zbin_bytecode(
+        "core/tests/ts-integration/contracts/zkasm/artifacts/deep_stak.zkasm/deep_stak.zkasm.zbin",
+    )
+}
+
+pub(crate) fn read_simple_transfer_contract() -> Vec<u8> {
+    read_bytecode(
+        "etc/contracts-test-data/artifacts-zk/contracts/simple-transfer/simple-transfer.sol/SimpleTransfer.json",
+    )
+}
+
+pub(crate) fn get_bootloader(test: &str) -> SystemContractCode {
+    let bootloader_code = read_bootloader_code(test);
+    let bootloader_hash = hash_bytecode(&bootloader_code);
+    SystemContractCode {
+        code: bytes_to_be_words(bootloader_code),
+        hash: bootloader_hash,
+    }
+}
+
+pub(crate) fn filter_out_base_system_contracts(all_bytecode_hashes: &mut HashSet<U256>) {
+    all_bytecode_hashes.remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash));
+    if let Some(evm_emulator) = &BASE_SYSTEM_CONTRACTS.evm_emulator {
+        all_bytecode_hashes.remove(&h256_to_u256(evm_emulator.hash));
+    }
+}
+
+pub(super) fn default_system_env() -> SystemEnv {
+    SystemEnv {
+        zk_porter_available: false,
+        version: ProtocolVersionId::latest(),
+        base_system_smart_contracts: BaseSystemContracts::playground(),
+        bootloader_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT,
+        execution_mode: TxExecutionMode::VerifyExecute,
+        default_validation_computational_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT,
+        chain_id: L2ChainId::from(270),
+    }
+}
+
+pub(super) fn default_l1_batch(number: L1BatchNumber) -> L1BatchEnv {
+    // Add a bias to the timestamp to make it more realistic / "random".
+    let timestamp = 1_700_000_000 + u64::from(number.0);
+    L1BatchEnv {
+        previous_batch_hash: None,
+        number,
+        timestamp,
+        fee_input: BatchFeeInput::l1_pegged(
+            50_000_000_000, // 50 gwei
+            250_000_000,    // 0.25 gwei
+        ),
+        fee_account: Address::repeat_byte(1),
+        enforced_base_fee: None,
+        first_l2_block: L2BlockEnv {
+            number: 1,
+            timestamp,
+            prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)),
+            max_virtual_blocks_to_create: 100,
+        },
+    }
+}
+
+pub(super) fn default_pubdata_builder() -> Rc<dyn PubdataBuilder> {
+    Rc::new(RollupPubdataBuilder::new(Address::zero()))
+}
+
+pub(super) fn make_address_rich(storage: &mut InMemoryStorage, address: Address) {
+    let key = storage_key_for_eth_balance(&address);
+    storage.set_value(key, u256_to_h256(U256::from(10_u64.pow(19))));
+}
+
+#[derive(Debug, Clone)]
+pub(super) struct ContractToDeploy {
+    bytecode: Vec<u8>,
+    address: Address,
+    is_account: bool,
+    is_funded: bool,
+}
+
+impl ContractToDeploy {
+    pub fn new(bytecode: Vec<u8>, address: Address) -> Self {
+        Self {
+            bytecode,
+            address,
+            is_account: false,
+            is_funded: false,
+        }
+    }
+
+    pub fn account(bytecode: Vec<u8>, address: Address) -> Self {
+        Self {
+            bytecode,
+            address,
+            is_account: true,
+            is_funded: false,
+        }
+    }
+
+    #[must_use]
+    pub fn funded(mut self) -> Self {
+        self.is_funded = true;
+        self
+    }
+
+    pub fn insert(&self, storage: &mut InMemoryStorage) {
+        let deployer_code_key = get_code_key(&self.address);
+        storage.set_value(deployer_code_key, hash_bytecode(&self.bytecode));
+        if self.is_account {
+            let is_account_key = get_is_account_key(&self.address);
+            storage.set_value(is_account_key, u256_to_h256(1_u32.into()));
+        }
+        storage.store_factory_dep(hash_bytecode(&self.bytecode), self.bytecode.clone());
+
+        if self.is_funded {
+            make_address_rich(storage, self.address);
+        }
+    }
+
+    /// Inserts the contracts into the test environment, bypassing the deployer system contract.
+    pub fn insert_all(contracts: &[Self], storage: &mut InMemoryStorage) {
+        for contract in contracts {
+            contract.insert(storage);
+        }
+    }
+}
diff --git a/core/lib/multivm/src/versions/testonly/nonce_holder.rs b/core/lib/multivm/src/versions/testonly/nonce_holder.rs
new file mode 100644
index 00000000000..36f736c0bbe
--- /dev/null
+++ b/core/lib/multivm/src/versions/testonly/nonce_holder.rs
@@ -0,0 +1,200 @@
+use zksync_test_account::Account;
+use zksync_types::{Execute, ExecuteTransactionCommon, Nonce};
+
+use super::{read_nonce_holder_tester, tester::VmTesterBuilder, ContractToDeploy, TestedVm};
+use crate::interface::{
+    ExecutionResult, Halt, InspectExecutionMode, TxExecutionMode, TxRevertReason, VmInterfaceExt,
+    VmRevertReason,
+};
+
+pub enum NonceHolderTestMode {
+    SetValueUnderNonce,
+    IncreaseMinNonceBy5,
+    IncreaseMinNonceTooMuch,
+    LeaveNonceUnused,
+    IncreaseMinNonceBy1,
+    SwitchToArbitraryOrdering,
+}
+
+impl From<NonceHolderTestMode> for u8 {
+    fn from(mode: NonceHolderTestMode) -> u8 {
+        match mode {
+            NonceHolderTestMode::SetValueUnderNonce => 0,
+            NonceHolderTestMode::IncreaseMinNonceBy5 => 1,
+            NonceHolderTestMode::IncreaseMinNonceTooMuch => 2,
+            NonceHolderTestMode::LeaveNonceUnused => 3,
+            NonceHolderTestMode::IncreaseMinNonceBy1 => 4,
+            NonceHolderTestMode::SwitchToArbitraryOrdering => 5,
+        }
+    }
+}
+
+fn run_nonce_test(
+    vm: &mut impl TestedVm,
+    account: &mut Account,
+    nonce: u32,
+    test_mode: NonceHolderTestMode,
+    error_message: Option<String>,
+    comment: &'static str,
+) {
+    vm.make_snapshot();
+    let mut transaction = account.get_l2_tx_for_execute_with_nonce(
+        Execute {
+            contract_address: Some(account.address),
+            calldata: vec![12],
+            value: Default::default(),
+            factory_deps: vec![],
+        },
+        None,
+        Nonce(nonce),
+    );
+    let ExecuteTransactionCommon::L2(tx_data) = &mut transaction.common_data else {
+        unreachable!();
+    };
+    tx_data.signature = vec![test_mode.into()];
+    vm.push_transaction(transaction);
+    let result = vm.execute(InspectExecutionMode::OneTx);
+
+    if let Some(msg) = error_message {
+        let expected_error =
+            TxRevertReason::Halt(Halt::ValidationFailed(VmRevertReason::General {
+                msg,
+                data: vec![],
+            }));
+        let ExecutionResult::Halt { reason } = &result.result else {
+            panic!("Expected revert, got {:?}", result.result);
+        };
+        assert_eq!(reason.to_string(), expected_error.to_string(), "{comment}");
+        vm.rollback_to_the_latest_snapshot();
+    } else {
+        assert!(!result.result.is_failed(), "{}", comment);
+        vm.pop_snapshot_no_rollback();
+    }
+}
+
+pub(crate) fn test_nonce_holder<VM: TestedVm>() {
+    let builder = VmTesterBuilder::new()
+        .with_empty_in_memory_storage()
+        .with_execution_mode(TxExecutionMode::VerifyExecute)
+        .with_rich_accounts(1);
+    let account_address = builder.rich_account(0).address;
+    let mut vm = builder
+        .with_custom_contracts(vec![ContractToDeploy::account(
+            read_nonce_holder_tester(),
+            account_address,
+        )])
+        .build::<VM>();
+    let account = &mut vm.rich_accounts[0];
+    let hex_addr = hex::encode(account.address.to_fixed_bytes());
+
+    // Test 1: trying to set a value under a non-sequential nonce value.
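+    // (A note on the expected errors below: each message is the VM's textual rendering of a
+    // custom error returned by the nonce holder system contract, i.e. a 4-byte error selector
+    // followed by the ABI-encoded error data. Where the data embeds the caller address and a
+    // nonce, the tests splice in `hex_addr` and the nonce in question.)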
+    run_nonce_test(
+        &mut vm.vm,
+        account,
+        1u32,
+        NonceHolderTestMode::SetValueUnderNonce,
+        Some("Error function_selector = 0x13595475, data = 0x13595475".to_string()),
+        "Allowed to set value under non sequential value",
+    );
+
+    // Test 2: increase min nonce by 1 with sequential nonce ordering:
+    run_nonce_test(
+        &mut vm.vm,
+        account,
+        0u32,
+        NonceHolderTestMode::IncreaseMinNonceBy1,
+        None,
+        "Failed to increment nonce by 1 for sequential account",
+    );
+
+    // Test 3: correctly set value under nonce with sequential nonce ordering:
+    run_nonce_test(
+        &mut vm.vm,
+        account,
+        1u32,
+        NonceHolderTestMode::SetValueUnderNonce,
+        None,
+        "Failed to set value under nonce sequential value",
+    );
+
+    // Test 5: migrate to the arbitrary nonce ordering:
+    run_nonce_test(
+        &mut vm.vm,
+        account,
+        2u32,
+        NonceHolderTestMode::SwitchToArbitraryOrdering,
+        None,
+        "Failed to switch to arbitrary ordering",
+    );
+
+    // Test 6: increase min nonce by 5
+    run_nonce_test(
+        &mut vm.vm,
+        account,
+        6u32,
+        NonceHolderTestMode::IncreaseMinNonceBy5,
+        None,
+        "Failed to increase min nonce by 5",
+    );
+
+    // Test 7: since the nonces in range [6,10] are no longer allowed, the
+    // tx with nonce 10 should not be allowed
+    run_nonce_test(
+        &mut vm.vm,
+        account,
+        10u32,
+        NonceHolderTestMode::IncreaseMinNonceBy5,
+        Some(format!("Error function_selector = 0xe90aded4, data = 0xe90aded4000000000000000000000000{hex_addr}000000000000000000000000000000000000000000000000000000000000000a")),
+        "Allowed to reuse nonce below the minimal one",
+    );
+
+    // Test 8: we should be able to use nonce 13
+    run_nonce_test(
+        &mut vm.vm,
+        account,
+        13u32,
+        NonceHolderTestMode::SetValueUnderNonce,
+        None,
+        "Did not allow to use unused nonce 13",
+    );
+
+    // Test 9: we should not be able to reuse nonce 13
+    run_nonce_test(
+        &mut vm.vm,
+        account,
+        13u32,
+        NonceHolderTestMode::IncreaseMinNonceBy5,
+        Some(format!("Error function_selector = 0xe90aded4, data = 0xe90aded4000000000000000000000000{hex_addr}000000000000000000000000000000000000000000000000000000000000000d")),
+        "Allowed to reuse the same nonce twice",
+    );
+
+    // Test 10: we should be able to simply use nonce 14, while bumping the minimal nonce by 5
+    run_nonce_test(
+        &mut vm.vm,
+        account,
+        14u32,
+        NonceHolderTestMode::IncreaseMinNonceBy5,
+        None,
+        "Did not allow to use a bumped nonce",
+    );
+
+    // Test 11: Do not allow bumping nonce by too much
+    run_nonce_test(
+        &mut vm.vm,
+        account,
+        16u32,
+        NonceHolderTestMode::IncreaseMinNonceTooMuch,
+        Some("Error function_selector = 0x45ac24a6, data = 0x45ac24a600000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000040000000000000000000000".to_string()),
+        "Allowed for incrementing min nonce too much",
+    );
+
+    // Test 12: Do not allow not setting a nonce as used
+    run_nonce_test(
+        &mut vm.vm,
+        account,
+        16u32,
+        NonceHolderTestMode::LeaveNonceUnused,
+        Some(format!("Error function_selector = 0x1f2f8478, data = 0x1f2f8478000000000000000000000000{hex_addr}0000000000000000000000000000000000000000000000000000000000000010")),
+        "Allowed to leave nonce as unused",
+    );
+}
diff --git a/core/lib/multivm/src/versions/testonly/precompiles.rs b/core/lib/multivm/src/versions/testonly/precompiles.rs
new file mode 100644
index 00000000000..2e26dc134b0
--- /dev/null
+++ b/core/lib/multivm/src/versions/testonly/precompiles.rs
@@ -0,0 +1,110 @@
+use circuit_sequencer_api_1_5_0::geometry_config::get_geometry_config;
+use zksync_types::{Address, Execute};
+
+use super::{read_precompiles_contract, tester::VmTesterBuilder, TestedVm};
+use crate::{
+    interface::{InspectExecutionMode, TxExecutionMode, VmInterfaceExt},
+    versions::testonly::ContractToDeploy,
+    vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT,
+};
+
+pub(crate) fn test_keccak<VM: TestedVm>() {
+    // Execute special transaction and check that at least 1000 keccak calls were made.
+    let contract = read_precompiles_contract();
+    let address = Address::repeat_byte(1);
+    let mut vm = VmTesterBuilder::new()
+        .with_empty_in_memory_storage()
+        .with_rich_accounts(1)
+        .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT)
+        .with_execution_mode(TxExecutionMode::VerifyExecute)
+        .with_custom_contracts(vec![ContractToDeploy::account(contract, address)])
+        .build::<VM>();
+
+    // calldata for `doKeccak(1000)`.
+    let keccak1000_calldata =
+        "370f20ac00000000000000000000000000000000000000000000000000000000000003e8";
+
+    let account = &mut vm.rich_accounts[0];
+    let tx = account.get_l2_tx_for_execute(
+        Execute {
+            contract_address: Some(address),
+            calldata: hex::decode(keccak1000_calldata).unwrap(),
+            value: 0.into(),
+            factory_deps: vec![],
+        },
+        None,
+    );
+    vm.vm.push_transaction(tx);
+
+    let exec_result = vm.vm.execute(InspectExecutionMode::OneTx);
+    assert!(!exec_result.result.is_failed(), "{exec_result:#?}");
+
+    let keccak_count = exec_result.statistics.circuit_statistic.keccak256
+        * get_geometry_config().cycles_per_keccak256_circuit as f32;
+    assert!(keccak_count >= 1000.0, "{keccak_count}");
+}
+
+pub(crate) fn test_sha256<VM: TestedVm>() {
+    // Execute special transaction and check that at least 1000 `sha256` calls were made.
+    let contract = read_precompiles_contract();
+    let address = Address::repeat_byte(1);
+    let mut vm = VmTesterBuilder::new()
+        .with_empty_in_memory_storage()
+        .with_rich_accounts(1)
+        .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT)
+        .with_execution_mode(TxExecutionMode::VerifyExecute)
+        .with_custom_contracts(vec![ContractToDeploy::account(contract, address)])
+        .build::<VM>();
+
+    // calldata for `doSha256(1000)`.
+    let sha1000_calldata =
+        "5d0b4fb500000000000000000000000000000000000000000000000000000000000003e8";
+
+    let account = &mut vm.rich_accounts[0];
+    let tx = account.get_l2_tx_for_execute(
+        Execute {
+            contract_address: Some(address),
+            calldata: hex::decode(sha1000_calldata).unwrap(),
+            value: 0.into(),
+            factory_deps: vec![],
+        },
+        None,
+    );
+    vm.vm.push_transaction(tx);
+
+    let exec_result = vm.vm.execute(InspectExecutionMode::OneTx);
+    assert!(!exec_result.result.is_failed(), "{exec_result:#?}");
+
+    let sha_count = exec_result.statistics.circuit_statistic.sha256
+        * get_geometry_config().cycles_per_sha256_circuit as f32;
+    assert!(sha_count >= 1000.0, "{sha_count}");
+}
+
+pub(crate) fn test_ecrecover<VM: TestedVm>() {
+    // Execute simple transfer and check that exactly 1 `ecrecover` call was made (it's done during tx validation).
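+    // (As with the hash precompiles above, the call count is recovered from the fractional
+    // circuit statistic by multiplying it with the matching `cycles_per_*_circuit` constant,
+    // hence the small numeric tolerance in the assertion below.)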
+    let mut vm = VmTesterBuilder::new()
+        .with_empty_in_memory_storage()
+        .with_rich_accounts(1)
+        .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT)
+        .with_execution_mode(TxExecutionMode::VerifyExecute)
+        .build::<VM>();
+
+    let account = &mut vm.rich_accounts[0];
+    let tx = account.get_l2_tx_for_execute(
+        Execute {
+            contract_address: Some(account.address),
+            calldata: vec![],
+            value: 0.into(),
+            factory_deps: vec![],
+        },
+        None,
+    );
+    vm.vm.push_transaction(tx);
+
+    let exec_result = vm.vm.execute(InspectExecutionMode::OneTx);
+    assert!(!exec_result.result.is_failed(), "{exec_result:#?}");
+
+    let ecrecover_count = exec_result.statistics.circuit_statistic.ecrecover
+        * get_geometry_config().cycles_per_ecrecover_circuit as f32;
+    assert!((ecrecover_count - 1.0).abs() < 1e-4, "{ecrecover_count}");
+}
diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/refunds.rs b/core/lib/multivm/src/versions/testonly/refunds.rs
similarity index 51%
rename from core/lib/multivm/src/versions/vm_refunds_enhancement/tests/refunds.rs
rename to core/lib/multivm/src/versions/testonly/refunds.rs
index 54c281a9939..edab843be4f 100644
--- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/refunds.rs
+++ b/core/lib/multivm/src/versions/testonly/refunds.rs
@@ -1,32 +1,30 @@
-use crate::interface::{TxExecutionMode, VmExecutionMode};
-use crate::vm_refunds_enhancement::tests::tester::{DeployContractsTx, TxType, VmTesterBuilder};
-use crate::vm_refunds_enhancement::tests::utils::read_test_contract;
+use ethabi::Token;
+use zksync_test_account::TxType;
+use zksync_types::{Address, Execute, U256};

-use crate::vm_refunds_enhancement::types::internals::TransactionData;
-use crate::vm_refunds_enhancement::HistoryEnabled;
+use super::{
+    default_pubdata_builder, read_expensive_contract, read_test_contract, tester::VmTesterBuilder,
+    ContractToDeploy, TestedVm,
+};
+use crate::interface::{InspectExecutionMode, TxExecutionMode, VmInterfaceExt};

-#[test]
-fn test_predetermined_refunded_gas() {
+pub(crate) fn test_predetermined_refunded_gas<VM: TestedVm>() {
     // In this test, we compare the execution of the bootloader with the predefined
     // refunded gas and without it.

-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
+    let mut vm = VmTesterBuilder::new()
         .with_empty_in_memory_storage()
         .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_random_rich_accounts(1)
-        .build();
-    let l1_batch = vm.vm.batch_env.clone();
+        .with_rich_accounts(1)
+        .build::<VM>();
+    let l1_batch = vm.l1_batch_env.clone();

     let counter = read_test_contract();
     let account = &mut vm.rich_accounts[0];

-    let DeployContractsTx {
-        tx,
-        bytecode_hash: _,
-        address: _,
-    } = account.get_deploy_tx(&counter, None, TxType::L2);
+    let tx = account.get_deploy_tx(&counter, None, TxType::L2).tx;
     vm.vm.push_transaction(tx.clone());
-    let result = vm.vm.execute(VmExecutionMode::OneTx);
+    let result = vm.vm.execute(InspectExecutionMode::OneTx);

     assert!(!result.result.is_failed());

@@ -39,7 +37,7 @@ fn test_predetermined_refunded_gas() {
     );
     assert!(result.refunds.gas_refunded > 0, "The final refund is 0");

-    let result_without_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch);
+    let result_without_predefined_refunds = vm
+        .vm
+        .finish_batch(default_pubdata_builder())
+        .block_tip_execution_result;
     let mut current_state_without_predefined_refunds = vm.vm.get_current_execution_state();
     assert!(!result_without_predefined_refunds.result.is_failed(),);

@@ -47,21 +48,21 @@
     // We execute the whole block without refund tracer, because refund tracer will eventually override the provided refund.
     // But the overall result should be the same.

-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
+    let mut vm = VmTesterBuilder::new()
         .with_empty_in_memory_storage()
         .with_l1_batch_env(l1_batch.clone())
         .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_rich_accounts(vec![account.clone()])
-        .build();
+        .with_rich_accounts(1)
+        .build::<VM>();
+    assert_eq!(account.address(), vm.rich_accounts[0].address());

-    let tx: TransactionData = tx.into();
-    let block_gas_per_pubdata_byte = vm.vm.batch_env.block_gas_price_per_pubdata();
-    // Overhead
-    let overhead = tx.overhead_gas(block_gas_per_pubdata_byte as u32);
     vm.vm
-        .push_raw_transaction(tx.clone(), overhead, result.refunds.gas_refunded, true);
+        .push_transaction_with_refund(tx.clone(), result.refunds.gas_refunded);

-    let result_with_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch);
+    let result_with_predefined_refunds = vm
+        .vm
+        .finish_batch(default_pubdata_builder())
+        .block_tip_execution_result;
     let mut current_state_with_predefined_refunds = vm.vm.get_current_execution_state();

     assert!(!result_with_predefined_refunds.result.is_failed());
@@ -90,13 +91,8 @@ fn test_predetermined_refunded_gas() {
     );

     assert_eq!(
-        current_state_with_predefined_refunds.deduplicated_events_logs,
-        current_state_without_predefined_refunds.deduplicated_events_logs
-    );
-
-    assert_eq!(
-        current_state_with_predefined_refunds.storage_log_queries,
-        current_state_without_predefined_refunds.storage_log_queries
+        current_state_with_predefined_refunds.deduplicated_storage_logs,
+        current_state_without_predefined_refunds.deduplicated_storage_logs
     );
     assert_eq!(
         current_state_with_predefined_refunds.used_contract_hashes,
@@ -106,17 +102,21 @@ fn test_predetermined_refunded_gas() {

     // In this test, we provide a different refund from the operator.
     // We still can't use the refund tracer, because it will override the refund.
     // But we can check that the logs and events have changed.
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
+    let mut vm = VmTesterBuilder::new()
         .with_empty_in_memory_storage()
         .with_l1_batch_env(l1_batch)
         .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_rich_accounts(vec![account.clone()])
-        .build();
+        .with_rich_accounts(1)
+        .build::<VM>();
+    assert_eq!(account.address(), vm.rich_accounts[0].address());

     let changed_operator_suggested_refund = result.refunds.gas_refunded + 1000;
     vm.vm
-        .push_raw_transaction(tx, overhead, changed_operator_suggested_refund, true);
-    let result = vm.vm.execute(VmExecutionMode::Batch);
+        .push_transaction_with_refund(tx, changed_operator_suggested_refund);
+    let result = vm
+        .vm
+        .finish_batch(default_pubdata_builder())
+        .block_tip_execution_result;
     let mut current_state_with_changed_predefined_refunds = vm.vm.get_current_execution_state();

     assert!(!result.result.is_failed());
@@ -147,26 +147,78 @@ fn test_predetermined_refunded_gas() {
         current_state_without_predefined_refunds.system_logs
     );

-    assert_eq!(
-        current_state_with_changed_predefined_refunds.deduplicated_events_logs,
-        current_state_without_predefined_refunds.deduplicated_events_logs
-    );
-
     assert_eq!(
         current_state_with_changed_predefined_refunds
-            .storage_log_queries
+            .deduplicated_storage_logs
             .len(),
         current_state_without_predefined_refunds
-            .storage_log_queries
+            .deduplicated_storage_logs
             .len()
     );
     assert_ne!(
-        current_state_with_changed_predefined_refunds.storage_log_queries,
-        current_state_without_predefined_refunds.storage_log_queries
+        current_state_with_changed_predefined_refunds.deduplicated_storage_logs,
+        current_state_without_predefined_refunds.deduplicated_storage_logs
     );
     assert_eq!(
         current_state_with_changed_predefined_refunds.used_contract_hashes,
         current_state_without_predefined_refunds.used_contract_hashes
     );
 }
+
+pub(crate) fn test_negative_pubdata_for_transaction<VM: TestedVm>() {
+    let expensive_contract_address = Address::repeat_byte(1);
+    let (expensive_contract_bytecode, expensive_contract) = read_expensive_contract();
+    let expensive_function = expensive_contract.function("expensive").unwrap();
+    let cleanup_function = expensive_contract.function("cleanUp").unwrap();
+
+    let mut vm = VmTesterBuilder::new()
+        .with_empty_in_memory_storage()
+        .with_execution_mode(TxExecutionMode::VerifyExecute)
+        .with_rich_accounts(1)
+        .with_custom_contracts(vec![ContractToDeploy::new(
+            expensive_contract_bytecode,
+            expensive_contract_address,
+        )])
+        .build::<VM>();
+
+    let expensive_tx = vm.rich_accounts[0].get_l2_tx_for_execute(
+        Execute {
+            contract_address: Some(expensive_contract_address),
+            calldata: expensive_function
+                .encode_input(&[Token::Uint(10.into())])
+                .unwrap(),
+            value: U256::zero(),
+            factory_deps: vec![],
+        },
+        None,
+    );
+    vm.vm.push_transaction(expensive_tx);
+    let result = vm.vm.execute(InspectExecutionMode::OneTx);
+    assert!(
+        !result.result.is_failed(),
+        "Transaction wasn't successful: {result:#?}"
+    );
+
+    // This transaction cleans all initial writes in the contract, thus having a negative `pubdata` impact.
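+    // (With a negative pubdata impact, the operator-suggested refund should be positive and,
+    // per the assertions below, passed through to the user in full.)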
+    let clean_up_tx = vm.rich_accounts[0].get_l2_tx_for_execute(
+        Execute {
+            contract_address: Some(expensive_contract_address),
+            calldata: cleanup_function.encode_input(&[]).unwrap(),
+            value: U256::zero(),
+            factory_deps: vec![],
+        },
+        None,
+    );
+    vm.vm.push_transaction(clean_up_tx);
+    let result = vm.vm.execute(InspectExecutionMode::OneTx);
+    assert!(
+        !result.result.is_failed(),
+        "Transaction wasn't successful: {result:#?}"
+    );
+    assert!(result.refunds.operator_suggested_refund > 0);
+    assert_eq!(
+        result.refunds.gas_refunded,
+        result.refunds.operator_suggested_refund
+    );
+}
diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/require_eip712.rs b/core/lib/multivm/src/versions/testonly/require_eip712.rs
similarity index 61%
rename from core/lib/multivm/src/versions/vm_1_4_2/tests/require_eip712.rs
rename to core/lib/multivm/src/versions/testonly/require_eip712.rs
index 15f4504d6e1..e789fbda290 100644
--- a/core/lib/multivm/src/versions/vm_1_4_2/tests/require_eip712.rs
+++ b/core/lib/multivm/src/versions/testonly/require_eip712.rs
@@ -1,57 +1,38 @@
-use std::convert::TryInto;
-
 use ethabi::Token;
-use zksync_eth_signer::{raw_ethereum_tx::TransactionParameters, EthereumSigner};
-use zksync_system_constants::L2_ETH_TOKEN_ADDRESS;
+use zksync_eth_signer::TransactionParameters;
 use zksync_types::{
-    fee::Fee, l2::L2Tx, transaction_request::TransactionRequest,
-    utils::storage_key_for_standard_token_balance, AccountTreeId, Address, Eip712Domain, Execute,
+    fee::Fee, l2::L2Tx, transaction_request::TransactionRequest, Address, Eip712Domain, Execute,
     L2ChainId, Nonce, Transaction, U256,
 };

-use crate::{
-    interface::{TxExecutionMode, VmExecutionMode, VmInterface},
-    vm_1_4_2::tests::{
-        tester::{Account, VmTester, VmTesterBuilder},
-        utils::read_many_owners_custom_account_contract,
-    },
+use super::{
+    read_many_owners_custom_account_contract, tester::VmTesterBuilder, ContractToDeploy, TestedVm,
 };
+use crate::interface::{InspectExecutionMode, TxExecutionMode, VmInterfaceExt};

-impl VmTester {
-    pub(crate) fn get_eth_balance(&mut self, address: Address) -> U256 {
-        let key = storage_key_for_standard_token_balance(
-            AccountTreeId::new(L2_ETH_TOKEN_ADDRESS),
-            &address,
-        );
-        self.vm.state.storage.storage.read_from_storage(&key)
-    }
-}
-
-// TODO refactor this test it use too much internal details of the VM
-#[tokio::test]
 /// This test deploys 'buggy' account abstraction code, and then tries accessing it both with legacy
 /// and EIP712 transactions.
 /// Currently we support both, but in the future, we should allow only EIP712 transactions to access the AA accounts.
-async fn test_require_eip712() {
+pub(crate) fn test_require_eip712<VM: TestedVm>() {
     // Use 3 accounts:
     // - `private_address` - EOA account, where we have the key
     // - `account_address` - AA account, where the contract is deployed
     // - beneficiary - an EOA account, where we'll try to transfer the tokens.
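+    // (Reading aid for the flow below: the EOA key is first registered as an owner of the AA
+    // account; then a legacy tx is replayed by a "malicious" operator with the AA account set as
+    // the initiator; finally, a proper EIP-712 tx is sent from the AA account.)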
-    let account_abstraction = Account::random();
-    let mut private_account = Account::random();
-    let beneficiary = Account::random();
+    let aa_address = Address::repeat_byte(0x10);
+    let beneficiary_address = Address::repeat_byte(0x20);

     let (bytecode, contract) = read_many_owners_custom_account_contract();
-    let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryDisabled)
+    let mut vm = VmTesterBuilder::new()
         .with_empty_in_memory_storage()
-        .with_custom_contracts(vec![(bytecode, account_abstraction.address, true)])
+        .with_custom_contracts(vec![
+            ContractToDeploy::account(bytecode, aa_address).funded()
+        ])
         .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_rich_accounts(vec![account_abstraction.clone(), private_account.clone()])
-        .build();
-
-    assert_eq!(vm.get_eth_balance(beneficiary.address), U256::from(0));
-
+        .with_rich_accounts(1)
+        .build::<VM>();
+    assert_eq!(vm.get_eth_balance(beneficiary_address), U256::from(0));
     let chain_id: u32 = 270;
+    let mut private_account = vm.rich_accounts[0].clone();

     // First, let's set the owners of the AA account to the `private_address`.
     // (so that messages signed by `private_address` are authorized to act on behalf of the AA account).
@@ -62,16 +43,16 @@ async fn test_require_eip712() {

     let tx = private_account.get_l2_tx_for_execute(
         Execute {
-            contract_address: account_abstraction.address,
+            contract_address: Some(aa_address),
             calldata: encoded_input,
             value: Default::default(),
-            factory_deps: None,
+            factory_deps: vec![],
         },
         None,
     );

     vm.vm.push_transaction(tx);
-    let result = vm.vm.execute(VmExecutionMode::OneTx);
+    let result = vm.vm.execute(InspectExecutionMode::OneTx);
     assert!(!result.result.is_failed());

     let private_account_balance = vm.get_eth_balance(private_account.address);
@@ -80,7 +61,7 @@ async fn test_require_eip712() {
     // Normally this would not work - unless the operator is malicious.
     let aa_raw_tx = TransactionParameters {
         nonce: U256::from(0),
-        to: Some(beneficiary.address),
+        to: Some(beneficiary_address),
         gas: U256::from(100000000),
         gas_price: Some(U256::from(10000000)),
         value: U256::from(888000088),
@@ -94,20 +75,21 @@ async fn test_require_eip712() {
         blob_versioned_hashes: None,
     };

-    let aa_tx = private_account.sign_legacy_tx(aa_raw_tx).await;
+    let aa_tx = private_account.sign_legacy_tx(aa_raw_tx);
     let (tx_request, hash) = TransactionRequest::from_bytes(&aa_tx, L2ChainId::from(270)).unwrap();

-    let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000).unwrap();
+    let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000, false).unwrap();
     l2_tx.set_input(aa_tx, hash);
     // Pretend that operator is malicious and sets the initiator to the AA account.
-    l2_tx.common_data.initiator_address = account_abstraction.address;
-    let transaction: Transaction = l2_tx.try_into().unwrap();
+    l2_tx.common_data.initiator_address = aa_address;
+    let transaction: Transaction = l2_tx.into();

     vm.vm.push_transaction(transaction);
-    let result = vm.vm.execute(VmExecutionMode::OneTx);
+    let result = vm.vm.execute(InspectExecutionMode::OneTx);
     assert!(!result.result.is_failed());
+
     assert_eq!(
-        vm.get_eth_balance(beneficiary.address),
+        vm.get_eth_balance(beneficiary_address),
         U256::from(888000088)
     );
     // Make sure that the tokens were transferred from the AA account.
@@ -118,7 +100,7 @@ async fn test_require_eip712() {
     //
     // Now send the 'classic' EIP712 transaction
     let tx_712 = L2Tx::new(
-        beneficiary.address,
+        Some(beneficiary_address),
         vec![],
         Nonce(1),
         Fee {
@@ -127,34 +109,34 @@ async fn test_require_eip712() {
             max_priority_fee_per_gas: U256::from(1000000000),
             gas_per_pubdata_limit: U256::from(1000000000),
         },
-        account_abstraction.address,
+        aa_address,
         U256::from(28374938),
-        None,
+        vec![],
         Default::default(),
     );

-    let transaction_request: TransactionRequest = tx_712.into();
+    let mut transaction_request: TransactionRequest = tx_712.into();
+    transaction_request.chain_id = Some(chain_id.into());

     let domain = Eip712Domain::new(L2ChainId::from(chain_id));
     let signature = private_account
         .get_pk_signer()
         .sign_typed_data(&domain, &transaction_request)
-        .await
         .unwrap();
-    let encoded_tx = transaction_request.get_signed_bytes(&signature, L2ChainId::from(chain_id));
+    let encoded_tx = transaction_request.get_signed_bytes(&signature).unwrap();

     let (aa_txn_request, aa_hash) =
         TransactionRequest::from_bytes(&encoded_tx, L2ChainId::from(chain_id)).unwrap();

-    let mut l2_tx = L2Tx::from_request(aa_txn_request, 100000).unwrap();
+    let mut l2_tx = L2Tx::from_request(aa_txn_request, 100000, false).unwrap();
     l2_tx.set_input(encoded_tx, aa_hash);

-    let transaction: Transaction = l2_tx.try_into().unwrap();
+    let transaction: Transaction = l2_tx.into();
     vm.vm.push_transaction(transaction);
-    vm.vm.execute(VmExecutionMode::OneTx);
+    vm.vm.execute(InspectExecutionMode::OneTx);

     assert_eq!(
-        vm.get_eth_balance(beneficiary.address),
+        vm.get_eth_balance(beneficiary_address),
         U256::from(916375026)
     );
     assert_eq!(
diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/rollbacks.rs b/core/lib/multivm/src/versions/testonly/rollbacks.rs
similarity index 50%
rename from core/lib/multivm/src/versions/vm_virtual_blocks/tests/rollbacks.rs
rename to core/lib/multivm/src/versions/testonly/rollbacks.rs
index 240b7188377..cab3427899e 100644
--- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/rollbacks.rs
+++ b/core/lib/multivm/src/versions/testonly/rollbacks.rs
@@ -1,24 +1,24 @@
-use ethabi::Token;
-
-use zksync_contracts::get_loadnext_contract;
-use zksync_contracts::test_contracts::LoadnextContractExecutionParams;
+use std::collections::HashMap;

-use zksync_types::{Execute, U256};
-
-use crate::interface::TxExecutionMode;
-use crate::vm_latest::HistoryEnabled;
-use crate::vm_virtual_blocks::tests::tester::{
-    DeployContractsTx, TransactionTestInfo, TxModifier, TxType, VmTesterBuilder,
+use assert_matches::assert_matches;
+use ethabi::Token;
+use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams};
+use zksync_test_account::{DeployContractsTx, TxType};
+use zksync_types::{Address, Execute, Nonce, U256};
+
+use super::{
+    read_test_contract,
+    tester::{TransactionTestInfo, TxModifier, VmTesterBuilder},
+    ContractToDeploy, TestedVm,
 };
-use crate::vm_virtual_blocks::tests::utils::read_test_contract;
+use crate::interface::{storage::ReadStorage, ExecutionResult, TxExecutionMode, VmInterfaceExt};

-#[test]
-fn test_vm_rollbacks() {
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
+pub(crate) fn test_vm_rollbacks<VM: TestedVm>() {
+    let mut vm = VmTesterBuilder::new()
         .with_empty_in_memory_storage()
         .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_random_rich_accounts(1)
-        .build();
+        .with_rich_accounts(1)
+        .build::<VM>();

     let mut account = vm.rich_accounts[0].clone();
     let counter = read_test_contract();
@@ -40,34 +40,51 @@ fn test_vm_rollbacks() {
         TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongMagicValue.into()),
         TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignature.into()),
         // The correct nonce is 0, this tx will fail
-        TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()),
+        TransactionTestInfo::new_rejected(
+            tx_2.clone(),
+            TxModifier::WrongNonce(tx_2.nonce().unwrap(), Nonce(0)).into(),
+        ),
         // This tx will succeed
         TransactionTestInfo::new_processed(tx_0.clone(), false),
         // The correct nonce is 1, this tx will fail
-        TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()),
+        TransactionTestInfo::new_rejected(
+            tx_0.clone(),
+            TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(),
+        ),
        // The correct nonce is 1, this tx will fail
-        TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()),
+        TransactionTestInfo::new_rejected(
+            tx_2.clone(),
+            TxModifier::WrongNonce(tx_2.nonce().unwrap(), Nonce(1)).into(),
+        ),
         // This tx will succeed
         TransactionTestInfo::new_processed(tx_1, false),
         // The correct nonce is 2, this tx will fail
-        TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()),
+        TransactionTestInfo::new_rejected(
+            tx_0.clone(),
+            TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(),
+        ),
         // This tx will succeed
         TransactionTestInfo::new_processed(tx_2.clone(), false),
         // This tx will fail
-        TransactionTestInfo::new_rejected(tx_2, TxModifier::NonceReused.into()),
-        TransactionTestInfo::new_rejected(tx_0, TxModifier::NonceReused.into()),
+        TransactionTestInfo::new_rejected(
+            tx_2.clone(),
+            TxModifier::NonceReused(tx_2.initiator_account(), tx_2.nonce().unwrap()).into(),
+        ),
+        TransactionTestInfo::new_rejected(
+            tx_0.clone(),
+            TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(),
+        ),
     ]);

-    assert_eq!(result_without_rollbacks, result_with_rollbacks);
+    pretty_assertions::assert_eq!(result_without_rollbacks, result_with_rollbacks);
 }

-#[test]
-fn test_vm_loadnext_rollbacks() {
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
+pub(crate) fn test_vm_loadnext_rollbacks<VM: TestedVm>() {
+    let mut vm = VmTesterBuilder::new()
         .with_empty_in_memory_storage()
         .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_random_rich_accounts(1)
-        .build();
+        .with_rich_accounts(1)
+        .build::<VM>();

     let mut account = vm.rich_accounts[0].clone();
     let loadnext_contract = get_loadnext_contract();
@@ -85,7 +102,7 @@ fn test_vm_loadnext_rollbacks() {

     let loadnext_tx_1 = account.get_l2_tx_for_execute(
         Execute {
-            contract_address: address,
+            contract_address: Some(address),
             calldata: LoadnextContractExecutionParams {
                 reads: 100,
                 writes: 100,
@@ -96,14 +113,14 @@ fn test_vm_loadnext_rollbacks() {
             }
             .to_bytes(),
             value: Default::default(),
-            factory_deps: None,
+            factory_deps: vec![],
         },
         None,
     );

     let loadnext_tx_2 = account.get_l2_tx_for_execute(
         Execute {
-            contract_address: address,
+            contract_address: Some(address),
             calldata: LoadnextContractExecutionParams {
                 reads: 100,
                 writes: 100,
@@ -114,7 +131,7 @@ fn test_vm_loadnext_rollbacks() {
             }
             .to_bytes(),
             value: Default::default(),
-            factory_deps: None,
+            factory_deps: vec![],
         },
         None,
     );
@@ -133,14 +150,63 @@ fn test_vm_loadnext_rollbacks() {
         TransactionTestInfo::new_processed(loadnext_tx_1.clone(), true),
         TransactionTestInfo::new_rejected(
             loadnext_deploy_tx.clone(),
-            TxModifier::NonceReused.into(),
+            TxModifier::NonceReused(
+                loadnext_deploy_tx.initiator_account(),
+                loadnext_deploy_tx.nonce().unwrap(),
+            )
+            .into(),
         ),
         TransactionTestInfo::new_processed(loadnext_tx_1, false),
         TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true),
         TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true),
-        TransactionTestInfo::new_rejected(loadnext_deploy_tx, TxModifier::NonceReused.into()),
+        TransactionTestInfo::new_rejected(
+            loadnext_deploy_tx.clone(),
+            TxModifier::NonceReused(
+                loadnext_deploy_tx.initiator_account(),
+                loadnext_deploy_tx.nonce().unwrap(),
+            )
+            .into(),
+        ),
         TransactionTestInfo::new_processed(loadnext_tx_2, false),
     ]);

     assert_eq!(result_without_rollbacks, result_with_rollbacks);
 }
+
+pub(crate) fn test_rollback_in_call_mode<VM: TestedVm>() {
+    let counter_bytecode = read_test_contract();
+    let counter_address = Address::repeat_byte(1);
+
+    let mut vm = VmTesterBuilder::new()
+        .with_empty_in_memory_storage()
+        .with_execution_mode(TxExecutionMode::EthCall)
+        .with_custom_contracts(vec![ContractToDeploy::new(
+            counter_bytecode,
+            counter_address,
+        )])
+        .with_rich_accounts(1)
+        .build::<VM>();
+    let account = &mut vm.rich_accounts[0];
+    let tx = account.get_test_contract_transaction(counter_address, true, None, false, TxType::L2);
+
+    let (compression_result, vm_result) = vm
+        .vm
+        .execute_transaction_with_bytecode_compression(tx, true);
+    compression_result.unwrap();
+    assert_matches!(
+        vm_result.result,
+        ExecutionResult::Revert { output }
+            if output.to_string().contains("This method always reverts")
+    );
+
+    let storage_logs = &vm_result.logs.storage_logs;
+    let deduplicated_logs = storage_logs
+        .iter()
+        .filter_map(|log| log.log.is_write().then_some((log.log.key, log.log.value)));
+    let deduplicated_logs: HashMap<_, _> = deduplicated_logs.collect();
+    // Check that all storage changes are reverted
+    let mut storage = vm.storage.borrow_mut();
+    for (key, value) in deduplicated_logs {
+        assert_eq!(storage.inner_mut().read_value(&key), value);
+    }
+}
diff --git a/core/lib/multivm/src/versions/vm_fast/tests/sekp256r1.rs b/core/lib/multivm/src/versions/testonly/secp256r1.rs
similarity index 89%
rename from core/lib/multivm/src/versions/vm_fast/tests/sekp256r1.rs
rename to core/lib/multivm/src/versions/testonly/secp256r1.rs
index 55ca372c4a9..37d428f8210 100644
--- a/core/lib/multivm/src/versions/vm_fast/tests/sekp256r1.rs
+++ b/core/lib/multivm/src/versions/testonly/secp256r1.rs
@@ -3,21 +3,18 @@ use zksync_system_constants::P256VERIFY_PRECOMPILE_ADDRESS;
 use zksync_types::{web3::keccak256, Execute, H256, U256};
 use zksync_utils::h256_to_u256;

-use crate::{
-    interface::{ExecutionResult, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt},
-    vm_fast::tests::tester::VmTesterBuilder,
-};
+use super::{tester::VmTesterBuilder, TestedVm};
+use crate::interface::{ExecutionResult, InspectExecutionMode, TxExecutionMode, VmInterfaceExt};

-#[test]
-fn test_sekp256r1() {
+pub(crate) fn test_secp256r1<VM: TestedVm>() {
     // In this test, we aim to test whether a simple account interaction (without any fee logic)
     // will work. The account will try to deploy a simple contract from integration tests.
     let mut vm = VmTesterBuilder::new()
         .with_empty_in_memory_storage()
         .with_execution_mode(TxExecutionMode::VerifyExecute)
         .with_execution_mode(TxExecutionMode::EthCall)
-        .with_random_rich_accounts(1)
-        .build();
+        .with_rich_accounts(1)
+        .build::<VM>();

     let account = &mut vm.rich_accounts[0];
@@ -58,7 +55,7 @@ fn test_sekp256r1() {

     vm.vm.push_transaction(tx);

-    let execution_result = vm.vm.execute(VmExecutionMode::Batch);
+    let execution_result = vm.vm.execute(InspectExecutionMode::OneTx);
     let ExecutionResult::Success { output } = execution_result.result else {
         panic!("batch failed")
diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/simple_execution.rs b/core/lib/multivm/src/versions/testonly/simple_execution.rs
similarity index 63%
rename from core/lib/multivm/src/versions/vm_1_4_2/tests/simple_execution.rs
rename to core/lib/multivm/src/versions/testonly/simple_execution.rs
index 57b37e67b76..96239fb362d 100644
--- a/core/lib/multivm/src/versions/vm_1_4_2/tests/simple_execution.rs
+++ b/core/lib/multivm/src/versions/testonly/simple_execution.rs
@@ -1,15 +1,14 @@
-use crate::{
-    interface::{ExecutionResult, VmExecutionMode, VmInterface},
-    vm_1_4_2::tests::tester::{TxType, VmTesterBuilder},
-};
+use assert_matches::assert_matches;
+use zksync_test_account::TxType;

-#[test]
-fn estimate_fee() {
-    let mut vm_tester = VmTesterBuilder::new(crate::vm_latest::HistoryDisabled)
+use super::{default_pubdata_builder, tester::VmTesterBuilder, TestedVm};
+use crate::interface::{ExecutionResult, InspectExecutionMode, VmInterfaceExt};
+
+pub(crate) fn test_estimate_fee<VM: TestedVm>() {
+    let mut vm_tester = VmTesterBuilder::new()
         .with_empty_in_memory_storage()
-        .with_deployer()
-        .with_random_rich_accounts(1)
-        .build();
+        .with_rich_accounts(1)
+        .build::<VM>();

     vm_tester.deploy_test_contract();
     let account = &mut vm_tester.rich_accounts[0];
@@ -24,17 +23,15 @@ fn estimate_fee() {

     vm_tester.vm.push_transaction(tx);

-    let result = vm_tester.vm.execute(VmExecutionMode::OneTx);
+    let result = vm_tester.vm.execute(InspectExecutionMode::OneTx);

     assert_matches!(result.result, ExecutionResult::Success { .. });
 }

-#[test]
-fn simple_execute() {
-    let mut vm_tester = VmTesterBuilder::new(crate::vm_latest::HistoryDisabled)
+pub(crate) fn test_simple_execute<VM: TestedVm>() {
+    let mut vm_tester = VmTesterBuilder::new()
         .with_empty_in_memory_storage()
-        .with_deployer()
-        .with_random_rich_accounts(1)
-        .build();
+        .with_rich_accounts(1)
+        .build::<VM>();

     vm_tester.deploy_test_contract();
@@ -67,12 +64,14 @@ fn simple_execute() {
     vm.push_transaction(tx1);
     vm.push_transaction(tx2);
     vm.push_transaction(tx3);
-    let tx = vm.execute(VmExecutionMode::OneTx);
+    let tx = vm.execute(InspectExecutionMode::OneTx);
     assert_matches!(tx.result, ExecutionResult::Success { .. });
-    let tx = vm.execute(VmExecutionMode::OneTx);
+    let tx = vm.execute(InspectExecutionMode::OneTx);
     assert_matches!(tx.result, ExecutionResult::Revert { .. });
-    let tx = vm.execute(VmExecutionMode::OneTx);
+    let tx = vm.execute(InspectExecutionMode::OneTx);
     assert_matches!(tx.result, ExecutionResult::Success { .. });
-    let block_tip = vm.execute(VmExecutionMode::Batch);
+    let block_tip = vm
+        .finish_batch(default_pubdata_builder())
+        .block_tip_execution_result;
     assert_matches!(block_tip.result, ExecutionResult::Success { .. });
 }
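[Editor's note, not part of the diff: the mechanical change recurring above swaps the `VmExecutionMode::Batch` entry point for an explicit `finish_batch` call parameterized by a pubdata builder. A minimal sketch of the new call shape, assuming a VM implementing the updated `VmInterface` and the `default_pubdata_builder` helper used throughout `testonly`:]

```rust
// Old shape (removed in this diff): the batch tip went through the generic
// `execute` entry point.
// let block_tip = vm.execute(VmExecutionMode::Batch);

// New shape: batch finalization is explicit and takes a pubdata builder.
let block_tip = vm
    .finish_batch(default_pubdata_builder())
    .block_tip_execution_result;
assert!(!block_tip.result.is_failed(), "batch tip failed");
```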
diff --git a/core/lib/multivm/src/versions/testonly/storage.rs b/core/lib/multivm/src/versions/testonly/storage.rs
new file mode 100644
index 00000000000..efe7be1edbd
--- /dev/null
+++ b/core/lib/multivm/src/versions/testonly/storage.rs
@@ -0,0 +1,125 @@
+use ethabi::Token;
+use zksync_contracts::{load_contract, read_bytecode};
+use zksync_types::{Address, Execute, U256};
+
+use super::{tester::VmTesterBuilder, ContractToDeploy, TestedVm};
+use crate::interface::{InspectExecutionMode, TxExecutionMode, VmInterfaceExt};
+
+fn test_storage<VM: TestedVm>(first_tx_calldata: Vec<u8>, second_tx_calldata: Vec<u8>) -> u32 {
+    let bytecode = read_bytecode(
+        "etc/contracts-test-data/artifacts-zk/contracts/storage/storage.sol/StorageTester.json",
+    );
+
+    let test_contract_address = Address::repeat_byte(1);
+
+    // In this test, we aim to test whether a simple account interaction (without any fee logic)
+    // will work. The account will try to deploy a simple contract from integration tests.
+    let mut vm = VmTesterBuilder::new()
+        .with_empty_in_memory_storage()
+        .with_execution_mode(TxExecutionMode::VerifyExecute)
+        .with_rich_accounts(1)
+        .with_custom_contracts(vec![ContractToDeploy::new(bytecode, test_contract_address)])
+        .build::<VM>();
+
+    let account = &mut vm.rich_accounts[0];
+
+    let tx1 = account.get_l2_tx_for_execute(
+        Execute {
+            contract_address: Some(test_contract_address),
+            calldata: first_tx_calldata,
+            value: 0.into(),
+            factory_deps: vec![],
+        },
+        None,
+    );
+
+    let tx2 = account.get_l2_tx_for_execute(
+        Execute {
+            contract_address: Some(test_contract_address),
+            calldata: second_tx_calldata,
+            value: 0.into(),
+            factory_deps: vec![],
+        },
+        None,
+    );
+
+    vm.vm.make_snapshot();
+    vm.vm.push_transaction(tx1);
+    let result = vm.vm.execute(InspectExecutionMode::OneTx);
+    assert!(!result.result.is_failed(), "First tx failed");
+    vm.vm.pop_snapshot_no_rollback();
+
+    // We rollback once because transient storage and rollbacks are a tricky combination.
+    vm.vm.make_snapshot();
+    vm.vm.push_transaction(tx2.clone());
+    let result = vm.vm.execute(InspectExecutionMode::OneTx);
+    assert!(!result.result.is_failed(), "Second tx failed");
+    vm.vm.rollback_to_the_latest_snapshot();
+
+    vm.vm.make_snapshot();
+    vm.vm.push_transaction(tx2);
+    let result = vm.vm.execute(InspectExecutionMode::OneTx);
+    assert!(!result.result.is_failed(), "Second tx failed on second run");
+
+    result.statistics.pubdata_published
+}
+
+fn test_storage_one_tx<VM: TestedVm>(second_tx_calldata: Vec<u8>) -> u32 {
+    test_storage::<VM>(vec![], second_tx_calldata)
+}
+
+pub(crate) fn test_storage_behavior<VM: TestedVm>() {
+    let contract = load_contract(
+        "etc/contracts-test-data/artifacts-zk/contracts/storage/storage.sol/StorageTester.json",
+    );
+
+    // In all of the tests below we provide the first tx to ensure that the tracers will not include
+    // the statistics from the start of the bootloader and will only include those for the transaction itself.
+
+    let base_pubdata = test_storage_one_tx::<VM>(vec![]);
+    let simple_test_pubdata = test_storage_one_tx::<VM>(
+        contract
+            .function("simpleWrite")
+            .unwrap()
+            .encode_input(&[])
+            .unwrap(),
+    );
+    let resetting_write_pubdata = test_storage_one_tx::<VM>(
+        contract
+            .function("resettingWrite")
+            .unwrap()
+            .encode_input(&[])
+            .unwrap(),
+    );
+    let resetting_write_via_revert_pubdata = test_storage_one_tx::<VM>(
+        contract
+            .function("resettingWriteViaRevert")
+            .unwrap()
+            .encode_input(&[])
+            .unwrap(),
+    );
+
+    assert_eq!(simple_test_pubdata - base_pubdata, 65);
+    assert_eq!(resetting_write_pubdata - base_pubdata, 34);
+    assert_eq!(resetting_write_via_revert_pubdata - base_pubdata, 34);
+}
+
+pub(crate) fn test_transient_storage_behavior<VM: TestedVm>() {
+    let contract = load_contract(
+        "etc/contracts-test-data/artifacts-zk/contracts/storage/storage.sol/StorageTester.json",
+    );
+
+    let first_tstore_test = contract
+        .function("testTransientStore")
+        .unwrap()
+        .encode_input(&[])
+        .unwrap();
+    // Second transaction checks that, as expected, the transient storage is cleared after the first transaction.
+    let second_tstore_test = contract
+        .function("assertTValue")
+        .unwrap()
+        .encode_input(&[Token::Uint(U256::zero())])
+        .unwrap();
+
+    test_storage::<VM>(first_tstore_test, second_tstore_test);
+}
diff --git a/core/lib/multivm/src/versions/testonly/tester/mod.rs b/core/lib/multivm/src/versions/testonly/tester/mod.rs
new file mode 100644
index 00000000000..716b9386235
--- /dev/null
+++ b/core/lib/multivm/src/versions/testonly/tester/mod.rs
@@ -0,0 +1,231 @@
+use std::{collections::HashSet, fmt, rc::Rc};
+
+use zksync_contracts::BaseSystemContracts;
+use zksync_test_account::{Account, TxType};
+use zksync_types::{
+    utils::{deployed_address_create, storage_key_for_eth_balance},
+    writes::StateDiffRecord,
+    Address, L1BatchNumber, StorageKey, Transaction, H256, U256,
+};
+use zksync_vm_interface::{
+    pubdata::PubdataBuilder, CurrentExecutionState, InspectExecutionMode, VmExecutionResultAndLogs,
+    VmInterfaceHistoryEnabled,
+};
+
+pub(crate) use self::transaction_test_info::{ExpectedError, TransactionTestInfo, TxModifier};
+use super::{get_empty_storage, read_test_contract};
+use crate::{
+    interface::{
+        storage::{InMemoryStorage, StoragePtr, StorageView},
+        L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmFactory, VmInterfaceExt,
+    },
+    versions::testonly::{
+        default_l1_batch, default_system_env, make_address_rich, ContractToDeploy,
+    },
+};
+
+mod transaction_test_info;
+
+/// VM tester that provides prefunded accounts, storage handle etc.
+#[derive(Debug)]
+pub(crate) struct VmTester<VM> {
+    pub(crate) vm: VM,
+    pub(crate) system_env: SystemEnv,
+    pub(crate) l1_batch_env: L1BatchEnv,
+    pub(crate) storage: StoragePtr<StorageView<InMemoryStorage>>,
+    pub(crate) test_contract: Option<Address>,
+    pub(crate) rich_accounts: Vec<Account>,
+}
+
+impl<VM: TestedVm> VmTester<VM> {
+    pub(crate) fn deploy_test_contract(&mut self) {
+        let contract = read_test_contract();
+        let account = &mut self.rich_accounts[0];
+        let tx = account.get_deploy_tx(&contract, None, TxType::L2).tx;
+        let nonce = tx.nonce().unwrap().0.into();
+        self.vm.push_transaction(tx);
+        self.vm.execute(InspectExecutionMode::OneTx);
+        let deployed_address = deployed_address_create(account.address, nonce);
+        self.test_contract = Some(deployed_address);
+    }
+
+    pub(crate) fn get_eth_balance(&mut self, address: Address) -> U256 {
+        self.vm.read_storage(storage_key_for_eth_balance(&address))
+    }
+
+    pub(crate) fn reset_with_empty_storage(&mut self) {
+        let mut storage = get_empty_storage();
+        for account in &self.rich_accounts {
+            make_address_rich(&mut storage, account.address);
+        }
+
+        let storage = StorageView::new(storage).to_rc_ptr();
+        self.storage = storage.clone();
+        self.vm = VM::new(self.l1_batch_env.clone(), self.system_env.clone(), storage);
+    }
+}
+
+/// Builder for [`VmTester`].
+#[derive(Debug)]
+pub(crate) struct VmTesterBuilder {
+    storage: Option<InMemoryStorage>,
+    l1_batch_env: Option<L1BatchEnv>,
+    system_env: SystemEnv,
+    rich_accounts: Vec<Account>,
+    custom_contracts: Vec<ContractToDeploy>,
+}
+
+impl VmTesterBuilder {
+    pub(crate) fn new() -> Self {
+        Self {
+            storage: None,
+            l1_batch_env: None,
+            system_env: default_system_env(),
+            rich_accounts: vec![],
+            custom_contracts: vec![],
+        }
+    }
+
+    pub(crate) fn with_system_env(mut self, system_env: SystemEnv) -> Self {
+        self.system_env = system_env;
+        self
+    }
+
+    pub(crate) fn with_l1_batch_env(mut self, l1_batch_env: L1BatchEnv) -> Self {
+        self.l1_batch_env = Some(l1_batch_env);
+        self
+    }
+
+    pub(crate) fn with_storage(mut self, storage: InMemoryStorage) -> Self {
+        self.storage = Some(storage);
+        self
+    }
+
+    pub(crate) fn with_base_system_smart_contracts(
+        mut self,
+        base_system_smart_contracts: BaseSystemContracts,
+    ) -> Self {
+        self.system_env.base_system_smart_contracts = base_system_smart_contracts;
+        self
+    }
+
+    pub(crate) fn with_bootloader_gas_limit(mut self, gas_limit: u32) -> Self {
+        self.system_env.bootloader_gas_limit = gas_limit;
+        self
+    }
+
+    pub(crate) fn with_execution_mode(mut self, execution_mode: TxExecutionMode) -> Self {
+        self.system_env.execution_mode = execution_mode;
+        self
+    }
+
+    pub(crate) fn with_empty_in_memory_storage(mut self) -> Self {
+        self.storage = Some(get_empty_storage());
+        self
+    }
+
+    /// Creates the specified number of pre-funded accounts.
+    pub(crate) fn with_rich_accounts(mut self, number: u32) -> Self {
+        for i in 0..number {
+            self.rich_accounts.push(Account::from_seed(i));
+        }
+        self
+    }
+
+    pub(crate) fn rich_account(&self, index: usize) -> &Account {
+        &self.rich_accounts[index]
+    }
+
+    pub(crate) fn with_custom_contracts(mut self, contracts: Vec<ContractToDeploy>) -> Self {
+        self.custom_contracts = contracts;
+        self
+    }
+
+    pub(crate) fn build<VM>(self) -> VmTester<VM>
+    where
+        VM: VmFactory<StorageView<InMemoryStorage>>,
+    {
+        let l1_batch_env = self
+            .l1_batch_env
+            .unwrap_or_else(|| default_l1_batch(L1BatchNumber(1)));
+
+        let mut raw_storage = self.storage.unwrap_or_else(get_empty_storage);
+        ContractToDeploy::insert_all(&self.custom_contracts, &mut raw_storage);
+        let storage = StorageView::new(raw_storage).to_rc_ptr();
+        for account in &self.rich_accounts {
+            make_address_rich(storage.borrow_mut().inner_mut(), account.address);
+        }
+
+        let vm = VM::new(
+            l1_batch_env.clone(),
+            self.system_env.clone(),
+            storage.clone(),
+        );
+        VmTester {
+            vm,
+            system_env: self.system_env,
+            l1_batch_env,
+            storage,
+            test_contract: None,
+            rich_accounts: self.rich_accounts.clone(),
+        }
+    }
+}
+
+/// Test extensions for VM.
+pub(crate) trait TestedVm:
+    VmFactory<StorageView<InMemoryStorage>> + VmInterfaceHistoryEnabled
+{
+    type StateDump: fmt::Debug + PartialEq;
+
+    fn dump_state(&self) -> Self::StateDump;
+
+    fn gas_remaining(&mut self) -> u32;
+
+    fn get_current_execution_state(&self) -> CurrentExecutionState;
+
+    /// Unlike [`Self::known_bytecode_hashes()`], the output should only include successfully decommitted bytecodes.
+    fn decommitted_hashes(&self) -> HashSet<U256>;
+
+    fn finish_batch_with_state_diffs(
+        &mut self,
+        diffs: Vec<StateDiffRecord>,
+        pubdata_builder: Rc<dyn PubdataBuilder>,
+    ) -> VmExecutionResultAndLogs;
+
+    fn finish_batch_without_pubdata(&mut self) -> VmExecutionResultAndLogs;
+
+    fn insert_bytecodes(&mut self, bytecodes: &[&[u8]]);
+
+    /// Includes bytecodes that have failed to decommit. Should exclude base system contract bytecodes (default AA / EVM emulator).
+    fn known_bytecode_hashes(&self) -> HashSet<U256>;
+
+    /// Returns `true` iff the decommit is fresh.
+    fn manually_decommit(&mut self, code_hash: H256) -> bool;
+
+    fn verify_required_bootloader_heap(&self, cells: &[(u32, U256)]);
+
+    fn write_to_bootloader_heap(&mut self, cells: &[(usize, U256)]);
+
+    /// Reads storage accounting for changes made during the VM run.
+    fn read_storage(&mut self, key: StorageKey) -> U256;
+
+    fn verify_required_storage(&mut self, cells: &[(StorageKey, U256)]) {
+        for &(key, expected_value) in cells {
+            assert_eq!(
+                self.read_storage(key),
+                expected_value,
+                "Unexpected storage value at {key:?}"
+            );
+        }
+    }
+
+    /// Returns the current hash of the latest L2 block.
+    fn last_l2_block_hash(&self) -> H256;
+
+    /// Same as `start_new_l2_block`, but should skip consistency checks (to verify they are performed by the bootloader).
+    fn push_l2_block_unchecked(&mut self, block: L2BlockEnv);
+
+    /// Pushes a transaction with predefined refund value.
+    fn push_transaction_with_refund(&mut self, tx: Transaction, refund: u64);
+}
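[Editor's note, not part of the diff: once a VM version implements `TestedVm`, the shared `testonly` cases become per-version tests via thin wrappers. A hypothetical sketch; the concrete `Vm` alias and the exact module paths are assumptions for illustration:]

```rust
#[cfg(test)]
mod tests {
    // Assumed: `Vm` is a concrete VM type in this module that implements `TestedVm`.
    use super::Vm;
    use crate::versions::testonly::storage::test_storage_behavior;

    #[test]
    fn storage_behavior() {
        // The shared test is generic over the VM; instantiate it for this version.
        test_storage_behavior::<Vm>();
    }
}
```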
diff --git a/core/lib/multivm/src/versions/vm_latest/tests/tester/transaction_test_info.rs b/core/lib/multivm/src/versions/testonly/tester/transaction_test_info.rs
similarity index 87%
rename from core/lib/multivm/src/versions/vm_latest/tests/tester/transaction_test_info.rs
rename to core/lib/multivm/src/versions/testonly/tester/transaction_test_info.rs
index 5ad3376b2ec..222fb3b7331 100644
--- a/core/lib/multivm/src/versions/vm_latest/tests/tester/transaction_test_info.rs
+++ b/core/lib/multivm/src/versions/testonly/tester/transaction_test_info.rs
@@ -1,12 +1,12 @@
 use zksync_types::{ExecuteTransactionCommon, Nonce, Transaction, H160};

+use super::{TestedVm, VmTester};
 use crate::{
     interface::{
-        CurrentExecutionState, ExecutionResult, Halt, TxRevertReason, VmExecutionMode,
-        VmExecutionResultAndLogs, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled,
-        VmRevertReason,
+        CurrentExecutionState, ExecutionResult, Halt, InspectExecutionMode, TxRevertReason,
+        VmExecutionResultAndLogs, VmInterfaceExt, VmRevertReason,
     },
-    vm_latest::{tests::tester::vm_tester::VmTester, HistoryEnabled},
+    versions::testonly::default_pubdata_builder,
 };

 // FIXME: remove the dead code allow
@@ -184,9 +184,7 @@ impl TransactionTestInfo {
     }
 }

-impl VmTester<HistoryEnabled> {
-    // FIXME: remove allow dead code
-    #[allow(dead_code)]
+impl<VM: TestedVm> VmTester<VM> {
     pub(crate) fn execute_and_verify_txs(
         &mut self,
         txs: &[TransactionTestInfo],
@@ -194,7 +192,7 @@ impl VmTester<HistoryEnabled> {
         for tx_test_info in txs {
             self.execute_tx_and_verify(tx_test_info.clone());
         }
-        self.vm.execute(VmExecutionMode::Batch);
+        self.vm.finish_batch(default_pubdata_builder());
         let mut state = self.vm.get_current_execution_state();
         state.used_contract_hashes.sort();
         state
@@ -204,19 +202,29 @@ impl VmTester<HistoryEnabled> {
         &mut self,
         tx_test_info: TransactionTestInfo,
     ) -> VmExecutionResultAndLogs {
-        let inner_state_before = self.vm.dump_inner_state();
-        self.vm.make_snapshot();
-        self.vm.push_transaction(tx_test_info.tx.clone());
-        let result = self.vm.execute(VmExecutionMode::OneTx);
-        tx_test_info.verify_result(&result);
-        if tx_test_info.should_rollback() {
-            self.vm.rollback_to_the_latest_snapshot();
-            let inner_state_after = self.vm.dump_inner_state();
-            assert_eq!(
-                inner_state_before, inner_state_after,
-                "Inner state before and after rollback should be equal"
-            );
-        }
-        result
+        execute_tx_and_verify(&mut self.vm, tx_test_info)
+    }
+}
+
+fn execute_tx_and_verify(
+    vm: &mut impl TestedVm,
+    tx_test_info: TransactionTestInfo,
+) -> VmExecutionResultAndLogs {
+    let inner_state_before = vm.dump_state();
+    vm.make_snapshot();
+    vm.push_transaction(tx_test_info.tx.clone());
+    let result = vm.execute(InspectExecutionMode::OneTx);
+    tx_test_info.verify_result(&result);
+    if tx_test_info.should_rollback() {
+        vm.rollback_to_the_latest_snapshot();
+        let inner_state_after = vm.dump_state();
+        pretty_assertions::assert_eq!(
+            inner_state_before,
+            inner_state_after,
+            "Inner state before and after rollback should be equal"
+        );
+    } else {
+        vm.pop_snapshot_no_rollback();
     }
+    result
 }
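[Editor's note, not part of the diff: a usage sketch for the helpers above, assuming a tester built over any `TestedVm` and a transaction `tx` whose nonce was already consumed; the modifier mirrors the `TxModifier::NonceReused` calls in the rollback tests earlier in this diff:]

```rust
// Expect rejection for the reused nonce; `execute_tx_and_verify` snapshots the
// VM, checks the halt reason, rolls back, and asserts the state dump is unchanged.
let info = TransactionTestInfo::new_rejected(
    tx.clone(),
    TxModifier::NonceReused(tx.initiator_account(), tx.nonce().unwrap()).into(),
);
vm_tester.execute_tx_and_verify(info);
```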
diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/tracing_execution_error.rs b/core/lib/multivm/src/versions/testonly/tracing_execution_error.rs
similarity index 55%
rename from core/lib/multivm/src/versions/vm_1_4_2/tests/tracing_execution_error.rs
rename to core/lib/multivm/src/versions/testonly/tracing_execution_error.rs
index 138e8041e6a..e87e6eb7c06 100644
--- a/core/lib/multivm/src/versions/vm_1_4_2/tests/tracing_execution_error.rs
+++ b/core/lib/multivm/src/versions/testonly/tracing_execution_error.rs
@@ -1,33 +1,45 @@
-use zksync_types::{Execute, H160};
+use zksync_contracts::load_contract;
+use zksync_types::{Address, Execute};

+use super::{
+    read_error_contract, tester::VmTesterBuilder, ContractToDeploy, TestedVm, BASE_SYSTEM_CONTRACTS,
+};
 use crate::{
     interface::{TxExecutionMode, TxRevertReason, VmRevertReason},
-    vm_1_4_2::tests::{
-        tester::{ExpectedError, TransactionTestInfo, VmTesterBuilder},
-        utils::{get_execute_error_calldata, read_error_contract, BASE_SYSTEM_CONTRACTS},
-    },
+    versions::testonly::tester::{ExpectedError, TransactionTestInfo},
 };

-#[test]
-fn test_tracing_of_execution_errors() {
-    let contract_address = H160::random();
-    let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled)
+fn get_execute_error_calldata() -> Vec<u8> {
+    let test_contract = load_contract(
+        "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json",
+    );
+    let function = test_contract.function("require_short").unwrap();
+    function
+        .encode_input(&[])
+        .expect("failed to encode parameters")
+}
+
+pub(crate) fn test_tracing_of_execution_errors<VM: TestedVm>() {
+    let contract_address = Address::repeat_byte(1);
+    let mut vm = VmTesterBuilder::new()
         .with_empty_in_memory_storage()
         .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone())
-        .with_custom_contracts(vec![(read_error_contract(), contract_address, false)])
+        .with_custom_contracts(vec![ContractToDeploy::new(
+            read_error_contract(),
+            contract_address,
+        )])
         .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_deployer()
-        .with_random_rich_accounts(1)
-        .build();
+        .with_rich_accounts(1)
+        .build::<VM>();

     let account = &mut vm.rich_accounts[0];

     let tx = account.get_l2_tx_for_execute(
         Execute {
-            contract_address,
+            contract_address: Some(contract_address),
             calldata: get_execute_error_calldata(),
             value: Default::default(),
-            factory_deps: Some(vec![]),
+            factory_deps: vec![],
         },
         None,
     );
diff --git a/core/lib/multivm/src/versions/testonly/transfer.rs b/core/lib/multivm/src/versions/testonly/transfer.rs
new file mode 100644
index 00000000000..3572adba147
--- /dev/null
+++ b/core/lib/multivm/src/versions/testonly/transfer.rs
@@ -0,0 +1,208 @@
+use ethabi::Token;
+use zksync_contracts::{load_contract, read_bytecode};
+use zksync_types::{utils::storage_key_for_eth_balance, Address, Execute, U256};
+use zksync_utils::u256_to_h256;
+
+use super::{
+    default_pubdata_builder, get_empty_storage, tester::VmTesterBuilder, ContractToDeploy, TestedVm,
+};
+use crate::interface::{InspectExecutionMode, TxExecutionMode, VmInterfaceExt};
+
+enum TestOptions {
+    Send(U256),
+    Transfer(U256),
+}
+
+fn test_send_or_transfer<VM: TestedVm>(test_option: TestOptions) {
+    let test_bytecode = read_bytecode(
+        "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json",
+    );
+    let recipient_bytecode = read_bytecode(
+        "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/Recipient.json",
+    );
+    let test_abi = load_contract(
+        "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json",
+    );
+
+    let test_contract_address = Address::repeat_byte(1);
+    let recipient_address = Address::repeat_byte(2);
+
+    let (value, calldata) = match test_option {
+        TestOptions::Send(value) => (
+            value,
+            test_abi
+                .function("send")
+                .unwrap()
+                .encode_input(&[Token::Address(recipient_address), Token::Uint(value)])
+                .unwrap(),
+        ),
+        TestOptions::Transfer(value) => (
+            value,
+            test_abi
+                .function("transfer")
+                .unwrap()
+                .encode_input(&[Token::Address(recipient_address), Token::Uint(value)])
+                .unwrap(),
+        ),
+    };
+
+    let mut storage = get_empty_storage();
+    storage.set_value(
+        storage_key_for_eth_balance(&test_contract_address),
+        u256_to_h256(value),
+    );
+
+    let mut vm = VmTesterBuilder::new()
+        .with_storage(storage)
+        .with_execution_mode(TxExecutionMode::VerifyExecute)
+        .with_rich_accounts(1)
+        .with_custom_contracts(vec![
+            ContractToDeploy::new(test_bytecode, test_contract_address),
+            ContractToDeploy::new(recipient_bytecode, recipient_address),
+        ])
+        .build::<VM>();
+
+    let account = &mut vm.rich_accounts[0];
+    let tx = account.get_l2_tx_for_execute(
+        Execute {
+            contract_address: Some(test_contract_address),
+            calldata,
+            value: U256::zero(),
+            factory_deps: vec![],
+        },
+        None,
+    );
+
+    vm.vm.push_transaction(tx);
+    let tx_result = vm.vm.execute(InspectExecutionMode::OneTx);
+    assert!(
+        !tx_result.result.is_failed(),
+        "Transaction wasn't successful"
+    );
+
+    let batch_result = vm
+        .vm
+        .finish_batch(default_pubdata_builder())
+        .block_tip_execution_result;
+    assert!(!batch_result.result.is_failed(), "Batch wasn't successful");
+
+    let new_recipient_balance = vm.get_eth_balance(recipient_address);
+    assert_eq!(new_recipient_balance, value);
+}
+
+pub(crate) fn test_send_and_transfer<VM: TestedVm>() {
+    test_send_or_transfer::<VM>(TestOptions::Send(U256::zero()));
+    test_send_or_transfer::<VM>(TestOptions::Send(U256::from(10).pow(18.into())));
+    test_send_or_transfer::<VM>(TestOptions::Transfer(U256::zero()));
+    test_send_or_transfer::<VM>(TestOptions::Transfer(U256::from(10).pow(18.into())));
+}
+
+fn test_reentrancy_protection_send_or_transfer<VM: TestedVm>(test_option: TestOptions) {
+    let test_bytecode = read_bytecode(
+        "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json",
+    );
+    let reentrant_recipient_bytecode = read_bytecode(
+        "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/ReentrantRecipient.json",
+    );
+    let test_abi = load_contract(
+        "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json",
+    );
+    let reentrant_recipient_abi = load_contract(
+        "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/ReentrantRecipient.json",
+    );
+
+    let test_contract_address = Address::repeat_byte(1);
+    let reentrant_recipient_address = Address::repeat_byte(2);
+
+    let (value, calldata) = match test_option {
+        TestOptions::Send(value) => (
+            value,
+            test_abi
+                .function("send")
+                .unwrap()
+                .encode_input(&[
+                    Token::Address(reentrant_recipient_address),
+                    Token::Uint(value),
+                ])
+                .unwrap(),
+        ),
+        TestOptions::Transfer(value) => (
+            value,
+            test_abi
+                .function("transfer")
+                .unwrap()
+                .encode_input(&[
+                    Token::Address(reentrant_recipient_address),
+                    Token::Uint(value),
+                ])
+                .unwrap(),
+        ),
+    };
+
+    let mut vm = VmTesterBuilder::new()
+        .with_empty_in_memory_storage()
+        .with_execution_mode(TxExecutionMode::VerifyExecute)
+        .with_rich_accounts(1)
+        .with_custom_contracts(vec![
+            ContractToDeploy::new(test_bytecode, test_contract_address),
+            ContractToDeploy::new(reentrant_recipient_bytecode, reentrant_recipient_address),
+        ])
+        .build::<VM>();
+
+    // First transaction, the job of which is to warm up the slots for balance of the recipient as well as its storage variable.
+    let account = &mut vm.rich_accounts[0];
+    let tx1 = account.get_l2_tx_for_execute(
+        Execute {
+            contract_address: Some(reentrant_recipient_address),
+            calldata: reentrant_recipient_abi
+                .function("setX")
+                .unwrap()
+                .encode_input(&[])
+                .unwrap(),
+            value: U256::from(1),
+            factory_deps: vec![],
+        },
+        None,
+    );
+
+    vm.vm.push_transaction(tx1);
+    let tx1_result = vm.vm.execute(InspectExecutionMode::OneTx);
+    assert!(
+        !tx1_result.result.is_failed(),
+        "Transaction 1 wasn't successful"
+    );
+
+    let tx2 = account.get_l2_tx_for_execute(
+        Execute {
+            contract_address: Some(test_contract_address),
+            calldata,
+            value,
+            factory_deps: vec![],
+        },
+        None,
+    );
+
+    vm.vm.push_transaction(tx2);
+    let tx2_result = vm.vm.execute(InspectExecutionMode::OneTx);
+    assert!(
+        tx2_result.result.is_failed(),
+        "Transaction 2 should have failed, but it succeeded"
+    );
+
+    let batch_result = vm
+        .vm
+        .finish_batch(default_pubdata_builder())
+        .block_tip_execution_result;
+    assert!(!batch_result.result.is_failed(), "Batch wasn't successful");
+}
+
+pub(crate) fn test_reentrancy_protection_send_and_transfer<VM: TestedVm>() {
+    test_reentrancy_protection_send_or_transfer::<VM>(TestOptions::Send(U256::zero()));
+    test_reentrancy_protection_send_or_transfer::<VM>(TestOptions::Send(
+        U256::from(10).pow(18.into()),
+    ));
+    test_reentrancy_protection_send_or_transfer::<VM>(TestOptions::Transfer(U256::zero()));
+    test_reentrancy_protection_send_or_transfer::<VM>(TestOptions::Transfer(
+        U256::from(10).pow(18.into()),
+    ));
+}
diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/upgrade.rs b/core/lib/multivm/src/versions/testonly/upgrade.rs
similarity index 72%
rename from core/lib/multivm/src/versions/vm_1_4_2/tests/upgrade.rs
rename to core/lib/multivm/src/versions/testonly/upgrade.rs
index 2af2928b1c4..359f19faedb 100644
--- a/core/lib/multivm/src/versions/vm_1_4_2/tests/upgrade.rs
+++ b/core/lib/multivm/src/versions/testonly/upgrade.rs
@@ -1,52 +1,43 @@
-use zk_evm_1_4_1::aux_structures::Timestamp;
 use zksync_contracts::{deployer_contract, load_sys_contract, read_bytecode};
-use crate::interface::storage::WriteStorage;
 use zksync_test_account::TxType;
 use zksync_types::{
     ethabi::{Contract, Token},
     get_code_key, get_known_code_key,
     protocol_upgrade::ProtocolUpgradeTxCommonData,
     Address, Execute, ExecuteTransactionCommon, Transaction, COMPLEX_UPGRADER_ADDRESS,
-    CONTRACT_DEPLOYER_ADDRESS, CONTRACT_FORCE_DEPLOYER_ADDRESS, H160, H256,
+    CONTRACT_DEPLOYER_ADDRESS, CONTRACT_FORCE_DEPLOYER_ADDRESS, H256,
     REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, U256,
 };
-use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256};
-
-use super::utils::{get_complex_upgrade_abi, read_test_contract};
-use crate::{
-    interface::{
-        ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterface,
-        VmInterfaceHistoryEnabled,
-    },
-    vm_1_4_2::tests::{
-        tester::VmTesterBuilder,
-        utils::{read_complex_upgrade, verify_required_storage},
-    },
+use zksync_utils::{bytecode::hash_bytecode, h256_to_u256, u256_to_h256};
+
+use super::{
+    get_complex_upgrade_abi, get_empty_storage, read_complex_upgrade, read_test_contract,
+    tester::VmTesterBuilder, TestedVm,
+};
+use crate::interface::{
+    ExecutionResult, Halt, InspectExecutionMode, TxExecutionMode, VmInterfaceExt,
 };

 /// In this test we ensure that the requirements for protocol upgrade transactions are enforced by the bootloader:
 /// - This transaction must be the only one in block
 /// - If present, this transaction must be the first one in block
-#[test]
-fn test_protocol_upgrade_is_first() {
-    let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_random_rich_accounts(1)
-        .build();
-
+pub(crate) fn test_protocol_upgrade_is_first<VM: TestedVm>() {
+    let mut storage = get_empty_storage();
     let bytecode_hash = hash_bytecode(&read_test_contract());
-    vm.vm
-        .storage
-        .borrow_mut()
-        .set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into()));
+    storage.set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into()));
+
+    let mut vm = VmTesterBuilder::new()
+        .with_storage(storage)
+        .with_execution_mode(TxExecutionMode::VerifyExecute)
+        .with_rich_accounts(1)
+        .build::<VM>();

     // Here we just use some random transaction of protocol upgrade type:
     let protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment {
         // The bytecode hash to put on an address
         bytecode_hash,
         // The address on which to deploy the bytecode hash to
-        address: H160::random(),
+        address: Address::repeat_byte(1),
         // Whether to run the constructor on the force deployment
         call_constructor: false,
         // The value with which to initialize a contract
@@ -60,7 +51,7 @@ fn test_protocol_upgrade_is_first() {
         // The bytecode hash to put on an address
         bytecode_hash,
         // The address on which to deploy the bytecode hash to
-        address: H160::random(),
+        address: Address::repeat_byte(2),
         // Whether to run the constructor on the force deployment
         call_constructor: false,
         // The value with which to initialize a contract
@@ -82,9 +73,9 @@ fn test_protocol_upgrade_is_first() {
     vm.vm.push_transaction(normal_l1_transaction.clone());
     vm.vm.push_transaction(another_protocol_upgrade_transaction);

-    vm.vm.execute(VmExecutionMode::OneTx);
-    vm.vm.execute(VmExecutionMode::OneTx);
-    let result = vm.vm.execute(VmExecutionMode::OneTx);
+    vm.vm.execute(InspectExecutionMode::OneTx);
+    vm.vm.execute(InspectExecutionMode::OneTx);
+    let result = vm.vm.execute(InspectExecutionMode::OneTx);
     assert_eq!(
         result.result,
         ExecutionResult::Halt {
@@ -98,8 +89,8 @@ fn test_protocol_upgrade_is_first() {
     vm.vm.push_transaction(normal_l1_transaction.clone());
     vm.vm.push_transaction(protocol_upgrade_transaction.clone());

-    vm.vm.execute(VmExecutionMode::OneTx);
-    let result = vm.vm.execute(VmExecutionMode::OneTx);
+    vm.vm.execute(InspectExecutionMode::OneTx);
+    let result = vm.vm.execute(InspectExecutionMode::OneTx);
     assert_eq!(
         result.result,
         ExecutionResult::Halt {
@@ -112,31 +103,26 @@ fn test_protocol_upgrade_is_first() {
     vm.vm.push_transaction(protocol_upgrade_transaction);
     vm.vm.push_transaction(normal_l1_transaction);

-    vm.vm.execute(VmExecutionMode::OneTx);
-    let result = vm.vm.execute(VmExecutionMode::OneTx);
+    vm.vm.execute(InspectExecutionMode::OneTx);
+    let result = vm.vm.execute(InspectExecutionMode::OneTx);
     assert!(!result.result.is_failed());
 }

 /// In this test we try to test how force deployments could be done via protocol upgrade transactions.
-#[test]
-fn test_force_deploy_upgrade() {
-    let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_random_rich_accounts(1)
-        .build();
-
-    let storage_view = vm.storage.clone();
+pub(crate) fn test_force_deploy_upgrade<VM: TestedVm>() {
+    let mut storage = get_empty_storage();
     let bytecode_hash = hash_bytecode(&read_test_contract());
-
     let known_code_key = get_known_code_key(&bytecode_hash);
     // It is generally expected that all the keys will be set as known prior to the protocol upgrade.
-    storage_view
-        .borrow_mut()
-        .set_value(known_code_key, u256_to_h256(1.into()));
-    drop(storage_view);
+    storage.set_value(known_code_key, u256_to_h256(1.into()));
+
+    let mut vm = VmTesterBuilder::new()
+        .with_storage(storage)
+        .with_execution_mode(TxExecutionMode::VerifyExecute)
+        .with_rich_accounts(1)
+        .build::<VM>();

-    let address_to_deploy = H160::random();
+    let address_to_deploy = Address::repeat_byte(1);
     // Here we just use some random transaction of protocol upgrade type:
     let transaction = get_forced_deploy_tx(&[ForceDeployment {
         // The bytecode hash to put on an address
@@ -153,65 +139,46 @@ fn test_force_deploy_upgrade() {

     vm.vm.push_transaction(transaction);

-    let result = vm.vm.execute(VmExecutionMode::OneTx);
+    let result = vm.vm.execute(InspectExecutionMode::OneTx);
     assert!(
         !result.result.is_failed(),
         "The force upgrade was not successful"
     );

-    let expected_slots = vec![(bytecode_hash, get_code_key(&address_to_deploy))];
-
+    let expected_slots = [(
+        get_code_key(&address_to_deploy),
+        h256_to_u256(bytecode_hash),
+    )];
     // Verify that the bytecode has been set correctly
-    verify_required_storage(&vm.vm.state, expected_slots);
+    vm.vm.verify_required_storage(&expected_slots);
 }

-/// Here we show how the work with the complex upgrader could be done
-#[test]
-fn test_complex_upgrader() {
-    let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_random_rich_accounts(1)
-        .build();
-
-    let storage_view = vm.storage.clone();
-
+/// Here we show how the work with the complex upgrader could be done.
+pub(crate) fn test_complex_upgrader<VM: TestedVm>() {
+    let mut storage = get_empty_storage();
     let bytecode_hash = hash_bytecode(&read_complex_upgrade());
     let msg_sender_test_hash = hash_bytecode(&read_msg_sender_test());
-
     // Let's assume that the bytecode for the implementation of the complex upgrade
     // is already deployed in some address in user space
-    let upgrade_impl = H160::random();
+    let upgrade_impl = Address::repeat_byte(1);
     let account_code_key = get_code_key(&upgrade_impl);
-
-    storage_view
-        .borrow_mut()
-        .set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into()));
-    storage_view.borrow_mut().set_value(
+    storage.set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into()));
+    storage.set_value(
         get_known_code_key(&msg_sender_test_hash),
         u256_to_h256(1.into()),
     );
-    storage_view
-        .borrow_mut()
-        .set_value(account_code_key, bytecode_hash);
-    drop(storage_view);
-
-    vm.vm.state.decommittment_processor.populate(
-        vec![
-            (
-                h256_to_u256(bytecode_hash),
-                bytes_to_be_words(read_complex_upgrade()),
-            ),
-            (
-                h256_to_u256(msg_sender_test_hash),
-                bytes_to_be_words(read_msg_sender_test()),
-            ),
-        ],
-        Timestamp(0),
-    );
+    storage.set_value(account_code_key, bytecode_hash);
+    storage.store_factory_dep(bytecode_hash, read_complex_upgrade());
+    storage.store_factory_dep(msg_sender_test_hash, read_msg_sender_test());

-    let address_to_deploy1 = H160::random();
-    let address_to_deploy2 = H160::random();
+    let mut vm = VmTesterBuilder::new()
+        .with_storage(storage)
+        .with_execution_mode(TxExecutionMode::VerifyExecute)
+        .with_rich_accounts(1)
+        .build::<VM>();
+
+    let address_to_deploy1 = Address::repeat_byte(0xfe);
+    let address_to_deploy2 = Address::repeat_byte(0xff);
     let transaction = get_complex_upgrade_tx(
         upgrade_impl,
@@ -221,19 +188,24 @@ fn test_complex_upgrader() {
     );

     vm.vm.push_transaction(transaction);
-    let result = vm.vm.execute(VmExecutionMode::OneTx);
+    let result = vm.vm.execute(InspectExecutionMode::OneTx);
     assert!(
         !result.result.is_failed(),
         "The force upgrade was not successful"
     );

-    let expected_slots = vec![
-        (bytecode_hash, get_code_key(&address_to_deploy1)),
-        (bytecode_hash, get_code_key(&address_to_deploy2)),
+    let expected_slots = [
+        (
+            get_code_key(&address_to_deploy1),
+            h256_to_u256(bytecode_hash),
+        ),
+        (
+            get_code_key(&address_to_deploy2),
+            h256_to_u256(bytecode_hash),
+        ),
     ];
-    // Verify that the bytecode has been set correctly
-    verify_required_storage(&vm.vm.state, expected_slots);
+    vm.vm.verify_required_storage(&expected_slots);
 }

 #[derive(Debug, Clone)]
@@ -274,9 +246,9 @@ fn get_forced_deploy_tx(deployment: &[ForceDeployment]) -> Transaction {
         .expect("failed to encode parameters");

     let execute = Execute {
-        contract_address: CONTRACT_DEPLOYER_ADDRESS,
+        contract_address: Some(CONTRACT_DEPLOYER_ADDRESS),
         calldata,
-        factory_deps: None,
+        factory_deps: vec![],
         value: U256::zero(),
     };

@@ -324,9 +296,9 @@ fn get_complex_upgrade_tx(
         .unwrap();

     let execute = Execute {
-        contract_address: COMPLEX_UPGRADER_ADDRESS,
+        contract_address: Some(COMPLEX_UPGRADER_ADDRESS),
         calldata: complex_upgrader_calldata,
-        factory_deps: None,
+        factory_deps: vec![],
         value: U256::zero(),
     };
diff --git a/core/lib/multivm/src/versions/vm_1_3_2/utils.rs b/core/lib/multivm/src/versions/vm_1_3_2/utils.rs
index da4e2f5350f..7870b1ff744 100644
--- a/core/lib/multivm/src/versions/vm_1_3_2/utils.rs
+++ b/core/lib/multivm/src/versions/vm_1_3_2/utils.rs
@@ -5,7 +5,7 @@ use zk_evm_1_3_3::{
     vm_state::PrimitiveValue, zkevm_opcode_defs::FatPointer,
 };
-use zksync_contracts::{read_zbin_bytecode, BaseSystemContracts};
+use zksync_contracts::BaseSystemContracts;
 use zksync_system_constants::ZKPORTER_IS_AVAILABLE;
 use zksync_types::{Address, StorageLogKind, H160, MAX_L2_TX_GAS_LIMIT, U256};
 use zksync_utils::h256_to_u256;
@@ -221,13 +221,6 @@ pub fn create_test_block_params() -> (BlockContext, BlockProperties) {
     )
 }

-pub fn read_bootloader_test_code(test: &str) -> Vec<u8> {
-    read_zbin_bytecode(format!(
-        "contracts/system-contracts/bootloader/tests/artifacts/{}.yul.zbin",
-        test
-    ))
-}
-
 pub(crate) fn calculate_computational_gas_used<
     S: WriteStorage,
     T: PubdataSpentTracer<H>,
diff --git a/core/lib/multivm/src/versions/vm_1_3_2/vm.rs b/core/lib/multivm/src/versions/vm_1_3_2/vm.rs
index 5692f103da3..d9768652c2f 100644
--- a/core/lib/multivm/src/versions/vm_1_3_2/vm.rs
+++ b/core/lib/multivm/src/versions/vm_1_3_2/vm.rs
@@ -1,14 +1,15 @@
-use std::collections::HashSet;
+use std::{collections::HashSet, rc::Rc};

 use zksync_types::Transaction;
 use zksync_utils::{bytecode::hash_bytecode, h256_to_u256};
+use zksync_vm_interface::{pubdata::PubdataBuilder, InspectExecutionMode};

 use crate::{
     glue::{history_mode::HistoryMode, GlueInto},
     interface::{
         storage::{StoragePtr, WriteStorage},
         BytecodeCompressionError, BytecodeCompressionResult, FinishedL1Batch, L1BatchEnv,
-        L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmExecutionResultAndLogs,
+        L2BlockEnv, PushTransactionResult, SystemEnv, TxExecutionMode, VmExecutionResultAndLogs,
         VmFactory, VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics,
     },
     tracers::old::TracerDispatcher,
@@ -22,22 +23,45 @@ pub struct Vm<S: WriteStorage, H: HistoryMode> {
     pub(crate) system_env: SystemEnv,
 }

+impl<S: WriteStorage, H: HistoryMode> Vm<S, H> {
+    pub(crate) fn record_vm_memory_metrics(&self) -> VmMemoryMetrics {
+        VmMemoryMetrics {
+            event_sink_inner: self.vm.state.event_sink.get_size(),
+            event_sink_history: self.vm.state.event_sink.get_history_size(),
+            memory_inner: self.vm.state.memory.get_size(),
+            memory_history: self.vm.state.memory.get_history_size(),
+            decommittment_processor_inner: self.vm.state.decommittment_processor.get_size(),
+            decommittment_processor_history: self
+                .vm
+                .state
+                .decommittment_processor
+                .get_history_size(),
+            storage_inner: self.vm.state.storage.get_size(),
+            storage_history: self.vm.state.storage.get_history_size(),
+        }
+    }
+}
+
 impl<S: WriteStorage, H: HistoryMode> VmInterface for Vm<S, H> {
     type TracerDispatcher = TracerDispatcher;

-    fn push_transaction(&mut self, tx: Transaction) {
-        crate::vm_1_3_2::vm_with_bootloader::push_transaction_to_bootloader_memory(
-            &mut self.vm,
-            &tx,
-            self.system_env.execution_mode.glue_into(),
-            None,
-        )
+    fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult<'_> {
+        let compressed_bytecodes =
+            crate::vm_1_3_2::vm_with_bootloader::push_transaction_to_bootloader_memory(
+                &mut self.vm,
+                &tx,
+                self.system_env.execution_mode.glue_into(),
+                None,
+            );
+        PushTransactionResult {
+            compressed_bytecodes: compressed_bytecodes.into(),
+        }
     }

     fn inspect(
         &mut self,
         tracer: &mut Self::TracerDispatcher,
-        execution_mode: VmExecutionMode,
+        execution_mode: InspectExecutionMode,
     ) -> VmExecutionResultAndLogs {
         if let Some(storage_invocations) = tracer.storage_invocations {
             self.vm
@@ -46,7 +70,7 @@ impl<S: WriteStorage, H: HistoryMode> VmInterface for Vm<S, H> {
         }

         match execution_mode {
-            VmExecutionMode::OneTx => {
+            InspectExecutionMode::OneTx => {
                 match self.system_env.execution_mode {
                     TxExecutionMode::VerifyExecute => {
                         let enable_call_tracer = tracer
@@ -69,8 +93,7 @@ impl<S: WriteStorage, H: HistoryMode> VmInterface for Vm<S, H> {
                         .glue_into(),
                 }
             }
-            VmExecutionMode::Batch => self.finish_batch().block_tip_execution_result,
-            VmExecutionMode::Bootloader => self.vm.execute_block_tip().glue_into(),
+            InspectExecutionMode::Bootloader => self.vm.execute_block_tip().glue_into(),
         }
     }

@@ -160,24 +183,7 @@ impl<S: WriteStorage, H: HistoryMode> VmInterface for Vm<S, H> {
         }
     }

-    fn record_vm_memory_metrics(&self) -> VmMemoryMetrics {
-        VmMemoryMetrics {
-            event_sink_inner: self.vm.state.event_sink.get_size(),
-            event_sink_history: self.vm.state.event_sink.get_history_size(),
-            memory_inner: self.vm.state.memory.get_size(),
-            memory_history: self.vm.state.memory.get_history_size(),
-            decommittment_processor_inner: self.vm.state.decommittment_processor.get_size(),
-            decommittment_processor_history: self
-                .vm
-                .state
-                .decommittment_processor
-                .get_history_size(),
-            storage_inner: self.vm.state.storage.get_size(),
-            storage_history: self.vm.state.storage.get_history_size(),
-        }
-    }
-
-    fn finish_batch(&mut self) -> FinishedL1Batch {
+    fn finish_batch(&mut self, _pubdata_builder: Rc<dyn PubdataBuilder>) -> FinishedL1Batch {
         self.vm
             .execute_till_block_end(
                 crate::vm_1_3_2::vm_with_bootloader::BootloaderJobType::BlockPostprocessing,
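[Editor's note, not part of the diff: `push_transaction` now returns a `PushTransactionResult<'_>` carrying the bytecodes compressed for the pushed transaction instead of `()`. A hedged consumption sketch; the exact field type is not shown in this hunk, so the coercion to a slice is an assumption:]

```rust
// Push the transaction and inspect which bytecodes were compressed along the way.
let push_result = vm.push_transaction(tx);
// Assumption: `compressed_bytecodes` dereferences to a slice (it borrows from
// the VM, hence the `'_` lifetime on the result type).
let compressed: &[CompressedBytecodeInfo] = &push_result.compressed_bytecodes;
println!("compressed {} bytecode(s) for this tx", compressed.len());
```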
diff --git a/core/lib/multivm/src/versions/vm_1_3_2/vm_with_bootloader.rs b/core/lib/multivm/src/versions/vm_1_3_2/vm_with_bootloader.rs
index d1acdf7708e..fd4d483fba5 100644
--- a/core/lib/multivm/src/versions/vm_1_3_2/vm_with_bootloader.rs
+++ b/core/lib/multivm/src/versions/vm_1_3_2/vm_with_bootloader.rs
@@ -442,7 +442,7 @@ pub fn get_bootloader_memory(
     let mut previous_compressed: usize = 0;
     let mut already_included_txs_size = 0;
     for (tx_index_in_block, tx) in txs.into_iter().enumerate() {
-        let compressed_bytecodes = predefined_compressed_bytecodes[tx_index_in_block].clone();
+        let compressed_bytecodes = &predefined_compressed_bytecodes[tx_index_in_block];

         let mut total_compressed_len_words = 0;
         for i in compressed_bytecodes.iter() {
@@ -475,7 +475,7 @@ pub fn push_transaction_to_bootloader_memory<S: WriteStorage, H: HistoryMode>(
     tx: &Transaction,
     execution_mode: TxExecutionMode,
     explicit_compressed_bytecodes: Option<Vec<CompressedBytecodeInfo>>,
-) {
+) -> Vec<CompressedBytecodeInfo> {
     let tx: TransactionData = tx.clone().into();
     let block_gas_per_pubdata_byte = vm.block_context.context.block_gas_price_per_pubdata();
     let overhead = tx.overhead_gas(block_gas_per_pubdata_byte as u32);
@@ -485,7 +485,7 @@ pub fn push_transaction_to_bootloader_memory<S: WriteStorage, H: HistoryMode>(
         execution_mode,
         overhead,
         explicit_compressed_bytecodes,
-    );
+    )
 }

 pub fn push_raw_transaction_to_bootloader_memory<S: WriteStorage, H: HistoryMode>(
@@ -494,7 +494,7 @@ pub fn push_raw_transaction_to_bootloader_memory<S: WriteStorage, H: HistoryMode>(
     explicit_compressed_bytecodes: Option<Vec<CompressedBytecodeInfo>>,
-) {
+) -> Vec<CompressedBytecodeInfo> {
     let tx_index_in_block = vm.bootloader_state.free_tx_index();
     let already_included_txs_size = vm.bootloader_state.free_tx_offset();
@@ -555,7 +555,7 @@ pub fn push_raw_transaction_to_bootloader_memory<S: WriteStorage, H: HistoryMode>(
-    compressed_bytecodes: Vec<CompressedBytecodeInfo>,
+    compressed_bytecodes: &[CompressedBytecodeInfo],
 ) -> Vec<(usize, U256)> {
     let overhead_gas = tx.overhead_gas(block_gas_per_pubdata);
     let trusted_gas_limit = tx.trusted_gas_limit(block_gas_per_pubdata);
@@ -604,7 +605,7 @@ pub(crate) fn get_bootloader_memory_for_encoded_tx(
     predefined_overhead: u32,
     trusted_gas_limit: u32,
     previous_compressed_bytecode_size: usize,
-    compressed_bytecodes: Vec<CompressedBytecodeInfo>,
+    compressed_bytecodes: &[CompressedBytecodeInfo],
 ) -> Vec<(usize, U256)> {
     let mut memory: Vec<(usize, U256)> = Vec::default();
     let bootloader_description_offset =
@@ -640,8 +641,8 @@ pub(crate) fn get_bootloader_memory_for_encoded_tx(
         COMPRESSED_BYTECODES_OFFSET + 1 + previous_compressed_bytecode_size;

     let memory_addition: Vec<_> = compressed_bytecodes
-        .into_iter()
-        .flat_map(|x| bytecode::encode_call(&x))
+        .iter()
+        .flat_map(bytecode::encode_call)
         .collect();

     let memory_addition = bytes_to_be_words(memory_addition);
diff --git a/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/utils.rs
index 393eb043cb7..1acf75b27e1 100644
--- a/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/utils.rs
+++ b/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/utils.rs
@@ -167,8 +167,8 @@ pub(super) fn assemble_tx_meta(execution_mode: TxExecutionMode, execute_tx: bool
     // Set 0 byte (execution mode)
     output[0] = match execution_mode {
         TxExecutionMode::VerifyExecute => 0x00,
-        TxExecutionMode::EstimateFee { .. } => 0x00,
-        TxExecutionMode::EthCall { .. } => 0x02,
+        TxExecutionMode::EstimateFee => 0x00,
+        TxExecutionMode::EthCall => 0x02,
     };

     // Set 31 byte (marker for tx execution)
diff --git a/core/lib/multivm/src/versions/vm_1_4_1/implementation/execution.rs b/core/lib/multivm/src/versions/vm_1_4_1/implementation/execution.rs
index 2160c4b56a0..cc199fef941 100644
--- a/core/lib/multivm/src/versions/vm_1_4_1/implementation/execution.rs
+++ b/core/lib/multivm/src/versions/vm_1_4_1/implementation/execution.rs
@@ -99,6 +99,7 @@ impl<S: WriteStorage, H: HistoryMode> Vm<S, H> {
             logs,
             statistics,
             refunds,
+            new_known_factory_deps: None,
         };

         (stop_reason, result)
diff --git a/core/lib/multivm/src/versions/vm_1_4_1/implementation/statistics.rs b/core/lib/multivm/src/versions/vm_1_4_1/implementation/statistics.rs
index 71ae20d4406..3a3b22ea246 100644
--- a/core/lib/multivm/src/versions/vm_1_4_1/implementation/statistics.rs
+++ b/core/lib/multivm/src/versions/vm_1_4_1/implementation/statistics.rs
@@ -57,7 +57,7 @@ impl<S: WriteStorage, H: HistoryMode> Vm<S, H> {
     }

     /// Returns the info about all oracles' sizes.
-    pub(crate) fn record_vm_memory_metrics_inner(&self) -> VmMemoryMetrics {
+    pub(crate) fn record_vm_memory_metrics(&self) -> VmMemoryMetrics {
         VmMemoryMetrics {
             event_sink_inner: self.state.event_sink.get_size(),
             event_sink_history: self.state.event_sink.get_history_size(),
diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/block_tip.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/block_tip.rs
deleted file mode 100644
index ba699e7558b..00000000000
--- a/core/lib/multivm/src/versions/vm_1_4_1/tests/block_tip.rs
+++ /dev/null
@@ -1,284 +0,0 @@
-use std::borrow::BorrowMut;
-
-use ethabi::Token;
-use zk_evm_1_4_1::{
-    aux_structures::Timestamp, zkevm_opcode_defs::system_params::MAX_PUBDATA_PER_BLOCK,
-};
-use zksync_contracts::load_sys_contract;
-use zksync_system_constants::{
-    CONTRACT_FORCE_DEPLOYER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS,
-};
-use zksync_types::{
-    commitment::SerializeCommitment, get_code_key, l2_to_l1_log::L2ToL1Log,
-    writes::StateDiffRecord, Address, Execute, H256, U256,
-};
-use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256};
-
-use super::utils::{get_complex_upgrade_abi, read_complex_upgrade};
-use crate::{
-    interface::{TxExecutionMode, VmExecutionMode, VmInterface},
-    vm_1_4_1::{
-        constants::BOOTLOADER_BATCH_TIP_OVERHEAD,
-        tests::tester::{get_empty_storage, InMemoryStorageView, VmTesterBuilder},
-        tracers::PubdataTracer,
-        HistoryEnabled, TracerDispatcher,
-    },
-};
-
-#[derive(Debug, Clone, Default)]
-struct L1MessengerTestData {
-    l2_to_l1_logs: usize,
-    messages: Vec<Vec<u8>>,
-    bytecodes: Vec<Vec<u8>>,
-    state_diffs: Vec<StateDiffRecord>,
-}
-
-struct MimicCallInfo {
-    to: Address,
-    who_to_mimic: Address,
-    data: Vec<u8>,
-}
-
-fn populate_mimic_calls(data: L1MessengerTestData) -> Vec<u8> {
-    let complex_upgrade = get_complex_upgrade_abi();
-    let l1_messenger = load_sys_contract("L1Messenger");
-
-    let logs_mimic_calls = (0..data.l2_to_l1_logs).map(|_| MimicCallInfo {
-        to: L1_MESSENGER_ADDRESS,
-        who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS,
-        data: l1_messenger
-            .function("sendL2ToL1Log")
-            .unwrap()
-            .encode_input(&[
-                Token::Bool(false),
-                Token::FixedBytes(H256::random().0.to_vec()),
-                Token::FixedBytes(H256::random().0.to_vec()),
-            ])
-            .unwrap(),
-    });
-    let messages_mimic_calls = data.messages.iter().map(|message| MimicCallInfo {
-        to: L1_MESSENGER_ADDRESS,
-        who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS,
-        data: l1_messenger
-            .function("sendToL1")
-            .unwrap()
-            .encode_input(&[Token::Bytes(message.clone())])
-            .unwrap(),
-    });
-    let bytecodes_mimic_calls = data.bytecodes.iter().map(|bytecode| MimicCallInfo {
-        to: L1_MESSENGER_ADDRESS,
-        who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS,
-        data: l1_messenger
-            .function("requestBytecodeL1Publication")
-            .unwrap()
-            .encode_input(&[Token::FixedBytes(hash_bytecode(bytecode).0.to_vec())])
-            .unwrap(),
-    });
-
-    let encoded_calls = logs_mimic_calls
-        .chain(messages_mimic_calls)
-        .chain(bytecodes_mimic_calls)
-        .map(|call| {
-            Token::Tuple(vec![
-                Token::Address(call.to),
-                Token::Address(call.who_to_mimic),
-                Token::Bytes(call.data),
-            ])
-        })
-        .collect::<Vec<_>>();
-
-    complex_upgrade
-        .function("mimicCalls")
-        .unwrap()
-        .encode_input(&[Token::Array(encoded_calls)])
-        .unwrap()
-}
-
-fn execute_test(test_data: L1MessengerTestData) -> u32 {
-    let mut storage = get_empty_storage();
-    let complex_upgrade_code = read_complex_upgrade();
-
-    // For this test we'll just put the bytecode onto the force deployer address
-    storage.borrow_mut().set_value(
-        get_code_key(&CONTRACT_FORCE_DEPLOYER_ADDRESS),
-        hash_bytecode(&complex_upgrade_code),
-    );
-    storage
-        .borrow_mut()
-        .store_factory_dep(hash_bytecode(&complex_upgrade_code), complex_upgrade_code);
-
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_storage(storage)
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_random_rich_accounts(1)
-        .build();
-
-    let bytecodes = test_data
-        .bytecodes
-        .iter()
-        .map(|bytecode| {
-            let hash = hash_bytecode(bytecode);
-            let words = bytes_to_be_words(bytecode.clone());
-            (h256_to_u256(hash), words)
-        })
-        .collect();
-    vm.vm
-        .state
-        .decommittment_processor
-        .populate(bytecodes, Timestamp(0));
-
-    let data = populate_mimic_calls(test_data.clone());
-    let account = &mut vm.rich_accounts[0];
-    let tx = account.get_l2_tx_for_execute(
-        Execute {
-            contract_address: CONTRACT_FORCE_DEPLOYER_ADDRESS,
-            calldata: data,
-            value: U256::zero(),
-            factory_deps: None,
-        },
-        None,
-    );
-
-    vm.vm.push_transaction(tx);
-    let result = vm.vm.execute(VmExecutionMode::OneTx);
-    assert!(!result.result.is_failed(), "Transaction wasn't successful");
-
-    // Now we count how much ergs were spent at the end of the batch
-    // It is assumed that the top level frame is the bootloader
-
-    let ergs_before = vm.vm.state.local_state.callstack.current.ergs_remaining;
-
-    // We ensure that indeed the provided state diffs are used
-    let pubdata_tracer = PubdataTracer::<InMemoryStorageView>::new_with_forced_state_diffs(
-        vm.vm.batch_env.clone(),
-        VmExecutionMode::Batch,
-        test_data.state_diffs,
-    );
-
-    let result = vm.vm.inspect_inner(
-        TracerDispatcher::default(),
-        VmExecutionMode::Batch,
-        Some(pubdata_tracer),
-    );
-
-    assert!(!result.result.is_failed(), "Batch wasn't successful");
-
-    let ergs_after = vm.vm.state.local_state.callstack.current.ergs_remaining;
-
-    ergs_before - ergs_after
-}
-
-fn generate_state_diffs(
-    repeated_writes: bool,
bool, - small_diff: bool, - number_of_state_diffs: usize, -) -> Vec { - (0..number_of_state_diffs) - .map(|i| { - let address = Address::from_low_u64_be(i as u64); - let key = U256::from(i); - let enumeration_index = if repeated_writes { i + 1 } else { 0 }; - - let (initial_value, final_value) = if small_diff { - // As small as it gets, one byte to denote zeroing out the value - (U256::from(1), U256::from(0)) - } else { - // As large as it gets - (U256::from(0), U256::from(2).pow(255.into())) - }; - - StateDiffRecord { - address, - key, - derived_key: u256_to_h256(i.into()).0, - enumeration_index: enumeration_index as u64, - initial_value, - final_value, - } - }) - .collect() -} - -#[test] -fn test_dry_run_upper_bound() { - // We are re-using the `ComplexUpgrade` contract as it already has the `mimicCall` functionality. - // To get the upper bound, we'll try to do the following: - // 1. Max number of logs. - // 2. Lots of small L2->L1 messages / one large L2->L1 message. - // 3. Lots of small bytecodes / one large bytecode. - // 4. Lots of storage slot updates. - - let max_logs = execute_test(L1MessengerTestData { - l2_to_l1_logs: L2ToL1Log::MIN_L2_L1_LOGS_TREE_SIZE, - ..Default::default() - }); - - let max_messages = execute_test(L1MessengerTestData { - // Each L2->L1 message is accompanied by a Log, so the max number of pubdata is bound by it - messages: vec![vec![0; 0]; MAX_PUBDATA_PER_BLOCK as usize / L2ToL1Log::SERIALIZED_SIZE], - ..Default::default() - }); - - let long_message = execute_test(L1MessengerTestData { - // Each L2->L1 message is accompanied by a Log, so the max number of pubdata is bound by it - messages: vec![vec![0; MAX_PUBDATA_PER_BLOCK as usize]; 1], - ..Default::default() - }); - - let max_bytecodes = execute_test(L1MessengerTestData { - // Each bytecode must be at least 32 bytes long - bytecodes: vec![vec![0; 32]; MAX_PUBDATA_PER_BLOCK as usize / 32], - ..Default::default() - }); - - let long_bytecode = execute_test(L1MessengerTestData { - // We have to add 48 since a valid bytecode must have an odd number of 32 byte words - bytecodes: vec![vec![0; MAX_PUBDATA_PER_BLOCK as usize + 48]; 1], - ..Default::default() - }); - - let lots_of_small_repeated_writes = execute_test(L1MessengerTestData { - // In theory each state diff can require only 5 bytes to be published (enum index + 4 bytes for the key) - state_diffs: generate_state_diffs(true, true, MAX_PUBDATA_PER_BLOCK as usize / 5), - ..Default::default() - }); - - let lots_of_big_repeated_writes = execute_test(L1MessengerTestData { - // Each big write will approximately require 32 bytes to encode - state_diffs: generate_state_diffs(true, false, MAX_PUBDATA_PER_BLOCK as usize / 32), - ..Default::default() - }); - - let lots_of_small_initial_writes = execute_test(L1MessengerTestData { - // Each initial write will take at least 32 bytes for derived key + 5 bytes for value - state_diffs: generate_state_diffs(false, true, MAX_PUBDATA_PER_BLOCK as usize / 37), - ..Default::default() - }); - - let lots_of_large_initial_writes = execute_test(L1MessengerTestData { - // Each big write will take at least 32 bytes for derived key + 32 bytes for value - state_diffs: generate_state_diffs(false, false, MAX_PUBDATA_PER_BLOCK as usize / 64), - ..Default::default() - }); - - let max_used_gas = vec![ - max_logs, - max_messages, - long_message, - max_bytecodes, - long_bytecode, - lots_of_small_repeated_writes, - lots_of_big_repeated_writes, - lots_of_small_initial_writes, - lots_of_large_initial_writes, - ] - .into_iter() - .max() - 
.unwrap(); - - // We use 2x overhead for the batch tip compared to the worst estimated scenario. - assert!( - max_used_gas * 2 <= BOOTLOADER_BATCH_TIP_OVERHEAD, - "BOOTLOADER_BATCH_TIP_OVERHEAD is too low" - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/bootloader.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/bootloader.rs deleted file mode 100644 index 47e047ebbf7..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/bootloader.rs +++ /dev/null @@ -1,56 +0,0 @@ -use zksync_types::U256; - -use crate::{ - interface::{ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_1::{ - constants::BOOTLOADER_HEAP_PAGE, - tests::{ - tester::VmTesterBuilder, - utils::{get_bootloader, verify_required_memory, BASE_SYSTEM_CONTRACTS}, - }, - HistoryEnabled, - }, -}; - -#[test] -fn test_dummy_bootloader() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!result.result.is_failed()); - - let correct_first_cell = U256::from_str_radix("123123123", 16).unwrap(); - verify_required_memory( - &vm.vm.state, - vec![(correct_first_cell, BOOTLOADER_HEAP_PAGE, 0)], - ); -} - -#[test] -fn test_bootloader_out_of_gas() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_gas_limit(10) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let res = vm.vm.execute(VmExecutionMode::Batch); - - assert_matches!( - res.result, - ExecutionResult::Halt { - reason: Halt::BootloaderOutOfGas - } - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/bytecode_publishing.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/bytecode_publishing.rs deleted file mode 100644 index 9db5e7326e7..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/bytecode_publishing.rs +++ /dev/null @@ -1,43 +0,0 @@ -use zksync_types::event::extract_long_l2_to_l1_messages; -use zksync_utils::bytecode::compress_bytecode; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_1::{ - tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - HistoryEnabled, - }, -}; - -#[test] -fn test_bytecode_publishing() { - // In this test, we aim to ensure that the contents of the compressed bytecodes - // are included as part of the L2->L1 long messages - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let compressed_bytecode = compress_bytecode(&counter).unwrap(); - - let DeployContractsTx { tx, .. 
} = account.get_deploy_tx(&counter, None, TxType::L2);
-    vm.vm.push_transaction(tx);
-    let result = vm.vm.execute(VmExecutionMode::OneTx);
-    assert!(!result.result.is_failed(), "Transaction wasn't successful");
-
-    vm.vm.execute(VmExecutionMode::Batch);
-
-    let state = vm.vm.get_current_execution_state();
-    let long_messages = extract_long_l2_to_l1_messages(&state.events);
-    assert!(
-        long_messages.contains(&compressed_bytecode),
-        "Bytecode not published"
-    );
-}
diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/call_tracer.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/call_tracer.rs
deleted file mode 100644
index 1a4c026a23f..00000000000
--- a/core/lib/multivm/src/versions/vm_1_4_1/tests/call_tracer.rs
+++ /dev/null
@@ -1,92 +0,0 @@
-use std::sync::Arc;
-
-use once_cell::sync::OnceCell;
-use zksync_types::{Address, Execute};
-
-use crate::{
-    interface::{TxExecutionMode, VmExecutionMode, VmInterface},
-    tracers::CallTracer,
-    vm_1_4_1::{
-        constants::BLOCK_GAS_LIMIT,
-        tests::{
-            tester::VmTesterBuilder,
-            utils::{read_max_depth_contract, read_test_contract},
-        },
-        HistoryEnabled, ToTracerPointer,
-    },
-};
-
-// This test is ultra slow, so it's ignored by default.
-#[test]
-#[ignore]
-fn test_max_depth() {
-    let contract = read_max_depth_contract();
-    let address = Address::random();
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_random_rich_accounts(1)
-        .with_deployer()
-        .with_gas_limit(BLOCK_GAS_LIMIT)
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_custom_contracts(vec![(contract, address, true)])
-        .build();
-
-    let account = &mut vm.rich_accounts[0];
-    let tx = account.get_l2_tx_for_execute(
-        Execute {
-            contract_address: address,
-            calldata: vec![],
-            value: Default::default(),
-            factory_deps: None,
-        },
-        None,
-    );
-
-    let result = Arc::new(OnceCell::new());
-    let call_tracer = CallTracer::new(result.clone()).into_tracer_pointer();
-    vm.vm.push_transaction(tx);
-    let res = vm.vm.inspect(call_tracer.into(), VmExecutionMode::OneTx);
-    assert!(result.get().is_some());
-    assert!(res.result.is_failed());
-}
-
-#[test]
-fn test_basic_behavior() {
-    let contract = read_test_contract();
-    let address = Address::random();
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_random_rich_accounts(1)
-        .with_deployer()
-        .with_gas_limit(BLOCK_GAS_LIMIT)
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_custom_contracts(vec![(contract, address, true)])
-        .build();
-
-    let increment_by_6_calldata =
-        "7cf5dab00000000000000000000000000000000000000000000000000000000000000006";
-
-    let account = &mut vm.rich_accounts[0];
-    let tx = account.get_l2_tx_for_execute(
-        Execute {
-            contract_address: address,
-            calldata: hex::decode(increment_by_6_calldata).unwrap(),
-            value: Default::default(),
-            factory_deps: None,
-        },
-        None,
-    );
-
-    let result = Arc::new(OnceCell::new());
-    let call_tracer = CallTracer::new(result.clone()).into_tracer_pointer();
-    vm.vm.push_transaction(tx);
-    let res = vm.vm.inspect(call_tracer.into(), VmExecutionMode::OneTx);
-
-    let call_tracer_result = result.get().unwrap();
-
-    assert_eq!(call_tracer_result.len(), 1);
-    // Expect that there are plenty of subcalls underneath.
- let subcall = &call_tracer_result[0].calls; - assert!(subcall.len() > 10); - assert!(!res.result.is_failed()); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/circuits.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/circuits.rs deleted file mode 100644 index ecc2fdfe6c0..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/circuits.rs +++ /dev/null @@ -1,69 +0,0 @@ -use zksync_types::{Address, Execute, U256}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_1::{constants::BLOCK_GAS_LIMIT, tests::tester::VmTesterBuilder, HistoryEnabled}, -}; - -// Checks that estimated number of circuits for simple transfer doesn't differ much -// from hardcoded expected value. -#[test] -fn test_circuits() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Address::random(), - calldata: Vec::new(), - value: U256::from(1u8), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let res = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let s = res.statistics.circuit_statistic; - // Check `circuit_statistic`. - const EXPECTED: [f32; 11] = [ - 1.1979, 0.1390, 1.5455, 0.0031, 1.0573, 0.00059, 0.00226, 0.00077, 0.1195, 0.1429, 0.0, - ]; - let actual = [ - (s.main_vm, "main_vm"), - (s.ram_permutation, "ram_permutation"), - (s.storage_application, "storage_application"), - (s.storage_sorter, "storage_sorter"), - (s.code_decommitter, "code_decommitter"), - (s.code_decommitter_sorter, "code_decommitter_sorter"), - (s.log_demuxer, "log_demuxer"), - (s.events_sorter, "events_sorter"), - (s.keccak256, "keccak256"), - (s.ecrecover, "ecrecover"), - (s.sha256, "sha256"), - ]; - for ((actual, name), expected) in actual.iter().zip(EXPECTED) { - if expected == 0.0 { - assert_eq!( - *actual, expected, - "Check failed for {}, expected {}, actual {}", - name, expected, actual - ); - } else { - let diff = (actual - expected) / expected; - assert!( - diff.abs() < 0.1, - "Check failed for {}, expected {}, actual {}", - name, - expected, - actual - ); - } - } -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/default_aa.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/default_aa.rs deleted file mode 100644 index be8e253c6d8..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/default_aa.rs +++ /dev/null @@ -1,78 +0,0 @@ -use zksync_system_constants::L2_ETH_TOKEN_ADDRESS; -use zksync_types::{ - get_code_key, get_known_code_key, get_nonce_key, - system_contracts::{DEPLOYMENT_NONCE_INCREMENT, TX_NONCE_INCREMENT}, - AccountTreeId, U256, -}; -use zksync_utils::u256_to_h256; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_1::{ - tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::{get_balance, read_test_contract, verify_required_storage}, - }, - utils::fee::get_batch_base_fee, - HistoryEnabled, - }, -}; - -#[test] -fn test_default_aa_interaction() { - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. 
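    // For reference, the fee identity asserted at the end of this test (a worked
    // sketch with illustrative numbers, not values taken from the real batch env):
    //
    //   maximal_fee  = gas_limit * base_fee                  // charged up front
    //   expected_fee = maximal_fee - gas_refunded * base_fee
    //                = (gas_limit - gas_refunded) * base_fee
    //
    //   e.g. gas_limit = 1_000_000, gas_refunded = 400_000, base_fee = 250_000_000 wei
    //        => the operator should end up with 600_000 * 250_000_000 = 1.5e14 wei.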
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let DeployContractsTx { - tx, - bytecode_hash, - address, - } = account.get_deploy_tx(&counter, None, TxType::L2); - let maximal_fee = tx.gas_limit() * get_batch_base_fee(&vm.vm.batch_env); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - vm.vm.get_current_execution_state(); - - // Both deployment and ordinary nonce should be incremented by one. - let account_nonce_key = get_nonce_key(&account.address); - let expected_nonce = TX_NONCE_INCREMENT + DEPLOYMENT_NONCE_INCREMENT; - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&bytecode_hash); - - // The contract should be deployed successfully. - let account_code_key = get_code_key(&address); - - let expected_slots = vec![ - (u256_to_h256(expected_nonce), account_nonce_key), - (u256_to_h256(U256::from(1u32)), known_codes_key), - (bytecode_hash, account_code_key), - ]; - - verify_required_storage(&vm.vm.state, expected_slots); - - let expected_fee = maximal_fee - - U256::from(result.refunds.gas_refunded) - * U256::from(get_batch_base_fee(&vm.vm.batch_env)); - let operator_balance = get_balance( - AccountTreeId::new(L2_ETH_TOKEN_ADDRESS), - &vm.fee_account, - vm.vm.state.storage.storage.get_ptr(), - ); - - assert_eq!( - operator_balance, expected_fee, - "Operator did not receive his fee" - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/gas_limit.rs deleted file mode 100644 index 9dfda9e1a68..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/gas_limit.rs +++ /dev/null @@ -1,45 +0,0 @@ -use zksync_types::{fee::Fee, Execute}; - -use crate::{ - interface::{TxExecutionMode, VmInterface}, - vm_1_4_1::{ - constants::{BOOTLOADER_HEAP_PAGE, TX_DESCRIPTION_OFFSET, TX_GAS_LIMIT_OFFSET}, - tests::tester::VmTesterBuilder, - HistoryDisabled, - }, -}; - -/// Checks that `TX_GAS_LIMIT_OFFSET` constant is correct. 
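/// The idea of the check below: the bootloader lays the encoded transaction out in its
/// heap, so after pushing a tx with a known `gas_limit` (9999 here), reading the heap slot
/// at `TX_DESCRIPTION_OFFSET + TX_GAS_LIMIT_OFFSET` back should yield exactly that limit;
/// if the constant ever drifts from the bootloader's memory layout, the assertion fails.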
-#[test]
-fn test_tx_gas_limit_offset() {
-    let mut vm = VmTesterBuilder::new(HistoryDisabled)
-        .with_empty_in_memory_storage()
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_random_rich_accounts(1)
-        .build();
-
-    let gas_limit = 9999.into();
-    let tx = vm.rich_accounts[0].get_l2_tx_for_execute(
-        Execute {
-            contract_address: Some(Default::default()),
-            ..Default::default()
-        },
-        Some(Fee {
-            gas_limit,
-            ..Default::default()
-        }),
-    );
-
-    vm.vm.push_transaction(tx);
-
-    let gas_limit_from_memory = vm
-        .vm
-        .state
-        .memory
-        .read_slot(
-            BOOTLOADER_HEAP_PAGE as usize,
-            TX_DESCRIPTION_OFFSET + TX_GAS_LIMIT_OFFSET,
-        )
-        .value;
-    assert_eq!(gas_limit_from_memory, gas_limit);
-}
diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/get_used_contracts.rs
deleted file mode 100644
index a7cbcd8e295..00000000000
--- a/core/lib/multivm/src/versions/vm_1_4_1/tests/get_used_contracts.rs
+++ /dev/null
@@ -1,109 +0,0 @@
-use std::collections::{HashMap, HashSet};
-
-use itertools::Itertools;
-use crate::interface::storage::WriteStorage;
-use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS;
-use zksync_test_account::Account;
-use zksync_types::{Execute, U256};
-use zksync_utils::{bytecode::hash_bytecode, h256_to_u256};
-
-use crate::{
-    interface::{TxExecutionMode, VmExecutionMode, VmInterface},
-    vm_1_4_1::{
-        tests::{
-            tester::{TxType, VmTesterBuilder},
-            utils::{read_test_contract, BASE_SYSTEM_CONTRACTS},
-        },
-        HistoryDisabled, Vm,
-    },
-    HistoryMode,
-};
-
-#[test]
-fn test_get_used_contracts() {
-    let mut vm = VmTesterBuilder::new(HistoryDisabled)
-        .with_empty_in_memory_storage()
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .build();
-
-    assert!(known_bytecodes_without_aa_code(&vm.vm).is_empty());
-
-    // Create, push, and execute a transaction with non-empty factory deps and a success
-    // status, to check that `get_used_contracts()` is updated.
-    let contract_code = read_test_contract();
-    let mut account = Account::random();
-    let tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 0 });
-    vm.vm.push_transaction(tx.tx.clone());
-    let result = vm.vm.execute(VmExecutionMode::OneTx);
-    assert!(!result.result.is_failed());
-
-    assert!(vm
-        .vm
-        .get_used_contracts()
-        .contains(&h256_to_u256(tx.bytecode_hash)));
-
-    // Note: `Default_AA` will be in the list of used contracts if an L2 tx is used
-    assert_eq!(
-        vm.vm
-            .get_used_contracts()
-            .into_iter()
-            .collect::<HashSet<U256>>(),
-        known_bytecodes_without_aa_code(&vm.vm)
-            .keys()
-            .cloned()
-            .collect::<HashSet<U256>>()
-    );
-
-    // Create, push, and execute a transaction with non-empty factory deps that fails
-    // (`known_bytecodes` will be updated, but we expect `get_used_contracts()` to stay unchanged).
-    let calldata = [1, 2, 3];
-    let big_calldata: Vec<u8> = calldata
-        .iter()
-        .cycle()
-        .take(calldata.len() * 1024)
-        .cloned()
-        .collect();
-    let account2 = Account::random();
-    let tx2 = account2.get_l1_tx(
-        Execute {
-            contract_address: CONTRACT_DEPLOYER_ADDRESS,
-            calldata: big_calldata,
-            value: Default::default(),
-            factory_deps: Some(vec![vec![1; 32]]),
-        },
-        1,
-    );
-
-    vm.vm.push_transaction(tx2.clone());
-
-    let res2 = vm.vm.execute(VmExecutionMode::OneTx);
-
-    assert!(res2.result.is_failed());
-
-    for factory_dep in tx2.execute.factory_deps.unwrap() {
-        let hash = hash_bytecode(&factory_dep);
-        let hash_to_u256 = h256_to_u256(hash);
-        assert!(known_bytecodes_without_aa_code(&vm.vm)
-            .keys()
-            .contains(&hash_to_u256));
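        // The failed deployment above still uploaded its factory dependency, so the
        // bytecode is present in `known_bytecodes`; but since the transaction reverted,
        // the bytecode was never actually executed, and it must therefore be absent
        // from `get_used_contracts()`; the next assertion checks exactly that.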
-        assert!(!vm.vm.get_used_contracts().contains(&hash_to_u256));
-    }
-}
-
-fn known_bytecodes_without_aa_code<S: WriteStorage, H: HistoryMode>(
-    vm: &Vm<S, H>,
-) -> HashMap<U256, Vec<U256>> {
-    let mut known_bytecodes_without_aa_code = vm
-        .state
-        .decommittment_processor
-        .known_bytecodes
-        .inner()
-        .clone();
-
-    known_bytecodes_without_aa_code
-        .remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash))
-        .unwrap();
-
-    known_bytecodes_without_aa_code
-}
diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/invalid_bytecode.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/invalid_bytecode.rs
deleted file mode 100644
index 75517138db3..00000000000
--- a/core/lib/multivm/src/versions/vm_1_4_1/tests/invalid_bytecode.rs
+++ /dev/null
@@ -1,120 +0,0 @@
-use zksync_types::H256;
-use zksync_utils::h256_to_u256;
-
-use crate::vm_1_4_1::tests::tester::VmTesterBuilder;
-use crate::vm_1_4_1::types::inputs::system_env::TxExecutionMode;
-use crate::vm_1_4_1::{HistoryEnabled, TxRevertReason};
-
-// TODO: this test requires a lot of hacks for bypassing the bytecode checks in the VM.
-// Port it later; it's not significant for now.
-
-#[test]
-fn test_invalid_bytecode() {
-    let mut vm_builder = VmTesterBuilder::new(HistoryEnabled)
-        .with_in_memory_storage()
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_random_rich_accounts(1);
-    let mut storage = vm_builder.take_storage();
-    let mut vm = vm_builder.build(&mut storage);
-
-    let block_gas_per_pubdata = vm_test_env
-        .block_context
-        .context
-        .block_gas_price_per_pubdata();
-
-    let mut test_vm_with_custom_bytecode_hash =
-        |bytecode_hash: H256, expected_revert_reason: Option<TxRevertReason>| {
-            let mut oracle_tools =
-                OracleTools::new(vm_test_env.storage_ptr.as_mut(), HistoryEnabled);
-
-            let (encoded_tx, predefined_overhead) = get_l1_tx_with_custom_bytecode_hash(
-                h256_to_u256(bytecode_hash),
-                block_gas_per_pubdata as u32,
-            );
-
-            run_vm_with_custom_factory_deps(
-                &mut oracle_tools,
-                vm_test_env.block_context.context,
-                &vm_test_env.block_properties,
-                encoded_tx,
-                predefined_overhead,
-                expected_revert_reason,
-            );
-        };
-
-    let failed_to_mark_factory_deps = |msg: &str, data: Vec<u8>| {
-        TxRevertReason::FailedToMarkFactoryDependencies(VmRevertReason::General {
-            msg: msg.to_string(),
-            data,
-        })
-    };
-
-    // Here we provide the correctly-formatted bytecode hash of
-    // odd length, so it should work.
-    test_vm_with_custom_bytecode_hash(
-        H256([
-            1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-            0, 0, 0,
-        ]),
-        None,
-    );
-
-    // Here we provide correctly formatted bytecode of even length, so
-    // it should fail.
-    test_vm_with_custom_bytecode_hash(
-        H256([
-            1, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-            0, 0, 0,
-        ]),
-        Some(failed_to_mark_factory_deps(
-            "Code length in words must be odd",
-            vec![
-                8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 67, 111, 100, 101, 32, 108, 101, 110,
-                103, 116, 104, 32, 105, 110, 32, 119, 111, 114, 100, 115, 32, 109, 117, 115, 116,
-                32, 98, 101, 32, 111, 100, 100,
-            ],
-        )),
-    );
-
-    // Here we provide incorrectly formatted bytecode of odd length, so
-    // it should fail.
- test_vm_with_custom_bytecode_hash( - H256([ - 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Incorrectly formatted bytecodeHash", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, - 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, - 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - )), - ); - - // Here we provide incorrectly formatted bytecode of odd length, so - // it should fail. - test_vm_with_custom_bytecode_hash( - H256([ - 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Incorrectly formatted bytecodeHash", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, - 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, - 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - )), - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/is_write_initial.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/is_write_initial.rs deleted file mode 100644 index 7644064f4af..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/is_write_initial.rs +++ /dev/null @@ -1,48 +0,0 @@ -use crate::interface::storage::ReadStorage; -use zksync_types::get_nonce_key; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_1::{ - tests::{ - tester::{Account, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - HistoryDisabled, - }, -}; - -#[test] -fn test_is_write_initial_behaviour() { - // In this test, we check result of `is_write_initial` at different stages. - // The main idea is to check that `is_write_initial` storage uses the correct cache for initial_writes and doesn't - // messed up it with the repeated writes during the one batch execution. - - let mut account = Account::random(); - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let nonce_key = get_nonce_key(&account.address); - // Check that the next write to the nonce key will be initial. - assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); - - let contract_code = read_test_contract(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L2).tx; - - vm.vm.push_transaction(tx); - vm.vm.execute(VmExecutionMode::OneTx); - - // Check that `is_write_initial` still returns true for the nonce key. 
-    assert!(vm
-        .storage
-        .as_ref()
-        .borrow_mut()
-        .is_write_initial(&nonce_key));
-}
diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/l1_tx_execution.rs
deleted file mode 100644
index 83e0f1715b8..00000000000
--- a/core/lib/multivm/src/versions/vm_1_4_1/tests/l1_tx_execution.rs
+++ /dev/null
@@ -1,189 +0,0 @@
-use ethabi::Token;
-use zksync_contracts::l1_messenger_contract;
-use zksync_system_constants::{BOOTLOADER_ADDRESS, L1_MESSENGER_ADDRESS};
-use zksync_types::{
-    get_code_key, get_known_code_key,
-    l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log},
-    storage_writes_deduplicator::StorageWritesDeduplicator,
-    Execute, ExecuteTransactionCommon, U256,
-};
-use zksync_utils::u256_to_h256;
-
-use crate::{
-    interface::{TxExecutionMode, VmExecutionMode, VmInterface},
-    vm_1_4_1::{
-        tests::{
-            tester::{TxType, VmTesterBuilder},
-            utils::{read_test_contract, verify_required_storage, BASE_SYSTEM_CONTRACTS},
-        },
-        types::internals::TransactionData,
-        HistoryEnabled,
-    },
-};
-
-#[test]
-fn test_l1_tx_execution() {
-    // In this test, we try to execute a contract deployment from L1.
-    // Here, instead of marking the code hash via bootloader means, we use
-    // L1->L2 communication, the same way it would likely be done in priority mode.
-
-    // There are always at least 7 initial writes here, because we pay fees from L1:
-    // - `totalSupply` of ETH token
-    // - balance of the refund recipient
-    // - balance of the bootloader
-    // - `tx_rolling` hash
-    // - rolling hash of L2->L1 logs
-    // - transaction number in block counter
-    // - L2->L1 log counter in `L1Messenger`
-
-    // TODO(PLA-537): right now we are using 4 slots instead of 7 due to 0 fee for transaction.
-    let basic_initial_writes = 4;
-
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone())
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_random_rich_accounts(1)
-        .build();
-
-    let contract_code = read_test_contract();
-    let account = &mut vm.rich_accounts[0];
-    let deploy_tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 1 });
-    let tx_data: TransactionData = deploy_tx.tx.clone().into();
-
-    let required_l2_to_l1_logs: Vec<_> = vec![L2ToL1Log {
-        shard_id: 0,
-        is_service: true,
-        tx_number_in_block: 0,
-        sender: BOOTLOADER_ADDRESS,
-        key: tx_data.tx_hash(0.into()),
-        value: u256_to_h256(U256::from(1u32)),
-    }]
-    .into_iter()
-    .map(UserL2ToL1Log)
-    .collect();
-
-    vm.vm.push_transaction(deploy_tx.tx.clone());
-
-    let res = vm.vm.execute(VmExecutionMode::OneTx);
-
-    // The code hash of the deployed contract should be marked as republished.
-    let known_codes_key = get_known_code_key(&deploy_tx.bytecode_hash);
-
-    // The contract should be deployed successfully.
-    let account_code_key = get_code_key(&deploy_tx.address);
-
-    let expected_slots = vec![
-        (u256_to_h256(U256::from(1u32)), known_codes_key),
-        (deploy_tx.bytecode_hash, account_code_key),
-    ];
-    assert!(!res.result.is_failed());
-
-    verify_required_storage(&vm.vm.state, expected_slots);
-
-    assert_eq!(res.logs.user_l2_to_l1_logs, required_l2_to_l1_logs);
-
-    let tx = account.get_test_contract_transaction(
-        deploy_tx.address,
-        true,
-        None,
-        false,
-        TxType::L1 { serial_id: 0 },
-    );
-    vm.vm.push_transaction(tx);
-    let res = vm.vm.execute(VmExecutionMode::OneTx);
-    let storage_logs = res.logs.storage_logs;
-    let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs);
-
-    // The tx panicked
-    assert_eq!(res.initial_storage_writes - basic_initial_writes, 0);
-
-    let tx = account.get_test_contract_transaction(
-        deploy_tx.address,
-        false,
-        None,
-        false,
-        TxType::L1 { serial_id: 0 },
-    );
-    vm.vm.push_transaction(tx.clone());
-    let res = vm.vm.execute(VmExecutionMode::OneTx);
-    let storage_logs = res.logs.storage_logs;
-    let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs);
-    // We changed one slot inside the contract
-    assert_eq!(res.initial_storage_writes - basic_initial_writes, 1);
-
-    // No repeated writes
-    let repeated_writes = res.repeated_storage_writes;
-    assert_eq!(res.repeated_storage_writes, 0);
-
-    vm.vm.push_transaction(tx);
-    let storage_logs = vm.vm.execute(VmExecutionMode::OneTx).logs.storage_logs;
-    let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs);
-    // We do the same storage write; it will be deduplicated, so there are still 4 initial writes and 0 repeated ones
-    assert_eq!(res.initial_storage_writes - basic_initial_writes, 1);
-    assert_eq!(res.repeated_storage_writes, repeated_writes);
-
-    let tx = account.get_test_contract_transaction(
-        deploy_tx.address,
-        false,
-        Some(10.into()),
-        false,
-        TxType::L1 { serial_id: 1 },
-    );
-    vm.vm.push_transaction(tx);
-    let result = vm.vm.execute(VmExecutionMode::OneTx);
-    // The method is not payable, so the tx should fail
-    assert!(result.result.is_failed(), "The transaction should fail");
-
-    let res = StorageWritesDeduplicator::apply_on_empty_state(&result.logs.storage_logs);
-    // There are only basic initial writes
-    assert_eq!(res.initial_storage_writes - basic_initial_writes, 2);
-}
-
-#[test]
-fn test_l1_tx_execution_high_gas_limit() {
-    // In this test, we try to execute an L1->L2 transaction with a high gas limit.
-    // Normally, priority transactions with a dangerously high gas limit should not even pass the checks on L1;
-    // however, they might slip through during the transition period to the new fee model, so we check that we can safely process them.
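    // In other words: the 300M gas limit set below is far beyond what a single batch
    // can actually supply, so the expectation is simply that the bootloader rejects
    // the transaction instead of panicking; the assertion at the end only checks
    // `res.result.is_failed()`, not any specific halt reason.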
-
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone())
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_random_rich_accounts(1)
-        .build();
-
-    let account = &mut vm.rich_accounts[0];
-
-    let l1_messenger = l1_messenger_contract();
-
-    let contract_function = l1_messenger.function("sendToL1").unwrap();
-    let params = [
-        // Even a message of size 100k should not be able to be sent by a priority transaction
-        Token::Bytes(vec![0u8; 100_000]),
-    ];
-    let calldata = contract_function.encode_input(&params).unwrap();
-
-    let mut tx = account.get_l1_tx(
-        Execute {
-            contract_address: Some(L1_MESSENGER_ADDRESS),
-            value: 0.into(),
-            factory_deps: None,
-            calldata,
-        },
-        0,
-    );
-
-    if let ExecuteTransactionCommon::L1(data) = &mut tx.common_data {
-        // Using some large gas limit
-        data.gas_limit = 300_000_000.into();
-    } else {
-        unreachable!()
-    };
-
-    vm.vm.push_transaction(tx);
-
-    let res = vm.vm.execute(VmExecutionMode::OneTx);
-
-    assert!(res.result.is_failed(), "The transaction should've failed");
-}
diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/mod.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/mod.rs
deleted file mode 100644
index a07608121bc..00000000000
--- a/core/lib/multivm/src/versions/vm_1_4_1/tests/mod.rs
+++ /dev/null
@@ -1,23 +0,0 @@
-mod bootloader;
-mod default_aa;
-// TODO - fix this test
-// `mod invalid_bytecode;`
-mod block_tip;
-mod bytecode_publishing;
-mod call_tracer;
-mod circuits;
-mod gas_limit;
-mod get_used_contracts;
-mod is_write_initial;
-mod l1_tx_execution;
-mod l2_blocks;
-mod nonce_holder;
-mod precompiles;
-mod refunds;
-mod require_eip712;
-mod rollbacks;
-mod simple_execution;
-mod tester;
-mod tracing_execution_error;
-mod upgrade;
-mod utils;
diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/nonce_holder.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/nonce_holder.rs
deleted file mode 100644
index 915a802b1e8..00000000000
--- a/core/lib/multivm/src/versions/vm_1_4_1/tests/nonce_holder.rs
+++ /dev/null
@@ -1,188 +0,0 @@
-use zksync_types::{Execute, Nonce};
-
-use crate::{
-    interface::{
-        ExecutionResult, Halt, TxExecutionMode, TxRevertReason, VmExecutionMode, VmInterface,
-        VmRevertReason,
-    },
-    vm_1_4_1::{
-        tests::{
-            tester::{Account, VmTesterBuilder},
-            utils::read_nonce_holder_tester,
-        },
-        types::internals::TransactionData,
-        HistoryEnabled,
-    },
-};
-
-pub enum NonceHolderTestMode {
-    SetValueUnderNonce,
-    IncreaseMinNonceBy5,
-    IncreaseMinNonceTooMuch,
-    LeaveNonceUnused,
-    IncreaseMinNonceBy1,
-    SwitchToArbitraryOrdering,
-}
-
-impl From<NonceHolderTestMode> for u8 {
-    fn from(mode: NonceHolderTestMode) -> u8 {
-        match mode {
-            NonceHolderTestMode::SetValueUnderNonce => 0,
-            NonceHolderTestMode::IncreaseMinNonceBy5 => 1,
-            NonceHolderTestMode::IncreaseMinNonceTooMuch => 2,
-            NonceHolderTestMode::LeaveNonceUnused => 3,
-            NonceHolderTestMode::IncreaseMinNonceBy1 => 4,
-            NonceHolderTestMode::SwitchToArbitraryOrdering => 5,
-        }
-    }
-}
-
-#[test]
-fn test_nonce_holder() {
-    let mut account = Account::random();
-
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_deployer()
-        .with_custom_contracts(vec![(
-            read_nonce_holder_tester().to_vec(),
-            account.address,
-            true,
-        )])
-        .with_rich_accounts(vec![account.clone()])
-        .build();
-
-    let mut run_nonce_test = |nonce: u32,
-                              test_mode: NonceHolderTestMode,
-                              error_message: Option<String>,
-                              comment: &'static str| {
-        // In this test we have to reset the VM state after each test case, because once the
-        // bootloader has failed during transaction validation, it will fail again and again.
-        // At the same time, we have to keep the same storage, because we want to preserve the
-        // nonce holder contract state. The easiest way in terms of lifetimes is to reuse
-        // `vm_builder` to achieve this.
-        vm.reset_state(true);
-        let mut transaction_data: TransactionData = account
-            .get_l2_tx_for_execute_with_nonce(
-                Execute {
-                    contract_address: account.address,
-                    calldata: vec![12],
-                    value: Default::default(),
-                    factory_deps: None,
-                },
-                None,
-                Nonce(nonce),
-            )
-            .into();
-
-        transaction_data.signature = vec![test_mode.into()];
-        vm.vm.push_raw_transaction(transaction_data, 0, 0, true);
-        let result = vm.vm.execute(VmExecutionMode::OneTx);
-
-        if let Some(msg) = error_message {
-            let expected_error =
-                TxRevertReason::Halt(Halt::ValidationFailed(VmRevertReason::General {
-                    msg,
-                    data: vec![],
-                }));
-            let ExecutionResult::Halt { reason } = result.result else {
-                panic!("Expected revert, got {:?}", result.result);
-            };
-            assert_eq!(
-                reason.to_string(),
-                expected_error.to_string(),
-                "{}",
-                comment
-            );
-        } else {
-            assert!(!result.result.is_failed(), "{}", comment);
-        }
-    };
-    // Test 1: trying to set a value under a non-sequential nonce value.
-    run_nonce_test(
-        1u32,
-        NonceHolderTestMode::SetValueUnderNonce,
-        Some("Previous nonce has not been used".to_string()),
-        "Allowed to set value under a non-sequential nonce",
-    );
-
-    // Test 2: increase min nonce by 1 with sequential nonce ordering:
-    run_nonce_test(
-        0u32,
-        NonceHolderTestMode::IncreaseMinNonceBy1,
-        None,
-        "Failed to increment nonce by 1 for sequential account",
-    );
-
-    // Test 3: correctly set a value under a nonce with sequential nonce ordering:
-    run_nonce_test(
-        1u32,
-        NonceHolderTestMode::SetValueUnderNonce,
-        None,
-        "Failed to set value under a sequential nonce",
-    );
-
-    // Test 5: migrate to arbitrary nonce ordering:
-    run_nonce_test(
-        2u32,
-        NonceHolderTestMode::SwitchToArbitraryOrdering,
-        None,
-        "Failed to switch to arbitrary ordering",
-    );
-
-    // Test 6: increase min nonce by 5
-    run_nonce_test(
-        6u32,
-        NonceHolderTestMode::IncreaseMinNonceBy5,
-        None,
-        "Failed to increase min nonce by 5",
-    );
-
-    // Test 7: since the nonces in range [6,10] are no longer allowed, the
-    // tx with nonce 10 should not be allowed
-    run_nonce_test(
-        10u32,
-        NonceHolderTestMode::IncreaseMinNonceBy5,
-        Some("Reusing the same nonce twice".to_string()),
-        "Allowed to reuse nonce below the minimal one",
-    );
-
-    // Test 8: we should be able to use nonce 13
-    run_nonce_test(
-        13u32,
-        NonceHolderTestMode::SetValueUnderNonce,
-        None,
-        "Did not allow to use unused nonce 13",
-    );
-
-    // Test 9: we should not be able to reuse nonce 13
-    run_nonce_test(
-        13u32,
-        NonceHolderTestMode::IncreaseMinNonceBy5,
-        Some("Reusing the same nonce twice".to_string()),
-        "Allowed to reuse the same nonce twice",
-    );
-
-    // Test 10: we should be able to simply use nonce 14, while bumping the minimal nonce by 5
-    run_nonce_test(
-        14u32,
-        NonceHolderTestMode::IncreaseMinNonceBy5,
-        None,
-        "Did not allow to use a bumped nonce",
-    );
-
-    // Test 11: Do not allow bumping the nonce by too much
-    run_nonce_test(
-        16u32,
-        NonceHolderTestMode::IncreaseMinNonceTooMuch,
-        Some("The value for incrementing the nonce is too high".to_string()),
-        "Allowed incrementing min nonce too much",
-    );
-
-    // Test 12: Do not allow not setting a nonce as
used - run_nonce_test( - 16u32, - NonceHolderTestMode::LeaveNonceUnused, - Some("The nonce was not set as used".to_string()), - "Allowed to leave nonce as unused", - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/precompiles.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/precompiles.rs deleted file mode 100644 index 37e871fbc70..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/precompiles.rs +++ /dev/null @@ -1,136 +0,0 @@ -use zk_evm_1_4_1::zk_evm_abstractions::precompiles::PrecompileAddress; -use zksync_types::{Address, Execute}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_1::{ - constants::BLOCK_GAS_LIMIT, - tests::{tester::VmTesterBuilder, utils::read_precompiles_contract}, - HistoryEnabled, - }, -}; - -#[test] -fn test_keccak() { - // Execute special transaction and check that at least 1000 keccak calls were made. - let contract = read_precompiles_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contract, address, true)]) - .build(); - - // calldata for `doKeccak(1000)`. - let keccak1000_calldata = - "370f20ac00000000000000000000000000000000000000000000000000000000000003e8"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: hex::decode(keccak1000_calldata).unwrap(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let keccak_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::Keccak256) - .count(); - - assert!(keccak_count >= 1000); -} - -#[test] -fn test_sha256() { - // Execute special transaction and check that at least 1000 `sha256` calls were made. - let contract = read_precompiles_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contract, address, true)]) - .build(); - - // calldata for `doSha256(1000)`. - let sha1000_calldata = - "5d0b4fb500000000000000000000000000000000000000000000000000000000000003e8"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: hex::decode(sha1000_calldata).unwrap(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let sha_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::SHA256) - .count(); - - assert!(sha_count >= 1000); -} - -#[test] -fn test_ecrecover() { - // Execute simple transfer and check that exactly 1 `ecrecover` call was made (it's done during tx validation). 
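    // Unlike the keccak/sha256 tests above, no special contract is deployed here: the
    // one expected `ecrecover` invocation comes from the account's signature validation
    // (each EOA-style tx is ECDSA-verified once during validation), so a plain self-call
    // with empty calldata is enough to observe exactly one precompile cycle entry.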
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: account.address, - calldata: Vec::new(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let ecrecover_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::Ecrecover) - .count(); - - assert_eq!(ecrecover_count, 1); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/refunds.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/refunds.rs deleted file mode 100644 index 8700eb14b53..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/refunds.rs +++ /dev/null @@ -1,166 +0,0 @@ -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_1::{ - tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - types::internals::TransactionData, - HistoryEnabled, - }, -}; - -#[test] -fn test_predetermined_refunded_gas() { - // In this test, we compare the execution of the bootloader with the predefined - // refunded gas and without them - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_batch = vm.vm.batch_env.clone(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let DeployContractsTx { - tx, - bytecode_hash: _, - address: _, - } = account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(!result.result.is_failed()); - - // If the refund provided by the operator or the final refund are the 0 - // there is no impact of the operator's refund at all and so this test does not - // make much sense. - assert!( - result.refunds.operator_suggested_refund > 0, - "The operator's refund is 0" - ); - assert!(result.refunds.gas_refunded > 0, "The final refund is 0"); - - let result_without_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_without_predefined_refunds = vm.vm.get_current_execution_state(); - assert!(!result_without_predefined_refunds.result.is_failed(),); - - // Here we want to provide the same refund from the operator and check that it's the correct one. - // We execute the whole block without refund tracer, because refund tracer will eventually override the provided refund. 
- // But the overall result should be the same - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let tx: TransactionData = tx.into(); - // Overhead - let overhead = tx.overhead_gas(); - vm.vm - .push_raw_transaction(tx.clone(), overhead, result.refunds.gas_refunded, true); - - let result_with_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_with_predefined_refunds = vm.vm.get_current_execution_state(); - - assert!(!result_with_predefined_refunds.result.is_failed()); - - // We need to sort these lists as those are flattened from HashMaps - current_state_with_predefined_refunds - .used_contract_hashes - .sort(); - current_state_without_predefined_refunds - .used_contract_hashes - .sort(); - - assert_eq!( - current_state_with_predefined_refunds.events, - current_state_without_predefined_refunds.events - ); - - assert_eq!( - current_state_with_predefined_refunds.user_l2_to_l1_logs, - current_state_without_predefined_refunds.user_l2_to_l1_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.system_logs, - current_state_without_predefined_refunds.system_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.storage_log_queries, - current_state_without_predefined_refunds.storage_log_queries - ); - assert_eq!( - current_state_with_predefined_refunds.used_contract_hashes, - current_state_without_predefined_refunds.used_contract_hashes - ); - - // In this test we put the different refund from the operator. - // We still can't use the refund tracer, because it will override the refund. - // But we can check that the logs and events have changed. 
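    // So the overall structure of this test: run 1 measures the refund organically,
    // run 2 replays the same transaction with that refund supplied up front and expects
    // an identical execution state, and run 3 (below) perturbs the refund by +1000 gas
    // and expects the events, system logs, and storage queries to differ while the set
    // of used contract hashes stays the same.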
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_l1_batch_env(l1_batch)
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_rich_accounts(vec![account.clone()])
-        .build();
-
-    let changed_operator_suggested_refund = result.refunds.gas_refunded + 1000;
-    vm.vm
-        .push_raw_transaction(tx, overhead, changed_operator_suggested_refund, true);
-    let result = vm.vm.execute(VmExecutionMode::Batch);
-    let mut current_state_with_changed_predefined_refunds = vm.vm.get_current_execution_state();
-
-    assert!(!result.result.is_failed());
-    current_state_with_changed_predefined_refunds
-        .used_contract_hashes
-        .sort();
-    current_state_without_predefined_refunds
-        .used_contract_hashes
-        .sort();
-
-    assert_eq!(
-        current_state_with_changed_predefined_refunds.events.len(),
-        current_state_without_predefined_refunds.events.len()
-    );
-
-    assert_ne!(
-        current_state_with_changed_predefined_refunds.events,
-        current_state_without_predefined_refunds.events
-    );
-
-    assert_eq!(
-        current_state_with_changed_predefined_refunds.user_l2_to_l1_logs,
-        current_state_without_predefined_refunds.user_l2_to_l1_logs
-    );
-
-    assert_ne!(
-        current_state_with_changed_predefined_refunds.system_logs,
-        current_state_without_predefined_refunds.system_logs
-    );
-
-    assert_eq!(
-        current_state_with_changed_predefined_refunds
-            .storage_log_queries
-            .len(),
-        current_state_without_predefined_refunds
-            .storage_log_queries
-            .len()
-    );
-
-    assert_ne!(
-        current_state_with_changed_predefined_refunds.storage_log_queries,
-        current_state_without_predefined_refunds.storage_log_queries
-    );
-    assert_eq!(
-        current_state_with_changed_predefined_refunds.used_contract_hashes,
-        current_state_without_predefined_refunds.used_contract_hashes
-    );
-}
diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/require_eip712.rs
deleted file mode 100644
index aebc956e673..00000000000
--- a/core/lib/multivm/src/versions/vm_1_4_1/tests/require_eip712.rs
+++ /dev/null
@@ -1,165 +0,0 @@
-use std::convert::TryInto;
-
-use ethabi::Token;
-use zksync_eth_signer::{raw_ethereum_tx::TransactionParameters, EthereumSigner};
-use zksync_system_constants::L2_ETH_TOKEN_ADDRESS;
-use zksync_types::{
-    fee::Fee, l2::L2Tx, transaction_request::TransactionRequest,
-    utils::storage_key_for_standard_token_balance, AccountTreeId, Address, Eip712Domain, Execute,
-    L2ChainId, Nonce, Transaction, U256,
-};
-
-use crate::{
-    interface::{TxExecutionMode, VmExecutionMode, VmInterface},
-    vm_1_4_1::{
-        tests::{
-            tester::{Account, VmTester, VmTesterBuilder},
-            utils::read_many_owners_custom_account_contract,
-        },
-        HistoryDisabled,
-    },
-};
-
-impl VmTester<HistoryDisabled> {
-    pub(crate) fn get_eth_balance(&mut self, address: Address) -> U256 {
-        let key = storage_key_for_standard_token_balance(
-            AccountTreeId::new(L2_ETH_TOKEN_ADDRESS),
-            &address,
-        );
-        self.vm.state.storage.storage.read_from_storage(&key)
-    }
-}
-
-// TODO: refactor this test; it uses too many internal details of the VM
-#[tokio::test]
-/// This test deploys 'buggy' account abstraction code, and then tries accessing it both with legacy
-/// and EIP712 transactions.
-/// Currently we support both, but in the future, we should allow only EIP712 transactions to access the AA accounts.
-async fn test_require_eip712() { - // Use 3 accounts: - // - `private_address` - EOA account, where we have the key - // - `account_address` - AA account, where the contract is deployed - // - beneficiary - an EOA account, where we'll try to transfer the tokens. - let account_abstraction = Account::random(); - let mut private_account = Account::random(); - let beneficiary = Account::random(); - - let (bytecode, contract) = read_many_owners_custom_account_contract(); - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_custom_contracts(vec![(bytecode, account_abstraction.address, true)]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account_abstraction.clone(), private_account.clone()]) - .build(); - - assert_eq!(vm.get_eth_balance(beneficiary.address), U256::from(0)); - - let chain_id: u32 = 270; - - // First, let's set the owners of the AA account to the `private_address`. - // (so that messages signed by `private_address`, are authorized to act on behalf of the AA account). - let set_owners_function = contract.function("setOwners").unwrap(); - let encoded_input = set_owners_function - .encode_input(&[Token::Array(vec![Token::Address(private_account.address)])]) - .unwrap(); - - let tx = private_account.get_l2_tx_for_execute( - Execute { - contract_address: account_abstraction.address, - calldata: encoded_input, - value: Default::default(), - factory_deps: None, - }, - None, - ); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - let private_account_balance = vm.get_eth_balance(private_account.address); - - // And now let's do the transfer from the 'account abstraction' to 'beneficiary' (using 'legacy' transaction). - // Normally this would not work - unless the operator is malicious. - let aa_raw_tx = TransactionParameters { - nonce: U256::from(0), - to: Some(beneficiary.address), - gas: U256::from(100000000), - gas_price: Some(U256::from(10000000)), - value: U256::from(888000088), - data: vec![], - chain_id: 270, - transaction_type: None, - access_list: None, - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - }; - - let aa_tx = private_account.sign_legacy_tx(aa_raw_tx).await; - let (tx_request, hash) = TransactionRequest::from_bytes(&aa_tx, L2ChainId::from(270)).unwrap(); - - let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000).unwrap(); - l2_tx.set_input(aa_tx, hash); - // Pretend that operator is malicious and sets the initiator to the AA account. - l2_tx.common_data.initiator_address = account_abstraction.address; - let transaction: Transaction = l2_tx.try_into().unwrap(); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(888000088) - ); - // Make sure that the tokens were transferred from the AA account. 
- assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); - - // // Now send the 'classic' EIP712 transaction - let tx_712 = L2Tx::new( - beneficiary.address, - vec![], - Nonce(1), - Fee { - gas_limit: U256::from(1000000000), - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - gas_per_pubdata_limit: U256::from(1000000000), - }, - account_abstraction.address, - U256::from(28374938), - None, - Default::default(), - ); - - let transaction_request: TransactionRequest = tx_712.into(); - - let domain = Eip712Domain::new(L2ChainId::from(chain_id)); - let signature = private_account - .get_pk_signer() - .sign_typed_data(&domain, &transaction_request) - .await - .unwrap(); - let encoded_tx = transaction_request.get_signed_bytes(&signature, L2ChainId::from(chain_id)); - - let (aa_txn_request, aa_hash) = - TransactionRequest::from_bytes(&encoded_tx, L2ChainId::from(chain_id)).unwrap(); - - let mut l2_tx = L2Tx::from_request(aa_txn_request, 100000).unwrap(); - l2_tx.set_input(encoded_tx, aa_hash); - - let transaction: Transaction = l2_tx.try_into().unwrap(); - vm.vm.push_transaction(transaction); - vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(916375026) - ); - assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/rollbacks.rs deleted file mode 100644 index 2ae942c2652..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/rollbacks.rs +++ /dev/null @@ -1,263 +0,0 @@ -use ethabi::Token; -use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams}; -use crate::interface::storage::WriteStorage; -use zksync_types::{get_nonce_key, Execute, U256}; - -use crate::{ - interface::{ - dyn_tracers::vm_1_4_1::DynTracer, - tracer::{TracerExecutionStatus, TracerExecutionStopReason}, - TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceHistoryEnabled, - }, - vm_1_4_1::{ - tests::{ - tester::{DeployContractsTx, TransactionTestInfo, TxModifier, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - types::internals::ZkSyncVmState, - BootloaderState, HistoryEnabled, HistoryMode, SimpleMemory, ToTracerPointer, VmTracer, - }, -}; - -#[test] -fn test_vm_rollbacks() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let mut account = vm.rich_accounts[0].clone(); - let counter = read_test_contract(); - let tx_0 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_1 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_2 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(tx_0.clone(), false), - TransactionTestInfo::new_processed(tx_1.clone(), false), - TransactionTestInfo::new_processed(tx_2.clone(), false), - ]); - - // reset vm - vm.reset_with_empty_storage(); - - let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignatureLength.into()), - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongMagicValue.into()), - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignature.into()), - // The correct 
nonce is 0, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_0.clone(), false), - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_1, false), - // The correct nonce is 2, this tx will fail - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_2.clone(), false), - // This tx will fail - TransactionTestInfo::new_rejected(tx_2, TxModifier::NonceReused.into()), - TransactionTestInfo::new_rejected(tx_0, TxModifier::NonceReused.into()), - ]); - - assert_eq!(result_without_rollbacks, result_with_rollbacks); -} - -#[test] -fn test_vm_loadnext_rollbacks() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let mut account = vm.rich_accounts[0].clone(); - - let loadnext_contract = get_loadnext_contract(); - let loadnext_constructor_data = &[Token::Uint(U256::from(100))]; - let DeployContractsTx { - tx: loadnext_deploy_tx, - address, - .. - } = account.get_deploy_tx_with_factory_deps( - &loadnext_contract.bytecode, - Some(loadnext_constructor_data), - loadnext_contract.factory_deps.clone(), - TxType::L2, - ); - - let loadnext_tx_1 = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let loadnext_tx_2 = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_1.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), false), - ]); - - // reset vm - vm.reset_with_empty_storage(); - - let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_1.clone(), true), - TransactionTestInfo::new_rejected( - loadnext_deploy_tx.clone(), - TxModifier::NonceReused.into(), - ), - TransactionTestInfo::new_processed(loadnext_tx_1, false), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_rejected(loadnext_deploy_tx, TxModifier::NonceReused.into()), - TransactionTestInfo::new_processed(loadnext_tx_2, false), - ]); - - assert_eq!(result_without_rollbacks, result_with_rollbacks); -} - -// Testing tracer that does not allow the recursion to go deeper than a certain limit -struct MaxRecursionTracer { - max_recursion_depth: usize, -} - -/// Tracer responsible for calculating the number 
of call stack frames and -/// stopping the VM execution once the recursion depth limit is reached. -impl DynTracer> for MaxRecursionTracer {} - -impl VmTracer for MaxRecursionTracer { - fn finish_cycle( - &mut self, - state: &mut ZkSyncVmState, - _bootloader_state: &mut BootloaderState, - ) -> TracerExecutionStatus { - let current_depth = state.local_state.callstack.depth(); - - if current_depth > self.max_recursion_depth { - TracerExecutionStatus::Stop(TracerExecutionStopReason::Finish) - } else { - TracerExecutionStatus::Continue - } - } -} - -#[test] -fn test_layered_rollback() { - // This test checks that the layered rollbacks work correctly, i.e. - // the rollback by the operator will always revert all the changes. - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - let loadnext_contract = get_loadnext_contract().bytecode; - - let DeployContractsTx { - tx: deploy_tx, - address, - .. - } = account.get_deploy_tx( - &loadnext_contract, - Some(&[Token::Uint(0.into())]), - TxType::L2, - ); - vm.vm.push_transaction(deploy_tx); - let deployment_res = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!deployment_res.result.is_failed(), "transaction failed"); - - let loadnext_transaction = account.get_loadnext_transaction( - address, - LoadnextContractExecutionParams { - writes: 1, - recursive_calls: 20, - ..LoadnextContractExecutionParams::empty() - }, - TxType::L2, - ); - - let nonce_val = vm - .vm - .state - .storage - .storage - .read_from_storage(&get_nonce_key(&account.address)); - - vm.vm.make_snapshot(); - - vm.vm.push_transaction(loadnext_transaction.clone()); - vm.vm.inspect( - MaxRecursionTracer { - max_recursion_depth: 15, - } - .into_tracer_pointer() - .into(), - VmExecutionMode::OneTx, - ); - - let nonce_val2 = vm - .vm - .state - .storage - .storage - .read_from_storage(&get_nonce_key(&account.address)); - - // The tracer stopped after validation passed, so the nonce has already been increased - assert_eq!(nonce_val + U256::one(), nonce_val2, "nonce did not change"); - - vm.vm.rollback_to_the_latest_snapshot(); - - let nonce_val_after_rollback = vm - .vm - .state - .storage - .storage - .read_from_storage(&get_nonce_key(&account.address)); - - assert_eq!( - nonce_val, nonce_val_after_rollback, - "nonce changed after rollback" - ); - - vm.vm.push_transaction(loadnext_transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "transaction must not fail"); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/simple_execution.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/simple_execution.rs deleted file mode 100644 index 384bc4cf325..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/simple_execution.rs +++ /dev/null @@ -1,81 +0,0 @@ -use crate::{ - interface::{ExecutionResult, VmExecutionMode, VmInterface}, - vm_1_4_1::{ - tests::tester::{TxType, VmTesterBuilder}, - HistoryDisabled, - }, -}; - -#[test] -fn estimate_fee() { - let mut vm_tester = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - vm_tester.deploy_test_contract(); - let account = &mut vm_tester.rich_accounts[0]; - - let tx = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L2, - ); - - vm_tester.vm.push_transaction(tx); - 
let result = vm_tester.vm.execute(VmExecutionMode::OneTx); - assert_matches!(result.result, ExecutionResult::Success { .. }); -} - -#[test] -fn simple_execute() { - let mut vm_tester = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - vm_tester.deploy_test_contract(); - - let account = &mut vm_tester.rich_accounts[0]; - - let tx1 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - - let tx2 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - true, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - - let tx3 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - let vm = &mut vm_tester.vm; - vm.push_transaction(tx1); - vm.push_transaction(tx2); - vm.push_transaction(tx3); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Success { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Revert { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Success { .. }); - let block_tip = vm.execute(VmExecutionMode::Batch); - assert_matches!(block_tip.result, ExecutionResult::Success { .. }); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/inner_state.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/inner_state.rs deleted file mode 100644 index 11e9d7fd6df..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/inner_state.rs +++ /dev/null @@ -1,131 +0,0 @@ -use std::collections::HashMap; - -use zk_evm_1_4_1::{aux_structures::Timestamp, vm_state::VmLocalState}; -use crate::interface::storage::WriteStorage; -use zksync_types::{StorageKey, StorageValue, U256}; - -use crate::{ - vm_1_4_1::{ - old_vm::{ - event_sink::InMemoryEventSink, - history_recorder::{AppDataFrameManagerWithHistory, HistoryRecorder}, - }, - utils::logs::StorageLogQuery, - HistoryEnabled, HistoryMode, SimpleMemory, Vm, - }, - HistoryMode as CommonHistoryMode, -}; - -#[derive(Clone, Debug)] -pub(crate) struct ModifiedKeysMap(HashMap); - -// We consider hashmaps to be equal even if there is a key -// that is not present in one but has zero value in another. -impl PartialEq for ModifiedKeysMap { - fn eq(&self, other: &Self) -> bool { - for (key, value) in self.0.iter() { - if *value != other.0.get(key).cloned().unwrap_or_default() { - return false; - } - } - for (key, value) in other.0.iter() { - if *value != self.0.get(key).cloned().unwrap_or_default() { - return false; - } - } - true - } -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct DecommitterTestInnerState { - /// There is no way to "truly" compare the storage pointer, - /// so we just compare the modified keys. This is reasonable enough. - pub(crate) modified_storage_keys: ModifiedKeysMap, - pub(crate) known_bytecodes: HistoryRecorder>, H>, - pub(crate) decommitted_code_hashes: HistoryRecorder, HistoryEnabled>, -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct StorageOracleInnerState { - /// There is no way to "truly" compare the storage pointer, - /// so we just compare the modified keys. This is reasonable enough. 
- pub(crate) modified_storage_keys: ModifiedKeysMap, - - pub(crate) frames_stack: AppDataFrameManagerWithHistory, H>, - - pub(crate) pre_paid_changes: HistoryRecorder, H>, - pub(crate) paid_changes: HistoryRecorder, H>, - pub(crate) initial_values: HistoryRecorder, H>, - pub(crate) returned_refunds: HistoryRecorder, H>, -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct PrecompileProcessorTestInnerState { - pub(crate) timestamp_history: HistoryRecorder, H>, -} - -/// A struct that encapsulates the state of the VM's oracles -/// The state is to be used in tests. -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct VmInstanceInnerState { - event_sink: InMemoryEventSink, - precompile_processor_state: PrecompileProcessorTestInnerState, - memory: SimpleMemory, - decommitter_state: DecommitterTestInnerState, - storage_oracle_state: StorageOracleInnerState, - local_state: VmLocalState, -} - -impl Vm { - // Dump inner state of the VM. - pub(crate) fn dump_inner_state(&self) -> VmInstanceInnerState { - let event_sink = self.state.event_sink.clone(); - let precompile_processor_state = PrecompileProcessorTestInnerState { - timestamp_history: self.state.precompiles_processor.timestamp_history.clone(), - }; - let memory = self.state.memory.clone(); - let decommitter_state = DecommitterTestInnerState { - modified_storage_keys: ModifiedKeysMap( - self.state - .decommittment_processor - .get_storage() - .borrow() - .modified_storage_keys() - .clone(), - ), - known_bytecodes: self.state.decommittment_processor.known_bytecodes.clone(), - decommitted_code_hashes: self - .state - .decommittment_processor - .get_decommitted_code_hashes_with_history() - .clone(), - }; - let storage_oracle_state = StorageOracleInnerState { - modified_storage_keys: ModifiedKeysMap( - self.state - .storage - .storage - .get_ptr() - .borrow() - .modified_storage_keys() - .clone(), - ), - frames_stack: self.state.storage.frames_stack.clone(), - pre_paid_changes: self.state.storage.pre_paid_changes.clone(), - paid_changes: self.state.storage.paid_changes.clone(), - initial_values: self.state.storage.initial_values.clone(), - returned_refunds: self.state.storage.returned_refunds.clone(), - }; - let local_state = self.state.local_state.clone(); - - VmInstanceInnerState { - event_sink, - precompile_processor_state, - memory, - decommitter_state, - storage_oracle_state, - local_state, - } - } -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/mod.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/mod.rs deleted file mode 100644 index c3cc5d8d980..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/mod.rs +++ /dev/null @@ -1,9 +0,0 @@ -pub(crate) use transaction_test_info::{ExpectedError, TransactionTestInfo, TxModifier}; -pub(crate) use vm_tester::{ - default_l1_batch, get_empty_storage, InMemoryStorageView, VmTester, VmTesterBuilder, -}; -pub(crate) use zksync_test_account::{Account, DeployContractsTx, TxType}; - -mod inner_state; -mod transaction_test_info; -mod vm_tester; diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/transaction_test_info.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/transaction_test_info.rs deleted file mode 100644 index 443acf71676..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/transaction_test_info.rs +++ /dev/null @@ -1,217 +0,0 @@ -use zksync_types::{ExecuteTransactionCommon, Transaction}; - -use crate::{ - interface::{ - CurrentExecutionState, ExecutionResult, Halt, TxRevertReason, VmExecutionMode, - 
VmExecutionResultAndLogs, VmInterface, VmInterfaceHistoryEnabled, VmRevertReason, - }, - vm_1_4_1::{tests::tester::vm_tester::VmTester, HistoryEnabled}, -}; - -#[derive(Debug, Clone)] -pub(crate) enum TxModifier { - WrongSignatureLength, - WrongSignature, - WrongMagicValue, - WrongNonce, - NonceReused, -} - -#[derive(Debug, Clone)] -pub(crate) enum TxExpectedResult { - Rejected { error: ExpectedError }, - Processed { rollback: bool }, -} - -#[derive(Debug, Clone)] -pub(crate) struct TransactionTestInfo { - tx: Transaction, - result: TxExpectedResult, -} - -#[derive(Debug, Clone)] -pub(crate) struct ExpectedError { - pub(crate) revert_reason: TxRevertReason, - pub(crate) modifier: Option, -} - -impl From for ExpectedError { - fn from(value: TxModifier) -> Self { - let revert_reason = match value { - TxModifier::WrongSignatureLength => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Signature length is incorrect".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 29, 83, 105, 103, 110, 97, 116, 117, 114, 101, 32, - 108, 101, 110, 103, 116, 104, 32, 105, 115, 32, 105, 110, 99, 111, 114, 114, 101, 99, - 116, 0, 0, 0, - ], - }) - } - TxModifier::WrongSignature => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Account validation returned invalid magic value. Most often this means that the signature is incorrect".to_string(), - data: vec![], - }) - } - TxModifier::WrongMagicValue => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "v is neither 27 nor 28".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 22, 118, 32, 105, 115, 32, 110, 101, 105, 116, 104, - 101, 114, 32, 50, 55, 32, 110, 111, 114, 32, 50, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - }) - - } - TxModifier::WrongNonce => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Incorrect nonce".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 73, 110, 99, 111, 114, 114, 101, 99, 116, 32, 110, - 111, 110, 99, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - }) - } - TxModifier::NonceReused => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Reusing the same nonce twice".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 82, 101, 117, 115, 105, 110, 103, 32, 116, 104, - 101, 32, 115, 97, 109, 101, 32, 110, 111, 110, 99, 101, 32, 116, 119, 105, 99, 101, 0, - 0, 0, 0, - ], - }) - } - }; - - ExpectedError { - revert_reason: TxRevertReason::Halt(revert_reason), - modifier: Some(value), - } - } -} - -impl TransactionTestInfo { - pub(crate) fn new_rejected( - mut transaction: Transaction, - expected_error: ExpectedError, - ) -> Self { - transaction.common_data = match transaction.common_data { - ExecuteTransactionCommon::L2(mut data) => { - if let Some(modifier) = &expected_error.modifier { - match modifier { - 
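- // Corrupt the transaction so that validation fails with the matching revert reason constructed in the `From` impl above. (The non-empty `data` blobs there are plain ABI encodings of `Error(string)`: the 4-byte selector `0x08c379a0`, i.e. bytes `[8, 195, 121, 160]`, followed by the 32-byte string offset, the string length, and the message bytes.)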
TxModifier::WrongSignatureLength => { - data.signature = data.signature[..data.signature.len() - 20].to_vec() - } - TxModifier::WrongSignature => data.signature = vec![27u8; 65], - TxModifier::WrongMagicValue => data.signature = vec![1u8; 65], - TxModifier::WrongNonce => { - // Do not need to modify signature for nonce error - } - TxModifier::NonceReused => { - // Do not need to modify signature for nonce error - } - } - } - ExecuteTransactionCommon::L2(data) - } - _ => panic!("L1 transactions are not supported"), - }; - - Self { - tx: transaction, - result: TxExpectedResult::Rejected { - error: expected_error, - }, - } - } - - pub(crate) fn new_processed(transaction: Transaction, should_be_rollbacked: bool) -> Self { - Self { - tx: transaction, - result: TxExpectedResult::Processed { - rollback: should_be_rollbacked, - }, - } - } - - fn verify_result(&self, result: &VmExecutionResultAndLogs) { - match &self.result { - TxExpectedResult::Rejected { error } => match &result.result { - ExecutionResult::Success { .. } => { - panic!("Transaction should be reverted {:?}", self.tx.nonce()) - } - ExecutionResult::Revert { output } => match &error.revert_reason { - TxRevertReason::TxReverted(expected) => { - assert_eq!(output, expected) - } - _ => { - panic!("Error types mismatch"); - } - }, - ExecutionResult::Halt { reason } => match &error.revert_reason { - TxRevertReason::Halt(expected) => { - assert_eq!(reason, expected) - } - _ => { - panic!("Error types mismatch"); - } - }, - }, - TxExpectedResult::Processed { .. } => { - assert!(!result.result.is_failed()); - } - } - } - - fn should_rollback(&self) -> bool { - match &self.result { - TxExpectedResult::Rejected { .. } => true, - TxExpectedResult::Processed { rollback } => *rollback, - } - } -} - -impl VmTester { - pub(crate) fn execute_and_verify_txs( - &mut self, - txs: &[TransactionTestInfo], - ) -> CurrentExecutionState { - for tx_test_info in txs { - self.execute_tx_and_verify(tx_test_info.clone()); - } - self.vm.execute(VmExecutionMode::Batch); - let mut state = self.vm.get_current_execution_state(); - state.used_contract_hashes.sort(); - state - } - - pub(crate) fn execute_tx_and_verify( - &mut self, - tx_test_info: TransactionTestInfo, - ) -> VmExecutionResultAndLogs { - let inner_state_before = self.vm.dump_inner_state(); - self.vm.make_snapshot(); - self.vm.push_transaction(tx_test_info.tx.clone()); - let result = self.vm.execute(VmExecutionMode::OneTx); - tx_test_info.verify_result(&result); - if tx_test_info.should_rollback() { - self.vm.rollback_to_the_latest_snapshot(); - let inner_state_after = self.vm.dump_inner_state(); - assert_eq!( - inner_state_before, inner_state_after, - "Inner state before and after rollback should be equal" - ); - } - result - } -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/vm_tester.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/vm_tester.rs deleted file mode 100644 index 24bd0b4d0bc..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/vm_tester.rs +++ /dev/null @@ -1,298 +0,0 @@ -use std::marker::PhantomData; - -use zksync_contracts::BaseSystemContracts; -use crate::interface::storage::{InMemoryStorage, StoragePtr, StorageView, WriteStorage}; -use zksync_types::{ - block::MiniblockHasher, - fee_model::BatchFeeInput, - get_code_key, get_is_account_key, - helpers::unix_timestamp_ms, - utils::{deployed_address_create, storage_key_for_eth_balance}, - Address, L1BatchNumber, L2ChainId, MiniblockNumber, Nonce, ProtocolVersionId, U256, -}; -use 
zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; - -use crate::{ - interface::{ - L1BatchEnv, L2Block, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmInterface, - }, - vm_1_4_1::{ - constants::BLOCK_GAS_LIMIT, - tests::{ - tester::{Account, TxType}, - utils::read_test_contract, - }, - utils::l2_blocks::load_last_l2_block, - Vm, - }, - HistoryMode, -}; - -pub(crate) type InMemoryStorageView = StorageView; - -pub(crate) struct VmTester { - pub(crate) vm: Vm, - pub(crate) storage: StoragePtr, - pub(crate) fee_account: Address, - pub(crate) deployer: Option, - pub(crate) test_contract: Option
, - pub(crate) rich_accounts: Vec, - pub(crate) custom_contracts: Vec, - _phantom: std::marker::PhantomData, -} - -impl VmTester { - pub(crate) fn deploy_test_contract(&mut self) { - let contract = read_test_contract(); - let tx = self - .deployer - .as_mut() - .expect("You have to initialize builder with deployer") - .get_deploy_tx(&contract, None, TxType::L2) - .tx; - let nonce = tx.nonce().unwrap().0.into(); - self.vm.push_transaction(tx); - self.vm.execute(VmExecutionMode::OneTx); - let deployed_address = - deployed_address_create(self.deployer.as_ref().unwrap().address, nonce); - self.test_contract = Some(deployed_address); - } - - pub(crate) fn reset_with_empty_storage(&mut self) { - self.storage = StorageView::new(get_empty_storage()).to_rc_ptr(); - self.reset_state(false); - } - - /// Reset the state of the VM to the initial state. - /// If `use_latest_l2_block` is true, then the VM will use the latest L2 block from storage, - /// otherwise it will use the first L2 block of l1 batch env - pub(crate) fn reset_state(&mut self, use_latest_l2_block: bool) { - for account in self.rich_accounts.iter_mut() { - account.nonce = Nonce(0); - make_account_rich(self.storage.clone(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(self.storage.clone(), deployer); - } - - if !self.custom_contracts.is_empty() { - println!("Inserting custom contracts is not yet supported") - // `insert_contracts(&mut self.storage, &self.custom_contracts);` - } - - let mut l1_batch = self.vm.batch_env.clone(); - if use_latest_l2_block { - let last_l2_block = load_last_l2_block(self.storage.clone()).unwrap_or(L2Block { - number: 0, - timestamp: 0, - hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - }); - l1_batch.first_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: std::cmp::max(last_l2_block.timestamp + 1, l1_batch.timestamp), - prev_block_hash: last_l2_block.hash, - max_virtual_blocks_to_create: 1, - }; - } - - let vm = Vm::new(l1_batch, self.vm.system_env.clone(), self.storage.clone()); - - if self.test_contract.is_some() { - self.deploy_test_contract(); - } - - self.vm = vm; - } -} - -pub(crate) type ContractsToDeploy = (Vec, Address, bool); - -pub(crate) struct VmTesterBuilder { - storage: Option, - l1_batch_env: Option, - system_env: SystemEnv, - deployer: Option, - rich_accounts: Vec, - custom_contracts: Vec, - _phantom: PhantomData, -} - -impl Clone for VmTesterBuilder { - fn clone(&self) -> Self { - Self { - storage: None, - l1_batch_env: self.l1_batch_env.clone(), - system_env: self.system_env.clone(), - deployer: self.deployer.clone(), - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - _phantom: PhantomData, - } - } -} - -#[allow(dead_code)] -impl VmTesterBuilder { - pub(crate) fn new(_: H) -> Self { - Self { - storage: None, - l1_batch_env: None, - system_env: SystemEnv { - zk_porter_available: false, - version: ProtocolVersionId::latest(), - base_system_smart_contracts: BaseSystemContracts::playground(), - gas_limit: BLOCK_GAS_LIMIT, - execution_mode: TxExecutionMode::VerifyExecute, - default_validation_computational_gas_limit: BLOCK_GAS_LIMIT, - chain_id: L2ChainId::from(270), - }, - deployer: None, - rich_accounts: vec![], - custom_contracts: vec![], - _phantom: PhantomData, - } - } - - pub(crate) fn with_l1_batch_env(mut self, l1_batch_env: L1BatchEnv) -> Self { - self.l1_batch_env = Some(l1_batch_env); - self - } - - pub(crate) fn with_system_env(mut self, system_env: SystemEnv) -> Self { - 
self.system_env = system_env; - self - } - - pub(crate) fn with_storage(mut self, storage: InMemoryStorage) -> Self { - self.storage = Some(storage); - self - } - - pub(crate) fn with_base_system_smart_contracts( - mut self, - base_system_smart_contracts: BaseSystemContracts, - ) -> Self { - self.system_env.base_system_smart_contracts = base_system_smart_contracts; - self - } - - pub(crate) fn with_gas_limit(mut self, gas_limit: u32) -> Self { - self.system_env.gas_limit = gas_limit; - self - } - - pub(crate) fn with_execution_mode(mut self, execution_mode: TxExecutionMode) -> Self { - self.system_env.execution_mode = execution_mode; - self - } - - pub(crate) fn with_empty_in_memory_storage(mut self) -> Self { - self.storage = Some(get_empty_storage()); - self - } - - pub(crate) fn with_random_rich_accounts(mut self, number: u32) -> Self { - for _ in 0..number { - let account = Account::random(); - self.rich_accounts.push(account); - } - self - } - - pub(crate) fn with_rich_accounts(mut self, accounts: Vec) -> Self { - self.rich_accounts.extend(accounts); - self - } - - pub(crate) fn with_deployer(mut self) -> Self { - let deployer = Account::random(); - self.deployer = Some(deployer); - self - } - - pub(crate) fn with_custom_contracts(mut self, contracts: Vec) -> Self { - self.custom_contracts = contracts; - self - } - - pub(crate) fn build(self) -> VmTester { - let l1_batch_env = self - .l1_batch_env - .unwrap_or_else(|| default_l1_batch(L1BatchNumber(1))); - - let mut raw_storage = self.storage.unwrap_or_else(get_empty_storage); - insert_contracts(&mut raw_storage, &self.custom_contracts); - let storage_ptr = StorageView::new(raw_storage).to_rc_ptr(); - for account in self.rich_accounts.iter() { - make_account_rich(storage_ptr.clone(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(storage_ptr.clone(), deployer); - } - let fee_account = l1_batch_env.fee_account; - - let vm = Vm::new(l1_batch_env, self.system_env, storage_ptr.clone()); - - VmTester { - vm, - storage: storage_ptr, - fee_account, - deployer: self.deployer, - test_contract: None, - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - _phantom: PhantomData, - } - } -} - -pub(crate) fn default_l1_batch(number: L1BatchNumber) -> L1BatchEnv { - let timestamp = unix_timestamp_ms(); - L1BatchEnv { - previous_batch_hash: None, - number, - timestamp, - fee_input: BatchFeeInput::l1_pegged( - 50_000_000_000, // 50 gwei - 250_000_000, // 0.25 gwei - ), - fee_account: Address::random(), - enforced_base_fee: None, - first_l2_block: L2BlockEnv { - number: 1, - timestamp, - prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 100, - }, - } -} - -pub(crate) fn make_account_rich(storage: StoragePtr, account: &Account) { - let key = storage_key_for_eth_balance(&account.address); - storage - .as_ref() - .borrow_mut() - .set_value(key, u256_to_h256(U256::from(10u64.pow(19)))); -} - -pub(crate) fn get_empty_storage() -> InMemoryStorage { - InMemoryStorage::with_system_contracts(hash_bytecode) -} - -// Inserts the contracts into the test environment, bypassing the -// deployer system contract. Besides the reference to storage -// it accepts a `contracts` tuple of information about the contract -// and whether or not it is an account. 
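-// For example (hypothetical values): calling `insert_contracts(&mut storage, &[(counter_bytecode, counter_address, false)])` -// would write the bytecode hash under the code key of `counter_address`, store the factory -// dependency, and leave the account marker unset.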
-fn insert_contracts(raw_storage: &mut InMemoryStorage, contracts: &[ContractsToDeploy]) { - for (contract, address, is_account) in contracts { - let deployer_code_key = get_code_key(address); - raw_storage.set_value(deployer_code_key, hash_bytecode(contract)); - - if *is_account { - let is_account_key = get_is_account_key(address); - raw_storage.set_value(is_account_key, u256_to_h256(1_u32.into())); - } - - raw_storage.store_factory_dep(hash_bytecode(contract), contract.clone()); - } -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/tracing_execution_error.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/tracing_execution_error.rs deleted file mode 100644 index 02c7590c1be..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/tracing_execution_error.rs +++ /dev/null @@ -1,54 +0,0 @@ -use zksync_types::{Execute, H160}; - -use crate::{ - interface::{TxExecutionMode, TxRevertReason, VmRevertReason}, - vm_1_4_1::{ - tests::{ - tester::{ExpectedError, TransactionTestInfo, VmTesterBuilder}, - utils::{get_execute_error_calldata, read_error_contract, BASE_SYSTEM_CONTRACTS}, - }, - HistoryEnabled, - }, -}; - -#[test] -fn test_tracing_of_execution_errors() { - let contract_address = H160::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_custom_contracts(vec![(read_error_contract(), contract_address, false)]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address, - calldata: get_execute_error_calldata(), - value: Default::default(), - factory_deps: Some(vec![]), - }, - None, - ); - - vm.execute_tx_and_verify(TransactionTestInfo::new_rejected( - tx, - ExpectedError { - revert_reason: TxRevertReason::TxReverted(VmRevertReason::General { - msg: "short".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 115, 104, 111, 114, 116, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, - ], - }), - modifier: None, - }, - )); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/upgrade.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/upgrade.rs deleted file mode 100644 index af3701d919f..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/upgrade.rs +++ /dev/null @@ -1,355 +0,0 @@ -use zk_evm_1_4_1::aux_structures::Timestamp; -use zksync_contracts::{deployer_contract, load_sys_contract, read_bytecode}; -use crate::interface::storage::WriteStorage; -use zksync_test_account::TxType; -use zksync_types::{ - ethabi::{Contract, Token}, - get_code_key, get_known_code_key, - protocol_version::ProtocolUpgradeTxCommonData, - Address, Execute, ExecuteTransactionCommon, Transaction, COMPLEX_UPGRADER_ADDRESS, - CONTRACT_DEPLOYER_ADDRESS, CONTRACT_FORCE_DEPLOYER_ADDRESS, H160, H256, - REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use super::utils::{get_complex_upgrade_abi, read_test_contract}; -use crate::{ - interface::{ - ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterface, - VmInterfaceHistoryEnabled, - }, - vm_1_4_1::{ - tests::{ - 
tester::VmTesterBuilder, - utils::{read_complex_upgrade, verify_required_storage}, - }, - HistoryEnabled, - }, -}; - -/// In this test we ensure that the requirements for protocol upgrade transactions are enforced by the bootloader: -/// - This transaction must be the only one in the block -/// - If present, this transaction must be the first one in the block -#[test] -fn test_protocol_upgrade_is_first() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let bytecode_hash = hash_bytecode(&read_test_contract()); - vm.vm - .storage - .borrow_mut() - .set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); - - // Here we just use some random transaction of protocol upgrade type: - let protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecode hash to - address: H160::random(), - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - // Another random upgrade transaction - let another_protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecode hash to - address: H160::random(), - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - let normal_l1_transaction = vm.rich_accounts[0] - .get_deploy_tx(&read_test_contract(), None, TxType::L1 { serial_id: 0 }) - .tx; - - let expected_error = - Halt::UnexpectedVMBehavior("Assertion error: Protocol upgrade tx not first".to_string()); - - vm.vm.make_snapshot(); - // Test 1: there must be only one system transaction in the block - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(another_protocol_upgrade_transaction); - - vm.vm.execute(VmExecutionMode::OneTx); - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error.clone() - } - ); - - // Test 2: the protocol upgrade tx must be the first one in the block - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error - } - ); - - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(protocol_upgrade_transaction); - vm.vm.push_transaction(normal_l1_transaction); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); -} - -/// In this test we check how force deployments can be performed via protocol upgrade transactions. 
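-/// (A force deployment is encoded as a protocol-upgrade transaction sent from the force deployer address that -/// calls `forceDeployOnAddresses` on the ContractDeployer, writing the given bytecode hash under the target -/// address; see `get_forced_deploy_tx` below for the exact construction.)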
-#[test] -fn test_force_deploy_upgrade() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - let bytecode_hash = hash_bytecode(&read_test_contract()); - - let known_code_key = get_known_code_key(&bytecode_hash); - // It is generally expected that all the keys will be set as known prior to the protocol upgrade. - storage_view - .borrow_mut() - .set_value(known_code_key, u256_to_h256(1.into())); - drop(storage_view); - - let address_to_deploy = H160::random(); - // Here we just use some random transaction of protocol upgrade type: - let transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecode hash to - address: address_to_deploy, - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - vm.vm.push_transaction(transaction); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = vec![(bytecode_hash, get_code_key(&address_to_deploy))]; - - // Verify that the bytecode has been set correctly - verify_required_storage(&vm.vm.state, expected_slots); -} - -/// Here we show how an upgrade can be carried out through the complex upgrader -#[test] -fn test_complex_upgrader() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - - let bytecode_hash = hash_bytecode(&read_complex_upgrade()); - let msg_sender_test_hash = hash_bytecode(&read_msg_sender_test()); - - // Let's assume that the bytecode for the implementation of the complex upgrade - // is already deployed at some address in user space - let upgrade_impl = H160::random(); - let account_code_key = get_code_key(&upgrade_impl); - - storage_view - .borrow_mut() - .set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); - storage_view.borrow_mut().set_value( - get_known_code_key(&msg_sender_test_hash), - u256_to_h256(1.into()), - ); - storage_view - .borrow_mut() - .set_value(account_code_key, bytecode_hash); - drop(storage_view); - - vm.vm.state.decommittment_processor.populate( - vec![ - ( - h256_to_u256(bytecode_hash), - bytes_to_be_words(read_complex_upgrade()), - ), - ( - h256_to_u256(msg_sender_test_hash), - bytes_to_be_words(read_msg_sender_test()), - ), - ], - Timestamp(0), - ); - - let address_to_deploy1 = H160::random(); - let address_to_deploy2 = H160::random(); - - let transaction = get_complex_upgrade_tx( - upgrade_impl, - address_to_deploy1, - address_to_deploy2, - bytecode_hash, - ); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The complex upgrade was not successful" - ); - - let expected_slots = vec![ - (bytecode_hash, get_code_key(&address_to_deploy1)), - (bytecode_hash, get_code_key(&address_to_deploy2)), - ]; - - // Verify that the bytecode has been set correctly - verify_required_storage(&vm.vm.state, expected_slots); -} - -#[derive(Debug, Clone)] -struct ForceDeployment { - // The bytecode hash to put on an 
address - bytecode_hash: H256, - // The address on which to deploy the bytecode hash to - address: Address, - // Whether to run the constructor on the force deployment - call_constructor: bool, - // The value with which to initialize a contract - value: U256, - // The constructor calldata - input: Vec, -} - -fn get_forced_deploy_tx(deployment: &[ForceDeployment]) -> Transaction { - let deployer = deployer_contract(); - let contract_function = deployer.function("forceDeployOnAddresses").unwrap(); - - let encoded_deployments: Vec<_> = deployment - .iter() - .map(|deployment| { - Token::Tuple(vec![ - Token::FixedBytes(deployment.bytecode_hash.as_bytes().to_vec()), - Token::Address(deployment.address), - Token::Bool(deployment.call_constructor), - Token::Uint(deployment.value), - Token::Bytes(deployment.input.clone()), - ]) - }) - .collect(); - - let params = [Token::Array(encoded_deployments)]; - - let calldata = contract_function - .encode_input(¶ms) - .expect("failed to encode parameters"); - - let execute = Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, - calldata, - factory_deps: None, - value: U256::zero(), - }; - - Transaction { - common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, - gas_limit: U256::from(200_000_000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -// Returns the transaction that performs a complex protocol upgrade. -// The first param is the address of the implementation of the complex upgrade -// in user-space, while the next 3 params are params of the implementation itself -// For the explanation for the parameters, please refer to: -// etc/contracts-test-data/complex-upgrade/complex-upgrade.sol -fn get_complex_upgrade_tx( - implementation_address: Address, - address1: Address, - address2: Address, - bytecode_hash: H256, -) -> Transaction { - let impl_contract = get_complex_upgrade_abi(); - let impl_function = impl_contract.function("someComplexUpgrade").unwrap(); - let impl_calldata = impl_function - .encode_input(&[ - Token::Address(address1), - Token::Address(address2), - Token::FixedBytes(bytecode_hash.as_bytes().to_vec()), - ]) - .unwrap(); - - let complex_upgrader = get_complex_upgrader_abi(); - let upgrade_function = complex_upgrader.function("upgrade").unwrap(); - let complex_upgrader_calldata = upgrade_function - .encode_input(&[ - Token::Address(implementation_address), - Token::Bytes(impl_calldata), - ]) - .unwrap(); - - let execute = Execute { - contract_address: COMPLEX_UPGRADER_ADDRESS, - calldata: complex_upgrader_calldata, - factory_deps: None, - value: U256::zero(), - }; - - Transaction { - common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, - gas_limit: U256::from(200_000_000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -fn read_msg_sender_test() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/msg-sender.sol/MsgSenderTest.json") -} - -fn get_complex_upgrader_abi() -> Contract { - load_sys_contract("ComplexUpgrader") -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/utils.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/utils.rs deleted file mode 100644 index da69c107a20..00000000000 --- 
a/core/lib/multivm/src/versions/vm_1_4_1/tests/utils.rs +++ /dev/null @@ -1,121 +0,0 @@ -use ethabi::Contract; -use once_cell::sync::Lazy; -use zksync_contracts::{ - load_contract, read_bytecode, read_zbin_bytecode, BaseSystemContracts, SystemContractCode, -}; -use crate::interface::storage::{StoragePtr, WriteStorage}; -use zksync_types::{ - utils::storage_key_for_standard_token_balance, AccountTreeId, Address, StorageKey, H256, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use crate::vm_1_4_1::{ - tests::tester::InMemoryStorageView, types::internals::ZkSyncVmState, HistoryMode, -}; - -pub(crate) static BASE_SYSTEM_CONTRACTS: Lazy = - Lazy::new(BaseSystemContracts::load_from_disk); - -// Probably make it a part of vm tester -pub(crate) fn verify_required_storage( - state: &ZkSyncVmState, - required_values: Vec<(H256, StorageKey)>, -) { - for (required_value, key) in required_values { - let current_value = state.storage.storage.read_from_storage(&key); - - assert_eq!( - u256_to_h256(current_value), - required_value, - "Invalid value at key {key:?}" - ); - } -} - -pub(crate) fn verify_required_memory( - state: &ZkSyncVmState, - required_values: Vec<(U256, u32, u32)>, -) { - for (required_value, memory_page, cell) in required_values { - let current_value = state - .memory - .read_slot(memory_page as usize, cell as usize) - .value; - assert_eq!(current_value, required_value); - } -} - -pub(crate) fn get_balance( - token_id: AccountTreeId, - account: &Address, - main_storage: StoragePtr, -) -> U256 { - let key = storage_key_for_standard_token_balance(token_id, account); - h256_to_u256(main_storage.borrow_mut().read_value(&key)) -} - -pub(crate) fn read_test_contract() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json") -} - -pub(crate) fn get_bootloader(test: &str) -> SystemContractCode { - let bootloader_code = read_zbin_bytecode(format!( - "contracts/system-contracts/bootloader/tests/artifacts/{}.yul.zbin", - test - )); - - let bootloader_hash = hash_bytecode(&bootloader_code); - SystemContractCode { - code: bytes_to_be_words(bootloader_code), - hash: bootloader_hash, - } -} - -pub(crate) fn read_nonce_holder_tester() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/custom-account/nonce-holder-test.sol/NonceHolderTest.json") -} - -pub(crate) fn read_error_contract() -> Vec { - read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ) -} - -pub(crate) fn get_execute_error_calldata() -> Vec { - let test_contract = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ); - - let function = test_contract.function("require_short").unwrap(); - - function - .encode_input(&[]) - .expect("failed to encode parameters") -} - -pub(crate) fn read_many_owners_custom_account_contract() -> (Vec, Contract) { - let path = "etc/contracts-test-data/artifacts-zk/contracts/custom-account/many-owners-custom-account.sol/ManyOwnersCustomAccount.json"; - (read_bytecode(path), load_contract(path)) -} - -pub(crate) fn read_max_depth_contract() -> Vec { - read_zbin_bytecode( - "core/tests/ts-integration/contracts/zkasm/artifacts/deep_stak.zkasm/deep_stak.zkasm.zbin", - ) -} - -pub(crate) fn read_precompiles_contract() -> Vec { - read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/precompiles/precompiles.sol/Precompiles.json", - ) -} - -pub(crate) fn read_complex_upgrade() -> 
Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json") -} - -pub(crate) fn get_complex_upgrade_abi() -> Contract { - load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json" - ) -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tracers/pubdata_tracer.rs b/core/lib/multivm/src/versions/vm_1_4_1/tracers/pubdata_tracer.rs index 238804bc7fc..6f927c5c99a 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/tracers/pubdata_tracer.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/tracers/pubdata_tracer.rs @@ -10,6 +10,7 @@ use zksync_utils::{h256_to_u256, u256_to_bytes_be, u256_to_h256}; use crate::{ interface::{ + pubdata::L1MessengerL2ToL1Log, storage::{StoragePtr, WriteStorage}, tracer::{TracerExecutionStatus, TracerExecutionStopReason}, L1BatchEnv, VmEvent, VmExecutionMode, @@ -17,7 +18,7 @@ use crate::{ tracers::dynamic::vm_1_4_1::DynTracer, utils::events::{ extract_bytecode_publication_requests_from_l1_messenger, - extract_l2tol1logs_from_l1_messenger, L1MessengerL2ToL1Log, + extract_l2tol1logs_from_l1_messenger, }, vm_1_4_1::{ bootloader_state::{utils::apply_pubdata_to_memory, BootloaderState}, diff --git a/core/lib/multivm/src/versions/vm_1_4_1/types/internals/pubdata.rs b/core/lib/multivm/src/versions/vm_1_4_1/types/internals/pubdata.rs index d07732ae435..c1ca93152a0 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/types/internals/pubdata.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/types/internals/pubdata.rs @@ -1,6 +1,6 @@ use zksync_types::writes::{compress_state_diffs, StateDiffRecord}; -use crate::utils::events::L1MessengerL2ToL1Log; +use crate::interface::pubdata::L1MessengerL2ToL1Log; /// Struct based on which the pubdata blob is formed #[derive(Debug, Clone, Default)] diff --git a/core/lib/multivm/src/versions/vm_1_4_1/vm.rs b/core/lib/multivm/src/versions/vm_1_4_1/vm.rs index 68c8e92a03a..af483feedd7 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/vm.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/vm.rs @@ -1,17 +1,19 @@ +use std::rc::Rc; + use circuit_sequencer_api_1_4_1::sort_storage_access::sort_storage_access_queries; use zksync_types::{ l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, Transaction, }; +use zksync_vm_interface::{pubdata::PubdataBuilder, InspectExecutionMode}; use crate::{ glue::GlueInto, interface::{ storage::{StoragePtr, WriteStorage}, BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, - FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, + FinishedL1Batch, L1BatchEnv, L2BlockEnv, PushTransactionResult, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, - VmMemoryMetrics, }, utils::events::extract_l2tol1logs_from_l1_messenger, vm_1_4_1::{ @@ -82,18 +84,23 @@ impl Vm { impl VmInterface for Vm { type TracerDispatcher = TracerDispatcher; - /// Push tx into memory for the future execution - fn push_transaction(&mut self, tx: Transaction) { + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult<'_> { self.push_transaction_with_compression(tx, true); + PushTransactionResult { + compressed_bytecodes: self + .bootloader_state + .get_last_tx_compressed_bytecodes() + .into(), + } } /// Execute VM with custom tracers. 
fn inspect( &mut self, tracer: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { - self.inspect_inner(tracer, execution_mode, None) + self.inspect_inner(tracer, execution_mode.into(), None) } fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { @@ -124,12 +131,12 @@ impl VmInterface for Vm { } } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - self.record_vm_memory_metrics_inner() - } - - fn finish_batch(&mut self) -> FinishedL1Batch { - let result = self.inspect(&mut TracerDispatcher::default(), VmExecutionMode::Batch); + fn finish_batch(&mut self, _pubdata_builder: Rc) -> FinishedL1Batch { + let result = self.inspect_inner( + &mut TracerDispatcher::default(), + VmExecutionMode::Batch, + None, + ); let execution_state = self.get_current_execution_state(); let bootloader_memory = self.bootloader_state.bootloader_memory(); FinishedL1Batch { diff --git a/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/utils.rs index 600ab83bf48..182f6eff441 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/utils.rs @@ -167,8 +167,8 @@ pub(super) fn assemble_tx_meta(execution_mode: TxExecutionMode, execute_tx: bool // Set 0 byte (execution mode) output[0] = match execution_mode { TxExecutionMode::VerifyExecute => 0x00, - TxExecutionMode::EstimateFee { .. } => 0x00, - TxExecutionMode::EthCall { .. } => 0x02, + TxExecutionMode::EstimateFee => 0x00, + TxExecutionMode::EthCall => 0x02, }; // Set 31 byte (marker for tx execution) diff --git a/core/lib/multivm/src/versions/vm_1_4_2/implementation/execution.rs b/core/lib/multivm/src/versions/vm_1_4_2/implementation/execution.rs index d42d1880933..f6e49cd8b14 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/implementation/execution.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/implementation/execution.rs @@ -96,6 +96,7 @@ impl Vm { logs, statistics, refunds, + new_known_factory_deps: None, }; (stop_reason, result) diff --git a/core/lib/multivm/src/versions/vm_1_4_2/implementation/statistics.rs b/core/lib/multivm/src/versions/vm_1_4_2/implementation/statistics.rs index 92a2eaa650c..754b8476182 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/implementation/statistics.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/implementation/statistics.rs @@ -57,7 +57,7 @@ impl Vm { } /// Returns the info about all oracles' sizes. 
- pub(crate) fn record_vm_memory_metrics_inner(&self) -> VmMemoryMetrics { + pub(crate) fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { VmMemoryMetrics { event_sink_inner: self.state.event_sink.get_size(), event_sink_history: self.state.event_sink.get_history_size(), diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/block_tip.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/block_tip.rs deleted file mode 100644 index 8578b73ccfa..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/block_tip.rs +++ /dev/null @@ -1,399 +0,0 @@ -use std::borrow::BorrowMut; - -use ethabi::Token; -use zk_evm_1_4_1::aux_structures::Timestamp; -use zksync_contracts::load_sys_contract; -use zksync_system_constants::{ - CONTRACT_FORCE_DEPLOYER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS, -}; -use zksync_types::{ - commitment::SerializeCommitment, get_code_key, l2_to_l1_log::L2ToL1Log, - writes::StateDiffRecord, Address, Execute, H256, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use super::utils::{get_complex_upgrade_abi, read_complex_upgrade}; -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_2::{ - constants::{ - BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD, - BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD, BOOTLOADER_BATCH_TIP_OVERHEAD, - MAX_VM_PUBDATA_PER_BATCH, - }, - tests::tester::{get_empty_storage, InMemoryStorageView, VmTesterBuilder}, - tracers::PubdataTracer, - TracerDispatcher, - }, -}; - -#[derive(Debug, Clone, Default)] -struct L1MessengerTestData { - l2_to_l1_logs: usize, - messages: Vec>, - bytecodes: Vec>, - state_diffs: Vec, -} - -struct MimicCallInfo { - to: Address, - who_to_mimic: Address, - data: Vec, -} - -fn populate_mimic_calls(data: L1MessengerTestData) -> Vec { - let complex_upgrade = get_complex_upgrade_abi(); - let l1_messenger = load_sys_contract("L1Messenger"); - - let logs_mimic_calls = (0..data.l2_to_l1_logs).map(|_| MimicCallInfo { - to: L1_MESSENGER_ADDRESS, - who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, - data: l1_messenger - .function("sendL2ToL1Log") - .unwrap() - .encode_input(&[ - Token::Bool(false), - Token::FixedBytes(H256::random().0.to_vec()), - Token::FixedBytes(H256::random().0.to_vec()), - ]) - .unwrap(), - }); - let messages_mimic_calls = data.messages.iter().map(|message| MimicCallInfo { - to: L1_MESSENGER_ADDRESS, - who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, - data: l1_messenger - .function("sendToL1") - .unwrap() - .encode_input(&[Token::Bytes(message.clone())]) - .unwrap(), - }); - let bytecodes_mimic_calls = data.bytecodes.iter().map(|bytecode| MimicCallInfo { - to: L1_MESSENGER_ADDRESS, - who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, - data: l1_messenger - .function("requestBytecodeL1Publication") - .unwrap() - .encode_input(&[Token::FixedBytes(hash_bytecode(bytecode).0.to_vec())]) - .unwrap(), - }); - - let encoded_calls = logs_mimic_calls - .chain(messages_mimic_calls) - .chain(bytecodes_mimic_calls) - .map(|call| { - Token::Tuple(vec![ - Token::Address(call.to), - Token::Address(call.who_to_mimic), - Token::Bytes(call.data), - ]) - }) - .collect::>(); - - complex_upgrade - .function("mimicCalls") - .unwrap() - .encode_input(&[Token::Array(encoded_calls)]) - .unwrap() -} - -struct TestStatistics { - pub max_used_gas: u32, - pub circuit_statistics: u64, - pub execution_metrics_size: u64, -} - -struct StatisticsTagged { - pub statistics: TestStatistics, - pub tag: String, -} - -fn execute_test(test_data: L1MessengerTestData) -> 
TestStatistics { - let mut storage = get_empty_storage(); - let complex_upgrade_code = read_complex_upgrade(); - - // For this test we'll just put the bytecode onto the force deployer address - storage.borrow_mut().set_value( - get_code_key(&CONTRACT_FORCE_DEPLOYER_ADDRESS), - hash_bytecode(&complex_upgrade_code), - ); - storage - .borrow_mut() - .store_factory_dep(hash_bytecode(&complex_upgrade_code), complex_upgrade_code); - - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_storage(storage) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let bytecodes = test_data - .bytecodes - .iter() - .map(|bytecode| { - let hash = hash_bytecode(bytecode); - let words = bytes_to_be_words(bytecode.clone()); - (h256_to_u256(hash), words) - }) - .collect(); - vm.vm - .state - .decommittment_processor - .populate(bytecodes, Timestamp(0)); - - let data = populate_mimic_calls(test_data.clone()); - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: CONTRACT_FORCE_DEPLOYER_ADDRESS, - calldata: data, - value: U256::zero(), - factory_deps: None, - }, - None, - ); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "Transaction wasn't successful for input: {:?}", - test_data - ); - - // Now we count how many ergs were spent at the end of the batch. - // It is assumed that the top-level frame is the bootloader. - - let ergs_before = vm.vm.state.local_state.callstack.current.ergs_remaining; - - // We ensure that the provided state diffs are indeed used - let pubdata_tracer = PubdataTracer::::new_with_forced_state_diffs( - vm.vm.batch_env.clone(), - VmExecutionMode::Batch, - test_data.state_diffs.clone(), - ); - - let result = vm.vm.inspect_inner( - TracerDispatcher::default(), - VmExecutionMode::Batch, - Some(pubdata_tracer), - ); - - assert!( - !result.result.is_failed(), - "Batch wasn't successful for input: {:?}", - test_data - ); - - let ergs_after = vm.vm.state.local_state.callstack.current.ergs_remaining; - - assert_eq!( - (ergs_before - ergs_after) as u64, - result.statistics.gas_used - ); - - TestStatistics { - max_used_gas: ergs_before - ergs_after, - circuit_statistics: result.statistics.circuit_statistic.total() as u64, - execution_metrics_size: result.get_execution_metrics(None).size() as u64, - } -} - -fn generate_state_diffs( - repeated_writes: bool, - small_diff: bool, - number_of_state_diffs: usize, -) -> Vec { - (0..number_of_state_diffs) - .map(|i| { - let address = Address::from_low_u64_be(i as u64); - let key = U256::from(i); - let enumeration_index = if repeated_writes { i + 1 } else { 0 }; - - let (initial_value, final_value) = if small_diff { - // As small as it gets, one byte to denote zeroing out the value - (U256::from(1), U256::from(0)) - } else { - // As large as it gets - (U256::from(0), U256::from(2).pow(255.into())) - }; - - StateDiffRecord { - address, - key, - derived_key: u256_to_h256(i.into()).0, - enumeration_index: enumeration_index as u64, - initial_value, - final_value, - } - }) - .collect() -} - -// A valid zkEVM bytecode has an odd number of 32-byte words -fn get_valid_bytecode_length(length: usize) -> usize { - // First, ensure that the length is divisible by 32 - let length_padded_to_32 = if length % 32 == 0 { - length - } else { - length + 32 - (length % 32) - }; - - // Then we ensure that the number returned by division by 32 is odd
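- // For example (illustrative values, not from the original test): a requested length of 100 is padded to 128 bytes (4 words, even) and then bumped to 160 bytes (5 words, odd), while 96 bytes (3 words, already odd) is returned unchanged. - if 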
length_padded_to_32 % 64 == 0 { - length_padded_to_32 + 32 - } else { - length_padded_to_32 - } -} - -#[test] -#[allow(clippy::vec_init_then_push)] -fn test_dry_run_upper_bound() { - // Some of the pubdata is consumed by constant fields (such as length of messages, number of logs, etc.). - // While this leaves some room for error, at the end of the test we require that the `BOOTLOADER_BATCH_TIP_OVERHEAD` - // is sufficient with a very large margin, so it is okay to ignore 1% of possible pubdata. - const MAX_EFFECTIVE_PUBDATA_PER_BATCH: usize = - (MAX_VM_PUBDATA_PER_BATCH as f64 * 0.99) as usize; - - // We are re-using the `ComplexUpgrade` contract as it already has the `mimicCall` functionality. - // To get the upper bound, we'll try to do the following: - // 1. Max number of logs. - // 2. Lots of small L2->L1 messages / one large L2->L1 message. - // 3. Lots of small bytecodes / one large bytecode. - // 4. Lots of storage slot updates. - - let mut statistics = Vec::new(); - - // max logs - statistics.push(StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - l2_to_l1_logs: MAX_EFFECTIVE_PUBDATA_PER_BATCH / L2ToL1Log::SERIALIZED_SIZE, - ..Default::default() - }), - tag: "max_logs".to_string(), - }); - - // max messages - statistics.push(StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each L2->L1 message is accompanied by a Log + its length, which is a 4 byte number, - // so the max number of pubdata is bound by it - messages: vec![ - vec![0; 0]; - MAX_EFFECTIVE_PUBDATA_PER_BATCH / (L2ToL1Log::SERIALIZED_SIZE + 4) - ], - ..Default::default() - }), - tag: "max_messages".to_string(), - }); - - // long message - statistics.push(StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each L2->L1 message is accompanied by a Log, so the max number of pubdata is bound by it - messages: vec![vec![0; MAX_EFFECTIVE_PUBDATA_PER_BATCH]; 1], - ..Default::default() - }), - tag: "long_message".to_string(), - }); - - // max bytecodes - statistics.push(StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each bytecode must be at least 32 bytes long. 
-            // Each uncompressed bytecode is accompanied by its length, which is a 4-byte number
-            bytecodes: vec![vec![0; 32]; MAX_EFFECTIVE_PUBDATA_PER_BATCH / (32 + 4)],
-            ..Default::default()
-        }),
-        tag: "max_bytecodes".to_string(),
-    });
-
-    // long bytecode
-    statistics.push(StatisticsTagged {
-        statistics: execute_test(L1MessengerTestData {
-            bytecodes: vec![vec![0; get_valid_bytecode_length(MAX_EFFECTIVE_PUBDATA_PER_BATCH)]; 1],
-            ..Default::default()
-        }),
-        tag: "long_bytecode".to_string(),
-    });
-
-    // lots of small repeated writes
-    statistics.push(StatisticsTagged {
-        statistics: execute_test(L1MessengerTestData {
-            // In theory each state diff can require only 5 bytes to be published (enum index + 4 bytes for the key)
-            state_diffs: generate_state_diffs(true, true, MAX_EFFECTIVE_PUBDATA_PER_BATCH / 5),
-            ..Default::default()
-        }),
-        tag: "small_repeated_writes".to_string(),
-    });
-
-    // lots of big repeated writes
-    statistics.push(StatisticsTagged {
-        statistics: execute_test(L1MessengerTestData {
-            // Each big repeated write will approximately require 4 bytes for key + 1 byte for encoding type + 32 bytes for value
-            state_diffs: generate_state_diffs(true, false, MAX_EFFECTIVE_PUBDATA_PER_BATCH / 37),
-            ..Default::default()
-        }),
-        tag: "big_repeated_writes".to_string(),
-    });
-
-    // lots of small initial writes
-    statistics.push(StatisticsTagged {
-        statistics: execute_test(L1MessengerTestData {
-            // Each small initial write will take at least 32 bytes for derived key + 1 byte for encoding zeroing out
-            state_diffs: generate_state_diffs(false, true, MAX_EFFECTIVE_PUBDATA_PER_BATCH / 33),
-            ..Default::default()
-        }),
-        tag: "small_initial_writes".to_string(),
-    });
-
-    // lots of large initial writes
-    statistics.push(StatisticsTagged {
-        statistics: execute_test(L1MessengerTestData {
-            // Each big write will take at least 32 bytes for derived key + 1 byte for encoding type + 32 bytes for value
-            state_diffs: generate_state_diffs(false, false, MAX_EFFECTIVE_PUBDATA_PER_BATCH / 65),
-            ..Default::default()
-        }),
-        tag: "big_initial_writes".to_string(),
-    });
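The per-state-diff divisors in the deleted test (5, 37, 33 and 65 bytes) follow from the compression comments quoted above. A minimal standalone sketch of that arithmetic, assuming one plausible decomposition consistent with those comments (a 4-byte compact key/enum index for repeated writes, a 32-byte derived key for initial writes, plus one metadata byte and, for large diffs, a full 32-byte value); the helper name is invented for illustration:

```rust
// Hypothetical helper, not part of the deleted test: rough pubdata cost per state diff.
fn approx_pubdata_per_state_diff(repeated_write: bool, zeroing_small_diff: bool) -> usize {
    // Repeated writes publish a 4-byte enum index; initial writes a 32-byte derived key.
    let key_part = if repeated_write { 4 } else { 32 };
    // One metadata byte, plus a full 32-byte value unless the diff packs down to nothing.
    let value_part = if zeroing_small_diff { 1 } else { 1 + 32 };
    key_part + value_part
}

fn main() {
    assert_eq!(approx_pubdata_per_state_diff(true, true), 5); // small repeated writes
    assert_eq!(approx_pubdata_per_state_diff(true, false), 37); // big repeated writes
    assert_eq!(approx_pubdata_per_state_diff(false, true), 33); // small initial writes
    assert_eq!(approx_pubdata_per_state_diff(false, false), 65); // big initial writes
}
```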
-    // We use 2x overhead for the batch tip compared to the worst estimated scenario.
-    let max_used_gas = statistics
-        .iter()
-        .map(|s| (s.statistics.max_used_gas, s.tag.clone()))
-        .max()
-        .unwrap();
-    assert!(
-        max_used_gas.0 * 2 <= BOOTLOADER_BATCH_TIP_OVERHEAD,
-        "BOOTLOADER_BATCH_TIP_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_OVERHEAD = {}",
-        max_used_gas.1,
-        max_used_gas.0,
-        BOOTLOADER_BATCH_TIP_OVERHEAD
-    );
-
-    let circuit_statistics = statistics
-        .iter()
-        .map(|s| (s.statistics.circuit_statistics, s.tag.clone()))
-        .max()
-        .unwrap();
-    assert!(
-        circuit_statistics.0 * 2 <= BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD as u64,
-        "BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD = {}",
-        circuit_statistics.1,
-        circuit_statistics.0,
-        BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD
-    );
-
-    let execution_metrics_size = statistics
-        .iter()
-        .map(|s| (s.statistics.execution_metrics_size, s.tag.clone()))
-        .max()
-        .unwrap();
-    assert!(
-        execution_metrics_size.0 * 2 <= BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD as u64,
-        "BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD = {}",
-        execution_metrics_size.1,
-        execution_metrics_size.0,
-        BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD
-    );
-}
diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/bytecode_publishing.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/bytecode_publishing.rs
deleted file mode 100644
index dd91d6d94a9..00000000000
--- a/core/lib/multivm/src/versions/vm_1_4_2/tests/bytecode_publishing.rs
+++ /dev/null
@@ -1,40 +0,0 @@
-use zksync_types::event::extract_long_l2_to_l1_messages;
-use zksync_utils::bytecode::compress_bytecode;
-
-use crate::{
-    interface::{TxExecutionMode, VmExecutionMode, VmInterface},
-    vm_1_4_2::tests::{
-        tester::{DeployContractsTx, TxType, VmTesterBuilder},
-        utils::read_test_contract,
-    },
-};
-
-#[test]
-fn test_bytecode_publishing() {
-    // In this test, we aim to ensure that the contents of the compressed bytecodes
-    // are included as part of the L2->L1 long messages
-    let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_random_rich_accounts(1)
-        .build();
-
-    let counter = read_test_contract();
-    let account = &mut vm.rich_accounts[0];
-
-    let compressed_bytecode = compress_bytecode(&counter).unwrap();
-
-    let DeployContractsTx { tx, .. } = account.get_deploy_tx(&counter, None, TxType::L2);
-    vm.vm.push_transaction(tx);
-    let result = vm.vm.execute(VmExecutionMode::OneTx);
-    assert!(!result.result.is_failed(), "Transaction wasn't successful");
-
-    vm.vm.execute(VmExecutionMode::Batch);
-
-    let state = vm.vm.get_current_execution_state();
-    let long_messages = extract_long_l2_to_l1_messages(&state.events);
-    assert!(
-        long_messages.contains(&compressed_bytecode),
-        "Bytecode not published"
-    );
-}
diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/call_tracer.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/call_tracer.rs
deleted file mode 100644
index 2fafb7e51aa..00000000000
--- a/core/lib/multivm/src/versions/vm_1_4_2/tests/call_tracer.rs
+++ /dev/null
@@ -1,92 +0,0 @@
-use std::sync::Arc;
-
-use once_cell::sync::OnceCell;
-use zksync_types::{Address, Execute};
-
-use crate::{
-    interface::{TxExecutionMode, VmExecutionMode, VmInterface},
-    tracers::CallTracer,
-    vm_1_4_2::{
-        constants::BLOCK_GAS_LIMIT,
-        tests::{
-            tester::VmTesterBuilder,
-            utils::{read_max_depth_contract, read_test_contract},
-        },
-        ToTracerPointer,
-    },
-};
-
-// This test is ultra slow, so it's ignored by default.
-#[test]
-#[ignore]
-fn test_max_depth() {
-    let contract = read_max_depth_contract();
-    let address = Address::random();
-    let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_random_rich_accounts(1)
-        .with_deployer()
-        .with_bootloader_gas_limit(BLOCK_GAS_LIMIT)
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_custom_contracts(vec![(contract, address, true)])
-        .build();
-
-    let account = &mut vm.rich_accounts[0];
-    let tx = account.get_l2_tx_for_execute(
-        Execute {
-            contract_address: address,
-            calldata: vec![],
-            value: Default::default(),
-            factory_deps: None,
-        },
-        None,
-    );
-
-    let result = Arc::new(OnceCell::new());
-    let call_tracer = CallTracer::new(result.clone()).into_tracer_pointer();
-    vm.vm.push_transaction(tx);
-    let res = vm.vm.inspect(call_tracer.into(), VmExecutionMode::OneTx);
-    assert!(result.get().is_some());
-    assert!(res.result.is_failed());
-}
-
-#[test]
-fn test_basic_behavior() {
-    let contract = read_test_contract();
-    let address = Address::random();
-    let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_random_rich_accounts(1)
-        .with_deployer()
-        .with_bootloader_gas_limit(BLOCK_GAS_LIMIT)
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_custom_contracts(vec![(contract, address, true)])
-        .build();
-
-    let increment_by_6_calldata =
-        "7cf5dab00000000000000000000000000000000000000000000000000000000000000006";
-
-    let account = &mut vm.rich_accounts[0];
-    let tx = account.get_l2_tx_for_execute(
-        Execute {
-            contract_address: address,
-            calldata: hex::decode(increment_by_6_calldata).unwrap(),
-            value: Default::default(),
-            factory_deps: None,
-        },
-        None,
-    );
-
-    let result = Arc::new(OnceCell::new());
-    let call_tracer = CallTracer::new(result.clone()).into_tracer_pointer();
-    vm.vm.push_transaction(tx);
-    let res = vm.vm.inspect(call_tracer.into(), VmExecutionMode::OneTx);
-
-    let call_tracer_result = result.get().unwrap();
-
-    assert_eq!(call_tracer_result.len(), 1);
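The hard-coded hex string in the test above is ABI-style calldata: a 4-byte function selector followed by a single 32-byte big-endian argument. A small sketch reproducing it (the selector bytes are taken verbatim from the test; `hex` is the same crate the test already uses for decoding):

```rust
// Build calldata as selector || uint256 argument (left-padded to 32 bytes).
fn encode_call(selector: [u8; 4], arg: u64) -> Vec<u8> {
    let mut calldata = selector.to_vec();
    let mut word = [0u8; 32];
    word[24..].copy_from_slice(&arg.to_be_bytes());
    calldata.extend_from_slice(&word);
    calldata
}

fn main() {
    let calldata = encode_call([0x7c, 0xf5, 0xda, 0xb0], 6);
    assert_eq!(
        hex::encode(&calldata),
        "7cf5dab00000000000000000000000000000000000000000000000000000000000000006"
    );
}
```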
-    // Expect that there are plenty of subcalls underneath.
-    let subcall = &call_tracer_result[0].calls;
-    assert!(subcall.len() > 10);
-    assert!(!res.result.is_failed());
-}
diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/gas_limit.rs
deleted file mode 100644
index b84e9d32126..00000000000
--- a/core/lib/multivm/src/versions/vm_1_4_2/tests/gas_limit.rs
+++ /dev/null
@@ -1,44 +0,0 @@
-use zksync_types::{fee::Fee, Execute};
-
-use crate::{
-    interface::{TxExecutionMode, VmInterface},
-    vm_1_4_2::{
-        constants::{BOOTLOADER_HEAP_PAGE, TX_DESCRIPTION_OFFSET, TX_GAS_LIMIT_OFFSET},
-        tests::tester::VmTesterBuilder,
-    },
-};
-
-/// Checks that `TX_GAS_LIMIT_OFFSET` constant is correct.
-#[test]
-fn test_tx_gas_limit_offset() {
-    let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryDisabled)
-        .with_empty_in_memory_storage()
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_random_rich_accounts(1)
-        .build();
-
-    let gas_limit = 9999.into();
-    let tx = vm.rich_accounts[0].get_l2_tx_for_execute(
-        Execute {
-            contract_address: Some(Default::default()),
-            ..Default::default()
-        },
-        Some(Fee {
-            gas_limit,
-            ..Default::default()
-        }),
-    );
-
-    vm.vm.push_transaction(tx);
-
-    let gas_limit_from_memory = vm
-        .vm
-        .state
-        .memory
-        .read_slot(
-            BOOTLOADER_HEAP_PAGE as usize,
-            TX_DESCRIPTION_OFFSET + TX_GAS_LIMIT_OFFSET,
-        )
-        .value;
-    assert_eq!(gas_limit_from_memory, gas_limit);
-}
diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/get_used_contracts.rs
deleted file mode 100644
index cfe3e1bfc23..00000000000
--- a/core/lib/multivm/src/versions/vm_1_4_2/tests/get_used_contracts.rs
+++ /dev/null
@@ -1,109 +0,0 @@
-use std::collections::{HashMap, HashSet};
-
-use itertools::Itertools;
-use crate::interface::storage::WriteStorage;
-use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS;
-use zksync_test_account::Account;
-use zksync_types::{Execute, U256};
-use zksync_utils::{bytecode::hash_bytecode, h256_to_u256};
-
-use crate::{
-    interface::{TxExecutionMode, VmExecutionMode, VmInterface},
-    vm_1_4_2::{
-        tests::{
-            tester::{TxType, VmTesterBuilder},
-            utils::{read_test_contract, BASE_SYSTEM_CONTRACTS},
-        },
-        Vm,
-    },
-    HistoryMode,
-};
-
-#[test]
-fn test_get_used_contracts() {
-    let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryDisabled)
-        .with_empty_in_memory_storage()
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .build();
-
-    assert!(known_bytecodes_without_aa_code(&vm.vm).is_empty());
-
-    // Create, push and execute a non-empty factory-deps transaction with success status
-    // to check that `get_used_contracts()` updates
-    let contract_code = read_test_contract();
-    let mut account = Account::random();
-    let tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 0 });
-    vm.vm.push_transaction(tx.tx.clone());
-    let result = vm.vm.execute(VmExecutionMode::OneTx);
-    assert!(!result.result.is_failed());
-
-    assert!(vm
-        .vm
-        .get_used_contracts()
-        .contains(&h256_to_u256(tx.bytecode_hash)));
-
-    // Note: `Default_AA` will be in the list of used contracts if an L2 tx is used
-    assert_eq!(
-        vm.vm
-            .get_used_contracts()
-            .into_iter()
-            .collect::<HashSet<U256>>(),
-        known_bytecodes_without_aa_code(&vm.vm)
-            .keys()
-            .cloned()
-            .collect::<HashSet<U256>>()
-    );
-
-    // Create, push and execute a non-empty factory-deps transaction that fails
-    // (`known_bytecodes` will be updated but we expect `get_used_contracts()` to not be updated)
-
-    let calldata = [1, 2, 3];
-    let big_calldata: Vec<u8> = calldata
-        .iter()
-        .cycle()
-        .take(calldata.len() * 1024)
-        .cloned()
-        .collect();
-    let account2 = Account::random();
-    let tx2 = account2.get_l1_tx(
-        Execute {
-            contract_address: CONTRACT_DEPLOYER_ADDRESS,
-            calldata: big_calldata,
-            value: Default::default(),
-            factory_deps: Some(vec![vec![1; 32]]),
-        },
-        1,
-    );
-
-    vm.vm.push_transaction(tx2.clone());
-
-    let res2 = vm.vm.execute(VmExecutionMode::OneTx);
-
-    assert!(res2.result.is_failed());
-
-    for factory_dep in tx2.execute.factory_deps.unwrap() {
-        let hash = hash_bytecode(&factory_dep);
-        let hash_to_u256 = h256_to_u256(hash);
-        assert!(known_bytecodes_without_aa_code(&vm.vm)
-            .keys()
-            .contains(&hash_to_u256));
-        assert!(!vm.vm.get_used_contracts().contains(&hash_to_u256));
-    }
-}
-
-fn known_bytecodes_without_aa_code<S: WriteStorage, H: HistoryMode>(
-    vm: &Vm<S, H>,
-) -> HashMap<U256, Vec<U256>> {
-    let mut known_bytecodes_without_aa_code = vm
-        .state
-        .decommittment_processor
-        .known_bytecodes
-        .inner()
-        .clone();
-
-    known_bytecodes_without_aa_code
-        .remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash))
-        .unwrap();
-
-    known_bytecodes_without_aa_code
-}
diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/invalid_bytecode.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/invalid_bytecode.rs
deleted file mode 100644
index c79fcd8ba8e..00000000000
--- a/core/lib/multivm/src/versions/vm_1_4_2/tests/invalid_bytecode.rs
+++ /dev/null
@@ -1,120 +0,0 @@
-use zksync_types::H256;
-use zksync_utils::h256_to_u256;
-
-use crate::vm_1_4_2::tests::tester::VmTesterBuilder;
-use crate::vm_1_4_2::types::inputs::system_env::TxExecutionMode;
-use crate::vm_1_4_2::{HistoryEnabled, TxRevertReason};
-
-// TODO: this test requires a lot of hacks for bypassing the bytecode checks in the VM.
-// Port it later; it's not significant for now.
-
-#[test]
-fn test_invalid_bytecode() {
-    let mut vm_builder = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled)
-        .with_in_memory_storage()
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_random_rich_accounts(1);
-    let mut storage = vm_builder.take_storage();
-    let mut vm = vm_builder.build(&mut storage);
-
-    let block_gas_per_pubdata = vm_test_env
-        .block_context
-        .context
-        .block_gas_price_per_pubdata();
-
-    let mut test_vm_with_custom_bytecode_hash =
-        |bytecode_hash: H256, expected_revert_reason: Option<TxRevertReason>| {
-            let mut oracle_tools =
-                OracleTools::new(vm_test_env.storage_ptr.as_mut(), HistoryEnabled);
-
-            let (encoded_tx, predefined_overhead) = get_l1_tx_with_custom_bytecode_hash(
-                h256_to_u256(bytecode_hash),
-                block_gas_per_pubdata as u32,
-            );
-
-            run_vm_with_custom_factory_deps(
-                &mut oracle_tools,
-                vm_test_env.block_context.context,
-                &vm_test_env.block_properties,
-                encoded_tx,
-                predefined_overhead,
-                expected_revert_reason,
-            );
-        };
-
-    let failed_to_mark_factory_deps = |msg: &str, data: Vec<u8>| {
-        TxRevertReason::FailedToMarkFactoryDependencies(VmRevertReason::General {
-            msg: msg.to_string(),
-            data,
-        })
-    };
-
-    // Here we provide the correctly-formatted bytecode hash of
-    // odd length, so it should work.
-    test_vm_with_custom_bytecode_hash(
-        H256([
-            1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-            0, 0, 0,
-        ]),
-        None,
-    );
-
-    // Here we provide a correctly formatted bytecode hash of even length, so
-    // it should fail.
-    test_vm_with_custom_bytecode_hash(
-        H256([
-            1, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-            0, 0, 0,
-        ]),
-        Some(failed_to_mark_factory_deps(
-            "Code length in words must be odd",
-            vec![
-                8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 67, 111, 100, 101, 32, 108, 101, 110,
-                103, 116, 104, 32, 105, 110, 32, 119, 111, 114, 100, 115, 32, 109, 117, 115, 116,
-                32, 98, 101, 32, 111, 100, 100,
-            ],
-        )),
-    );
-
-    // Here we provide an incorrectly formatted bytecode hash of odd length, so
-    // it should fail.
-    test_vm_with_custom_bytecode_hash(
-        H256([
-            1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-            0, 0, 0,
-        ]),
-        Some(failed_to_mark_factory_deps(
-            "Incorrectly formatted bytecodeHash",
-            vec![
-                8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99,
-                116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116,
-                101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-            ],
-        )),
-    );
-
-    // Here we provide an incorrectly formatted bytecode hash of odd length, so
-    // it should fail.
-    test_vm_with_custom_bytecode_hash(
-        H256([
-            2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-            0, 0, 0,
-        ]),
-        Some(failed_to_mark_factory_deps(
-            "Incorrectly formatted bytecodeHash",
-            vec![
-                8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99,
-                116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116,
-                101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-            ],
-        )),
-    );
-}
diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/l2_blocks.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/l2_blocks.rs
deleted file mode 100644
index f722890f474..00000000000
--- a/core/lib/multivm/src/versions/vm_1_4_2/tests/l2_blocks.rs
+++ /dev/null
@@ -1,437 +0,0 @@
-//!
-//! Tests for the bootloader
-//! The description for each of the tests can be found in the corresponding `.yul` file.
-//!
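The deleted `invalid_bytecode.rs` above exercises the versioned bytecode-hash format through four cases. A condensed, standalone sketch of the header rules those cases imply, under an assumed layout (byte 0 is the version and must be 1, byte 1 is reserved and must be zero, bytes 2..4 are the big-endian bytecode length in 32-byte words, which must be odd); the function name is invented for illustration:

```rust
// Validate the first four bytes of a versioned bytecode hash (assumed layout).
fn check_bytecode_hash_header(hash: [u8; 32]) -> Result<(), &'static str> {
    if hash[0] != 1 || hash[1] != 0 {
        return Err("Incorrectly formatted bytecodeHash");
    }
    let len_in_words = u16::from_be_bytes([hash[2], hash[3]]);
    if len_in_words % 2 == 0 {
        return Err("Code length in words must be odd");
    }
    Ok(())
}

fn main() {
    // Mirrors the test's valid case (version 1, length 1 word) ...
    let mut ok = [0u8; 32];
    ok[0] = 1;
    ok[3] = 1;
    assert!(check_bytecode_hash_header(ok).is_ok());
    // ... and its even-length failure case (length 0x0202 words).
    let mut even = [0u8; 32];
    even[0] = 1;
    even[2] = 2;
    even[3] = 2;
    assert!(check_bytecode_hash_header(even).is_err());
}
```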
- -use zk_evm_1_4_1::aux_structures::Timestamp; -use crate::interface::storage::WriteStorage; -use zksync_system_constants::REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE; -use zksync_types::{ - block::{pack_block_info, MiniblockHasher}, - AccountTreeId, Execute, ExecuteTransactionCommon, L1BatchNumber, L1TxCommonData, - MiniblockNumber, ProtocolVersionId, StorageKey, Transaction, H160, H256, - SYSTEM_CONTEXT_ADDRESS, SYSTEM_CONTEXT_BLOCK_INFO_POSITION, - SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, - U256, -}; -use zksync_utils::{h256_to_u256, u256_to_h256}; - -use crate::{ - interface::{ExecutionResult, Halt, L2BlockEnv, TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_2::{ - constants::{ - BOOTLOADER_HEAP_PAGE, TX_OPERATOR_L2_BLOCK_INFO_OFFSET, - TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO, - }, - tests::tester::{default_l1_batch, VmTesterBuilder}, - utils::l2_blocks::get_l2_block_hash_key, - Vm, - }, - HistoryMode, -}; - -fn get_l1_noop() -> Transaction { - Transaction { - common_data: ExecuteTransactionCommon::L1(L1TxCommonData { - sender: H160::random(), - gas_limit: U256::from(2000000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute: Execute { - contract_address: H160::zero(), - calldata: vec![], - value: U256::zero(), - factory_deps: None, - }, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -#[test] -fn test_l2_block_initialization_timestamp() { - // This test checks that the L2 block initialization works correctly. - // Here we check that that the first block must have timestamp that is greater or equal to the timestamp - // of the current batch. - - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - // Override the timestamp of the current miniblock to be 0. - vm.vm.bootloader_state.push_l2_block(L2BlockEnv { - number: 1, - timestamp: 0, - prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }); - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt {reason: Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())} - ); -} - -#[test] -fn test_l2_block_initialization_number_non_zero() { - // This test checks that the L2 block initialization works correctly. - // Here we check that the first miniblock number can not be zero. 
- - let l1_batch = default_l1_batch(L1BatchNumber(1)); - let first_l2_block = L2BlockEnv { - number: 0, - timestamp: l1_batch.timestamp, - prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }; - - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 0, first_l2_block, timestamp); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt { - reason: Halt::FailedToSetL2Block( - "L2 block number is never expected to be zero".to_string() - ) - } - ); -} - -fn test_same_l2_block( - expected_error: Option, - override_timestamp: Option, - override_prev_block_hash: Option, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - vm.vm.push_transaction(l1_tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!res.result.is_failed()); - - let mut current_l2_block = vm.vm.batch_env.first_l2_block; - - if let Some(timestamp) = override_timestamp { - current_l2_block.timestamp = timestamp; - } - if let Some(prev_block_hash) = override_prev_block_hash { - current_l2_block.prev_block_hash = prev_block_hash; - } - - if (None, None) == (override_timestamp, override_prev_block_hash) { - current_l2_block.max_virtual_blocks_to_create = 0; - } - - vm.vm.push_transaction(l1_tx); - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 1, current_l2_block, timestamp); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_same_l2_block() { - // This test aims to test the case when there are multiple transactions inside the same L2 block. 
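These L2-block tests all probe the same bootloader invariants, whose expected error strings appear in the cases that follow. A hypothetical condensed restatement of the new-block rules (names invented for illustration; for a continuation of the *same* L2 block, the tests instead require the timestamp and previous hash to be identical):

```rust
struct L2BlockInfo {
    number: u32,
    timestamp: u64,
    prev_block_hash: [u8; 32],
}

// `prev_hash` is the hash of the previous block itself, which the new block must chain to.
fn validate_new_l2_block(
    prev: &L2BlockInfo,
    prev_hash: [u8; 32],
    new: &L2BlockInfo,
) -> Result<(), &'static str> {
    if new.number == 0 {
        return Err("L2 block number is never expected to be zero");
    }
    if new.number != prev.number + 1 {
        return Err("Invalid new L2 block number");
    }
    if new.timestamp <= prev.timestamp {
        return Err("The timestamp of the new L2 block must be greater than the timestamp of the previous L2 block");
    }
    if new.prev_block_hash != prev_hash {
        return Err("The current L2 block hash is incorrect");
    }
    Ok(())
}

fn main() {
    let prev = L2BlockInfo { number: 1, timestamp: 100, prev_block_hash: [0; 32] };
    let new = L2BlockInfo { number: 2, timestamp: 101, prev_block_hash: [7; 32] };
    assert!(validate_new_l2_block(&prev, [7; 32], &new).is_ok());
}
```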
- - // Case 1: Incorrect timestamp - test_same_l2_block( - Some(Halt::FailedToSetL2Block( - "The timestamp of the same L2 block must be same".to_string(), - )), - Some(0), - None, - ); - - // Case 2: Incorrect previous block hash - test_same_l2_block( - Some(Halt::FailedToSetL2Block( - "The previous hash of the same L2 block must be same".to_string(), - )), - None, - Some(H256::zero()), - ); - - // Case 3: Correct continuation of the same L2 block - test_same_l2_block(None, None, None); -} - -fn test_new_l2_block( - first_l2_block: L2BlockEnv, - overriden_second_block_number: Option, - overriden_second_block_timestamp: Option, - overriden_second_block_prev_block_hash: Option, - expected_error: Option, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - l1_batch.first_l2_block = first_l2_block; - - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - // Firstly we execute the first transaction - vm.vm.push_transaction(l1_tx.clone()); - vm.vm.execute(VmExecutionMode::OneTx); - - let mut second_l2_block = vm.vm.batch_env.first_l2_block; - second_l2_block.number += 1; - second_l2_block.timestamp += 1; - second_l2_block.prev_block_hash = vm.vm.bootloader_state.last_l2_block().get_hash(); - - if let Some(block_number) = overriden_second_block_number { - second_l2_block.number = block_number; - } - if let Some(timestamp) = overriden_second_block_timestamp { - second_l2_block.timestamp = timestamp; - } - if let Some(prev_block_hash) = overriden_second_block_prev_block_hash { - second_l2_block.prev_block_hash = prev_block_hash; - } - - vm.vm.bootloader_state.push_l2_block(second_l2_block); - - vm.vm.push_transaction(l1_tx); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_new_l2_block() { - // This test is aimed to cover potential issue - - let correct_first_block = L2BlockEnv { - number: 1, - timestamp: 1, - prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }; - - // Case 1: Block number increasing by more than 1 - test_new_l2_block( - correct_first_block, - Some(3), - None, - None, - Some(Halt::FailedToSetL2Block( - "Invalid new L2 block number".to_string(), - )), - ); - - // Case 2: Timestamp not increasing - test_new_l2_block( - correct_first_block, - None, - Some(1), - None, - Some(Halt::FailedToSetL2Block("The timestamp of the new L2 block must be greater than the timestamp of the previous L2 block".to_string())), - ); - - // Case 3: Incorrect previous block hash - test_new_l2_block( - correct_first_block, - None, - None, - Some(H256::zero()), - Some(Halt::FailedToSetL2Block( - "The current L2 block hash is incorrect".to_string(), - )), - ); - - // Case 4: Correct new block - test_new_l2_block(correct_first_block, None, None, None, None); -} - -#[allow(clippy::too_many_arguments)] -fn test_first_in_batch( - miniblock_timestamp: u64, - miniblock_number: u32, - pending_txs_hash: H256, - batch_timestamp: u64, - new_batch_timestamp: u64, - batch_number: u32, - proposed_block: L2BlockEnv, - expected_error: Option, -) { - let mut l1_batch = 
default_l1_batch(L1BatchNumber(1)); - l1_batch.number += 1; - l1_batch.timestamp = new_batch_timestamp; - - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_tx = get_l1_noop(); - - // Setting the values provided. - let storage_ptr = vm.vm.state.storage.storage.get_ptr(); - let miniblock_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - ); - let pending_txs_hash_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, - ); - let batch_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_BLOCK_INFO_POSITION, - ); - let prev_block_hash_position = get_l2_block_hash_key(miniblock_number - 1); - - storage_ptr.borrow_mut().set_value( - miniblock_info_slot, - u256_to_h256(pack_block_info( - miniblock_number as u64, - miniblock_timestamp, - )), - ); - storage_ptr - .borrow_mut() - .set_value(pending_txs_hash_slot, pending_txs_hash); - storage_ptr.borrow_mut().set_value( - batch_info_slot, - u256_to_h256(pack_block_info(batch_number as u64, batch_timestamp)), - ); - storage_ptr.borrow_mut().set_value( - prev_block_hash_position, - MiniblockHasher::legacy_hash(MiniblockNumber(miniblock_number - 1)), - ); - - // In order to skip checks from the Rust side of the VM, we firstly use some definitely correct L2 block info. - // And then override it with the user-provided value - - let last_l2_block = vm.vm.bootloader_state.last_l2_block(); - let new_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: last_l2_block.timestamp + 1, - prev_block_hash: last_l2_block.get_hash(), - max_virtual_blocks_to_create: last_l2_block.max_virtual_blocks_to_create, - }; - - vm.vm.bootloader_state.push_l2_block(new_l2_block); - vm.vm.push_transaction(l1_tx); - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 0, proposed_block, timestamp); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_first_in_batch() { - let prev_block_hash = MiniblockHasher::legacy_hash(MiniblockNumber(0)); - let prev_block_hash = MiniblockHasher::new(MiniblockNumber(1), 1, prev_block_hash) - .finalize(ProtocolVersionId::latest()); - test_first_in_batch( - 1, - 1, - H256::zero(), - 1, - 2, - 1, - L2BlockEnv { - number: 2, - timestamp: 2, - prev_block_hash, - max_virtual_blocks_to_create: 1, - }, - None, - ); - - let prev_block_hash = MiniblockHasher::legacy_hash(MiniblockNumber(0)); - let prev_block_hash = MiniblockHasher::new(MiniblockNumber(1), 8, prev_block_hash) - .finalize(ProtocolVersionId::latest()); - test_first_in_batch( - 8, - 1, - H256::zero(), - 5, - 12, - 1, - L2BlockEnv { - number: 2, - timestamp: 9, - prev_block_hash, - max_virtual_blocks_to_create: 1, - }, - Some(Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())), - ); -} - -fn set_manual_l2_block_info( - vm: &mut Vm, - tx_number: usize, - block_info: L2BlockEnv, - timestamp: Timestamp, -) { - let fictive_miniblock_position = - 
TX_OPERATOR_L2_BLOCK_INFO_OFFSET + TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO * tx_number; - - vm.state.memory.populate_page( - BOOTLOADER_HEAP_PAGE as usize, - vec![ - (fictive_miniblock_position, block_info.number.into()), - (fictive_miniblock_position + 1, block_info.timestamp.into()), - ( - fictive_miniblock_position + 2, - h256_to_u256(block_info.prev_block_hash), - ), - ( - fictive_miniblock_position + 3, - block_info.max_virtual_blocks_to_create.into(), - ), - ], - timestamp, - ) -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/mod.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/mod.rs deleted file mode 100644 index a07608121bc..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/mod.rs +++ /dev/null @@ -1,23 +0,0 @@ -mod bootloader; -mod default_aa; -// TODO - fix this test -// `mod invalid_bytecode;` -mod block_tip; -mod bytecode_publishing; -mod call_tracer; -mod circuits; -mod gas_limit; -mod get_used_contracts; -mod is_write_initial; -mod l1_tx_execution; -mod l2_blocks; -mod nonce_holder; -mod precompiles; -mod refunds; -mod require_eip712; -mod rollbacks; -mod simple_execution; -mod tester; -mod tracing_execution_error; -mod upgrade; -mod utils; diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/nonce_holder.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/nonce_holder.rs deleted file mode 100644 index 9f1be4ec947..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/nonce_holder.rs +++ /dev/null @@ -1,187 +0,0 @@ -use zksync_types::{Execute, Nonce}; - -use crate::{ - interface::{ - ExecutionResult, Halt, TxExecutionMode, TxRevertReason, VmExecutionMode, VmInterface, - VmRevertReason, - }, - vm_1_4_2::{ - tests::{ - tester::{Account, VmTesterBuilder}, - utils::read_nonce_holder_tester, - }, - types::internals::TransactionData, - }, -}; - -pub enum NonceHolderTestMode { - SetValueUnderNonce, - IncreaseMinNonceBy5, - IncreaseMinNonceTooMuch, - LeaveNonceUnused, - IncreaseMinNonceBy1, - SwitchToArbitraryOrdering, -} - -impl From for u8 { - fn from(mode: NonceHolderTestMode) -> u8 { - match mode { - NonceHolderTestMode::SetValueUnderNonce => 0, - NonceHolderTestMode::IncreaseMinNonceBy5 => 1, - NonceHolderTestMode::IncreaseMinNonceTooMuch => 2, - NonceHolderTestMode::LeaveNonceUnused => 3, - NonceHolderTestMode::IncreaseMinNonceBy1 => 4, - NonceHolderTestMode::SwitchToArbitraryOrdering => 5, - } - } -} - -#[test] -fn test_nonce_holder() { - let mut account = Account::random(); - - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_custom_contracts(vec![( - read_nonce_holder_tester().to_vec(), - account.address, - true, - )]) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let mut run_nonce_test = |nonce: u32, - test_mode: NonceHolderTestMode, - error_message: Option, - comment: &'static str| { - // In this test we have to reset VM state after each test case. Because once bootloader failed during the validation of the transaction, - // it will fail again and again. At the same time we have to keep the same storage, because we want to keep the nonce holder contract state. - // The easiest way in terms of lifetimes is to reuse `vm_builder` to achieve it. 
- vm.reset_state(true); - let mut transaction_data: TransactionData = account - .get_l2_tx_for_execute_with_nonce( - Execute { - contract_address: account.address, - calldata: vec![12], - value: Default::default(), - factory_deps: None, - }, - None, - Nonce(nonce), - ) - .into(); - - transaction_data.signature = vec![test_mode.into()]; - vm.vm.push_raw_transaction(transaction_data, 0, 0, true); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(msg) = error_message { - let expected_error = - TxRevertReason::Halt(Halt::ValidationFailed(VmRevertReason::General { - msg, - data: vec![], - })); - let ExecutionResult::Halt { reason } = result.result else { - panic!("Expected revert, got {:?}", result.result); - }; - assert_eq!( - reason.to_string(), - expected_error.to_string(), - "{}", - comment - ); - } else { - assert!(!result.result.is_failed(), "{}", comment); - } - }; - // Test 1: trying to set value under non sequential nonce value. - run_nonce_test( - 1u32, - NonceHolderTestMode::SetValueUnderNonce, - Some("Previous nonce has not been used".to_string()), - "Allowed to set value under non sequential value", - ); - - // Test 2: increase min nonce by 1 with sequential nonce ordering: - run_nonce_test( - 0u32, - NonceHolderTestMode::IncreaseMinNonceBy1, - None, - "Failed to increment nonce by 1 for sequential account", - ); - - // Test 3: correctly set value under nonce with sequential nonce ordering: - run_nonce_test( - 1u32, - NonceHolderTestMode::SetValueUnderNonce, - None, - "Failed to set value under nonce sequential value", - ); - - // Test 5: migrate to the arbitrary nonce ordering: - run_nonce_test( - 2u32, - NonceHolderTestMode::SwitchToArbitraryOrdering, - None, - "Failed to switch to arbitrary ordering", - ); - - // Test 6: increase min nonce by 5 - run_nonce_test( - 6u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - None, - "Failed to increase min nonce by 5", - ); - - // Test 7: since the nonces in range [6,10] are no longer allowed, the - // tx with nonce 10 should not be allowed - run_nonce_test( - 10u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - Some("Reusing the same nonce twice".to_string()), - "Allowed to reuse nonce below the minimal one", - ); - - // Test 8: we should be able to use nonce 13 - run_nonce_test( - 13u32, - NonceHolderTestMode::SetValueUnderNonce, - None, - "Did not allow to use unused nonce 10", - ); - - // Test 9: we should not be able to reuse nonce 13 - run_nonce_test( - 13u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - Some("Reusing the same nonce twice".to_string()), - "Allowed to reuse the same nonce twice", - ); - - // Test 10: we should be able to simply use nonce 14, while bumping the minimal nonce by 5 - run_nonce_test( - 14u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - None, - "Did not allow to use a bumped nonce", - ); - - // Test 11: Do not allow bumping nonce by too much - run_nonce_test( - 16u32, - NonceHolderTestMode::IncreaseMinNonceTooMuch, - Some("The value for incrementing the nonce is too high".to_string()), - "Allowed for incrementing min nonce too much", - ); - - // Test 12: Do not allow not setting a nonce as used - run_nonce_test( - 16u32, - NonceHolderTestMode::LeaveNonceUnused, - Some("The nonce was not set as used".to_string()), - "Allowed to leave nonce as unused", - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/precompiles.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/precompiles.rs deleted file mode 100644 index 0a799288204..00000000000 --- 
a/core/lib/multivm/src/versions/vm_1_4_2/tests/precompiles.rs +++ /dev/null @@ -1,135 +0,0 @@ -use zk_evm_1_4_1::zk_evm_abstractions::precompiles::PrecompileAddress; -use zksync_types::{Address, Execute}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_2::{ - constants::BLOCK_GAS_LIMIT, - tests::{tester::VmTesterBuilder, utils::read_precompiles_contract}, - }, -}; - -#[test] -fn test_keccak() { - // Execute special transaction and check that at least 1000 keccak calls were made. - let contract = read_precompiles_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contract, address, true)]) - .build(); - - // calldata for `doKeccak(1000)`. - let keccak1000_calldata = - "370f20ac00000000000000000000000000000000000000000000000000000000000003e8"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: hex::decode(keccak1000_calldata).unwrap(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let keccak_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::Keccak256) - .count(); - - assert!(keccak_count >= 1000); -} - -#[test] -fn test_sha256() { - // Execute special transaction and check that at least 1000 `sha256` calls were made. - let contract = read_precompiles_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contract, address, true)]) - .build(); - - // calldata for `doSha256(1000)`. - let sha1000_calldata = - "5d0b4fb500000000000000000000000000000000000000000000000000000000000003e8"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: hex::decode(sha1000_calldata).unwrap(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let sha_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::SHA256) - .count(); - - assert!(sha_count >= 1000); -} - -#[test] -fn test_ecrecover() { - // Execute simple transfer and check that exactly 1 `ecrecover` call was made (it's done during tx validation). 
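All three precompile tests in this deleted file share one counting pattern over the VM's precompile cycle history. A standalone sketch of that pattern, with `PrecompileKind` standing in for the real `PrecompileAddress`:

```rust
#[derive(PartialEq)]
enum PrecompileKind {
    Keccak256,
    Sha256,
    Ecrecover,
}

// Count how many history entries invoked the given precompile.
fn count_calls(history: &[(PrecompileKind, usize)], which: &PrecompileKind) -> usize {
    history.iter().filter(|(p, _)| p == which).count()
}

fn main() {
    let history = vec![
        (PrecompileKind::Ecrecover, 1),
        (PrecompileKind::Keccak256, 2),
        (PrecompileKind::Keccak256, 4),
    ];
    assert_eq!(count_calls(&history, &PrecompileKind::Ecrecover), 1);
    assert_eq!(count_calls(&history, &PrecompileKind::Keccak256), 2);
}
```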
- let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: account.address, - calldata: Vec::new(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let ecrecover_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::Ecrecover) - .count(); - - assert_eq!(ecrecover_count, 1); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/prestate_tracer.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/prestate_tracer.rs deleted file mode 100644 index 5586450f34b..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/prestate_tracer.rs +++ /dev/null @@ -1,143 +0,0 @@ -use std::sync::Arc; - -use once_cell::sync::OnceCell; -use zksync_test_account::TxType; -use zksync_types::{utils::deployed_address_create, Execute, U256}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - tracers::PrestateTracer, - vm_1_4_2::{ - constants::BLOCK_GAS_LIMIT, - tests::{tester::VmTesterBuilder, utils::read_simple_transfer_contract}, - HistoryEnabled, ToTracerPointer, - }, -}; - -#[test] -fn test_prestate_tracer() { - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - vm.deploy_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let tx1 = account.get_test_contract_transaction( - vm.test_contract.unwrap(), - false, - Default::default(), - true, - TxType::L2, - ); - vm.vm.push_transaction(tx1); - - let contract_address = vm.test_contract.unwrap(); - let prestate_tracer_result = Arc::new(OnceCell::default()); - let prestate_tracer = PrestateTracer::new(false, prestate_tracer_result.clone()); - let tracer_ptr = prestate_tracer.into_tracer_pointer(); - vm.vm.inspect(tracer_ptr.into(), VmExecutionMode::Batch); - - let prestate_result = Arc::try_unwrap(prestate_tracer_result) - .unwrap() - .take() - .unwrap_or_default(); - - assert!(prestate_result.1.contains_key(&contract_address)); -} - -#[test] -fn test_prestate_tracer_diff_mode() { - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - let contract = read_simple_transfer_contract(); - let tx = vm - .deployer - .as_mut() - .expect("You have to initialize builder with deployer") - .get_deploy_tx(&contract, None, TxType::L2) - .tx; - let nonce = tx.nonce().unwrap().0.into(); - vm.vm.push_transaction(tx); - vm.vm.execute(VmExecutionMode::OneTx); - let deployed_address = deployed_address_create(vm.deployer.as_ref().unwrap().address, nonce); - vm.test_contract = Some(deployed_address); - - // Deploy a second copy of the contract to see its appearance in the pre-state - let tx2 = vm - .deployer - .as_mut() - .expect("You have to initialize builder with deployer") - 
.get_deploy_tx(&contract, None, TxType::L2) - .tx; - let nonce2 = tx2.nonce().unwrap().0.into(); - vm.vm.push_transaction(tx2); - vm.vm.execute(VmExecutionMode::OneTx); - let deployed_address2 = deployed_address_create(vm.deployer.as_ref().unwrap().address, nonce2); - - let account = &mut vm.rich_accounts[0]; - - //enter ether to contract to see difference in the balance post execution - let tx0 = Execute { - contract_address: vm.test_contract.unwrap(), - calldata: Default::default(), - value: U256::from(100000), - factory_deps: None, - }; - - vm.vm - .push_transaction(account.get_l2_tx_for_execute(tx0.clone(), None)); - - let tx1 = Execute { - contract_address: deployed_address2, - calldata: Default::default(), - value: U256::from(200000), - factory_deps: None, - }; - - vm.vm - .push_transaction(account.get_l2_tx_for_execute(tx1, None)); - let prestate_tracer_result = Arc::new(OnceCell::default()); - let prestate_tracer = PrestateTracer::new(true, prestate_tracer_result.clone()); - let tracer_ptr = prestate_tracer.into_tracer_pointer(); - vm.vm - .inspect(tracer_ptr.into(), VmExecutionMode::Bootloader); - - let prestate_result = Arc::try_unwrap(prestate_tracer_result) - .unwrap() - .take() - .unwrap_or_default(); - - //assert that the pre-state contains both deployed contracts with balance zero - assert!(prestate_result.0.contains_key(&deployed_address)); - assert!(prestate_result.0.contains_key(&deployed_address2)); - assert_eq!( - prestate_result.0[&deployed_address].balance, - Some(U256::zero()) - ); - assert_eq!( - prestate_result.0[&deployed_address2].balance, - Some(U256::zero()) - ); - - //assert that the post-state contains both deployed contracts with the correct balance - assert!(prestate_result.1.contains_key(&deployed_address)); - assert!(prestate_result.1.contains_key(&deployed_address2)); - assert_eq!( - prestate_result.1[&deployed_address].balance, - Some(U256::from(100000)) - ); - assert_eq!( - prestate_result.1[&deployed_address2].balance, - Some(U256::from(200000)) - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/refunds.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/refunds.rs deleted file mode 100644 index 401c2c12a43..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/refunds.rs +++ /dev/null @@ -1,169 +0,0 @@ -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_2::{ - tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - types::internals::TransactionData, - }, -}; - -#[test] -fn test_predetermined_refunded_gas() { - // In this test, we compare the execution of the bootloader with the predefined - // refunded gas and without them - - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_batch = vm.vm.batch_env.clone(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let DeployContractsTx { - tx, - bytecode_hash: _, - address: _, - } = account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(!result.result.is_failed()); - - // If the refund provided by the operator or the final refund are the 0 - // there is no impact of the operator's refund at all and so this test does not - // make much sense. 
- assert!( - result.refunds.operator_suggested_refund > 0, - "The operator's refund is 0" - ); - assert!(result.refunds.gas_refunded > 0, "The final refund is 0"); - - let result_without_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_without_predefined_refunds = vm.vm.get_current_execution_state(); - assert!(!result_without_predefined_refunds.result.is_failed(),); - - // Here we want to provide the same refund from the operator and check that it's the correct one. - // We execute the whole block without refund tracer, because refund tracer will eventually override the provided refund. - // But the overall result should be the same - - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let tx: TransactionData = tx.into(); - // Overhead - let overhead = tx.overhead_gas(); - vm.vm.push_raw_transaction( - tx.clone(), - overhead, - result.refunds.gas_refunded as u32, - true, - ); - - let result_with_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_with_predefined_refunds = vm.vm.get_current_execution_state(); - - assert!(!result_with_predefined_refunds.result.is_failed()); - - // We need to sort these lists as those are flattened from HashMaps - current_state_with_predefined_refunds - .used_contract_hashes - .sort(); - current_state_without_predefined_refunds - .used_contract_hashes - .sort(); - - assert_eq!( - current_state_with_predefined_refunds.events, - current_state_without_predefined_refunds.events - ); - - assert_eq!( - current_state_with_predefined_refunds.user_l2_to_l1_logs, - current_state_without_predefined_refunds.user_l2_to_l1_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.system_logs, - current_state_without_predefined_refunds.system_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.storage_log_queries, - current_state_without_predefined_refunds.storage_log_queries - ); - assert_eq!( - current_state_with_predefined_refunds.used_contract_hashes, - current_state_without_predefined_refunds.used_contract_hashes - ); - - // In this test we put the different refund from the operator. - // We still can't use the refund tracer, because it will override the refund. - // But we can check that the logs and events have changed. 
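The state comparisons in this refunds test rely on normalizing collections that were flattened from hash maps ("We need to sort these lists as those are flattened from HashMaps"). A trivial standalone illustration of that sort-before-compare discipline:

```rust
// Sort before comparing so that HashMap iteration order cannot affect equality.
fn normalized(mut hashes: Vec<u64>) -> Vec<u64> {
    hashes.sort();
    hashes
}

fn main() {
    assert_eq!(normalized(vec![3, 1, 2]), normalized(vec![2, 3, 1]));
}
```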
- let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let changed_operator_suggested_refund = result.refunds.gas_refunded + 1000; - vm.vm - .push_raw_transaction(tx, overhead, changed_operator_suggested_refund as u32, true); - let result = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_with_changed_predefined_refunds = vm.vm.get_current_execution_state(); - - assert!(!result.result.is_failed()); - current_state_with_changed_predefined_refunds - .used_contract_hashes - .sort(); - current_state_without_predefined_refunds - .used_contract_hashes - .sort(); - - assert_eq!( - current_state_with_changed_predefined_refunds.events.len(), - current_state_without_predefined_refunds.events.len() - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.events, - current_state_without_predefined_refunds.events - ); - - assert_eq!( - current_state_with_changed_predefined_refunds.user_l2_to_l1_logs, - current_state_without_predefined_refunds.user_l2_to_l1_logs - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.system_logs, - current_state_without_predefined_refunds.system_logs - ); - - assert_eq!( - current_state_with_changed_predefined_refunds - .storage_log_queries - .len(), - current_state_without_predefined_refunds - .storage_log_queries - .len() - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.storage_log_queries, - current_state_without_predefined_refunds.storage_log_queries - ); - assert_eq!( - current_state_with_changed_predefined_refunds.used_contract_hashes, - current_state_without_predefined_refunds.used_contract_hashes - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/rollbacks.rs deleted file mode 100644 index 2ce18cc0136..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/rollbacks.rs +++ /dev/null @@ -1,263 +0,0 @@ -use ethabi::Token; -use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams}; -use crate::interface::storage::WriteStorage; -use zksync_types::{get_nonce_key, Execute, U256}; - -use crate::{ - interface::{ - dyn_tracers::vm_1_4_1::DynTracer, - tracer::{TracerExecutionStatus, TracerExecutionStopReason}, - TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceHistoryEnabled, - }, - vm_1_4_2::{ - tests::{ - tester::{DeployContractsTx, TransactionTestInfo, TxModifier, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - types::internals::ZkSyncVmState, - BootloaderState, HistoryMode, SimpleMemory, ToTracerPointer, VmTracer, - }, -}; - -#[test] -fn test_vm_rollbacks() { - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let mut account = vm.rich_accounts[0].clone(); - let counter = read_test_contract(); - let tx_0 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_1 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_2 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(tx_0.clone(), false), - TransactionTestInfo::new_processed(tx_1.clone(), false), - TransactionTestInfo::new_processed(tx_2.clone(), 
false), - ]); - - // reset vm - vm.reset_with_empty_storage(); - - let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignatureLength.into()), - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongMagicValue.into()), - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignature.into()), - // The correct nonce is 0, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_0.clone(), false), - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_1, false), - // The correct nonce is 2, this tx will fail - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_2.clone(), false), - // This tx will fail - TransactionTestInfo::new_rejected(tx_2, TxModifier::NonceReused.into()), - TransactionTestInfo::new_rejected(tx_0, TxModifier::NonceReused.into()), - ]); - - assert_eq!(result_without_rollbacks, result_with_rollbacks); -} - -#[test] -fn test_vm_loadnext_rollbacks() { - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let mut account = vm.rich_accounts[0].clone(); - - let loadnext_contract = get_loadnext_contract(); - let loadnext_constructor_data = &[Token::Uint(U256::from(100))]; - let DeployContractsTx { - tx: loadnext_deploy_tx, - address, - .. 
- } = account.get_deploy_tx_with_factory_deps( - &loadnext_contract.bytecode, - Some(loadnext_constructor_data), - loadnext_contract.factory_deps.clone(), - TxType::L2, - ); - - let loadnext_tx_1 = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let loadnext_tx_2 = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_1.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), false), - ]); - - // reset vm - vm.reset_with_empty_storage(); - - let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_1.clone(), true), - TransactionTestInfo::new_rejected( - loadnext_deploy_tx.clone(), - TxModifier::NonceReused.into(), - ), - TransactionTestInfo::new_processed(loadnext_tx_1, false), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_rejected(loadnext_deploy_tx, TxModifier::NonceReused.into()), - TransactionTestInfo::new_processed(loadnext_tx_2, false), - ]); - - assert_eq!(result_without_rollbacks, result_with_rollbacks); -} - -// Testing tracer that does not allow the recursion to go deeper than a certain limit -struct MaxRecursionTracer { - max_recursion_depth: usize, -} - -/// Tracer responsible for calculating the number of storage invocations and -/// stopping the VM execution if the limit is reached. -impl DynTracer> for MaxRecursionTracer {} - -impl VmTracer for MaxRecursionTracer { - fn finish_cycle( - &mut self, - state: &mut ZkSyncVmState, - _bootloader_state: &mut BootloaderState, - ) -> TracerExecutionStatus { - let current_depth = state.local_state.callstack.depth(); - - if current_depth > self.max_recursion_depth { - TracerExecutionStatus::Stop(TracerExecutionStopReason::Finish) - } else { - TracerExecutionStatus::Continue - } - } -} - -#[test] -fn test_layered_rollback() { - // This test checks that the layered rollbacks work correctly, i.e. - // the rollback by the operator will always revert all the changes - - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - let loadnext_contract = get_loadnext_contract().bytecode; - - let DeployContractsTx { - tx: deploy_tx, - address, - .. 
- } = account.get_deploy_tx( - &loadnext_contract, - Some(&[Token::Uint(0.into())]), - TxType::L2, - ); - vm.vm.push_transaction(deploy_tx); - let deployment_res = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!deployment_res.result.is_failed(), "transaction failed"); - - let loadnext_transaction = account.get_loadnext_transaction( - address, - LoadnextContractExecutionParams { - writes: 1, - recursive_calls: 20, - ..LoadnextContractExecutionParams::empty() - }, - TxType::L2, - ); - - let nonce_val = vm - .vm - .state - .storage - .storage - .read_from_storage(&get_nonce_key(&account.address)); - - vm.vm.make_snapshot(); - - vm.vm.push_transaction(loadnext_transaction.clone()); - vm.vm.inspect( - MaxRecursionTracer { - max_recursion_depth: 15, - } - .into_tracer_pointer() - .into(), - VmExecutionMode::OneTx, - ); - - let nonce_val2 = vm - .vm - .state - .storage - .storage - .read_from_storage(&get_nonce_key(&account.address)); - - // The tracer stopped after the validation has passed, so nonce has already been increased - assert_eq!(nonce_val + U256::one(), nonce_val2, "nonce did not change"); - - vm.vm.rollback_to_the_latest_snapshot(); - - let nonce_val_after_rollback = vm - .vm - .state - .storage - .storage - .read_from_storage(&get_nonce_key(&account.address)); - - assert_eq!( - nonce_val, nonce_val_after_rollback, - "nonce changed after rollback" - ); - - vm.vm.push_transaction(loadnext_transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "transaction must not fail"); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/inner_state.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/inner_state.rs deleted file mode 100644 index d6c072d1b1e..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/inner_state.rs +++ /dev/null @@ -1,131 +0,0 @@ -use std::collections::HashMap; - -use zk_evm_1_4_1::{aux_structures::Timestamp, vm_state::VmLocalState}; -use crate::interface::storage::WriteStorage; -use zksync_types::{StorageKey, StorageValue, U256}; - -use crate::{ - vm_1_4_2::{ - old_vm::{ - event_sink::InMemoryEventSink, - history_recorder::{AppDataFrameManagerWithHistory, HistoryRecorder}, - }, - utils::logs::StorageLogQuery, - HistoryEnabled, HistoryMode, SimpleMemory, Vm, - }, - HistoryMode as CommonHistoryMode, -}; - -#[derive(Clone, Debug)] -pub(crate) struct ModifiedKeysMap(HashMap); - -// We consider hashmaps to be equal even if there is a key -// that is not present in one but has zero value in another. -impl PartialEq for ModifiedKeysMap { - fn eq(&self, other: &Self) -> bool { - for (key, value) in self.0.iter() { - if *value != other.0.get(key).cloned().unwrap_or_default() { - return false; - } - } - for (key, value) in other.0.iter() { - if *value != self.0.get(key).cloned().unwrap_or_default() { - return false; - } - } - true - } -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct DecommitterTestInnerState { - /// There is no way to "truly" compare the storage pointer, - /// so we just compare the modified keys. This is reasonable enough. - pub(crate) modified_storage_keys: ModifiedKeysMap, - pub(crate) known_bytecodes: HistoryRecorder>, H>, - pub(crate) decommitted_code_hashes: HistoryRecorder, HistoryEnabled>, -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct StorageOracleInnerState { - /// There is no way to "truly" compare the storage pointer, - /// so we just compare the modified keys. This is reasonable enough. 
- pub(crate) modified_storage_keys: ModifiedKeysMap, - - pub(crate) frames_stack: AppDataFrameManagerWithHistory, H>, - - pub(crate) pre_paid_changes: HistoryRecorder, H>, - pub(crate) paid_changes: HistoryRecorder, H>, - pub(crate) initial_values: HistoryRecorder, H>, - pub(crate) returned_refunds: HistoryRecorder, H>, -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct PrecompileProcessorTestInnerState { - pub(crate) timestamp_history: HistoryRecorder, H>, -} - -/// A struct that encapsulates the state of the VM's oracles -/// The state is to be used in tests. -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct VmInstanceInnerState { - event_sink: InMemoryEventSink, - precompile_processor_state: PrecompileProcessorTestInnerState, - memory: SimpleMemory, - decommitter_state: DecommitterTestInnerState, - storage_oracle_state: StorageOracleInnerState, - local_state: VmLocalState, -} - -impl Vm { - // Dump inner state of the VM. - pub(crate) fn dump_inner_state(&self) -> VmInstanceInnerState { - let event_sink = self.state.event_sink.clone(); - let precompile_processor_state = PrecompileProcessorTestInnerState { - timestamp_history: self.state.precompiles_processor.timestamp_history.clone(), - }; - let memory = self.state.memory.clone(); - let decommitter_state = DecommitterTestInnerState { - modified_storage_keys: ModifiedKeysMap( - self.state - .decommittment_processor - .get_storage() - .borrow() - .modified_storage_keys() - .clone(), - ), - known_bytecodes: self.state.decommittment_processor.known_bytecodes.clone(), - decommitted_code_hashes: self - .state - .decommittment_processor - .get_decommitted_code_hashes_with_history() - .clone(), - }; - let storage_oracle_state = StorageOracleInnerState { - modified_storage_keys: ModifiedKeysMap( - self.state - .storage - .storage - .get_ptr() - .borrow() - .modified_storage_keys() - .clone(), - ), - frames_stack: self.state.storage.frames_stack.clone(), - pre_paid_changes: self.state.storage.pre_paid_changes.clone(), - paid_changes: self.state.storage.paid_changes.clone(), - initial_values: self.state.storage.initial_values.clone(), - returned_refunds: self.state.storage.returned_refunds.clone(), - }; - let local_state = self.state.local_state.clone(); - - VmInstanceInnerState { - event_sink, - precompile_processor_state, - memory, - decommitter_state, - storage_oracle_state, - local_state, - } - } -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/mod.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/mod.rs deleted file mode 100644 index c3cc5d8d980..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/mod.rs +++ /dev/null @@ -1,9 +0,0 @@ -pub(crate) use transaction_test_info::{ExpectedError, TransactionTestInfo, TxModifier}; -pub(crate) use vm_tester::{ - default_l1_batch, get_empty_storage, InMemoryStorageView, VmTester, VmTesterBuilder, -}; -pub(crate) use zksync_test_account::{Account, DeployContractsTx, TxType}; - -mod inner_state; -mod transaction_test_info; -mod vm_tester; diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/transaction_test_info.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/transaction_test_info.rs deleted file mode 100644 index cb81c4c5ed7..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/transaction_test_info.rs +++ /dev/null @@ -1,217 +0,0 @@ -use zksync_types::{ExecuteTransactionCommon, Transaction}; - -use crate::{ - interface::{ - CurrentExecutionState, ExecutionResult, Halt, TxRevertReason, VmExecutionMode, - 
VmExecutionResultAndLogs, VmInterface, VmInterfaceHistoryEnabled, VmRevertReason, - }, - vm_1_4_2::tests::tester::vm_tester::VmTester, -}; - -#[derive(Debug, Clone)] -pub(crate) enum TxModifier { - WrongSignatureLength, - WrongSignature, - WrongMagicValue, - WrongNonce, - NonceReused, -} - -#[derive(Debug, Clone)] -pub(crate) enum TxExpectedResult { - Rejected { error: ExpectedError }, - Processed { rollback: bool }, -} - -#[derive(Debug, Clone)] -pub(crate) struct TransactionTestInfo { - tx: Transaction, - result: TxExpectedResult, -} - -#[derive(Debug, Clone)] -pub(crate) struct ExpectedError { - pub(crate) revert_reason: TxRevertReason, - pub(crate) modifier: Option, -} - -impl From for ExpectedError { - fn from(value: TxModifier) -> Self { - let revert_reason = match value { - TxModifier::WrongSignatureLength => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Signature length is incorrect".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 29, 83, 105, 103, 110, 97, 116, 117, 114, 101, 32, - 108, 101, 110, 103, 116, 104, 32, 105, 115, 32, 105, 110, 99, 111, 114, 114, 101, 99, - 116, 0, 0, 0, - ], - }) - } - TxModifier::WrongSignature => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Account validation returned invalid magic value. Most often this means that the signature is incorrect".to_string(), - data: vec![], - }) - } - TxModifier::WrongMagicValue => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "v is neither 27 nor 28".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 22, 118, 32, 105, 115, 32, 110, 101, 105, 116, 104, - 101, 114, 32, 50, 55, 32, 110, 111, 114, 32, 50, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - }) - - } - TxModifier::WrongNonce => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Incorrect nonce".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 73, 110, 99, 111, 114, 114, 101, 99, 116, 32, 110, - 111, 110, 99, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - }) - } - TxModifier::NonceReused => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Reusing the same nonce twice".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 82, 101, 117, 115, 105, 110, 103, 32, 116, 104, - 101, 32, 115, 97, 109, 101, 32, 110, 111, 110, 99, 101, 32, 116, 119, 105, 99, 101, 0, - 0, 0, 0, - ], - }) - } - }; - - ExpectedError { - revert_reason: TxRevertReason::Halt(revert_reason), - modifier: Some(value), - } - } -} - -impl TransactionTestInfo { - pub(crate) fn new_rejected( - mut transaction: Transaction, - expected_error: ExpectedError, - ) -> Self { - transaction.common_data = match transaction.common_data { - ExecuteTransactionCommon::L2(mut data) => { - if let Some(modifier) = &expected_error.modifier { - match modifier { - 
TxModifier::WrongSignatureLength => { - data.signature = data.signature[..data.signature.len() - 20].to_vec() - } - TxModifier::WrongSignature => data.signature = vec![27u8; 65], - TxModifier::WrongMagicValue => data.signature = vec![1u8; 65], - TxModifier::WrongNonce => { - // Do not need to modify signature for nonce error - } - TxModifier::NonceReused => { - // Do not need to modify signature for nonce error - } - } - } - ExecuteTransactionCommon::L2(data) - } - _ => panic!("L1 transactions are not supported"), - }; - - Self { - tx: transaction, - result: TxExpectedResult::Rejected { - error: expected_error, - }, - } - } - - pub(crate) fn new_processed(transaction: Transaction, should_be_rollbacked: bool) -> Self { - Self { - tx: transaction, - result: TxExpectedResult::Processed { - rollback: should_be_rollbacked, - }, - } - } - - fn verify_result(&self, result: &VmExecutionResultAndLogs) { - match &self.result { - TxExpectedResult::Rejected { error } => match &result.result { - ExecutionResult::Success { .. } => { - panic!("Transaction should be reverted {:?}", self.tx.nonce()) - } - ExecutionResult::Revert { output } => match &error.revert_reason { - TxRevertReason::TxReverted(expected) => { - assert_eq!(output, expected) - } - _ => { - panic!("Error types mismatch"); - } - }, - ExecutionResult::Halt { reason } => match &error.revert_reason { - TxRevertReason::Halt(expected) => { - assert_eq!(reason, expected) - } - _ => { - panic!("Error types mismatch"); - } - }, - }, - TxExpectedResult::Processed { .. } => { - assert!(!result.result.is_failed()); - } - } - } - - fn should_rollback(&self) -> bool { - match &self.result { - TxExpectedResult::Rejected { .. } => true, - TxExpectedResult::Processed { rollback } => *rollback, - } - } -} - -impl VmTester { - pub(crate) fn execute_and_verify_txs( - &mut self, - txs: &[TransactionTestInfo], - ) -> CurrentExecutionState { - for tx_test_info in txs { - self.execute_tx_and_verify(tx_test_info.clone()); - } - self.vm.execute(VmExecutionMode::Batch); - let mut state = self.vm.get_current_execution_state(); - state.used_contract_hashes.sort(); - state - } - - pub(crate) fn execute_tx_and_verify( - &mut self, - tx_test_info: TransactionTestInfo, - ) -> VmExecutionResultAndLogs { - let inner_state_before = self.vm.dump_inner_state(); - self.vm.make_snapshot(); - self.vm.push_transaction(tx_test_info.tx.clone()); - let result = self.vm.execute(VmExecutionMode::OneTx); - tx_test_info.verify_result(&result); - if tx_test_info.should_rollback() { - self.vm.rollback_to_the_latest_snapshot(); - let inner_state_after = self.vm.dump_inner_state(); - assert_eq!( - inner_state_before, inner_state_after, - "Inner state before and after rollback should be equal" - ); - } - result - } -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/vm_tester.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/vm_tester.rs deleted file mode 100644 index 44f861f8d33..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/vm_tester.rs +++ /dev/null @@ -1,298 +0,0 @@ -use std::marker::PhantomData; - -use zksync_contracts::BaseSystemContracts; -use crate::interface::storage::{InMemoryStorage, StoragePtr, StorageView, WriteStorage}; -use zksync_types::{ - block::MiniblockHasher, - fee_model::BatchFeeInput, - get_code_key, get_is_account_key, - helpers::unix_timestamp_ms, - utils::{deployed_address_create, storage_key_for_eth_balance}, - Address, L1BatchNumber, L2ChainId, MiniblockNumber, Nonce, ProtocolVersionId, U256, -}; -use 
zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; - -use crate::{ - interface::{ - L1BatchEnv, L2Block, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmInterface, - }, - vm_1_4_2::{ - constants::BLOCK_GAS_LIMIT, - tests::{ - tester::{Account, TxType}, - utils::read_test_contract, - }, - utils::l2_blocks::load_last_l2_block, - Vm, - }, - HistoryMode, -}; - -pub(crate) type InMemoryStorageView = StorageView; - -pub(crate) struct VmTester { - pub(crate) vm: Vm, - pub(crate) storage: StoragePtr, - pub(crate) fee_account: Address, - pub(crate) deployer: Option, - pub(crate) test_contract: Option
, - pub(crate) rich_accounts: Vec, - pub(crate) custom_contracts: Vec, - _phantom: std::marker::PhantomData, -} - -impl VmTester { - pub(crate) fn deploy_test_contract(&mut self) { - let contract = read_test_contract(); - let tx = self - .deployer - .as_mut() - .expect("You have to initialize builder with deployer") - .get_deploy_tx(&contract, None, TxType::L2) - .tx; - let nonce = tx.nonce().unwrap().0.into(); - self.vm.push_transaction(tx); - self.vm.execute(VmExecutionMode::OneTx); - let deployed_address = - deployed_address_create(self.deployer.as_ref().unwrap().address, nonce); - self.test_contract = Some(deployed_address); - } - - pub(crate) fn reset_with_empty_storage(&mut self) { - self.storage = StorageView::new(get_empty_storage()).to_rc_ptr(); - self.reset_state(false); - } - - /// Reset the state of the VM to the initial state. - /// If `use_latest_l2_block` is true, then the VM will use the latest L2 block from storage, - /// otherwise it will use the first L2 block of l1 batch env - pub(crate) fn reset_state(&mut self, use_latest_l2_block: bool) { - for account in self.rich_accounts.iter_mut() { - account.nonce = Nonce(0); - make_account_rich(self.storage.clone(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(self.storage.clone(), deployer); - } - - if !self.custom_contracts.is_empty() { - println!("Inserting custom contracts is not yet supported") - // `insert_contracts(&mut self.storage, &self.custom_contracts);` - } - - let mut l1_batch = self.vm.batch_env.clone(); - if use_latest_l2_block { - let last_l2_block = load_last_l2_block(self.storage.clone()).unwrap_or(L2Block { - number: 0, - timestamp: 0, - hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - }); - l1_batch.first_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: std::cmp::max(last_l2_block.timestamp + 1, l1_batch.timestamp), - prev_block_hash: last_l2_block.hash, - max_virtual_blocks_to_create: 1, - }; - } - - let vm = Vm::new(l1_batch, self.vm.system_env.clone(), self.storage.clone()); - - if self.test_contract.is_some() { - self.deploy_test_contract(); - } - - self.vm = vm; - } -} - -pub(crate) type ContractsToDeploy = (Vec, Address, bool); - -pub(crate) struct VmTesterBuilder { - storage: Option, - l1_batch_env: Option, - system_env: SystemEnv, - deployer: Option, - rich_accounts: Vec, - custom_contracts: Vec, - _phantom: PhantomData, -} - -impl Clone for VmTesterBuilder { - fn clone(&self) -> Self { - Self { - storage: None, - l1_batch_env: self.l1_batch_env.clone(), - system_env: self.system_env.clone(), - deployer: self.deployer.clone(), - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - _phantom: PhantomData, - } - } -} - -#[allow(dead_code)] -impl VmTesterBuilder { - pub(crate) fn new(_: H) -> Self { - Self { - storage: None, - l1_batch_env: None, - system_env: SystemEnv { - zk_porter_available: false, - version: ProtocolVersionId::latest(), - base_system_smart_contracts: BaseSystemContracts::playground(), - bootloader_gas_limit: BLOCK_GAS_LIMIT, - execution_mode: TxExecutionMode::VerifyExecute, - default_validation_computational_gas_limit: BLOCK_GAS_LIMIT, - chain_id: L2ChainId::from(270), - }, - deployer: None, - rich_accounts: vec![], - custom_contracts: vec![], - _phantom: PhantomData, - } - } - - pub(crate) fn with_l1_batch_env(mut self, l1_batch_env: L1BatchEnv) -> Self { - self.l1_batch_env = Some(l1_batch_env); - self - } - - pub(crate) fn with_system_env(mut self, system_env: SystemEnv) -> 
Self { - self.system_env = system_env; - self - } - - pub(crate) fn with_storage(mut self, storage: InMemoryStorage) -> Self { - self.storage = Some(storage); - self - } - - pub(crate) fn with_base_system_smart_contracts( - mut self, - base_system_smart_contracts: BaseSystemContracts, - ) -> Self { - self.system_env.base_system_smart_contracts = base_system_smart_contracts; - self - } - - pub(crate) fn with_bootloader_gas_limit(mut self, gas_limit: u32) -> Self { - self.system_env.bootloader_gas_limit = gas_limit; - self - } - - pub(crate) fn with_execution_mode(mut self, execution_mode: TxExecutionMode) -> Self { - self.system_env.execution_mode = execution_mode; - self - } - - pub(crate) fn with_empty_in_memory_storage(mut self) -> Self { - self.storage = Some(get_empty_storage()); - self - } - - pub(crate) fn with_random_rich_accounts(mut self, number: u32) -> Self { - for _ in 0..number { - let account = Account::random(); - self.rich_accounts.push(account); - } - self - } - - pub(crate) fn with_rich_accounts(mut self, accounts: Vec) -> Self { - self.rich_accounts.extend(accounts); - self - } - - pub(crate) fn with_deployer(mut self) -> Self { - let deployer = Account::random(); - self.deployer = Some(deployer); - self - } - - pub(crate) fn with_custom_contracts(mut self, contracts: Vec) -> Self { - self.custom_contracts = contracts; - self - } - - pub(crate) fn build(self) -> VmTester { - let l1_batch_env = self - .l1_batch_env - .unwrap_or_else(|| default_l1_batch(L1BatchNumber(1))); - - let mut raw_storage = self.storage.unwrap_or_else(get_empty_storage); - insert_contracts(&mut raw_storage, &self.custom_contracts); - let storage_ptr = StorageView::new(raw_storage).to_rc_ptr(); - for account in self.rich_accounts.iter() { - make_account_rich(storage_ptr.clone(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(storage_ptr.clone(), deployer); - } - let fee_account = l1_batch_env.fee_account; - - let vm = Vm::new(l1_batch_env, self.system_env, storage_ptr.clone()); - - VmTester { - vm, - storage: storage_ptr, - fee_account, - deployer: self.deployer, - test_contract: None, - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - _phantom: PhantomData, - } - } -} - -pub(crate) fn default_l1_batch(number: L1BatchNumber) -> L1BatchEnv { - let timestamp = unix_timestamp_ms(); - L1BatchEnv { - previous_batch_hash: None, - number, - timestamp, - fee_input: BatchFeeInput::l1_pegged( - 50_000_000_000, // 50 gwei - 250_000_000, // 0.25 gwei - ), - fee_account: Address::random(), - enforced_base_fee: None, - first_l2_block: L2BlockEnv { - number: 1, - timestamp, - prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 100, - }, - } -} - -pub(crate) fn make_account_rich(storage: StoragePtr, account: &Account) { - let key = storage_key_for_eth_balance(&account.address); - storage - .as_ref() - .borrow_mut() - .set_value(key, u256_to_h256(U256::from(10u64.pow(19)))); -} - -pub(crate) fn get_empty_storage() -> InMemoryStorage { - InMemoryStorage::with_system_contracts(hash_bytecode) -} - -// Inserts the contracts into the test environment, bypassing the -// deployer system contract. Besides the reference to storage -// it accepts a `contracts` tuple of information about the contract -// and whether or not it is an account. 
-fn insert_contracts(raw_storage: &mut InMemoryStorage, contracts: &[ContractsToDeploy]) { - for (contract, address, is_account) in contracts { - let deployer_code_key = get_code_key(address); - raw_storage.set_value(deployer_code_key, hash_bytecode(contract)); - - if *is_account { - let is_account_key = get_is_account_key(address); - raw_storage.set_value(is_account_key, u256_to_h256(1_u32.into())); - } - - raw_storage.store_factory_dep(hash_bytecode(contract), contract.clone()); - } -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/utils.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/utils.rs deleted file mode 100644 index 5655e90fb4e..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/utils.rs +++ /dev/null @@ -1,121 +0,0 @@ -use ethabi::Contract; -use once_cell::sync::Lazy; -use zksync_contracts::{ - load_contract, read_bytecode, read_zbin_bytecode, BaseSystemContracts, SystemContractCode, -}; -use crate::interface::storage::{StoragePtr, WriteStorage}; -use zksync_types::{ - utils::storage_key_for_standard_token_balance, AccountTreeId, Address, StorageKey, H256, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use crate::vm_1_4_2::{ - tests::tester::InMemoryStorageView, types::internals::ZkSyncVmState, HistoryMode, -}; - -pub(crate) static BASE_SYSTEM_CONTRACTS: Lazy = - Lazy::new(BaseSystemContracts::load_from_disk); - -// Probably make it a part of vm tester -pub(crate) fn verify_required_storage( - state: &ZkSyncVmState, - required_values: Vec<(H256, StorageKey)>, -) { - for (required_value, key) in required_values { - let current_value = state.storage.storage.read_from_storage(&key); - - assert_eq!( - u256_to_h256(current_value), - required_value, - "Invalid value at key {key:?}" - ); - } -} - -pub(crate) fn verify_required_memory( - state: &ZkSyncVmState, - required_values: Vec<(U256, u32, u32)>, -) { - for (required_value, memory_page, cell) in required_values { - let current_value = state - .memory - .read_slot(memory_page as usize, cell as usize) - .value; - assert_eq!(current_value, required_value); - } -} - -pub(crate) fn get_balance( - token_id: AccountTreeId, - account: &Address, - main_storage: StoragePtr, -) -> U256 { - let key = storage_key_for_standard_token_balance(token_id, account); - h256_to_u256(main_storage.borrow_mut().read_value(&key)) -} - -pub(crate) fn read_test_contract() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json") -} - -pub(crate) fn get_bootloader(test: &str) -> SystemContractCode { - let bootloader_code = read_zbin_bytecode(format!( - "contracts/system-contracts/bootloader/tests/artifacts/{}.yul.zbin", - test - )); - - let bootloader_hash = hash_bytecode(&bootloader_code); - SystemContractCode { - code: bytes_to_be_words(bootloader_code), - hash: bootloader_hash, - } -} - -pub(crate) fn read_nonce_holder_tester() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/custom-account/nonce-holder-test.sol/NonceHolderTest.json") -} - -pub(crate) fn read_error_contract() -> Vec { - read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ) -} - -pub(crate) fn get_execute_error_calldata() -> Vec { - let test_contract = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ); - - let function = test_contract.function("require_short").unwrap(); - - function - .encode_input(&[]) - .expect("failed to encode 
parameters") -} - -pub(crate) fn read_many_owners_custom_account_contract() -> (Vec, Contract) { - let path = "etc/contracts-test-data/artifacts-zk/contracts/custom-account/many-owners-custom-account.sol/ManyOwnersCustomAccount.json"; - (read_bytecode(path), load_contract(path)) -} - -pub(crate) fn read_max_depth_contract() -> Vec { - read_zbin_bytecode( - "core/tests/ts-integration/contracts/zkasm/artifacts/deep_stak.zkasm/deep_stak.zkasm.zbin", - ) -} - -pub(crate) fn read_precompiles_contract() -> Vec { - read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/precompiles/precompiles.sol/Precompiles.json", - ) -} - -pub(crate) fn read_complex_upgrade() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json") -} - -pub(crate) fn get_complex_upgrade_abi() -> Contract { - load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json" - ) -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tracers/pubdata_tracer.rs b/core/lib/multivm/src/versions/vm_1_4_2/tracers/pubdata_tracer.rs index ffe65b5e050..6c4f737f9e9 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/tracers/pubdata_tracer.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/tracers/pubdata_tracer.rs @@ -10,6 +10,7 @@ use zksync_utils::{h256_to_u256, u256_to_bytes_be, u256_to_h256}; use crate::{ interface::{ + pubdata::L1MessengerL2ToL1Log, storage::{StoragePtr, WriteStorage}, tracer::{TracerExecutionStatus, TracerExecutionStopReason}, L1BatchEnv, VmEvent, VmExecutionMode, @@ -17,7 +18,7 @@ use crate::{ tracers::dynamic::vm_1_4_1::DynTracer, utils::events::{ extract_bytecode_publication_requests_from_l1_messenger, - extract_l2tol1logs_from_l1_messenger, L1MessengerL2ToL1Log, + extract_l2tol1logs_from_l1_messenger, }, vm_1_4_2::{ bootloader_state::{utils::apply_pubdata_to_memory, BootloaderState}, diff --git a/core/lib/multivm/src/versions/vm_1_4_2/types/internals/pubdata.rs b/core/lib/multivm/src/versions/vm_1_4_2/types/internals/pubdata.rs index d07732ae435..c1ca93152a0 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/types/internals/pubdata.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/types/internals/pubdata.rs @@ -1,6 +1,6 @@ use zksync_types::writes::{compress_state_diffs, StateDiffRecord}; -use crate::utils::events::L1MessengerL2ToL1Log; +use crate::interface::pubdata::L1MessengerL2ToL1Log; /// Struct based on which the pubdata blob is formed #[derive(Debug, Clone, Default)] diff --git a/core/lib/multivm/src/versions/vm_1_4_2/vm.rs b/core/lib/multivm/src/versions/vm_1_4_2/vm.rs index d6e1fbc68a8..e7c8e7acdd9 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/vm.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/vm.rs @@ -1,19 +1,19 @@ -use std::mem; +use std::{mem, rc::Rc}; use circuit_sequencer_api_1_4_2::sort_storage_access::sort_storage_access_queries; use zksync_types::{ l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, Transaction, }; +use zksync_vm_interface::{pubdata::PubdataBuilder, InspectExecutionMode}; use crate::{ glue::GlueInto, interface::{ storage::{StoragePtr, WriteStorage}, BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, - FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, + FinishedL1Batch, L1BatchEnv, L2BlockEnv, PushTransactionResult, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, - VmMemoryMetrics, }, utils::events::extract_l2tol1logs_from_l1_messenger, 
vm_1_4_2::{ @@ -84,18 +84,23 @@ impl Vm { impl VmInterface for Vm { type TracerDispatcher = TracerDispatcher; - /// Push tx into memory for the future execution - fn push_transaction(&mut self, tx: Transaction) { + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult<'_> { self.push_transaction_with_compression(tx, true); + PushTransactionResult { + compressed_bytecodes: self + .bootloader_state + .get_last_tx_compressed_bytecodes() + .into(), + } } /// Execute VM with custom tracers. fn inspect( &mut self, tracer: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { - self.inspect_inner(mem::take(tracer), execution_mode, None) + self.inspect_inner(mem::take(tracer), execution_mode.into(), None) } fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { @@ -126,12 +131,8 @@ impl VmInterface for Vm { } } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - self.record_vm_memory_metrics_inner() - } - - fn finish_batch(&mut self) -> FinishedL1Batch { - let result = self.inspect(&mut TracerDispatcher::default(), VmExecutionMode::Batch); + fn finish_batch(&mut self, _pubdata_builder: Rc) -> FinishedL1Batch { + let result = self.inspect_inner(TracerDispatcher::default(), VmExecutionMode::Batch, None); let execution_state = self.get_current_execution_state(); let bootloader_memory = self.bootloader_state.bootloader_memory(); FinishedL1Batch { diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/utils.rs index 1a1c620c2b2..c97d3ff30e4 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/utils.rs @@ -167,8 +167,8 @@ pub(super) fn assemble_tx_meta(execution_mode: TxExecutionMode, execute_tx: bool // Set 0 byte (execution mode) output[0] = match execution_mode { TxExecutionMode::VerifyExecute => 0x00, - TxExecutionMode::EstimateFee { .. } => 0x00, - TxExecutionMode::EthCall { .. } => 0x02, + TxExecutionMode::EstimateFee => 0x00, + TxExecutionMode::EthCall => 0x02, }; // Set 31 byte (marker for tx execution) diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/implementation/execution.rs b/core/lib/multivm/src/versions/vm_boojum_integration/implementation/execution.rs index 79669eddd56..b8b939f8673 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/implementation/execution.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/implementation/execution.rs @@ -93,6 +93,7 @@ impl Vm { logs, statistics, refunds, + new_known_factory_deps: None, }; (stop_reason, result) diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/implementation/statistics.rs b/core/lib/multivm/src/versions/vm_boojum_integration/implementation/statistics.rs index 46f8bc2f400..015d5acd340 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/implementation/statistics.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/implementation/statistics.rs @@ -57,7 +57,7 @@ impl Vm { } /// Returns the info about all oracles' sizes. 
- pub(crate) fn record_vm_memory_metrics_inner(&self) -> VmMemoryMetrics { + pub(crate) fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { VmMemoryMetrics { event_sink_inner: self.state.event_sink.get_size(), event_sink_history: self.state.event_sink.get_history_size(), diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/bootloader.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/bootloader.rs deleted file mode 100644 index 57229abb097..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/bootloader.rs +++ /dev/null @@ -1,56 +0,0 @@ -use zksync_types::U256; - -use crate::{ - interface::{ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - constants::BOOTLOADER_HEAP_PAGE, - tests::{ - tester::VmTesterBuilder, - utils::{get_bootloader, verify_required_memory, BASE_SYSTEM_CONTRACTS}, - }, - HistoryEnabled, - }, -}; - -#[test] -fn test_dummy_bootloader() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!result.result.is_failed()); - - let correct_first_cell = U256::from_str_radix("123123123", 16).unwrap(); - verify_required_memory( - &vm.vm.state, - vec![(correct_first_cell, BOOTLOADER_HEAP_PAGE, 0)], - ); -} - -#[test] -fn test_bootloader_out_of_gas() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_gas_limit(10) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let res = vm.vm.execute(VmExecutionMode::Batch); - - assert_matches!( - res.result, - ExecutionResult::Halt { - reason: Halt::BootloaderOutOfGas - } - ); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/bytecode_publishing.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/bytecode_publishing.rs deleted file mode 100644 index ad1b0f26036..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/bytecode_publishing.rs +++ /dev/null @@ -1,43 +0,0 @@ -use zksync_types::event::extract_long_l2_to_l1_messages; -use zksync_utils::bytecode::compress_bytecode; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - HistoryEnabled, - }, -}; - -#[test] -fn test_bytecode_publishing() { - // In this test, we aim to ensure that the contents of the compressed bytecodes - // are included as part of the L2->L1 long messages - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let compressed_bytecode = compress_bytecode(&counter).unwrap(); - - let DeployContractsTx { tx, .. 
} = account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - - let state = vm.vm.get_current_execution_state(); - let long_messages = extract_long_l2_to_l1_messages(&state.events); - assert!( - long_messages.contains(&compressed_bytecode), - "Bytecode not published" - ); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/call_tracer.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/call_tracer.rs deleted file mode 100644 index e9df4fa80ff..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/call_tracer.rs +++ /dev/null @@ -1,92 +0,0 @@ -use std::sync::Arc; - -use once_cell::sync::OnceCell; -use zksync_types::{Address, Execute}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - tracers::CallTracer, - vm_boojum_integration::{ - constants::BLOCK_GAS_LIMIT, - tests::{ - tester::VmTesterBuilder, - utils::{read_max_depth_contract, read_test_contract}, - }, - HistoryEnabled, ToTracerPointer, - }, -}; - -// This test is ultra slow, so it's ignored by default. -#[test] -#[ignore] -fn test_max_depth() { - let contract = read_max_depth_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contract, address, true)]) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: vec![], - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result = Arc::new(OnceCell::new()); - let call_tracer = CallTracer::new(result.clone()).into_tracer_pointer(); - vm.vm.push_transaction(tx); - let res = vm.vm.inspect(call_tracer.into(), VmExecutionMode::OneTx); - assert!(result.get().is_some()); - assert!(res.result.is_failed()); -} - -#[test] -fn test_basic_behavior() { - let contract = read_test_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contract, address, true)]) - .build(); - - let increment_by_6_calldata = - "7cf5dab00000000000000000000000000000000000000000000000000000000000000006"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: hex::decode(increment_by_6_calldata).unwrap(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result = Arc::new(OnceCell::new()); - let call_tracer = CallTracer::new(result.clone()).into_tracer_pointer(); - vm.vm.push_transaction(tx); - let res = vm.vm.inspect(call_tracer.into(), VmExecutionMode::OneTx); - - let call_tracer_result = result.get().unwrap(); - - assert_eq!(call_tracer_result.len(), 1); - // Expect that there are plenty of subcalls underneath.
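For context, the `CallTracer` exercised here collects a tree of calls per transaction, and the assertions only inspect the top-level entry and its direct children. A minimal self-contained sketch of that shape and a recursive counter (the `Call` struct below is a simplified stand-in, not the actual `multivm` call type):

#[derive(Debug, Default)]
struct Call {
    calls: Vec<Call>, // direct subcalls, each of which may recurse further
}

// Counts every call in the tree, including the root.
fn total_calls(call: &Call) -> usize {
    1 + call.calls.iter().map(total_calls).sum::<usize>()
}

fn main() {
    // A root call with 12 direct subcalls, mirroring the `subcall.len() > 10` check.
    let root = Call {
        calls: (0..12).map(|_| Call::default()).collect(),
    };
    assert!(root.calls.len() > 10);
    assert_eq!(total_calls(&root), 13);
}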
- let subcall = &call_tracer_result[0].calls; - assert!(subcall.len() > 10); - assert!(!res.result.is_failed()); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/circuits.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/circuits.rs deleted file mode 100644 index b0cffa7d3c8..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/circuits.rs +++ /dev/null @@ -1,66 +0,0 @@ -use circuit_sequencer_api_1_4_0::geometry_config::get_geometry_config; -use zksync_types::{Address, Execute, U256}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - constants::BLOCK_GAS_LIMIT, tests::tester::VmTesterBuilder, HistoryEnabled, - }, -}; - -// Checks that estimated number of circuits for simple transfer doesn't differ much -// from hardcoded expected value. -#[test] -fn test_circuits() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Address::random(), - calldata: Vec::new(), - value: U256::from(1u8), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let res = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let statistic = res.statistics.circuit_statistic; - // Check `circuit_statistic`. - assert!(statistic.main_vm > f32::EPSILON); - assert!(statistic.ram_permutation > f32::EPSILON); - assert!(statistic.storage_application > f32::EPSILON); - assert!(statistic.storage_sorter > f32::EPSILON); - assert!(statistic.code_decommitter > f32::EPSILON); - assert!(statistic.code_decommitter_sorter > f32::EPSILON); - assert!(statistic.log_demuxer > f32::EPSILON); - assert!(statistic.events_sorter > f32::EPSILON); - assert!(statistic.keccak256 > f32::EPSILON); - // Single `ecrecover` should be used to validate tx signature. - assert_eq!( - statistic.ecrecover, - 1.0 / get_geometry_config().cycles_per_ecrecover_circuit as f32 - ); - // `sha256` shouldn't be used. 
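The tolerance check at the end of this circuits test compares the estimated circuit usage against a hardcoded expectation using a relative delta rather than exact equality. A minimal sketch of that pattern (the 10% threshold and the expected constant come from the test itself; `relative_delta` is a hypothetical helper, not part of the codebase):

// Relative deviation of `actual` from `expected`.
fn relative_delta(actual: f32, expected: f32) -> f32 {
    (actual - expected) / expected
}

fn main() {
    const EXPECTED_CIRCUITS_USED: f32 = 4.6363;
    let actual = 4.7; // e.g. the value returned by `statistic.total_f32()`
    let delta = relative_delta(actual, EXPECTED_CIRCUITS_USED);
    // Fail only when the estimate drifts by more than 10% in either direction.
    assert!(delta.abs() <= 0.1, "estimate off by {:.2}%", delta * 100.0);
}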
- assert_eq!(statistic.sha256, 0.0); - - const EXPECTED_CIRCUITS_USED: f32 = 4.6363; - let delta = (statistic.total_f32() - EXPECTED_CIRCUITS_USED) / EXPECTED_CIRCUITS_USED; - - if delta.abs() > 0.1 { - panic!( - "Estimation differs from expected result by too much: {}%, expected value: {}, got {}", - delta * 100.0, - EXPECTED_CIRCUITS_USED, - statistic.total_f32(), - ); - } -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/default_aa.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/default_aa.rs deleted file mode 100644 index a8c20cfebc1..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/default_aa.rs +++ /dev/null @@ -1,76 +0,0 @@ -use zksync_system_constants::L2_ETH_TOKEN_ADDRESS; -use zksync_types::{ - get_code_key, get_known_code_key, get_nonce_key, - system_contracts::{DEPLOYMENT_NONCE_INCREMENT, TX_NONCE_INCREMENT}, - AccountTreeId, U256, -}; -use zksync_utils::u256_to_h256; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::{get_balance, read_test_contract, verify_required_storage}, - }, - HistoryEnabled, - }, -}; - -#[test] -fn test_default_aa_interaction() { - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let DeployContractsTx { - tx, - bytecode_hash, - address, - } = account.get_deploy_tx(&counter, None, TxType::L2); - let maximal_fee = tx.gas_limit() * vm.vm.batch_env.base_fee(); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - vm.vm.get_current_execution_state(); - - // Both deployment and ordinary nonce should be incremented by one. - let account_nonce_key = get_nonce_key(&account.address); - let expected_nonce = TX_NONCE_INCREMENT + DEPLOYMENT_NONCE_INCREMENT; - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&bytecode_hash); - - // The contract should be deployed successfully. 
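The `expected_slots` list assembled below is checked with `verify_required_storage`, which simply reads each key from VM storage and compares it against the expected value. A minimal self-contained sketch of that helper, with plain strings standing in for `StorageKey`/`H256` and a `HashMap` standing in for VM storage:

use std::collections::HashMap;

// Reads each required key from the state and compares; a missing key
// reads as the default (zero) value, as in the real storage oracle.
fn verify_required_storage(state: &HashMap<&str, u64>, required: &[(&str, u64)]) {
    for (key, expected) in required {
        let actual = state.get(key).copied().unwrap_or_default();
        assert_eq!(actual, *expected, "invalid value at key {key:?}");
    }
}

fn main() {
    let mut state = HashMap::new();
    state.insert("account_nonce", 1);
    state.insert("known_code", 1);

    // Mirrors the `expected_slots` pattern used by the deleted tests.
    verify_required_storage(&state, &[("account_nonce", 1), ("known_code", 1)]);
}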
- let account_code_key = get_code_key(&address); - - let expected_slots = vec![ - (u256_to_h256(expected_nonce), account_nonce_key), - (u256_to_h256(U256::from(1u32)), known_codes_key), - (bytecode_hash, account_code_key), - ]; - - verify_required_storage(&vm.vm.state, expected_slots); - - let expected_fee = maximal_fee - - U256::from(result.refunds.gas_refunded) * U256::from(vm.vm.batch_env.base_fee()); - let operator_balance = get_balance( - AccountTreeId::new(L2_ETH_TOKEN_ADDRESS), - &vm.fee_account, - vm.vm.state.storage.storage.get_ptr(), - ); - - assert_eq!( - operator_balance, expected_fee, - "Operator did not receive his fee" - ); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/gas_limit.rs deleted file mode 100644 index 637fd94c1c8..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/gas_limit.rs +++ /dev/null @@ -1,45 +0,0 @@ -use zksync_types::{fee::Fee, Execute}; - -use crate::{ - interface::{TxExecutionMode, VmInterface}, - vm_boojum_integration::{ - constants::{BOOTLOADER_HEAP_PAGE, TX_DESCRIPTION_OFFSET, TX_GAS_LIMIT_OFFSET}, - tests::tester::VmTesterBuilder, - HistoryDisabled, - }, -}; - -/// Checks that `TX_GAS_LIMIT_OFFSET` constant is correct. -#[test] -fn test_tx_gas_limit_offset() { - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let gas_limit = 9999.into(); - let tx = vm.rich_accounts[0].get_l2_tx_for_execute( - Execute { - contract_address: Some(Default::default()), - ..Default::default() - }, - Some(Fee { - gas_limit, - ..Default::default() - }), - ); - - vm.vm.push_transaction(tx); - - let gas_limit_from_memory = vm - .vm - .state - .memory - .read_slot( - BOOTLOADER_HEAP_PAGE as usize, - TX_DESCRIPTION_OFFSET + TX_GAS_LIMIT_OFFSET, - ) - .value; - assert_eq!(gas_limit_from_memory, gas_limit); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/get_used_contracts.rs deleted file mode 100644 index 658bcd75b05..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/get_used_contracts.rs +++ /dev/null @@ -1,109 +0,0 @@ -use std::collections::{HashMap, HashSet}; - -use itertools::Itertools; -use crate::interface::storage::WriteStorage; -use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; -use zksync_test_account::Account; -use zksync_types::{Execute, U256}; -use zksync_utils::{bytecode::hash_bytecode, h256_to_u256}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - tests::{ - tester::{TxType, VmTesterBuilder}, - utils::{read_test_contract, BASE_SYSTEM_CONTRACTS}, - }, - HistoryDisabled, Vm, - }, - HistoryMode, -}; - -#[test] -fn test_get_used_contracts() { - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - assert!(known_bytecodes_without_aa_code(&vm.vm).is_empty()); - - // create and push and execute some not-empty factory deps transaction with success status - // to check that get_used_contracts() updates - let contract_code = read_test_contract(); - let mut account = Account::random(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 0 }); - vm.vm.push_transaction(tx.tx.clone()); - let 
result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - assert!(vm - .vm - .get_used_contracts() - .contains(&h256_to_u256(tx.bytecode_hash))); - - // Note: Default_AA will be in the list of used contracts if l2 tx is used - assert_eq!( - vm.vm - .get_used_contracts() - .into_iter() - .collect::>(), - known_bytecodes_without_aa_code(&vm.vm) - .keys() - .cloned() - .collect::>() - ); - - // create push and execute some non-empty factory deps transaction that fails - // (known_bytecodes will be updated but we expect get_used_contracts() to not be updated) - - let calldata = [1, 2, 3]; - let big_calldata: Vec = calldata - .iter() - .cycle() - .take(calldata.len() * 1024) - .cloned() - .collect(); - let account2 = Account::random(); - let tx2 = account2.get_l1_tx( - Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, - calldata: big_calldata, - value: Default::default(), - factory_deps: Some(vec![vec![1; 32]]), - }, - 1, - ); - - vm.vm.push_transaction(tx2.clone()); - - let res2 = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(res2.result.is_failed()); - - for factory_dep in tx2.execute.factory_deps.unwrap() { - let hash = hash_bytecode(&factory_dep); - let hash_to_u256 = h256_to_u256(hash); - assert!(known_bytecodes_without_aa_code(&vm.vm) - .keys() - .contains(&hash_to_u256)); - assert!(!vm.vm.get_used_contracts().contains(&hash_to_u256)); - } -} - -fn known_bytecodes_without_aa_code( - vm: &Vm, -) -> HashMap> { - let mut known_bytecodes_without_aa_code = vm - .state - .decommittment_processor - .known_bytecodes - .inner() - .clone(); - - known_bytecodes_without_aa_code - .remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash)) - .unwrap(); - - known_bytecodes_without_aa_code -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/invalid_bytecode.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/invalid_bytecode.rs deleted file mode 100644 index 079e6d61b6c..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/invalid_bytecode.rs +++ /dev/null @@ -1,120 +0,0 @@ -use zksync_types::H256; -use zksync_utils::h256_to_u256; - -use crate::vm_boojum_integration::tests::tester::VmTesterBuilder; -use crate::vm_boojum_integration::types::inputs::system_env::TxExecutionMode; -use crate::vm_boojum_integration::{HistoryEnabled, TxRevertReason}; - -// TODO this test requires a lot of hacks for bypassing the bytecode checks in the VM. -// Port it later, it's not significant. 
for now - -#[test] -fn test_invalid_bytecode() { - let mut vm_builder = VmTesterBuilder::new(HistoryEnabled) - .with_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1); - let mut storage = vm_builder.take_storage(); - let mut vm = vm_builder.build(&mut storage); - - let block_gas_per_pubdata = vm_test_env - .block_context - .context - .block_gas_price_per_pubdata(); - - let mut test_vm_with_custom_bytecode_hash = - |bytecode_hash: H256, expected_revert_reason: Option| { - let mut oracle_tools = - OracleTools::new(vm_test_env.storage_ptr.as_mut(), HistoryEnabled); - - let (encoded_tx, predefined_overhead) = get_l1_tx_with_custom_bytecode_hash( - h256_to_u256(bytecode_hash), - block_gas_per_pubdata as u32, - ); - - run_vm_with_custom_factory_deps( - &mut oracle_tools, - vm_test_env.block_context.context, - &vm_test_env.block_properties, - encoded_tx, - predefined_overhead, - expected_revert_reason, - ); - }; - - let failed_to_mark_factory_deps = |msg: &str, data: Vec| { - TxRevertReason::FailedToMarkFactoryDependencies(VmRevertReason::General { - msg: msg.to_string(), - data, - }) - }; - - // Here we provide the correctly-formatted bytecode hash of - // odd length, so it should work. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - None, - ); - - // Here we provide correctly formatted bytecode of even length, so - // it should fail. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Code length in words must be odd", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 67, 111, 100, 101, 32, 108, 101, 110, - 103, 116, 104, 32, 105, 110, 32, 119, 111, 114, 100, 115, 32, 109, 117, 115, 116, - 32, 98, 101, 32, 111, 100, 100, - ], - )), - ); - - // Here we provide incorrectly formatted bytecode of odd length, so - // it should fail. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Incorrectly formatted bytecodeHash", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, - 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, - 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - )), - ); - - // Here we provide incorrectly formatted bytecode of odd length, so - // it should fail. 
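The accepted and rejected cases in this test follow from the bytecode-hash encoding the VM enforces: byte 0 is a version marker (1 in the inputs above), byte 1 must be zero, and bytes 2..4 encode the bytecode length in 32-byte words, which must be odd. A minimal sketch of such a validator (the field layout is inferred from the test vectors above; this is not the actual system-contract code):

#[derive(Debug, PartialEq)]
enum BytecodeHashError {
    IncorrectlyFormatted,
    EvenLengthInWords,
}

// Validates a 32-byte bytecode hash header as exercised by the test above.
fn validate_bytecode_hash(hash: &[u8; 32]) -> Result<(), BytecodeHashError> {
    if hash[0] != 1 || hash[1] != 0 {
        return Err(BytecodeHashError::IncorrectlyFormatted);
    }
    let len_in_words = u16::from_be_bytes([hash[2], hash[3]]);
    if len_in_words % 2 == 0 {
        return Err(BytecodeHashError::EvenLengthInWords);
    }
    Ok(())
}

fn main() {
    let mut ok = [0u8; 32];
    ok[0] = 1;
    ok[3] = 1; // length of 1 word: odd, so valid
    assert_eq!(validate_bytecode_hash(&ok), Ok(()));

    let mut even = ok;
    even[2] = 2;
    even[3] = 2; // 0x0202 words: even length, rejected
    assert_eq!(validate_bytecode_hash(&even), Err(BytecodeHashError::EvenLengthInWords));

    let mut bad_marker = ok;
    bad_marker[1] = 1; // byte 1 must be zero
    assert_eq!(validate_bytecode_hash(&bad_marker), Err(BytecodeHashError::IncorrectlyFormatted));

    let mut bad_version = ok;
    bad_version[0] = 2; // unknown version byte
    assert_eq!(validate_bytecode_hash(&bad_version), Err(BytecodeHashError::IncorrectlyFormatted));
}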
- test_vm_with_custom_bytecode_hash( - H256([ - 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Incorrectly formatted bytecodeHash", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, - 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, - 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - )), - ); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/is_write_initial.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/is_write_initial.rs deleted file mode 100644 index 67901490edf..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/is_write_initial.rs +++ /dev/null @@ -1,48 +0,0 @@ -use crate::interface::storage::ReadStorage; -use zksync_types::get_nonce_key; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - tests::{ - tester::{Account, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - HistoryDisabled, - }, -}; - -#[test] -fn test_is_write_initial_behaviour() { - // In this test, we check the result of `is_write_initial` at different stages. - // The main idea is to check that the `is_write_initial` storage check uses the correct cache for initial writes and doesn't - // mix it up with the repeated writes during a single batch execution. - - let mut account = Account::random(); - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let nonce_key = get_nonce_key(&account.address); - // Check that the next write to the nonce key will be initial. - assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); - - let contract_code = read_test_contract(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L2).tx; - - vm.vm.push_transaction(tx); - vm.vm.execute(VmExecutionMode::OneTx); - - // Check that `is_write_initial` still returns true for the nonce key.
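The assertion that follows relies on `is_write_initial` answering from the committed state underneath the batch, not from the batch's own write cache, so a key first written inside the current batch still reports as an initial write. A minimal self-contained sketch of that separation (a toy storage view; the real `StorageView` is considerably richer):

use std::collections::{HashMap, HashSet};

// Toy storage view: committed state below, in-batch writes on top.
struct StorageView {
    committed: HashSet<&'static str>, // keys that exist in committed state
    batch_writes: HashMap<&'static str, u64>,
}

impl StorageView {
    // Initial-ness depends only on committed state, never on batch writes.
    fn is_write_initial(&self, key: &str) -> bool {
        !self.committed.contains(key)
    }

    fn write(&mut self, key: &'static str, value: u64) {
        self.batch_writes.insert(key, value);
    }
}

fn main() {
    let mut view = StorageView {
        committed: HashSet::new(),
        batch_writes: HashMap::new(),
    };

    let nonce_key = "nonce(account)";
    assert!(view.is_write_initial(nonce_key));

    // Executing a tx writes the nonce inside the batch...
    view.write(nonce_key, 1);

    // ...but the key still counts as an initial write for this batch.
    assert!(view.is_write_initial(nonce_key));
}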
- assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/l1_tx_execution.rs deleted file mode 100644 index b547f346d28..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/l1_tx_execution.rs +++ /dev/null @@ -1,139 +0,0 @@ -use zksync_system_constants::BOOTLOADER_ADDRESS; -use zksync_types::{ - get_code_key, get_known_code_key, - l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, - storage_writes_deduplicator::StorageWritesDeduplicator, - U256, -}; -use zksync_utils::u256_to_h256; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - tests::{ - tester::{TxType, VmTesterBuilder}, - utils::{read_test_contract, verify_required_storage, BASE_SYSTEM_CONTRACTS}, - }, - types::internals::TransactionData, - HistoryEnabled, - }, -}; - -#[test] -fn test_l1_tx_execution() { - // In this test, we try to execute a contract deployment from L1 - // Here instead of marking code hash via the bootloader means, we will be - // using L1->L2 communication, the same it would likely be done during the priority mode. - - // There are always at least 7 initial writes here, because we pay fees from l1: - // - totalSupply of ETH token - // - balance of the refund recipient - // - balance of the bootloader - // - tx_rolling hash - // - rolling hash of L2->L1 logs - // - transaction number in block counter - // - L2->L1 log counter in L1Messenger - - // TODO(PLA-537): right now we are using 4 slots instead of 7 due to 0 fee for transaction. - let basic_initial_writes = 4; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let contract_code = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let deploy_tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 1 }); - let tx_data: TransactionData = deploy_tx.tx.clone().into(); - - let required_l2_to_l1_logs: Vec<_> = vec![L2ToL1Log { - shard_id: 0, - is_service: true, - tx_number_in_block: 0, - sender: BOOTLOADER_ADDRESS, - key: tx_data.tx_hash(0.into()), - value: u256_to_h256(U256::from(1u32)), - }] - .into_iter() - .map(UserL2ToL1Log) - .collect(); - - vm.vm.push_transaction(deploy_tx.tx.clone()); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&deploy_tx.bytecode_hash); - - // The contract should be deployed successfully. 
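The `initial_storage_writes`/`repeated_storage_writes` counters used further below come from deduplicating the raw storage logs: multiple writes to one key within the batch collapse into a single write, classified as initial or repeated depending on whether the key already existed before the batch. A minimal sketch of that classification (toy types; the real `StorageWritesDeduplicator` also drops writes that restore the original value):

use std::collections::HashSet;

struct DedupResult {
    initial_storage_writes: usize,
    repeated_storage_writes: usize,
}

// Classifies deduplicated writes: a key is "initial" when it did not
// exist before the batch, otherwise "repeated".
fn deduplicate(pre_existing: &HashSet<&str>, writes: &[&str]) -> DedupResult {
    let unique: HashSet<&str> = writes.iter().copied().collect();
    let initial = unique
        .iter()
        .filter(|key| !pre_existing.contains(*key))
        .count();
    DedupResult {
        initial_storage_writes: initial,
        repeated_storage_writes: unique.len() - initial,
    }
}

fn main() {
    let pre_existing: HashSet<&str> = ["bootloader_balance"].into_iter().collect();
    // Two writes to the same fresh slot dedup to one initial write.
    let res = deduplicate(&pre_existing, &["slot_a", "slot_a", "bootloader_balance"]);
    assert_eq!(res.initial_storage_writes, 1);
    assert_eq!(res.repeated_storage_writes, 1);
}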
- let account_code_key = get_code_key(&deploy_tx.address); - - let expected_slots = vec![ - (u256_to_h256(U256::from(1u32)), known_codes_key), - (deploy_tx.bytecode_hash, account_code_key), - ]; - assert!(!res.result.is_failed()); - - verify_required_storage(&vm.vm.state, expected_slots); - - assert_eq!(res.logs.user_l2_to_l1_logs, required_l2_to_l1_logs); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - true, - None, - false, - TxType::L1 { serial_id: 0 }, - ); - vm.vm.push_transaction(tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - let storage_logs = res.logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - - // Tx panicked - assert_eq!(res.initial_storage_writes - basic_initial_writes, 0); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - false, - None, - false, - TxType::L1 { serial_id: 0 }, - ); - vm.vm.push_transaction(tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); - let storage_logs = res.logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We changed one slot inside contract - assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); - - // No repeated writes - let repeated_writes = res.repeated_storage_writes; - assert_eq!(res.repeated_storage_writes, 0); - - vm.vm.push_transaction(tx); - let storage_logs = vm.vm.execute(VmExecutionMode::OneTx).logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We do the same storage write, it will be deduplicated, so still 4 initial write and 0 repeated - assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); - assert_eq!(res.repeated_storage_writes, repeated_writes); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - false, - Some(10.into()), - false, - TxType::L1 { serial_id: 1 }, - ); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - // Method is not payable tx should fail - assert!(result.result.is_failed(), "The transaction should fail"); - - let res = StorageWritesDeduplicator::apply_on_empty_state(&result.logs.storage_logs); - // There are only basic initial writes - assert_eq!(res.initial_storage_writes - basic_initial_writes, 2); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/l2_blocks.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/l2_blocks.rs deleted file mode 100644 index d637d583c0e..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/l2_blocks.rs +++ /dev/null @@ -1,437 +0,0 @@ -//! -//! Tests for the bootloader -//! The description for each of the tests can be found in the corresponding `.yul` file. -//! 
- -use zk_evm_1_4_0::aux_structures::Timestamp; -use crate::interface::storage::WriteStorage; -use zksync_system_constants::REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE; -use zksync_types::{ - block::{pack_block_info, MiniblockHasher}, - AccountTreeId, Execute, ExecuteTransactionCommon, L1BatchNumber, L1TxCommonData, - MiniblockNumber, ProtocolVersionId, StorageKey, Transaction, H160, H256, - SYSTEM_CONTEXT_ADDRESS, SYSTEM_CONTEXT_BLOCK_INFO_POSITION, - SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, - U256, -}; -use zksync_utils::{h256_to_u256, u256_to_h256}; - -use crate::{ - interface::{ExecutionResult, Halt, L2BlockEnv, TxExecutionMode, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - constants::{ - BOOTLOADER_HEAP_PAGE, TX_OPERATOR_L2_BLOCK_INFO_OFFSET, - TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO, - }, - tests::tester::{default_l1_batch, VmTesterBuilder}, - utils::l2_blocks::get_l2_block_hash_key, - HistoryEnabled, Vm, - }, - HistoryMode, -}; - -fn get_l1_noop() -> Transaction { - Transaction { - common_data: ExecuteTransactionCommon::L1(L1TxCommonData { - sender: H160::random(), - gas_limit: U256::from(2000000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute: Execute { - contract_address: H160::zero(), - calldata: vec![], - value: U256::zero(), - factory_deps: None, - }, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -#[test] -fn test_l2_block_initialization_timestamp() { - // This test checks that the L2 block initialization works correctly. - // Here we check that that the first block must have timestamp that is greater or equal to the timestamp - // of the current batch. - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - // Override the timestamp of the current miniblock to be 0. - vm.vm.bootloader_state.push_l2_block(L2BlockEnv { - number: 1, - timestamp: 0, - prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }); - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt {reason: Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())} - ); -} - -#[test] -fn test_l2_block_initialization_number_non_zero() { - // This test checks that the L2 block initialization works correctly. - // Here we check that the first miniblock number can not be zero. 
-
-    let l1_batch = default_l1_batch(L1BatchNumber(1));
-    let first_l2_block = L2BlockEnv {
-        number: 0,
-        timestamp: l1_batch.timestamp,
-        prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)),
-        max_virtual_blocks_to_create: 1,
-    };
-
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_l1_batch_env(l1_batch)
-        .with_random_rich_accounts(1)
-        .build();
-
-    let l1_tx = get_l1_noop();
-
-    vm.vm.push_transaction(l1_tx);
-
-    let timestamp = Timestamp(vm.vm.state.local_state.timestamp);
-    set_manual_l2_block_info(&mut vm.vm, 0, first_l2_block, timestamp);
-
-    let res = vm.vm.execute(VmExecutionMode::OneTx);
-
-    assert_eq!(
-        res.result,
-        ExecutionResult::Halt {
-            reason: Halt::FailedToSetL2Block(
-                "L2 block number is never expected to be zero".to_string()
-            )
-        }
-    );
-}
-
-fn test_same_l2_block(
-    expected_error: Option<Halt>,
-    override_timestamp: Option<u64>,
-    override_prev_block_hash: Option<H256>,
-) {
-    let mut l1_batch = default_l1_batch(L1BatchNumber(1));
-    l1_batch.timestamp = 1;
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_l1_batch_env(l1_batch)
-        .with_random_rich_accounts(1)
-        .build();
-
-    let l1_tx = get_l1_noop();
-    vm.vm.push_transaction(l1_tx.clone());
-    let res = vm.vm.execute(VmExecutionMode::OneTx);
-    assert!(!res.result.is_failed());
-
-    let mut current_l2_block = vm.vm.batch_env.first_l2_block;
-
-    if let Some(timestamp) = override_timestamp {
-        current_l2_block.timestamp = timestamp;
-    }
-    if let Some(prev_block_hash) = override_prev_block_hash {
-        current_l2_block.prev_block_hash = prev_block_hash;
-    }
-
-    if (None, None) == (override_timestamp, override_prev_block_hash) {
-        current_l2_block.max_virtual_blocks_to_create = 0;
-    }
-
-    vm.vm.push_transaction(l1_tx);
-    let timestamp = Timestamp(vm.vm.state.local_state.timestamp);
-    set_manual_l2_block_info(&mut vm.vm, 1, current_l2_block, timestamp);
-
-    let result = vm.vm.execute(VmExecutionMode::OneTx);
-
-    if let Some(err) = expected_error {
-        assert_eq!(result.result, ExecutionResult::Halt { reason: err });
-    } else {
-        assert_eq!(result.result, ExecutionResult::Success { output: vec![] });
-    }
-}
-
-#[test]
-fn test_l2_block_same_l2_block() {
-    // This test covers the case where there are multiple transactions inside the same L2 block.
-
-    // Case 1: Incorrect timestamp
-    test_same_l2_block(
-        Some(Halt::FailedToSetL2Block(
-            "The timestamp of the same L2 block must be same".to_string(),
-        )),
-        Some(0),
-        None,
-    );
-
-    // Case 2: Incorrect previous block hash
-    test_same_l2_block(
-        Some(Halt::FailedToSetL2Block(
-            "The previous hash of the same L2 block must be same".to_string(),
-        )),
-        None,
-        Some(H256::zero()),
-    );
-
-    // Case 3: Correct continuation of the same L2 block
-    test_same_l2_block(None, None, None);
-}
-
-fn test_new_l2_block(
-    first_l2_block: L2BlockEnv,
-    overriden_second_block_number: Option<u32>,
-    overriden_second_block_timestamp: Option<u64>,
-    overriden_second_block_prev_block_hash: Option<H256>,
-    expected_error: Option<Halt>,
-) {
-    let mut l1_batch = default_l1_batch(L1BatchNumber(1));
-    l1_batch.timestamp = 1;
-    l1_batch.first_l2_block = first_l2_block;
-
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_l1_batch_env(l1_batch)
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_random_rich_accounts(1)
-        .build();
-
-    let l1_tx = get_l1_noop();
-
-    // First, we execute the first transaction.
-    vm.vm.push_transaction(l1_tx.clone());
-    vm.vm.execute(VmExecutionMode::OneTx);
-
-    let mut second_l2_block = vm.vm.batch_env.first_l2_block;
-    second_l2_block.number += 1;
-    second_l2_block.timestamp += 1;
-    second_l2_block.prev_block_hash = vm.vm.bootloader_state.last_l2_block().get_hash();
-
-    if let Some(block_number) = overriden_second_block_number {
-        second_l2_block.number = block_number;
-    }
-    if let Some(timestamp) = overriden_second_block_timestamp {
-        second_l2_block.timestamp = timestamp;
-    }
-    if let Some(prev_block_hash) = overriden_second_block_prev_block_hash {
-        second_l2_block.prev_block_hash = prev_block_hash;
-    }
-
-    vm.vm.bootloader_state.push_l2_block(second_l2_block);
-
-    vm.vm.push_transaction(l1_tx);
-
-    let result = vm.vm.execute(VmExecutionMode::OneTx);
-    if let Some(err) = expected_error {
-        assert_eq!(result.result, ExecutionResult::Halt { reason: err });
-    } else {
-        assert_eq!(result.result, ExecutionResult::Success { output: vec![] });
-    }
-}
-
-#[test]
-fn test_l2_block_new_l2_block() {
-    // This test is aimed at covering potential issues with initializing a new L2 block.
-
-    let correct_first_block = L2BlockEnv {
-        number: 1,
-        timestamp: 1,
-        prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)),
-        max_virtual_blocks_to_create: 1,
-    };
-
-    // Case 1: Block number increasing by more than 1
-    test_new_l2_block(
-        correct_first_block,
-        Some(3),
-        None,
-        None,
-        Some(Halt::FailedToSetL2Block(
-            "Invalid new L2 block number".to_string(),
-        )),
-    );
-
-    // Case 2: Timestamp not increasing
-    test_new_l2_block(
-        correct_first_block,
-        None,
-        Some(1),
-        None,
-        Some(Halt::FailedToSetL2Block("The timestamp of the new L2 block must be greater than the timestamp of the previous L2 block".to_string())),
-    );
-
-    // Case 3: Incorrect previous block hash
-    test_new_l2_block(
-        correct_first_block,
-        None,
-        None,
-        Some(H256::zero()),
-        Some(Halt::FailedToSetL2Block(
-            "The current L2 block hash is incorrect".to_string(),
-        )),
-    );
-
-    // Case 4: Correct new block
-    test_new_l2_block(correct_first_block, None, None, None, None);
-}
-
-#[allow(clippy::too_many_arguments)]
-fn test_first_in_batch(
-    miniblock_timestamp: u64,
-    miniblock_number: u32,
-    pending_txs_hash: H256,
-    batch_timestamp: u64,
-    new_batch_timestamp: u64,
-    batch_number: u32,
-    proposed_block: L2BlockEnv,
-    expected_error: Option<Halt>,
-) {
-    let mut l1_batch = default_l1_batch(L1BatchNumber(1));
-    l1_batch.number += 1;
-    l1_batch.timestamp = new_batch_timestamp;
-
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_l1_batch_env(l1_batch)
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_random_rich_accounts(1)
-        .build();
-    let l1_tx = get_l1_noop();
-
-    // Setting the values provided.
-    let storage_ptr = vm.vm.state.storage.storage.get_ptr();
-    let miniblock_info_slot = StorageKey::new(
-        AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS),
-        SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION,
-    );
-    let pending_txs_hash_slot = StorageKey::new(
-        AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS),
-        SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION,
-    );
-    let batch_info_slot = StorageKey::new(
-        AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS),
-        SYSTEM_CONTEXT_BLOCK_INFO_POSITION,
-    );
-    let prev_block_hash_position = get_l2_block_hash_key(miniblock_number - 1);
-
-    storage_ptr.borrow_mut().set_value(
-        miniblock_info_slot,
-        u256_to_h256(pack_block_info(
-            miniblock_number as u64,
-            miniblock_timestamp,
-        )),
-    );
-    storage_ptr
-        .borrow_mut()
-        .set_value(pending_txs_hash_slot, pending_txs_hash);
-    storage_ptr.borrow_mut().set_value(
-        batch_info_slot,
-        u256_to_h256(pack_block_info(batch_number as u64, batch_timestamp)),
-    );
-    storage_ptr.borrow_mut().set_value(
-        prev_block_hash_position,
-        MiniblockHasher::legacy_hash(MiniblockNumber(miniblock_number - 1)),
-    );
-
-    // In order to skip the checks from the Rust side of the VM, we first use some definitely correct
-    // L2 block info, and then override it with the user-provided values.
-
-    let last_l2_block = vm.vm.bootloader_state.last_l2_block();
-    let new_l2_block = L2BlockEnv {
-        number: last_l2_block.number + 1,
-        timestamp: last_l2_block.timestamp + 1,
-        prev_block_hash: last_l2_block.get_hash(),
-        max_virtual_blocks_to_create: last_l2_block.max_virtual_blocks_to_create,
-    };
-
-    vm.vm.bootloader_state.push_l2_block(new_l2_block);
-    vm.vm.push_transaction(l1_tx);
-    let timestamp = Timestamp(vm.vm.state.local_state.timestamp);
-    set_manual_l2_block_info(&mut vm.vm, 0, proposed_block, timestamp);
-
-    let result = vm.vm.execute(VmExecutionMode::OneTx);
-    if let Some(err) = expected_error {
-        assert_eq!(result.result, ExecutionResult::Halt { reason: err });
-    } else {
-        assert_eq!(result.result, ExecutionResult::Success { output: vec![] });
-    }
-}
-
-#[test]
-fn test_l2_block_first_in_batch() {
-    let prev_block_hash = MiniblockHasher::legacy_hash(MiniblockNumber(0));
-    let prev_block_hash = MiniblockHasher::new(MiniblockNumber(1), 1, prev_block_hash)
-        .finalize(ProtocolVersionId::latest());
-    test_first_in_batch(
-        1,
-        1,
-        H256::zero(),
-        1,
-        2,
-        1,
-        L2BlockEnv {
-            number: 2,
-            timestamp: 2,
-            prev_block_hash,
-            max_virtual_blocks_to_create: 1,
-        },
-        None,
-    );
-
-    let prev_block_hash = MiniblockHasher::legacy_hash(MiniblockNumber(0));
-    let prev_block_hash = MiniblockHasher::new(MiniblockNumber(1), 8, prev_block_hash)
-        .finalize(ProtocolVersionId::latest());
-    test_first_in_batch(
-        8,
-        1,
-        H256::zero(),
-        5,
-        12,
-        1,
-        L2BlockEnv {
-            number: 2,
-            timestamp: 9,
-            prev_block_hash,
-            max_virtual_blocks_to_create: 1,
-        },
-        Some(Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())),
-    );
-}
-
-fn set_manual_l2_block_info<S: WriteStorage, H: HistoryMode>(
-    vm: &mut Vm<S, H>,
-    tx_number: usize,
-    block_info: L2BlockEnv,
-    timestamp: Timestamp,
-) {
-    let fictive_miniblock_position =
-        TX_OPERATOR_L2_BLOCK_INFO_OFFSET + TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO * tx_number;
-
-    vm.state.memory.populate_page(
-        BOOTLOADER_HEAP_PAGE as usize,
-        vec![
-            (fictive_miniblock_position, block_info.number.into()),
-            (fictive_miniblock_position + 1, block_info.timestamp.into()),
-            (
-                fictive_miniblock_position + 2,
-                h256_to_u256(block_info.prev_block_hash),
-            ),
-            (
-                fictive_miniblock_position + 3,
-                block_info.max_virtual_blocks_to_create.into(),
-            ),
-        ],
-        timestamp,
-    )
-}
diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/mod.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/mod.rs
deleted file mode 100644
index 95377232b3e..00000000000
--- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/mod.rs
+++ /dev/null
@@ -1,22 +0,0 @@
-mod bootloader;
-mod default_aa;
-// TODO - fix this test
-// mod invalid_bytecode;
-mod bytecode_publishing;
-mod call_tracer;
-mod circuits;
-mod gas_limit;
-mod get_used_contracts;
-mod is_write_initial;
-mod l1_tx_execution;
-mod l2_blocks;
-mod nonce_holder;
-mod precompiles;
-mod refunds;
-mod require_eip712;
-mod rollbacks;
-mod simple_execution;
-mod tester;
-mod tracing_execution_error;
-mod upgrade;
-mod utils;
diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/nonce_holder.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/nonce_holder.rs
deleted file mode 100644
index 44ba3e4e323..00000000000
--- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/nonce_holder.rs
+++ /dev/null
@@ -1,188 +0,0 @@
-use zksync_types::{Execute, Nonce};
-
-use crate::{
-    interface::{
-        ExecutionResult, Halt, TxExecutionMode, TxRevertReason, VmExecutionMode, VmInterface,
-        VmRevertReason,
-    },
-    vm_boojum_integration::{
-        tests::{
-            tester::{Account, VmTesterBuilder},
-            utils::read_nonce_holder_tester,
-        },
-        types::internals::TransactionData,
-        HistoryEnabled,
-    },
-};
-
-pub enum NonceHolderTestMode {
-    SetValueUnderNonce,
-    IncreaseMinNonceBy5,
-    IncreaseMinNonceTooMuch,
-    LeaveNonceUnused,
-    IncreaseMinNonceBy1,
-    SwitchToArbitraryOrdering,
-}
-
-impl From<NonceHolderTestMode> for u8 {
-    fn from(mode: NonceHolderTestMode) -> u8 {
-        match mode {
-            NonceHolderTestMode::SetValueUnderNonce => 0,
-            NonceHolderTestMode::IncreaseMinNonceBy5 => 1,
-            NonceHolderTestMode::IncreaseMinNonceTooMuch => 2,
-            NonceHolderTestMode::LeaveNonceUnused => 3,
-            NonceHolderTestMode::IncreaseMinNonceBy1 => 4,
-            NonceHolderTestMode::SwitchToArbitraryOrdering => 5,
-        }
-    }
-}
-
-#[test]
-fn test_nonce_holder() {
-    let mut account = Account::random();
-
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_deployer()
-        .with_custom_contracts(vec![(
-            read_nonce_holder_tester().to_vec(),
-            account.address,
-            true,
-        )])
-        .with_rich_accounts(vec![account.clone()])
-        .build();
-
-    let mut run_nonce_test = |nonce: u32,
-                              test_mode: NonceHolderTestMode,
-                              error_message: Option<String>,
-                              comment: &'static str| {
-        // In this test we have to reset the VM state after each test case, because once the bootloader
-        // fails during transaction validation, it will fail again and again. At the same time, we have
-        // to keep the same storage, because we want to keep the nonce holder contract state.
-        // The easiest way in terms of lifetimes is to reuse `vm_builder` to achieve this.
- vm.reset_state(true); - let mut transaction_data: TransactionData = account - .get_l2_tx_for_execute_with_nonce( - Execute { - contract_address: account.address, - calldata: vec![12], - value: Default::default(), - factory_deps: None, - }, - None, - Nonce(nonce), - ) - .into(); - - transaction_data.signature = vec![test_mode.into()]; - vm.vm.push_raw_transaction(transaction_data, 0, 0, true); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(msg) = error_message { - let expected_error = - TxRevertReason::Halt(Halt::ValidationFailed(VmRevertReason::General { - msg, - data: vec![], - })); - let ExecutionResult::Halt { reason } = result.result else { - panic!("Expected revert, got {:?}", result.result); - }; - assert_eq!( - reason.to_string(), - expected_error.to_string(), - "{}", - comment - ); - } else { - assert!(!result.result.is_failed(), "{}", comment); - } - }; - // Test 1: trying to set value under non sequential nonce value. - run_nonce_test( - 1u32, - NonceHolderTestMode::SetValueUnderNonce, - Some("Previous nonce has not been used".to_string()), - "Allowed to set value under non sequential value", - ); - - // Test 2: increase min nonce by 1 with sequential nonce ordering: - run_nonce_test( - 0u32, - NonceHolderTestMode::IncreaseMinNonceBy1, - None, - "Failed to increment nonce by 1 for sequential account", - ); - - // Test 3: correctly set value under nonce with sequential nonce ordering: - run_nonce_test( - 1u32, - NonceHolderTestMode::SetValueUnderNonce, - None, - "Failed to set value under nonce sequential value", - ); - - // Test 5: migrate to the arbitrary nonce ordering: - run_nonce_test( - 2u32, - NonceHolderTestMode::SwitchToArbitraryOrdering, - None, - "Failed to switch to arbitrary ordering", - ); - - // Test 6: increase min nonce by 5 - run_nonce_test( - 6u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - None, - "Failed to increase min nonce by 5", - ); - - // Test 7: since the nonces in range [6,10] are no longer allowed, the - // tx with nonce 10 should not be allowed - run_nonce_test( - 10u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - Some("Reusing the same nonce twice".to_string()), - "Allowed to reuse nonce below the minimal one", - ); - - // Test 8: we should be able to use nonce 13 - run_nonce_test( - 13u32, - NonceHolderTestMode::SetValueUnderNonce, - None, - "Did not allow to use unused nonce 10", - ); - - // Test 9: we should not be able to reuse nonce 13 - run_nonce_test( - 13u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - Some("Reusing the same nonce twice".to_string()), - "Allowed to reuse the same nonce twice", - ); - - // Test 10: we should be able to simply use nonce 14, while bumping the minimal nonce by 5 - run_nonce_test( - 14u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - None, - "Did not allow to use a bumped nonce", - ); - - // Test 11: Do not allow bumping nonce by too much - run_nonce_test( - 16u32, - NonceHolderTestMode::IncreaseMinNonceTooMuch, - Some("The value for incrementing the nonce is too high".to_string()), - "Allowed for incrementing min nonce too much", - ); - - // Test 12: Do not allow not setting a nonce as used - run_nonce_test( - 16u32, - NonceHolderTestMode::LeaveNonceUnused, - Some("The nonce was not set as used".to_string()), - "Allowed to leave nonce as unused", - ); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/precompiles.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/precompiles.rs deleted file mode 100644 index 516331d574f..00000000000 --- 
a/core/lib/multivm/src/versions/vm_boojum_integration/tests/precompiles.rs +++ /dev/null @@ -1,136 +0,0 @@ -use zk_evm_1_4_0::zk_evm_abstractions::precompiles::PrecompileAddress; -use zksync_types::{Address, Execute}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - constants::BLOCK_GAS_LIMIT, - tests::{tester::VmTesterBuilder, utils::read_precompiles_contract}, - HistoryEnabled, - }, -}; - -#[test] -fn test_keccak() { - // Execute special transaction and check that at least 1000 keccak calls were made. - let contract = read_precompiles_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contract, address, true)]) - .build(); - - // calldata for `doKeccak(1000)`. - let keccak1000_calldata = - "370f20ac00000000000000000000000000000000000000000000000000000000000003e8"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: hex::decode(keccak1000_calldata).unwrap(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let keccak_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::Keccak256) - .count(); - - assert!(keccak_count >= 1000); -} - -#[test] -fn test_sha256() { - // Execute special transaction and check that at least 1000 sha256 calls were made. - let contract = read_precompiles_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contract, address, true)]) - .build(); - - // calldata for `doSha256(1000)`. - let sha1000_calldata = - "5d0b4fb500000000000000000000000000000000000000000000000000000000000003e8"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: hex::decode(sha1000_calldata).unwrap(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let sha_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::SHA256) - .count(); - - assert!(sha_count >= 1000); -} - -#[test] -fn test_ecrecover() { - // Execute simple transfer and check that exactly 1 ecrecover call was made (it's done during tx validation). 
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: account.address, - calldata: Vec::new(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let ecrecover_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::Ecrecover) - .count(); - - assert_eq!(ecrecover_count, 1); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/refunds.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/refunds.rs deleted file mode 100644 index 521bd81f2ef..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/refunds.rs +++ /dev/null @@ -1,167 +0,0 @@ -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - types::internals::TransactionData, - HistoryEnabled, - }, -}; - -#[test] -fn test_predetermined_refunded_gas() { - // In this test, we compare the execution of the bootloader with the predefined - // refunded gas and without them - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_batch = vm.vm.batch_env.clone(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let DeployContractsTx { - tx, - bytecode_hash: _, - address: _, - } = account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(!result.result.is_failed()); - - // If the refund provided by the operator or the final refund are the 0 - // there is no impact of the operator's refund at all and so this test does not - // make much sense. - assert!( - result.refunds.operator_suggested_refund > 0, - "The operator's refund is 0" - ); - assert!(result.refunds.gas_refunded > 0, "The final refund is 0"); - - let result_without_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_without_predefined_refunds = vm.vm.get_current_execution_state(); - assert!(!result_without_predefined_refunds.result.is_failed(),); - - // Here we want to provide the same refund from the operator and check that it's the correct one. - // We execute the whole block without refund tracer, because refund tracer will eventually override the provided refund. 
- // But the overall result should be the same - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let tx: TransactionData = tx.into(); - let block_gas_per_pubdata_byte = vm.vm.batch_env.block_gas_price_per_pubdata(); - // Overhead - let overhead = tx.overhead_gas(block_gas_per_pubdata_byte as u32); - vm.vm - .push_raw_transaction(tx.clone(), overhead, result.refunds.gas_refunded, true); - - let result_with_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_with_predefined_refunds = vm.vm.get_current_execution_state(); - - assert!(!result_with_predefined_refunds.result.is_failed()); - - // We need to sort these lists as those are flattened from HashMaps - current_state_with_predefined_refunds - .used_contract_hashes - .sort(); - current_state_without_predefined_refunds - .used_contract_hashes - .sort(); - - assert_eq!( - current_state_with_predefined_refunds.events, - current_state_without_predefined_refunds.events - ); - - assert_eq!( - current_state_with_predefined_refunds.user_l2_to_l1_logs, - current_state_without_predefined_refunds.user_l2_to_l1_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.system_logs, - current_state_without_predefined_refunds.system_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.storage_log_queries, - current_state_without_predefined_refunds.storage_log_queries - ); - assert_eq!( - current_state_with_predefined_refunds.used_contract_hashes, - current_state_without_predefined_refunds.used_contract_hashes - ); - - // In this test we put the different refund from the operator. - // We still can't use the refund tracer, because it will override the refund. - // But we can check that the logs and events have changed. 
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let changed_operator_suggested_refund = result.refunds.gas_refunded + 1000; - vm.vm - .push_raw_transaction(tx, overhead, changed_operator_suggested_refund, true); - let result = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_with_changed_predefined_refunds = vm.vm.get_current_execution_state(); - - assert!(!result.result.is_failed()); - current_state_with_changed_predefined_refunds - .used_contract_hashes - .sort(); - current_state_without_predefined_refunds - .used_contract_hashes - .sort(); - - assert_eq!( - current_state_with_changed_predefined_refunds.events.len(), - current_state_without_predefined_refunds.events.len() - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.events, - current_state_without_predefined_refunds.events - ); - - assert_eq!( - current_state_with_changed_predefined_refunds.user_l2_to_l1_logs, - current_state_without_predefined_refunds.user_l2_to_l1_logs - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.system_logs, - current_state_without_predefined_refunds.system_logs - ); - - assert_eq!( - current_state_with_changed_predefined_refunds - .storage_log_queries - .len(), - current_state_without_predefined_refunds - .storage_log_queries - .len() - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.storage_log_queries, - current_state_without_predefined_refunds.storage_log_queries - ); - assert_eq!( - current_state_with_changed_predefined_refunds.used_contract_hashes, - current_state_without_predefined_refunds.used_contract_hashes - ); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/require_eip712.rs deleted file mode 100644 index 90c3206b24b..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/require_eip712.rs +++ /dev/null @@ -1,165 +0,0 @@ -use std::convert::TryInto; - -use ethabi::Token; -use zksync_eth_signer::{raw_ethereum_tx::TransactionParameters, EthereumSigner}; -use zksync_system_constants::L2_ETH_TOKEN_ADDRESS; -use zksync_types::{ - fee::Fee, l2::L2Tx, transaction_request::TransactionRequest, - utils::storage_key_for_standard_token_balance, AccountTreeId, Address, Eip712Domain, Execute, - L2ChainId, Nonce, Transaction, U256, -}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - tests::{ - tester::{Account, VmTester, VmTesterBuilder}, - utils::read_many_owners_custom_account_contract, - }, - HistoryDisabled, - }, -}; - -impl VmTester { - pub(crate) fn get_eth_balance(&mut self, address: Address) -> U256 { - let key = storage_key_for_standard_token_balance( - AccountTreeId::new(L2_ETH_TOKEN_ADDRESS), - &address, - ); - self.vm.state.storage.storage.read_from_storage(&key) - } -} - -// TODO refactor this test it use too much internal details of the VM -#[tokio::test] -/// This test deploys 'buggy' account abstraction code, and then tries accessing it both with legacy -/// and EIP712 transactions. -/// Currently we support both, but in the future, we should allow only EIP712 transactions to access the AA accounts. 
-async fn test_require_eip712() { - // Use 3 accounts: - // - private_address - EOA account, where we have the key - // - account_address - AA account, where the contract is deployed - // - beneficiary - an EOA account, where we'll try to transfer the tokens. - let account_abstraction = Account::random(); - let mut private_account = Account::random(); - let beneficiary = Account::random(); - - let (bytecode, contract) = read_many_owners_custom_account_contract(); - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_custom_contracts(vec![(bytecode, account_abstraction.address, true)]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account_abstraction.clone(), private_account.clone()]) - .build(); - - assert_eq!(vm.get_eth_balance(beneficiary.address), U256::from(0)); - - let chain_id: u32 = 270; - - // First, let's set the owners of the AA account to the private_address. - // (so that messages signed by private_address, are authorized to act on behalf of the AA account). - let set_owners_function = contract.function("setOwners").unwrap(); - let encoded_input = set_owners_function - .encode_input(&[Token::Array(vec![Token::Address(private_account.address)])]) - .unwrap(); - - let tx = private_account.get_l2_tx_for_execute( - Execute { - contract_address: account_abstraction.address, - calldata: encoded_input, - value: Default::default(), - factory_deps: None, - }, - None, - ); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - let private_account_balance = vm.get_eth_balance(private_account.address); - - // And now let's do the transfer from the 'account abstraction' to 'beneficiary' (using 'legacy' transaction). - // Normally this would not work - unless the operator is malicious. - let aa_raw_tx = TransactionParameters { - nonce: U256::from(0), - to: Some(beneficiary.address), - gas: U256::from(100000000), - gas_price: Some(U256::from(10000000)), - value: U256::from(888000088), - data: vec![], - chain_id: 270, - transaction_type: None, - access_list: None, - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - }; - - let aa_tx = private_account.sign_legacy_tx(aa_raw_tx).await; - let (tx_request, hash) = TransactionRequest::from_bytes(&aa_tx, L2ChainId::from(270)).unwrap(); - - let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000).unwrap(); - l2_tx.set_input(aa_tx, hash); - // Pretend that operator is malicious and sets the initiator to the AA account. - l2_tx.common_data.initiator_address = account_abstraction.address; - let transaction: Transaction = l2_tx.try_into().unwrap(); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(888000088) - ); - // Make sure that the tokens were transferred from the AA account. 
- assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); - - // // Now send the 'classic' EIP712 transaction - let tx_712 = L2Tx::new( - beneficiary.address, - vec![], - Nonce(1), - Fee { - gas_limit: U256::from(1000000000), - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - gas_per_pubdata_limit: U256::from(1000000000), - }, - account_abstraction.address, - U256::from(28374938), - None, - Default::default(), - ); - - let transaction_request: TransactionRequest = tx_712.into(); - - let domain = Eip712Domain::new(L2ChainId::from(chain_id)); - let signature = private_account - .get_pk_signer() - .sign_typed_data(&domain, &transaction_request) - .await - .unwrap(); - let encoded_tx = transaction_request.get_signed_bytes(&signature, L2ChainId::from(chain_id)); - - let (aa_txn_request, aa_hash) = - TransactionRequest::from_bytes(&encoded_tx, L2ChainId::from(chain_id)).unwrap(); - - let mut l2_tx = L2Tx::from_request(aa_txn_request, 100000).unwrap(); - l2_tx.set_input(encoded_tx, aa_hash); - - let transaction: Transaction = l2_tx.try_into().unwrap(); - vm.vm.push_transaction(transaction); - vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(916375026) - ); - assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/rollbacks.rs deleted file mode 100644 index cfaf1952c70..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/rollbacks.rs +++ /dev/null @@ -1,263 +0,0 @@ -use ethabi::Token; -use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams}; -use crate::interface::storage::WriteStorage; -use zksync_types::{get_nonce_key, Execute, U256}; - -use crate::{ - interface::{ - dyn_tracers::vm_1_4_0::DynTracer, - tracer::{TracerExecutionStatus, TracerExecutionStopReason}, - TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceHistoryEnabled, - }, - vm_boojum_integration::{ - tests::{ - tester::{DeployContractsTx, TransactionTestInfo, TxModifier, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - types::internals::ZkSyncVmState, - BootloaderState, HistoryEnabled, HistoryMode, SimpleMemory, ToTracerPointer, VmTracer, - }, -}; - -#[test] -fn test_vm_rollbacks() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let mut account = vm.rich_accounts[0].clone(); - let counter = read_test_contract(); - let tx_0 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_1 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_2 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(tx_0.clone(), false), - TransactionTestInfo::new_processed(tx_1.clone(), false), - TransactionTestInfo::new_processed(tx_2.clone(), false), - ]); - - // reset vm - vm.reset_with_empty_storage(); - - let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignatureLength.into()), - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongMagicValue.into()), - TransactionTestInfo::new_rejected(tx_0.clone(), 
TxModifier::WrongSignature.into()), - // The correct nonce is 0, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_0.clone(), false), - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_1, false), - // The correct nonce is 2, this tx will fail - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_2.clone(), false), - // This tx will fail - TransactionTestInfo::new_rejected(tx_2, TxModifier::NonceReused.into()), - TransactionTestInfo::new_rejected(tx_0, TxModifier::NonceReused.into()), - ]); - - assert_eq!(result_without_rollbacks, result_with_rollbacks); -} - -#[test] -fn test_vm_loadnext_rollbacks() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let mut account = vm.rich_accounts[0].clone(); - - let loadnext_contract = get_loadnext_contract(); - let loadnext_constructor_data = &[Token::Uint(U256::from(100))]; - let DeployContractsTx { - tx: loadnext_deploy_tx, - address, - .. - } = account.get_deploy_tx_with_factory_deps( - &loadnext_contract.bytecode, - Some(loadnext_constructor_data), - loadnext_contract.factory_deps.clone(), - TxType::L2, - ); - - let loadnext_tx_1 = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let loadnext_tx_2 = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_1.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), false), - ]); - - // reset vm - vm.reset_with_empty_storage(); - - let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_1.clone(), true), - TransactionTestInfo::new_rejected( - loadnext_deploy_tx.clone(), - TxModifier::NonceReused.into(), - ), - TransactionTestInfo::new_processed(loadnext_tx_1, false), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_rejected(loadnext_deploy_tx, TxModifier::NonceReused.into()), - TransactionTestInfo::new_processed(loadnext_tx_2, false), - ]); - - assert_eq!(result_without_rollbacks, result_with_rollbacks); -} - -// Testing tracer that does not allow the recursion to go deeper than a certain limit -struct MaxRecursionTracer { - max_recursion_depth: usize, -} 
-
-/// Tracer responsible for tracking the recursion depth and
-/// stopping the VM execution if the limit is reached.
-impl<S, H: HistoryMode> DynTracer<S, SimpleMemory<H>> for MaxRecursionTracer {}
-
-impl<S: WriteStorage, H: HistoryMode> VmTracer<S, H> for MaxRecursionTracer {
-    fn finish_cycle(
-        &mut self,
-        state: &mut ZkSyncVmState<S, H>,
-        _bootloader_state: &mut BootloaderState,
-    ) -> TracerExecutionStatus {
-        let current_depth = state.local_state.callstack.depth();
-
-        if current_depth > self.max_recursion_depth {
-            TracerExecutionStatus::Stop(TracerExecutionStopReason::Finish)
-        } else {
-            TracerExecutionStatus::Continue
-        }
-    }
-}
-
-#[test]
-fn test_layered_rollback() {
-    // This test checks that the layered rollbacks work correctly, i.e.
-    // the rollback by the operator will always revert all the changes.
-
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_random_rich_accounts(1)
-        .build();
-
-    let account = &mut vm.rich_accounts[0];
-    let loadnext_contract = get_loadnext_contract().bytecode;
-
-    let DeployContractsTx {
-        tx: deploy_tx,
-        address,
-        ..
-    } = account.get_deploy_tx(
-        &loadnext_contract,
-        Some(&[Token::Uint(0.into())]),
-        TxType::L2,
-    );
-    vm.vm.push_transaction(deploy_tx);
-    let deployment_res = vm.vm.execute(VmExecutionMode::OneTx);
-    assert!(!deployment_res.result.is_failed(), "transaction failed");
-
-    let loadnext_transaction = account.get_loadnext_transaction(
-        address,
-        LoadnextContractExecutionParams {
-            writes: 1,
-            recursive_calls: 20,
-            ..LoadnextContractExecutionParams::empty()
-        },
-        TxType::L2,
-    );
-
-    let nonce_val = vm
-        .vm
-        .state
-        .storage
-        .storage
-        .read_from_storage(&get_nonce_key(&account.address));
-
-    vm.vm.make_snapshot();
-
-    vm.vm.push_transaction(loadnext_transaction.clone());
-    vm.vm.inspect(
-        MaxRecursionTracer {
-            max_recursion_depth: 15,
-        }
-        .into_tracer_pointer()
-        .into(),
-        VmExecutionMode::OneTx,
-    );
-
-    let nonce_val2 = vm
-        .vm
-        .state
-        .storage
-        .storage
-        .read_from_storage(&get_nonce_key(&account.address));
-
-    // The tracer stopped after validation passed, so the nonce has already been increased.
-    assert_eq!(nonce_val + U256::one(), nonce_val2, "nonce did not change");
-
-    vm.vm.rollback_to_the_latest_snapshot();
-
-    let nonce_val_after_rollback = vm
-        .vm
-        .state
-        .storage
-        .storage
-        .read_from_storage(&get_nonce_key(&account.address));
-
-    assert_eq!(
-        nonce_val, nonce_val_after_rollback,
-        "nonce changed after rollback"
-    );
-
-    vm.vm.push_transaction(loadnext_transaction);
-    let result = vm.vm.execute(VmExecutionMode::OneTx);
-    assert!(!result.result.is_failed(), "transaction must not fail");
-}
diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/simple_execution.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/simple_execution.rs
deleted file mode 100644
index f6b1d83e02a..00000000000
--- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/simple_execution.rs
+++ /dev/null
@@ -1,81 +0,0 @@
-use crate::{
-    interface::{ExecutionResult, VmExecutionMode, VmInterface},
-    vm_boojum_integration::{
-        tests::tester::{TxType, VmTesterBuilder},
-        HistoryDisabled,
-    },
-};
-
-#[test]
-fn estimate_fee() {
-    let mut vm_tester = VmTesterBuilder::new(HistoryDisabled)
-        .with_empty_in_memory_storage()
-        .with_deployer()
-        .with_random_rich_accounts(1)
-        .build();
-
-    vm_tester.deploy_test_contract();
-    let account = &mut vm_tester.rich_accounts[0];
-
-    let tx = account.get_test_contract_transaction(
-        vm_tester.test_contract.unwrap(),
-        false,
-        Default::default(),
-        false,
-        TxType::L2,
-    );
-
-    vm_tester.vm.push_transaction(tx);
-
-    let result = vm_tester.vm.execute(VmExecutionMode::OneTx);
-    assert_matches!(result.result, ExecutionResult::Success { .. });
-}
-
-#[test]
-fn simple_execute() {
-    let mut vm_tester = VmTesterBuilder::new(HistoryDisabled)
-        .with_empty_in_memory_storage()
-        .with_deployer()
-        .with_random_rich_accounts(1)
-        .build();
-
-    vm_tester.deploy_test_contract();
-
-    let account = &mut vm_tester.rich_accounts[0];
-
-    let tx1 = account.get_test_contract_transaction(
-        vm_tester.test_contract.unwrap(),
-        false,
-        Default::default(),
-        false,
-        TxType::L1 { serial_id: 1 },
-    );
-
-    let tx2 = account.get_test_contract_transaction(
-        vm_tester.test_contract.unwrap(),
-        true,
-        Default::default(),
-        false,
-        TxType::L1 { serial_id: 1 },
-    );
-
-    let tx3 = account.get_test_contract_transaction(
-        vm_tester.test_contract.unwrap(),
-        false,
-        Default::default(),
-        false,
-        TxType::L1 { serial_id: 1 },
-    );
-    let vm = &mut vm_tester.vm;
-    vm.push_transaction(tx1);
-    vm.push_transaction(tx2);
-    vm.push_transaction(tx3);
-    let tx = vm.execute(VmExecutionMode::OneTx);
-    assert_matches!(tx.result, ExecutionResult::Success { .. });
-    let tx = vm.execute(VmExecutionMode::OneTx);
-    assert_matches!(tx.result, ExecutionResult::Revert { .. });
-    let tx = vm.execute(VmExecutionMode::OneTx);
-    assert_matches!(tx.result, ExecutionResult::Success { .. });
-    let block_tip = vm.execute(VmExecutionMode::Batch);
-    assert_matches!(block_tip.result, ExecutionResult::Success { .. });
-}
diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/inner_state.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/inner_state.rs
deleted file mode 100644
index 078a971e4bf..00000000000
--- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/inner_state.rs
+++ /dev/null
@@ -1,130 +0,0 @@
-use std::collections::HashMap;
-
-use zk_evm_1_4_0::{aux_structures::Timestamp, vm_state::VmLocalState};
-use crate::interface::storage::WriteStorage;
-use zksync_types::{StorageKey, StorageLogQuery, StorageValue, U256};
-
-use crate::{
-    vm_boojum_integration::{
-        old_vm::{
-            event_sink::InMemoryEventSink,
-            history_recorder::{AppDataFrameManagerWithHistory, HistoryRecorder},
-        },
-        HistoryEnabled, HistoryMode, SimpleMemory, Vm,
-    },
-    HistoryMode as CommonHistoryMode,
-};
-
-#[derive(Clone, Debug)]
-pub(crate) struct ModifiedKeysMap(HashMap<StorageKey, StorageValue>);
-
-// We consider hashmaps to be equal even if there is a key
-// that is not present in one but has zero value in another.
-impl PartialEq for ModifiedKeysMap {
-    fn eq(&self, other: &Self) -> bool {
-        for (key, value) in self.0.iter() {
-            if *value != other.0.get(key).cloned().unwrap_or_default() {
-                return false;
-            }
-        }
-        for (key, value) in other.0.iter() {
-            if *value != self.0.get(key).cloned().unwrap_or_default() {
-                return false;
-            }
-        }
-        true
-    }
-}
-
-#[derive(Clone, PartialEq, Debug)]
-pub(crate) struct DecommitterTestInnerState<H: HistoryMode> {
-    /// There is no way to "truly" compare the storage pointer,
-    /// so we just compare the modified keys. This is reasonable enough.
-    pub(crate) modified_storage_keys: ModifiedKeysMap,
-    pub(crate) known_bytecodes: HistoryRecorder<HashMap<U256, Vec<U256>>, H>,
-    pub(crate) decommitted_code_hashes: HistoryRecorder<HashMap<U256, u32>, HistoryEnabled>,
-}
-
-#[derive(Clone, PartialEq, Debug)]
-pub(crate) struct StorageOracleInnerState<H: HistoryMode> {
-    /// There is no way to "truly" compare the storage pointer,
-    /// so we just compare the modified keys. This is reasonable enough.
-    pub(crate) modified_storage_keys: ModifiedKeysMap,
-
-    pub(crate) frames_stack: AppDataFrameManagerWithHistory<Box<StorageLogQuery>, H>,
-
-    pub(crate) pre_paid_changes: HistoryRecorder<HashMap<StorageKey, u32>, H>,
-    pub(crate) paid_changes: HistoryRecorder<HashMap<StorageKey, u32>, H>,
-    pub(crate) initial_values: HistoryRecorder<HashMap<StorageKey, U256>, H>,
-    pub(crate) returned_refunds: HistoryRecorder<Vec<u32>, H>,
-}
-
-#[derive(Clone, PartialEq, Debug)]
-pub(crate) struct PrecompileProcessorTestInnerState<H: HistoryMode> {
-    pub(crate) timestamp_history: HistoryRecorder<Vec<Timestamp>, H>,
-}
-
-/// A struct that encapsulates the state of the VM's oracles.
-/// The state is to be used in tests.
-#[derive(Clone, PartialEq, Debug)]
-pub(crate) struct VmInstanceInnerState<H: HistoryMode> {
-    event_sink: InMemoryEventSink<H>,
-    precompile_processor_state: PrecompileProcessorTestInnerState<H>,
-    memory: SimpleMemory<H>,
-    decommitter_state: DecommitterTestInnerState<H>,
-    storage_oracle_state: StorageOracleInnerState<H>,
-    local_state: VmLocalState,
-}
-
-impl<S: WriteStorage, H: CommonHistoryMode> Vm<S, H::VmBoojumIntegration> {
-    // Dump inner state of the VM.
-    pub(crate) fn dump_inner_state(&self) -> VmInstanceInnerState<H::VmBoojumIntegration> {
-        let event_sink = self.state.event_sink.clone();
-        let precompile_processor_state = PrecompileProcessorTestInnerState {
-            timestamp_history: self.state.precompiles_processor.timestamp_history.clone(),
-        };
-        let memory = self.state.memory.clone();
-        let decommitter_state = DecommitterTestInnerState {
-            modified_storage_keys: ModifiedKeysMap(
-                self.state
-                    .decommittment_processor
-                    .get_storage()
-                    .borrow()
-                    .modified_storage_keys()
-                    .clone(),
-            ),
-            known_bytecodes: self.state.decommittment_processor.known_bytecodes.clone(),
-            decommitted_code_hashes: self
-                .state
-                .decommittment_processor
-                .get_decommitted_code_hashes_with_history()
-                .clone(),
-        };
-        let storage_oracle_state = StorageOracleInnerState {
-            modified_storage_keys: ModifiedKeysMap(
-                self.state
-                    .storage
-                    .storage
-                    .get_ptr()
-                    .borrow()
-                    .modified_storage_keys()
-                    .clone(),
-            ),
-            frames_stack: self.state.storage.frames_stack.clone(),
-            pre_paid_changes: self.state.storage.pre_paid_changes.clone(),
-            paid_changes: self.state.storage.paid_changes.clone(),
-            initial_values: self.state.storage.initial_values.clone(),
-            returned_refunds: self.state.storage.returned_refunds.clone(),
-        };
-        let local_state = self.state.local_state.clone();
-
-        VmInstanceInnerState {
-            event_sink,
-            precompile_processor_state,
-            memory,
-            decommitter_state,
-            storage_oracle_state,
-            local_state,
-        }
-    }
-}
diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/mod.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/mod.rs
deleted file mode 100644
index dfe8905a7e0..00000000000
--- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/mod.rs
+++ /dev/null
@@ -1,7 +0,0 @@
-pub(crate) use transaction_test_info::{ExpectedError, TransactionTestInfo, TxModifier};
-pub(crate) use vm_tester::{default_l1_batch, InMemoryStorageView, VmTester, VmTesterBuilder};
-pub(crate) use zksync_test_account::{Account, DeployContractsTx, TxType};
-
-mod inner_state;
-mod transaction_test_info;
-mod vm_tester;
diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/transaction_test_info.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/transaction_test_info.rs
deleted file mode 100644
index 4d6572fe78a..00000000000
--- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/transaction_test_info.rs
+++ /dev/null
@@ -1,217 +0,0 @@
-use zksync_types::{ExecuteTransactionCommon, Transaction};
-
-use crate::{
-    interface::{
-        CurrentExecutionState, ExecutionResult, Halt, TxRevertReason, VmExecutionMode,
-        VmExecutionResultAndLogs, VmInterface, VmInterfaceHistoryEnabled, VmRevertReason,
-    },
-    vm_boojum_integration::{tests::tester::vm_tester::VmTester, HistoryEnabled},
-};
-
-#[derive(Debug, Clone)]
-pub(crate) enum TxModifier {
-    WrongSignatureLength,
-    WrongSignature,
-    WrongMagicValue,
-    WrongNonce,
-    NonceReused,
-}
-
-#[derive(Debug, Clone)]
-pub(crate) enum TxExpectedResult {
-    Rejected { error: ExpectedError },
-    Processed { rollback: bool },
-}
-
-#[derive(Debug, Clone)]
-pub(crate) struct TransactionTestInfo {
-    tx: Transaction,
-    result: TxExpectedResult,
-}
-
-#[derive(Debug, Clone)]
-pub(crate) struct ExpectedError {
-    pub(crate) revert_reason: TxRevertReason,
-    pub(crate) modifier: Option<TxModifier>,
-}
-
-impl From<TxModifier> for ExpectedError {
-    fn from(value: TxModifier) -> Self {
-        let revert_reason = match value {
-            TxModifier::WrongSignatureLength => {
-                Halt::ValidationFailed(VmRevertReason::General {
-                    msg: "Signature length is incorrect".to_string(),
-                    data: vec![
-                        8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                        0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 29, 83, 105, 103, 110, 97, 116, 117, 114, 101, 32,
-                        108, 101, 110, 103, 116, 104, 32, 105, 115, 32, 105, 110, 99, 111, 114, 114, 101, 99,
-                        116, 0, 0, 0,
-                    ],
-                })
-            }
-            TxModifier::WrongSignature => {
-                Halt::ValidationFailed(VmRevertReason::General {
-                    msg: "Account validation returned invalid magic value.
Most often this means that the signature is incorrect".to_string(), - data: vec![], - }) - } - TxModifier::WrongMagicValue => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "v is neither 27 nor 28".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 22, 118, 32, 105, 115, 32, 110, 101, 105, 116, 104, - 101, 114, 32, 50, 55, 32, 110, 111, 114, 32, 50, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - }) - - } - TxModifier::WrongNonce => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Incorrect nonce".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 73, 110, 99, 111, 114, 114, 101, 99, 116, 32, 110, - 111, 110, 99, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - }) - } - TxModifier::NonceReused => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Reusing the same nonce twice".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 82, 101, 117, 115, 105, 110, 103, 32, 116, 104, - 101, 32, 115, 97, 109, 101, 32, 110, 111, 110, 99, 101, 32, 116, 119, 105, 99, 101, 0, - 0, 0, 0, - ], - }) - } - }; - - ExpectedError { - revert_reason: TxRevertReason::Halt(revert_reason), - modifier: Some(value), - } - } -} - -impl TransactionTestInfo { - pub(crate) fn new_rejected( - mut transaction: Transaction, - expected_error: ExpectedError, - ) -> Self { - transaction.common_data = match transaction.common_data { - ExecuteTransactionCommon::L2(mut data) => { - if let Some(modifier) = &expected_error.modifier { - match modifier { - TxModifier::WrongSignatureLength => { - data.signature = data.signature[..data.signature.len() - 20].to_vec() - } - TxModifier::WrongSignature => data.signature = vec![27u8; 65], - TxModifier::WrongMagicValue => data.signature = vec![1u8; 65], - TxModifier::WrongNonce => { - // Do not need to modify signature for nonce error - } - TxModifier::NonceReused => { - // Do not need to modify signature for nonce error - } - } - } - ExecuteTransactionCommon::L2(data) - } - _ => panic!("L1 transactions are not supported"), - }; - - Self { - tx: transaction, - result: TxExpectedResult::Rejected { - error: expected_error, - }, - } - } - - pub(crate) fn new_processed(transaction: Transaction, should_be_rollbacked: bool) -> Self { - Self { - tx: transaction, - result: TxExpectedResult::Processed { - rollback: should_be_rollbacked, - }, - } - } - - fn verify_result(&self, result: &VmExecutionResultAndLogs) { - match &self.result { - TxExpectedResult::Rejected { error } => match &result.result { - ExecutionResult::Success { .. 
} => { - panic!("Transaction should be reverted {:?}", self.tx.nonce()) - } - ExecutionResult::Revert { output } => match &error.revert_reason { - TxRevertReason::TxReverted(expected) => { - assert_eq!(output, expected) - } - _ => { - panic!("Error types mismatch"); - } - }, - ExecutionResult::Halt { reason } => match &error.revert_reason { - TxRevertReason::Halt(expected) => { - assert_eq!(reason, expected) - } - _ => { - panic!("Error types mismatch"); - } - }, - }, - TxExpectedResult::Processed { .. } => { - assert!(!result.result.is_failed()); - } - } - } - - fn should_rollback(&self) -> bool { - match &self.result { - TxExpectedResult::Rejected { .. } => true, - TxExpectedResult::Processed { rollback } => *rollback, - } - } -} - -impl VmTester { - pub(crate) fn execute_and_verify_txs( - &mut self, - txs: &[TransactionTestInfo], - ) -> CurrentExecutionState { - for tx_test_info in txs { - self.execute_tx_and_verify(tx_test_info.clone()); - } - self.vm.execute(VmExecutionMode::Batch); - let mut state = self.vm.get_current_execution_state(); - state.used_contract_hashes.sort(); - state - } - - pub(crate) fn execute_tx_and_verify( - &mut self, - tx_test_info: TransactionTestInfo, - ) -> VmExecutionResultAndLogs { - let inner_state_before = self.vm.dump_inner_state(); - self.vm.make_snapshot(); - self.vm.push_transaction(tx_test_info.tx.clone()); - let result = self.vm.execute(VmExecutionMode::OneTx); - tx_test_info.verify_result(&result); - if tx_test_info.should_rollback() { - self.vm.rollback_to_the_latest_snapshot(); - let inner_state_after = self.vm.dump_inner_state(); - assert_eq!( - inner_state_before, inner_state_after, - "Inner state before and after rollback should be equal" - ); - } - result - } -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/vm_tester.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/vm_tester.rs deleted file mode 100644 index fcea03e12cc..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/vm_tester.rs +++ /dev/null @@ -1,295 +0,0 @@ -use std::marker::PhantomData; - -use zksync_contracts::BaseSystemContracts; -use crate::interface::storage::{InMemoryStorage, StoragePtr, StorageView, WriteStorage}; -use zksync_types::{ - block::MiniblockHasher, - get_code_key, get_is_account_key, - helpers::unix_timestamp_ms, - utils::{deployed_address_create, storage_key_for_eth_balance}, - Address, L1BatchNumber, L2ChainId, MiniblockNumber, Nonce, ProtocolVersionId, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; - -use crate::{ - interface::{ - L1BatchEnv, L2Block, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmInterface, - }, - vm_boojum_integration::{ - constants::BLOCK_GAS_LIMIT, - tests::{ - tester::{Account, TxType}, - utils::read_test_contract, - }, - utils::l2_blocks::load_last_l2_block, - Vm, - }, - HistoryMode, -}; - -pub(crate) type InMemoryStorageView = StorageView; - -pub(crate) struct VmTester { - pub(crate) vm: Vm, - pub(crate) storage: StoragePtr, - pub(crate) fee_account: Address, - pub(crate) deployer: Option, - pub(crate) test_contract: Option
, - pub(crate) rich_accounts: Vec, - pub(crate) custom_contracts: Vec, - _phantom: std::marker::PhantomData, -} - -impl VmTester { - pub(crate) fn deploy_test_contract(&mut self) { - let contract = read_test_contract(); - let tx = self - .deployer - .as_mut() - .expect("You have to initialize builder with deployer") - .get_deploy_tx(&contract, None, TxType::L2) - .tx; - let nonce = tx.nonce().unwrap().0.into(); - self.vm.push_transaction(tx); - self.vm.execute(VmExecutionMode::OneTx); - let deployed_address = - deployed_address_create(self.deployer.as_ref().unwrap().address, nonce); - self.test_contract = Some(deployed_address); - } - - pub(crate) fn reset_with_empty_storage(&mut self) { - self.storage = StorageView::new(get_empty_storage()).to_rc_ptr(); - self.reset_state(false); - } - - /// Reset the state of the VM to the initial state. - /// If `use_latest_l2_block` is true, then the VM will use the latest L2 block from storage, - /// otherwise it will use the first L2 block of l1 batch env - pub(crate) fn reset_state(&mut self, use_latest_l2_block: bool) { - for account in self.rich_accounts.iter_mut() { - account.nonce = Nonce(0); - make_account_rich(self.storage.clone(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(self.storage.clone(), deployer); - } - - if !self.custom_contracts.is_empty() { - println!("Inserting custom contracts is not yet supported") - // insert_contracts(&mut self.storage, &self.custom_contracts); - } - - let mut l1_batch = self.vm.batch_env.clone(); - if use_latest_l2_block { - let last_l2_block = load_last_l2_block(self.storage.clone()).unwrap_or(L2Block { - number: 0, - timestamp: 0, - hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - }); - l1_batch.first_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: std::cmp::max(last_l2_block.timestamp + 1, l1_batch.timestamp), - prev_block_hash: last_l2_block.hash, - max_virtual_blocks_to_create: 1, - }; - } - - let vm = Vm::new(l1_batch, self.vm.system_env.clone(), self.storage.clone()); - - if self.test_contract.is_some() { - self.deploy_test_contract(); - } - - self.vm = vm; - } -} - -pub(crate) type ContractsToDeploy = (Vec, Address, bool); - -pub(crate) struct VmTesterBuilder { - storage: Option, - l1_batch_env: Option, - system_env: SystemEnv, - deployer: Option, - rich_accounts: Vec, - custom_contracts: Vec, - _phantom: PhantomData, -} - -impl Clone for VmTesterBuilder { - fn clone(&self) -> Self { - Self { - storage: None, - l1_batch_env: self.l1_batch_env.clone(), - system_env: self.system_env.clone(), - deployer: self.deployer.clone(), - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - _phantom: PhantomData, - } - } -} - -#[allow(dead_code)] -impl VmTesterBuilder { - pub(crate) fn new(_: H) -> Self { - Self { - storage: None, - l1_batch_env: None, - system_env: SystemEnv { - zk_porter_available: false, - version: ProtocolVersionId::latest(), - base_system_smart_contracts: BaseSystemContracts::playground(), - gas_limit: BLOCK_GAS_LIMIT, - execution_mode: TxExecutionMode::VerifyExecute, - default_validation_computational_gas_limit: BLOCK_GAS_LIMIT, - chain_id: L2ChainId::from(270), - }, - deployer: None, - rich_accounts: vec![], - custom_contracts: vec![], - _phantom: PhantomData, - } - } - - pub(crate) fn with_l1_batch_env(mut self, l1_batch_env: L1BatchEnv) -> Self { - self.l1_batch_env = Some(l1_batch_env); - self - } - - pub(crate) fn with_system_env(mut self, system_env: SystemEnv) -> Self { - 
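// The snapshot/rollback check in `execute_tx_and_verify` earlier in this file demands a
// bit-for-bit restore: the inner state dumped before `make_snapshot` must equal the dump
// taken after `rollback_to_the_latest_snapshot`. A dependency-free model of that
// invariant (hypothetical `MiniVm`, not the zksync types; illustration only):
#[derive(Clone, Debug, Default, PartialEq)]
struct MiniVm {
    storage: Vec<(u32, u32)>,
    snapshots: Vec<Vec<(u32, u32)>>,
}
impl MiniVm {
    fn make_snapshot(&mut self) {
        self.snapshots.push(self.storage.clone());
    }
    fn rollback_to_the_latest_snapshot(&mut self) {
        self.storage = self.snapshots.pop().expect("no snapshot to roll back to");
    }
}
#[test]
fn rollback_restores_inner_state() {
    let mut vm = MiniVm::default();
    vm.storage.push((0, 7)); // pre-existing state
    let inner_state_before = vm.storage.clone();
    vm.make_snapshot();
    vm.storage.push((1, 42)); // side effects of a tx that ends up rejected
    vm.rollback_to_the_latest_snapshot();
    assert_eq!(inner_state_before, vm.storage);
}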
self.system_env = system_env; - self - } - - pub(crate) fn with_storage(mut self, storage: InMemoryStorage) -> Self { - self.storage = Some(storage); - self - } - - pub(crate) fn with_base_system_smart_contracts( - mut self, - base_system_smart_contracts: BaseSystemContracts, - ) -> Self { - self.system_env.base_system_smart_contracts = base_system_smart_contracts; - self - } - - pub(crate) fn with_gas_limit(mut self, gas_limit: u32) -> Self { - self.system_env.gas_limit = gas_limit; - self - } - - pub(crate) fn with_execution_mode(mut self, execution_mode: TxExecutionMode) -> Self { - self.system_env.execution_mode = execution_mode; - self - } - - pub(crate) fn with_empty_in_memory_storage(mut self) -> Self { - self.storage = Some(get_empty_storage()); - self - } - - pub(crate) fn with_random_rich_accounts(mut self, number: u32) -> Self { - for _ in 0..number { - let account = Account::random(); - self.rich_accounts.push(account); - } - self - } - - pub(crate) fn with_rich_accounts(mut self, accounts: Vec) -> Self { - self.rich_accounts.extend(accounts); - self - } - - pub(crate) fn with_deployer(mut self) -> Self { - let deployer = Account::random(); - self.deployer = Some(deployer); - self - } - - pub(crate) fn with_custom_contracts(mut self, contracts: Vec) -> Self { - self.custom_contracts = contracts; - self - } - - pub(crate) fn build(self) -> VmTester { - let l1_batch_env = self - .l1_batch_env - .unwrap_or_else(|| default_l1_batch(L1BatchNumber(1))); - - let mut raw_storage = self.storage.unwrap_or_else(get_empty_storage); - insert_contracts(&mut raw_storage, &self.custom_contracts); - let storage_ptr = StorageView::new(raw_storage).to_rc_ptr(); - for account in self.rich_accounts.iter() { - make_account_rich(storage_ptr.clone(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(storage_ptr.clone(), deployer); - } - let fee_account = l1_batch_env.fee_account; - - let vm = Vm::new(l1_batch_env, self.system_env, storage_ptr.clone()); - - VmTester { - vm, - storage: storage_ptr, - fee_account, - deployer: self.deployer, - test_contract: None, - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - _phantom: PhantomData, - } - } -} - -pub(crate) fn default_l1_batch(number: L1BatchNumber) -> L1BatchEnv { - let timestamp = unix_timestamp_ms(); - L1BatchEnv { - previous_batch_hash: None, - number, - timestamp, - l1_gas_price: 50_000_000_000, // 50 gwei - fair_l2_gas_price: 250_000_000, // 0.25 gwei - fee_account: Address::random(), - enforced_base_fee: None, - first_l2_block: L2BlockEnv { - number: 1, - timestamp, - prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 100, - }, - } -} - -pub(crate) fn make_account_rich(storage: StoragePtr, account: &Account) { - let key = storage_key_for_eth_balance(&account.address); - storage - .as_ref() - .borrow_mut() - .set_value(key, u256_to_h256(U256::from(10u64.pow(19)))); -} - -pub(crate) fn get_empty_storage() -> InMemoryStorage { - InMemoryStorage::with_system_contracts(hash_bytecode) -} - -// Inserts the contracts into the test environment, bypassing the -// deployer system contract. Besides the reference to storage -// it accepts a `contracts` tuple of information about the contract -// and whether or not it is an account. 
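// A note on the constant in `make_account_rich` above: 10u64.pow(19) wei is 10 ETH
// (1 ETH = 10^18 wei), which comfortably covers the fees for every transaction these
// tests submit.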
-fn insert_contracts(raw_storage: &mut InMemoryStorage, contracts: &[ContractsToDeploy]) { - for (contract, address, is_account) in contracts { - let deployer_code_key = get_code_key(address); - raw_storage.set_value(deployer_code_key, hash_bytecode(contract)); - - if *is_account { - let is_account_key = get_is_account_key(address); - raw_storage.set_value(is_account_key, u256_to_h256(1_u32.into())); - } - - raw_storage.store_factory_dep(hash_bytecode(contract), contract.clone()); - } -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/tracing_execution_error.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/tracing_execution_error.rs deleted file mode 100644 index 8c538dcf9bf..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/tracing_execution_error.rs +++ /dev/null @@ -1,54 +0,0 @@ -use zksync_types::{Execute, H160}; - -use crate::{ - interface::{TxExecutionMode, TxRevertReason, VmRevertReason}, - vm_boojum_integration::{ - tests::{ - tester::{ExpectedError, TransactionTestInfo, VmTesterBuilder}, - utils::{get_execute_error_calldata, read_error_contract, BASE_SYSTEM_CONTRACTS}, - }, - HistoryEnabled, - }, -}; - -#[test] -fn test_tracing_of_execution_errors() { - let contract_address = H160::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_custom_contracts(vec![(read_error_contract(), contract_address, false)]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address, - calldata: get_execute_error_calldata(), - value: Default::default(), - factory_deps: Some(vec![]), - }, - None, - ); - - vm.execute_tx_and_verify(TransactionTestInfo::new_rejected( - tx, - ExpectedError { - revert_reason: TxRevertReason::TxReverted(VmRevertReason::General { - msg: "short".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 115, 104, 111, 114, 116, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, - ], - }), - modifier: None, - }, - )); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/upgrade.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/upgrade.rs deleted file mode 100644 index bc3d62f62a1..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/upgrade.rs +++ /dev/null @@ -1,362 +0,0 @@ -use zk_evm_1_4_0::aux_structures::Timestamp; -use zksync_contracts::{deployer_contract, load_contract, load_sys_contract, read_bytecode}; -use crate::interface::storage::WriteStorage; -use zksync_test_account::TxType; -use zksync_types::{ - ethabi::{Contract, Token}, - get_code_key, get_known_code_key, - protocol_version::ProtocolUpgradeTxCommonData, - Address, Execute, ExecuteTransactionCommon, Transaction, COMPLEX_UPGRADER_ADDRESS, - CONTRACT_DEPLOYER_ADDRESS, CONTRACT_FORCE_DEPLOYER_ADDRESS, H160, H256, - REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use super::utils::read_test_contract; -use crate::{ - interface::{ - ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, 
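// The expected `data` in `test_tracing_of_execution_errors` above follows the same
// Error(string) layout as the validation failures earlier: selector 0x08c379a0,
// offset word 32, length word 5, then [115, 104, 111, 114, 116] ("short") zero-padded
// to a full 32-byte word — the layout reproduced by the `encode_error_string` sketch.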
VmInterface, - VmInterfaceHistoryEnabled, - }, - vm_boojum_integration::{ - tests::{tester::VmTesterBuilder, utils::verify_required_storage}, - HistoryEnabled, - }, -}; - -/// In this test we ensure that the requirements for protocol upgrade transactions are enforced by the bootloader: -/// - This transaction must be the only one in block -/// - If present, this transaction must be the first one in block -#[test] -fn test_protocol_upgrade_is_first() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let bytecode_hash = hash_bytecode(&read_test_contract()); - vm.vm - .storage - .borrow_mut() - .set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); - - // Here we just use some random transaction of protocol upgrade type: - let protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecodehash to - address: H160::random(), - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - // Another random upgrade transaction - let another_protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecodehash to - address: H160::random(), - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - let normal_l1_transaction = vm.rich_accounts[0] - .get_deploy_tx(&read_test_contract(), None, TxType::L1 { serial_id: 0 }) - .tx; - - let expected_error = - Halt::UnexpectedVMBehavior("Assertion error: Protocol upgrade tx not first".to_string()); - - vm.vm.make_snapshot(); - // Test 1: there must be only one system transaction in block - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(another_protocol_upgrade_transaction); - - vm.vm.execute(VmExecutionMode::OneTx); - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error.clone() - } - ); - - // Test 2: the protocol upgrade tx must be the first one in block - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error - } - ); - - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(protocol_upgrade_transaction); - vm.vm.push_transaction(normal_l1_transaction); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); -} - -/// In this test we try to test how force deployments could be done via protocol upgrade transactions. 
-#[test] -fn test_force_deploy_upgrade() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - let bytecode_hash = hash_bytecode(&read_test_contract()); - - let known_code_key = get_known_code_key(&bytecode_hash); - // It is generally expected that all the keys will be set as known prior to the protocol upgrade. - storage_view - .borrow_mut() - .set_value(known_code_key, u256_to_h256(1.into())); - drop(storage_view); - - let address_to_deploy = H160::random(); - // Here we just use some random transaction of protocol upgrade type: - let transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecodehash to - address: address_to_deploy, - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - vm.vm.push_transaction(transaction); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = vec![(bytecode_hash, get_code_key(&address_to_deploy))]; - - // Verify that the bytecode has been set correctly - verify_required_storage(&vm.vm.state, expected_slots); -} - -/// Here we show how the work with the complex upgrader could be done -#[test] -fn test_complex_upgrader() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - - let bytecode_hash = hash_bytecode(&read_complex_upgrade()); - let msg_sender_test_hash = hash_bytecode(&read_msg_sender_test()); - - // Let's assume that the bytecode for the implementation of the complex upgrade - // is already deployed in some address in userspace - let upgrade_impl = H160::random(); - let account_code_key = get_code_key(&upgrade_impl); - - storage_view - .borrow_mut() - .set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); - storage_view.borrow_mut().set_value( - get_known_code_key(&msg_sender_test_hash), - u256_to_h256(1.into()), - ); - storage_view - .borrow_mut() - .set_value(account_code_key, bytecode_hash); - drop(storage_view); - - vm.vm.state.decommittment_processor.populate( - vec![ - ( - h256_to_u256(bytecode_hash), - bytes_to_be_words(read_complex_upgrade()), - ), - ( - h256_to_u256(msg_sender_test_hash), - bytes_to_be_words(read_msg_sender_test()), - ), - ], - Timestamp(0), - ); - - let address_to_deploy1 = H160::random(); - let address_to_deploy2 = H160::random(); - - let transaction = get_complex_upgrade_tx( - upgrade_impl, - address_to_deploy1, - address_to_deploy2, - bytecode_hash, - ); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = vec![ - (bytecode_hash, get_code_key(&address_to_deploy1)), - (bytecode_hash, get_code_key(&address_to_deploy2)), - ]; - - // Verify that the bytecode has been set correctly - verify_required_storage(&vm.vm.state, expected_slots); -} - -#[derive(Debug, Clone)] -struct ForceDeployment { - // The bytecode hash to put on an 
address
-    bytecode_hash: H256,
-    // The address to deploy the bytecode hash to
-    address: Address,
-    // Whether to run the constructor on the force deployment
-    call_constructor: bool,
-    // The value with which to initialize a contract
-    value: U256,
-    // The constructor calldata
-    input: Vec<u8>,
-}
-
-fn get_forced_deploy_tx(deployment: &[ForceDeployment]) -> Transaction {
-    let deployer = deployer_contract();
-    let contract_function = deployer.function("forceDeployOnAddresses").unwrap();
-
-    let encoded_deployments: Vec<_> = deployment
-        .iter()
-        .map(|deployment| {
-            Token::Tuple(vec![
-                Token::FixedBytes(deployment.bytecode_hash.as_bytes().to_vec()),
-                Token::Address(deployment.address),
-                Token::Bool(deployment.call_constructor),
-                Token::Uint(deployment.value),
-                Token::Bytes(deployment.input.clone()),
-            ])
-        })
-        .collect();
-
-    let params = [Token::Array(encoded_deployments)];
-
-    let calldata = contract_function
-        .encode_input(&params)
-        .expect("failed to encode parameters");
-
-    let execute = Execute {
-        contract_address: CONTRACT_DEPLOYER_ADDRESS,
-        calldata,
-        factory_deps: None,
-        value: U256::zero(),
-    };
-
-    Transaction {
-        common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData {
-            sender: CONTRACT_FORCE_DEPLOYER_ADDRESS,
-            gas_limit: U256::from(200_000_000u32),
-            gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(),
-            ..Default::default()
-        }),
-        execute,
-        received_timestamp_ms: 0,
-        raw_bytes: None,
-    }
-}
-
-// Returns the transaction that performs a complex protocol upgrade.
-// The first param is the address of the implementation of the complex upgrade
-// in user-space, while the next 3 params are params of the implementation itself.
-// For the explanation of the parameters, please refer to:
-// etc/contracts-test-data/complex-upgrade/complex-upgrade.sol
-fn get_complex_upgrade_tx(
-    implementation_address: Address,
-    address1: Address,
-    address2: Address,
-    bytecode_hash: H256,
-) -> Transaction {
-    let impl_contract = get_complex_upgrade_abi();
-    let impl_function = impl_contract.function("someComplexUpgrade").unwrap();
-    let impl_calldata = impl_function
-        .encode_input(&[
-            Token::Address(address1),
-            Token::Address(address2),
-            Token::FixedBytes(bytecode_hash.as_bytes().to_vec()),
-        ])
-        .unwrap();
-
-    let complex_upgrader = get_complex_upgrader_abi();
-    let upgrade_function = complex_upgrader.function("upgrade").unwrap();
-    let complex_upgrader_calldata = upgrade_function
-        .encode_input(&[
-            Token::Address(implementation_address),
-            Token::Bytes(impl_calldata),
-        ])
-        .unwrap();
-
-    let execute = Execute {
-        contract_address: COMPLEX_UPGRADER_ADDRESS,
-        calldata: complex_upgrader_calldata,
-        factory_deps: None,
-        value: U256::zero(),
-    };
-
-    Transaction {
-        common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData {
-            sender: CONTRACT_FORCE_DEPLOYER_ADDRESS,
-            gas_limit: U256::from(200_000_000u32),
-            gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(),
-            ..Default::default()
-        }),
-        execute,
-        received_timestamp_ms: 0,
-        raw_bytes: None,
-    }
-}
-
-fn read_complex_upgrade() -> Vec<u8> {
-    read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json")
-}
-
-fn read_msg_sender_test() -> Vec<u8> {
-    read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/msg-sender.sol/MsgSenderTest.json")
-}
-
-fn get_complex_upgrade_abi() -> Contract {
-    load_contract(
"etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json" - ) -} - -fn get_complex_upgrader_abi() -> Contract { - load_sys_contract("ComplexUpgrader") -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/utils.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/utils.rs deleted file mode 100644 index 4fba188ac5b..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/utils.rs +++ /dev/null @@ -1,111 +0,0 @@ -use ethabi::Contract; -use once_cell::sync::Lazy; -use zksync_contracts::{ - load_contract, read_bytecode, read_zbin_bytecode, BaseSystemContracts, SystemContractCode, -}; -use crate::interface::storage::{StoragePtr, WriteStorage}; -use zksync_types::{ - utils::storage_key_for_standard_token_balance, AccountTreeId, Address, StorageKey, H256, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use crate::vm_boojum_integration::{ - tests::tester::InMemoryStorageView, types::internals::ZkSyncVmState, HistoryMode, -}; - -pub(crate) static BASE_SYSTEM_CONTRACTS: Lazy = - Lazy::new(BaseSystemContracts::load_from_disk); - -// Probably make it a part of vm tester -pub(crate) fn verify_required_storage( - state: &ZkSyncVmState, - required_values: Vec<(H256, StorageKey)>, -) { - for (required_value, key) in required_values { - let current_value = state.storage.storage.read_from_storage(&key); - - assert_eq!( - u256_to_h256(current_value), - required_value, - "Invalid value at key {key:?}" - ); - } -} - -pub(crate) fn verify_required_memory( - state: &ZkSyncVmState, - required_values: Vec<(U256, u32, u32)>, -) { - for (required_value, memory_page, cell) in required_values { - let current_value = state - .memory - .read_slot(memory_page as usize, cell as usize) - .value; - assert_eq!(current_value, required_value); - } -} - -pub(crate) fn get_balance( - token_id: AccountTreeId, - account: &Address, - main_storage: StoragePtr, -) -> U256 { - let key = storage_key_for_standard_token_balance(token_id, account); - h256_to_u256(main_storage.borrow_mut().read_value(&key)) -} - -pub(crate) fn read_test_contract() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json") -} - -pub(crate) fn get_bootloader(test: &str) -> SystemContractCode { - let bootloader_code = read_zbin_bytecode(format!( - "contracts/system-contracts/bootloader/tests/artifacts/{}.yul.zbin", - test - )); - - let bootloader_hash = hash_bytecode(&bootloader_code); - SystemContractCode { - code: bytes_to_be_words(bootloader_code), - hash: bootloader_hash, - } -} - -pub(crate) fn read_nonce_holder_tester() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/custom-account/nonce-holder-test.sol/NonceHolderTest.json") -} - -pub(crate) fn read_error_contract() -> Vec { - read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ) -} - -pub(crate) fn get_execute_error_calldata() -> Vec { - let test_contract = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ); - - let function = test_contract.function("require_short").unwrap(); - - function - .encode_input(&[]) - .expect("failed to encode parameters") -} - -pub(crate) fn read_many_owners_custom_account_contract() -> (Vec, Contract) { - let path = "etc/contracts-test-data/artifacts-zk/contracts/custom-account/many-owners-custom-account.sol/ManyOwnersCustomAccount.json"; - 
(read_bytecode(path), load_contract(path)) -} - -pub(crate) fn read_max_depth_contract() -> Vec { - read_zbin_bytecode( - "core/tests/ts-integration/contracts/zkasm/artifacts/deep_stak.zkasm/deep_stak.zkasm.zbin", - ) -} - -pub(crate) fn read_precompiles_contract() -> Vec { - read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/precompiles/precompiles.sol/Precompiles.json", - ) -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tracers/pubdata_tracer.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tracers/pubdata_tracer.rs index 326a5789612..2f7d141cb0a 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tracers/pubdata_tracer.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/tracers/pubdata_tracer.rs @@ -10,6 +10,7 @@ use zksync_utils::{h256_to_u256, u256_to_bytes_be, u256_to_h256}; use crate::{ interface::{ + pubdata::L1MessengerL2ToL1Log, storage::{StoragePtr, WriteStorage}, tracer::{TracerExecutionStatus, TracerExecutionStopReason}, L1BatchEnv, VmEvent, VmExecutionMode, @@ -17,7 +18,7 @@ use crate::{ tracers::dynamic::vm_1_4_0::DynTracer, utils::events::{ extract_bytecode_publication_requests_from_l1_messenger, - extract_l2tol1logs_from_l1_messenger, L1MessengerL2ToL1Log, + extract_l2tol1logs_from_l1_messenger, }, vm_boojum_integration::{ bootloader_state::{utils::apply_pubdata_to_memory, BootloaderState}, diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/types/internals/pubdata.rs b/core/lib/multivm/src/versions/vm_boojum_integration/types/internals/pubdata.rs index 9df9009831f..152ccad2fbc 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/types/internals/pubdata.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/types/internals/pubdata.rs @@ -1,6 +1,6 @@ use zksync_types::writes::{compress_state_diffs, StateDiffRecord}; -use crate::utils::events::L1MessengerL2ToL1Log; +use crate::interface::pubdata::L1MessengerL2ToL1Log; /// Struct based on which the pubdata blob is formed #[derive(Debug, Clone, Default)] diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs b/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs index 17ce8365a0a..43c9900486d 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs @@ -1,17 +1,19 @@ +use std::rc::Rc; + use circuit_sequencer_api_1_4_0::sort_storage_access::sort_storage_access_queries; use zksync_types::{ l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, Transaction, }; +use zksync_vm_interface::{pubdata::PubdataBuilder, InspectExecutionMode}; use crate::{ glue::GlueInto, interface::{ storage::{StoragePtr, WriteStorage}, BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, - FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, + FinishedL1Batch, L1BatchEnv, L2BlockEnv, PushTransactionResult, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, - VmMemoryMetrics, }, utils::events::extract_l2tol1logs_from_l1_messenger, vm_boojum_integration::{ @@ -82,18 +84,23 @@ impl Vm { impl VmInterface for Vm { type TracerDispatcher = TracerDispatcher; - /// Push tx into memory for the future execution - fn push_transaction(&mut self, tx: Transaction) { + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult { self.push_transaction_with_compression(tx, true); + PushTransactionResult { + compressed_bytecodes: self + .bootloader_state + 
.get_last_tx_compressed_bytecodes() + .into(), + } } /// Execute VM with custom tracers. fn inspect( &mut self, tracer: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { - self.inspect_inner(tracer, execution_mode) + self.inspect_inner(tracer, execution_mode.into()) } fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { @@ -125,12 +132,8 @@ impl VmInterface for Vm { } } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - self.record_vm_memory_metrics_inner() - } - - fn finish_batch(&mut self) -> FinishedL1Batch { - let result = self.inspect(&mut TracerDispatcher::default(), VmExecutionMode::Batch); + fn finish_batch(&mut self, _pubdata_builder: Rc) -> FinishedL1Batch { + let result = self.inspect_inner(&mut TracerDispatcher::default(), VmExecutionMode::Batch); let execution_state = self.get_current_execution_state(); let bootloader_memory = self.bootloader_state.bootloader_memory(); FinishedL1Batch { diff --git a/core/lib/multivm/src/versions/vm_fast/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_fast/bootloader_state/utils.rs index f280f56a828..770f232019b 100644 --- a/core/lib/multivm/src/versions/vm_fast/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_fast/bootloader_state/utils.rs @@ -171,8 +171,8 @@ pub(super) fn assemble_tx_meta(execution_mode: TxExecutionMode, execute_tx: bool // Set 0 byte (execution mode) output[0] = match execution_mode { TxExecutionMode::VerifyExecute => 0x00, - TxExecutionMode::EstimateFee { .. } => 0x00, - TxExecutionMode::EthCall { .. } => 0x02, + TxExecutionMode::EstimateFee => 0x00, + TxExecutionMode::EthCall => 0x02, }; // Set 31 byte (marker for tx execution) diff --git a/core/lib/multivm/src/versions/vm_fast/circuits_tracer.rs b/core/lib/multivm/src/versions/vm_fast/circuits_tracer.rs index b48ec7eacb0..f588f20ab25 100644 --- a/core/lib/multivm/src/versions/vm_fast/circuits_tracer.rs +++ b/core/lib/multivm/src/versions/vm_fast/circuits_tracer.rs @@ -1,5 +1,5 @@ use circuit_sequencer_api_1_5_0::{geometry_config::get_geometry_config, toolset::GeometryConfig}; -use zksync_vm2::interface::{CycleStats, Opcode, OpcodeType, StateInterface, Tracer}; +use zksync_vm2::interface::{CycleStats, GlobalStateInterface, Opcode, OpcodeType, Tracer}; use zksync_vm_interface::CircuitStatistic; use crate::vm_latest::tracers::circuits_capacity::*; @@ -24,7 +24,7 @@ pub struct CircuitsTracer { } impl Tracer for CircuitsTracer { - fn after_instruction(&mut self, _state: &mut S) { + fn after_instruction(&mut self, _: &mut S) { self.main_vm_cycles += 1; match OP::VALUE { diff --git a/core/lib/multivm/src/versions/vm_fast/mod.rs b/core/lib/multivm/src/versions/vm_fast/mod.rs index d4804a98c25..733ca9d82fc 100644 --- a/core/lib/multivm/src/versions/vm_fast/mod.rs +++ b/core/lib/multivm/src/versions/vm_fast/mod.rs @@ -12,7 +12,7 @@ mod initial_bootloader_memory; mod pubdata; mod refund; // FIXME(EVM-711): restore tests for fast VM once it is integrated -// #[cfg(test)] -// mod tests; +#[cfg(test)] +mod tests; mod transaction_data; mod vm; diff --git a/core/lib/multivm/src/versions/vm_fast/pubdata.rs b/core/lib/multivm/src/versions/vm_fast/pubdata.rs index d07732ae435..c1ca93152a0 100644 --- a/core/lib/multivm/src/versions/vm_fast/pubdata.rs +++ b/core/lib/multivm/src/versions/vm_fast/pubdata.rs @@ -1,6 +1,6 @@ use zksync_types::writes::{compress_state_diffs, StateDiffRecord}; -use crate::utils::events::L1MessengerL2ToL1Log; +use 
crate::interface::pubdata::L1MessengerL2ToL1Log; /// Struct based on which the pubdata blob is formed #[derive(Debug, Clone, Default)] diff --git a/core/lib/multivm/src/versions/vm_fast/tests/block_tip.rs b/core/lib/multivm/src/versions/vm_fast/tests/block_tip.rs index dd407c61668..bb66eb2f770 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/block_tip.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/block_tip.rs @@ -1,392 +1,6 @@ -use std::borrow::BorrowMut; - -use ethabi::Token; -use itertools::Itertools; -use zksync_contracts::load_sys_contract; -use zksync_system_constants::{ - CONTRACT_FORCE_DEPLOYER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS, -}; -use zksync_types::{ - commitment::SerializeCommitment, fee_model::BatchFeeInput, get_code_key, - l2_to_l1_log::L2ToL1Log, writes::StateDiffRecord, Address, Execute, H256, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; - -use super::{ - tester::{get_empty_storage, VmTesterBuilder}, - utils::{get_complex_upgrade_abi, read_complex_upgrade}, -}; -use crate::{ - interface::{L1BatchEnv, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - versions::testonly::default_l1_batch, - vm_latest::constants::{ - BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD, - BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD, BOOTLOADER_BATCH_TIP_OVERHEAD, - MAX_VM_PUBDATA_PER_BATCH, - }, -}; - -#[derive(Debug, Clone, Default)] -struct L1MessengerTestData { - l2_to_l1_logs: usize, - messages: Vec>, - bytecodes: Vec>, - state_diffs: Vec, -} - -struct MimicCallInfo { - to: Address, - who_to_mimic: Address, - data: Vec, -} - -const CALLS_PER_TX: usize = 1_000; -fn populate_mimic_calls(data: L1MessengerTestData) -> Vec> { - let complex_upgrade = get_complex_upgrade_abi(); - let l1_messenger = load_sys_contract("L1Messenger"); - - let logs_mimic_calls = (0..data.l2_to_l1_logs).map(|_| MimicCallInfo { - to: L1_MESSENGER_ADDRESS, - who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, - data: l1_messenger - .function("sendL2ToL1Log") - .unwrap() - .encode_input(&[ - Token::Bool(false), - Token::FixedBytes(H256::random().0.to_vec()), - Token::FixedBytes(H256::random().0.to_vec()), - ]) - .unwrap(), - }); - let messages_mimic_calls = data.messages.iter().map(|message| MimicCallInfo { - to: L1_MESSENGER_ADDRESS, - who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, - data: l1_messenger - .function("sendToL1") - .unwrap() - .encode_input(&[Token::Bytes(message.clone())]) - .unwrap(), - }); - let bytecodes_mimic_calls = data.bytecodes.iter().map(|bytecode| MimicCallInfo { - to: L1_MESSENGER_ADDRESS, - who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, - data: l1_messenger - .function("requestBytecodeL1Publication") - .unwrap() - .encode_input(&[Token::FixedBytes(hash_bytecode(bytecode).0.to_vec())]) - .unwrap(), - }); - - let encoded_calls = logs_mimic_calls - .chain(messages_mimic_calls) - .chain(bytecodes_mimic_calls) - .map(|call| { - Token::Tuple(vec![ - Token::Address(call.to), - Token::Address(call.who_to_mimic), - Token::Bytes(call.data), - ]) - }) - .chunks(CALLS_PER_TX) - .into_iter() - .map(|chunk| { - complex_upgrade - .function("mimicCalls") - .unwrap() - .encode_input(&[Token::Array(chunk.collect_vec())]) - .unwrap() - }) - .collect_vec(); - - encoded_calls -} - -struct TestStatistics { - pub max_used_gas: u32, - pub circuit_statistics: u64, - pub execution_metrics_size: u64, -} - -struct StatisticsTagged { - pub statistics: TestStatistics, - pub tag: String, -} - -fn execute_test(test_data: L1MessengerTestData) -> TestStatistics { - let 
mut storage = get_empty_storage(); - let complex_upgrade_code = read_complex_upgrade(); - - // For this test we'll just put the bytecode onto the force deployer address - storage.borrow_mut().set_value( - get_code_key(&CONTRACT_FORCE_DEPLOYER_ADDRESS), - hash_bytecode(&complex_upgrade_code), - ); - storage - .borrow_mut() - .store_factory_dep(hash_bytecode(&complex_upgrade_code), complex_upgrade_code); - - // We are measuring computational cost, so prices for pubdata don't matter, while they artificially dilute - // the gas limit - let batch_env = L1BatchEnv { - fee_input: BatchFeeInput::pubdata_independent(100_000, 100_000, 100_000), - ..default_l1_batch(zksync_types::L1BatchNumber(1)) - }; - - let mut vm = VmTesterBuilder::new() - .with_storage(storage) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .with_l1_batch_env(batch_env) - .build(); - - let bytecodes = test_data.bytecodes.iter().map(Vec::as_slice); - vm.vm.insert_bytecodes(bytecodes); - - let txs_data = populate_mimic_calls(test_data.clone()); - let account = &mut vm.rich_accounts[0]; - - for (i, data) in txs_data.into_iter().enumerate() { - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(CONTRACT_FORCE_DEPLOYER_ADDRESS), - calldata: data, - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "Transaction {i} wasn't successful for input: {test_data:#?}" - ); - } - - // Now we count how much gas was spent at the end of the batch - // It is assumed that the top level frame is the bootloader - vm.vm.enforce_state_diffs(test_data.state_diffs.clone()); - let gas_before = vm.vm.gas_remaining(); - - let result = vm.vm.execute(VmExecutionMode::Batch); - assert!( - !result.result.is_failed(), - "Batch wasn't successful for input: {test_data:?}" - ); - let gas_after = vm.vm.gas_remaining(); - assert_eq!((gas_before - gas_after) as u64, result.statistics.gas_used); - - TestStatistics { - max_used_gas: gas_before - gas_after, - circuit_statistics: result.statistics.circuit_statistic.total() as u64, - execution_metrics_size: result.get_execution_metrics(None).size() as u64, - } -} - -fn generate_state_diffs( - repeated_writes: bool, - small_diff: bool, - number_of_state_diffs: usize, -) -> Vec { - (0..number_of_state_diffs) - .map(|i| { - let address = Address::from_low_u64_be(i as u64); - let key = U256::from(i); - let enumeration_index = if repeated_writes { i + 1 } else { 0 }; - - let (initial_value, final_value) = if small_diff { - // As small as it gets, one byte to denote zeroing out the value - (U256::from(1), U256::from(0)) - } else { - // As large as it gets - (U256::from(0), U256::from(2).pow(255.into())) - }; - - StateDiffRecord { - address, - key, - derived_key: u256_to_h256(i.into()).0, - enumeration_index: enumeration_index as u64, - initial_value, - final_value, - } - }) - .collect() -} - -// A valid zkEVM bytecode has odd number of 32 byte words -fn get_valid_bytecode_length(length: usize) -> usize { - // Firstly ensure that the length is divisible by 32 - let length_padded_to_32 = if length % 32 == 0 { - length - } else { - length + 32 - (length % 32) - }; - - // Then we ensure that the number returned by division by 32 is odd - if length_padded_to_32 % 64 == 0 { - length_padded_to_32 + 32 - } else { - length_padded_to_32 - } -} +use crate::{versions::testonly::block_tip::test_dry_run_upper_bound, vm_fast::Vm}; 
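// A quick property check for `get_valid_bytecode_length` above (a sketch, not part of
// the deleted file): the returned length is always a whole number of 32-byte words,
// and that word count is always odd, as the comment above requires.
#[test]
fn padded_bytecode_length_is_odd_in_words() {
    for len in [0, 1, 31, 32, 33, 64, 95, 96, 1_000] {
        let padded = get_valid_bytecode_length(len);
        assert!(padded >= len);
        assert_eq!(padded % 32, 0);
        assert_eq!((padded / 32) % 2, 1);
    }
}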
#[test] -fn test_dry_run_upper_bound() { - // Some of the pubdata is consumed by constant fields (such as length of messages, number of logs, etc.). - // While this leaves some room for error, at the end of the test we require that the `BOOTLOADER_BATCH_TIP_OVERHEAD` - // is sufficient with a very large margin, so it is okay to ignore 1% of possible pubdata. - const MAX_EFFECTIVE_PUBDATA_PER_BATCH: usize = - (MAX_VM_PUBDATA_PER_BATCH as f64 * 0.99) as usize; - - // We are re-using the `ComplexUpgrade` contract as it already has the `mimicCall` functionality. - // To get the upper bound, we'll try to do the following: - // 1. Max number of logs. - // 2. Lots of small L2->L1 messages / one large L2->L1 message. - // 3. Lots of small bytecodes / one large bytecode. - // 4. Lots of storage slot updates. - - let statistics = vec![ - // max logs - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - l2_to_l1_logs: MAX_EFFECTIVE_PUBDATA_PER_BATCH / L2ToL1Log::SERIALIZED_SIZE, - ..Default::default() - }), - tag: "max_logs".to_string(), - }, - // max messages - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each L2->L1 message is accompanied by a Log + its length, which is a 4 byte number, - // so the max number of pubdata is bound by it - messages: vec![ - vec![0; 0]; - MAX_EFFECTIVE_PUBDATA_PER_BATCH / (L2ToL1Log::SERIALIZED_SIZE + 4) - ], - ..Default::default() - }), - tag: "max_messages".to_string(), - }, - // long message - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each L2->L1 message is accompanied by a Log, so the max number of pubdata is bound by it - messages: vec![vec![0; MAX_EFFECTIVE_PUBDATA_PER_BATCH]; 1], - ..Default::default() - }), - tag: "long_message".to_string(), - }, - // max bytecodes - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each bytecode must be at least 32 bytes long. 
- // Each uncompressed bytecode is accompanied by its length, which is a 4-byte number
-                bytecodes: vec![vec![0; 32]; MAX_EFFECTIVE_PUBDATA_PER_BATCH / (32 + 4)],
-                ..Default::default()
-            }),
-            tag: "max_bytecodes".to_string(),
-        },
-        // long bytecode
-        StatisticsTagged {
-            statistics: execute_test(L1MessengerTestData {
-                bytecodes: vec![
-                    vec![0; get_valid_bytecode_length(MAX_EFFECTIVE_PUBDATA_PER_BATCH)];
-                    1
-                ],
-                ..Default::default()
-            }),
-            tag: "long_bytecode".to_string(),
-        },
-        // lots of small repeated writes
-        StatisticsTagged {
-            statistics: execute_test(L1MessengerTestData {
-                // In theory each state diff can require only 5 bytes to be published (enum index + 4 bytes for the key)
-                state_diffs: generate_state_diffs(true, true, MAX_EFFECTIVE_PUBDATA_PER_BATCH / 5),
-                ..Default::default()
-            }),
-            tag: "small_repeated_writes".to_string(),
-        },
-        // lots of big repeated writes
-        StatisticsTagged {
-            statistics: execute_test(L1MessengerTestData {
-                // Each big repeated write will approximately require 4 bytes for key + 1 byte for encoding type + 32 bytes for value
-                state_diffs: generate_state_diffs(
-                    true,
-                    false,
-                    MAX_EFFECTIVE_PUBDATA_PER_BATCH / 37,
-                ),
-                ..Default::default()
-            }),
-            tag: "big_repeated_writes".to_string(),
-        },
-        // lots of small initial writes
-        StatisticsTagged {
-            statistics: execute_test(L1MessengerTestData {
-                // Each small initial write will take at least 32 bytes for derived key + 1 byte encoding zeroing out
-                state_diffs: generate_state_diffs(
-                    false,
-                    true,
-                    MAX_EFFECTIVE_PUBDATA_PER_BATCH / 33,
-                ),
-                ..Default::default()
-            }),
-            tag: "small_initial_writes".to_string(),
-        },
-        // lots of large initial writes
-        StatisticsTagged {
-            statistics: execute_test(L1MessengerTestData {
-                // Each big write will take at least 32 bytes for derived key + 1 byte for encoding type + 32 bytes for value
-                state_diffs: generate_state_diffs(
-                    false,
-                    false,
-                    MAX_EFFECTIVE_PUBDATA_PER_BATCH / 65,
-                ),
-                ..Default::default()
-            }),
-            tag: "big_initial_writes".to_string(),
-        },
-    ];
-
-    // We require the batch-tip overhead constants to exceed the worst estimated scenario
-    // by at least 1.5x (the `* 3 / 2` factor in the asserts below).
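// Where the divisors in the state-diff scenarios above come from, per diff, as stated
// in the inline comments:
//   small repeated:  5 bytes (4-byte key/enum index + 1-byte compressed "zero" value)
//   big repeated:   37 bytes (4-byte key + 1-byte encoding type + 32-byte value)
//   small initial:  33 bytes (32-byte derived key + 1 byte encoding the zeroed value)
//   big initial:    65 bytes (32-byte derived key + 1-byte encoding type + 32-byte value)
// Dividing MAX_EFFECTIVE_PUBDATA_PER_BATCH by these per-diff footprints gives the
// maximum number of diffs of each kind that can fit in one batch.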
- let max_used_gas = statistics - .iter() - .map(|s| (s.statistics.max_used_gas, s.tag.clone())) - .max() - .unwrap(); - assert!( - max_used_gas.0 * 3 / 2 <= BOOTLOADER_BATCH_TIP_OVERHEAD, - "BOOTLOADER_BATCH_TIP_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_OVERHEAD = {}", - max_used_gas.1, - max_used_gas.0, - BOOTLOADER_BATCH_TIP_OVERHEAD - ); - - let circuit_statistics = statistics - .iter() - .map(|s| (s.statistics.circuit_statistics, s.tag.clone())) - .max() - .unwrap(); - assert!( - circuit_statistics.0 * 3 / 2 <= BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD as u64, - "BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD = {}", - circuit_statistics.1, - circuit_statistics.0, - BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD - ); - - let execution_metrics_size = statistics - .iter() - .map(|s| (s.statistics.execution_metrics_size, s.tag.clone())) - .max() - .unwrap(); - assert!( - execution_metrics_size.0 * 3 / 2 <= BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD as u64, - "BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD = {}", - execution_metrics_size.1, - execution_metrics_size.0, - BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD - ); +fn dry_run_upper_bound() { + test_dry_run_upper_bound::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/bootloader.rs b/core/lib/multivm/src/versions/vm_fast/tests/bootloader.rs index 48e1b10de44..6075aea0989 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/bootloader.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/bootloader.rs @@ -1,52 +1,14 @@ -use assert_matches::assert_matches; -use zksync_types::U256; -use zksync_vm2::interface::HeapId; - use crate::{ - interface::{ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterfaceExt}, - versions::vm_fast::tests::{ - tester::VmTesterBuilder, - utils::{get_bootloader, verify_required_memory, BASE_SYSTEM_CONTRACTS}, - }, + versions::testonly::bootloader::{test_bootloader_out_of_gas, test_dummy_bootloader}, + vm_fast::Vm, }; #[test] -fn test_dummy_bootloader() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!result.result.is_failed()); - - let correct_first_cell = U256::from_str_radix("123123123", 16).unwrap(); - - verify_required_memory(&vm.vm.inner, vec![(correct_first_cell, HeapId::FIRST, 0)]); +fn dummy_bootloader() { + test_dummy_bootloader::>(); } #[test] -fn test_bootloader_out_of_gas() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_bootloader_gas_limit(10) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let res = vm.vm.execute(VmExecutionMode::Batch); - - assert_matches!( - res.result, - ExecutionResult::Halt { - reason: Halt::BootloaderOutOfGas - } - ); +fn bootloader_out_of_gas() { + test_bootloader_out_of_gas::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/bytecode_publishing.rs 
b/core/lib/multivm/src/versions/vm_fast/tests/bytecode_publishing.rs index 3070140c00b..8a662c38827 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/bytecode_publishing.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/bytecode_publishing.rs @@ -1,38 +1,6 @@ -use crate::{ - interface::{TxExecutionMode, VmEvent, VmExecutionMode, VmInterface, VmInterfaceExt}, - utils::bytecode, - vm_fast::tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, -}; +use crate::{versions::testonly::bytecode_publishing::test_bytecode_publishing, vm_fast::Vm}; #[test] -fn test_bytecode_publishing() { - // In this test, we aim to ensure that the contents of the compressed bytecodes - // are included as part of the L2->L1 long messages - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let compressed_bytecode = bytecode::compress(counter.clone()).unwrap().compressed; - - let DeployContractsTx { tx, .. } = account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - - let state = vm.vm.get_current_execution_state(); - let long_messages = VmEvent::extract_long_l2_to_l1_messages(&state.events); - assert!( - long_messages.contains(&compressed_bytecode), - "Bytecode not published" - ); +fn bytecode_publishing() { + test_bytecode_publishing::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/call_tracer.rs b/core/lib/multivm/src/versions/vm_fast/tests/call_tracer.rs deleted file mode 100644 index c97b38b6afc..00000000000 --- a/core/lib/multivm/src/versions/vm_fast/tests/call_tracer.rs +++ /dev/null @@ -1,92 +0,0 @@ -use std::sync::Arc; - -use once_cell::sync::OnceCell; -use zksync_types::{Address, Execute}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - tracers::CallTracer, - vm_latest::{ - constants::BATCH_COMPUTATIONAL_GAS_LIMIT, - tests::{ - tester::VmTesterBuilder, - utils::{read_max_depth_contract, read_test_contract}, - }, - HistoryEnabled, ToTracerPointer, - }, -}; - -// This test is ultra slow, so it's ignored by default. 
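// The refactor pattern applied in this and the surrounding test diffs: each test body
// moves into a shared, VM-agnostic function under `versions::testonly`, generic over
// the VM implementation, and the per-VM module keeps a one-line wrapper. Sketch of the
// shape, with the turbofish as used by the vm_fast suite:
// #[test]
// fn bytecode_publishing() {
//     test_bytecode_publishing::<Vm<_>>();
// }
// This lets vm_fast and the legacy VM versions share one test suite while each
// exercises its own `Vm` type.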
-#[test]
-#[ignore]
-fn test_max_depth() {
-    let contract = read_max_depth_contract();
-    let address = Address::random();
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_random_rich_accounts(1)
-        .with_deployer()
-        .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT)
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_custom_contracts(vec![(contract, address, true)])
-        .build();
-
-    let account = &mut vm.rich_accounts[0];
-    let tx = account.get_l2_tx_for_execute(
-        Execute {
-            contract_address: address,
-            calldata: vec![],
-            value: Default::default(),
-            factory_deps: None,
-        },
-        None,
-    );
-
-    let result = Arc::new(OnceCell::new());
-    let call_tracer = CallTracer::new(result.clone()).into_tracer_pointer();
-    vm.vm.push_transaction(tx);
-    let res = vm.vm.inspect(call_tracer.into(), VmExecutionMode::OneTx);
-    assert!(result.get().is_some());
-    assert!(res.result.is_failed());
-}
-
-#[test]
-fn test_basic_behavior() {
-    let contract = read_test_contract();
-    let address = Address::random();
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_random_rich_accounts(1)
-        .with_deployer()
-        .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT)
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_custom_contracts(vec![(contract, address, true)])
-        .build();
-
-    let increment_by_6_calldata =
-        "7cf5dab00000000000000000000000000000000000000000000000000000000000000006";
-
-    let account = &mut vm.rich_accounts[0];
-    let tx = account.get_l2_tx_for_execute(
-        Execute {
-            contract_address: address,
-            calldata: hex::decode(increment_by_6_calldata).unwrap(),
-            value: Default::default(),
-            factory_deps: None,
-        },
-        None,
-    );
-
-    let result = Arc::new(OnceCell::new());
-    let call_tracer = CallTracer::new(result.clone()).into_tracer_pointer();
-    vm.vm.push_transaction(tx);
-    let res = vm.vm.inspect(call_tracer.into(), VmExecutionMode::OneTx);
-
-    let call_tracer_result = result.get().unwrap();
-
-    assert_eq!(call_tracer_result.len(), 1);
-    // Expect that there are plenty of subcalls underneath.
-    let subcall = &call_tracer_result[0].calls;
-    assert!(subcall.len() > 10);
-    assert!(!res.result.is_failed());
-}
diff --git a/core/lib/multivm/src/versions/vm_fast/tests/circuits.rs b/core/lib/multivm/src/versions/vm_fast/tests/circuits.rs
index f40e5336eb3..e7521d87c1c 100644
--- a/core/lib/multivm/src/versions/vm_fast/tests/circuits.rs
+++ b/core/lib/multivm/src/versions/vm_fast/tests/circuits.rs
@@ -1,74 +1,6 @@
-use zksync_types::{Address, Execute, U256};
+use crate::{versions::testonly::circuits::test_circuits, vm_fast::Vm};
-
-use super::tester::VmTesterBuilder;
-use crate::{
-    interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt},
-    vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT,
-};
-
-// Checks that the estimated number of circuits for a simple transfer doesn't differ
-// much from the hardcoded expected value.
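// Worth noting in the deleted call-tracer tests above: `CallTracer` reports its result
// through a shared `Arc<OnceCell<_>>` handed to it at construction rather than through
// a return value, because the tracer itself is consumed by `inspect`; `result.get()`
// only becomes `Some(..)` once the VM run completes.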
#[test] -fn test_circuits() { - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(Address::random()), - calldata: Vec::new(), - value: U256::from(1u8), - factory_deps: vec![], - }, - None, - ); - vm.vm.push_transaction(tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!res.result.is_failed(), "{res:#?}"); - - let s = res.statistics.circuit_statistic; - // Check `circuit_statistic`. - const EXPECTED: [f32; 13] = [ - 1.34935, 0.15026, 1.66666, 0.00315, 1.0594, 0.00058, 0.00348, 0.00076, 0.11945, 0.14285, - 0.0, 0.0, 0.0, - ]; - let actual = [ - (s.main_vm, "main_vm"), - (s.ram_permutation, "ram_permutation"), - (s.storage_application, "storage_application"), - (s.storage_sorter, "storage_sorter"), - (s.code_decommitter, "code_decommitter"), - (s.code_decommitter_sorter, "code_decommitter_sorter"), - (s.log_demuxer, "log_demuxer"), - (s.events_sorter, "events_sorter"), - (s.keccak256, "keccak256"), - (s.ecrecover, "ecrecover"), - (s.sha256, "sha256"), - (s.secp256k1_verify, "secp256k1_verify"), - (s.transient_storage_checker, "transient_storage_checker"), - ]; - for ((actual, name), expected) in actual.iter().zip(EXPECTED) { - if expected == 0.0 { - assert_eq!( - *actual, expected, - "Check failed for {}, expected {}, actual {}", - name, expected, actual - ); - } else { - let diff = (actual - expected) / expected; - assert!( - diff.abs() < 0.1, - "Check failed for {}, expected {}, actual {}", - name, - expected, - actual - ); - } - } +fn circuits() { + test_circuits::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs b/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs index 34342d7f3b8..4ef86128734 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs @@ -1,252 +1,21 @@ -use ethabi::Token; -use zksync_types::{ - get_known_code_key, web3::keccak256, Address, Execute, StorageLogWithPreviousValue, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, h256_to_u256, u256_to_h256}; - use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - versions::testonly::ContractToDeploy, - vm_fast::{ - tests::{ - tester::{get_empty_storage, VmTesterBuilder}, - utils::{load_precompiles_contract, read_precompiles_contract, read_test_contract}, - }, - CircuitsTracer, + versions::testonly::code_oracle::{ + test_code_oracle, test_code_oracle_big_bytecode, test_refunds_in_code_oracle, }, + vm_fast::Vm, }; -fn generate_large_bytecode() -> Vec { - // This is the maximal possible size of a zkEVM bytecode - vec![2u8; ((1 << 16) - 1) * 32] -} - #[test] -fn test_code_oracle() { - let precompiles_contract_address = Address::random(); - let precompile_contract_bytecode = read_precompiles_contract(); - - // Filling the zkevm bytecode - let normal_zkevm_bytecode = read_test_contract(); - let normal_zkevm_bytecode_hash = hash_bytecode(&normal_zkevm_bytecode); - let normal_zkevm_bytecode_keccak_hash = keccak256(&normal_zkevm_bytecode); - let mut storage = get_empty_storage(); - storage.set_value( - get_known_code_key(&normal_zkevm_bytecode_hash), - u256_to_h256(U256::one()), - ); - - // In this test, we aim to test whether a simple account 
interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .with_custom_contracts(vec![ContractToDeploy::new( - precompile_contract_bytecode, - precompiles_contract_address, - )]) - .with_storage(storage) - .build(); - - let precompile_contract = load_precompiles_contract(); - let call_code_oracle_function = precompile_contract.function("callCodeOracle").unwrap(); - - vm.vm.insert_bytecodes([normal_zkevm_bytecode.as_slice()]); - let account = &mut vm.rich_accounts[0]; - - // Firstly, let's ensure that the contract works. - let tx1 = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(precompiles_contract_address), - calldata: call_code_oracle_function - .encode_input(&[ - Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), - Token::FixedBytes(normal_zkevm_bytecode_keccak_hash.to_vec()), - ]) - .unwrap(), - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx1); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "Transaction wasn't successful: {result:#?}" - ); - - // Now, we ask for the same bytecode. We use to partially check whether the memory page with - // the decommitted bytecode gets erased (it shouldn't). - let tx2 = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(precompiles_contract_address), - calldata: call_code_oracle_function - .encode_input(&[ - Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), - Token::FixedBytes(normal_zkevm_bytecode_keccak_hash.to_vec()), - ]) - .unwrap(), - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - vm.vm.push_transaction(tx2); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "Transaction wasn't successful: {result:#?}" - ); -} - -fn find_code_oracle_cost_log( - precompiles_contract_address: Address, - logs: &[StorageLogWithPreviousValue], -) -> &StorageLogWithPreviousValue { - logs.iter() - .find(|log| { - *log.log.key.address() == precompiles_contract_address && log.log.key.key().is_zero() - }) - .expect("no code oracle cost log") +fn code_oracle() { + test_code_oracle::>(); } #[test] -fn test_code_oracle_big_bytecode() { - let precompiles_contract_address = Address::random(); - let precompile_contract_bytecode = read_precompiles_contract(); - - let big_zkevm_bytecode = generate_large_bytecode(); - let big_zkevm_bytecode_hash = hash_bytecode(&big_zkevm_bytecode); - let big_zkevm_bytecode_keccak_hash = keccak256(&big_zkevm_bytecode); - - let mut storage = get_empty_storage(); - storage.set_value( - get_known_code_key(&big_zkevm_bytecode_hash), - u256_to_h256(U256::one()), - ); - - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. 
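// Aside: the shape of the calldata that `callCodeOracle(bytes32,bytes32)`
// produces below, as a self-contained sketch: a 4-byte selector followed by
// two 32-byte words. The selector value here is a placeholder (the tests
// compute the real one via `ethabi`); only the layout is the point.
fn encode_call_code_oracle(
    selector: [u8; 4],
    versioned_hash: [u8; 32],
    keccak_hash: [u8; 32],
) -> Vec<u8> {
    let mut calldata = Vec::with_capacity(4 + 32 + 32);
    calldata.extend_from_slice(&selector);
    calldata.extend_from_slice(&versioned_hash);
    calldata.extend_from_slice(&keccak_hash);
    calldata
}

fn main() {
    let calldata = encode_call_code_oracle([0xde, 0xad, 0xbe, 0xef], [0x11; 32], [0x22; 32]);
    assert_eq!(calldata.len(), 68); // 4 + 2 * 32
}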
- let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .with_custom_contracts(vec![ContractToDeploy::new( - precompile_contract_bytecode, - precompiles_contract_address, - )]) - .with_storage(storage) - .build(); - - let precompile_contract = load_precompiles_contract(); - let call_code_oracle_function = precompile_contract.function("callCodeOracle").unwrap(); - - vm.vm.insert_bytecodes([big_zkevm_bytecode.as_slice()]); - - let account = &mut vm.rich_accounts[0]; - - // Firstly, let's ensure that the contract works. - let tx1 = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(precompiles_contract_address), - calldata: call_code_oracle_function - .encode_input(&[ - Token::FixedBytes(big_zkevm_bytecode_hash.0.to_vec()), - Token::FixedBytes(big_zkevm_bytecode_keccak_hash.to_vec()), - ]) - .unwrap(), - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx1); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "Transaction wasn't successful: {result:#?}" - ); +fn code_oracle_big_bytecode() { + test_code_oracle_big_bytecode::>(); } #[test] fn refunds_in_code_oracle() { - let precompiles_contract_address = Address::random(); - let precompile_contract_bytecode = read_precompiles_contract(); - - let normal_zkevm_bytecode = read_test_contract(); - let normal_zkevm_bytecode_hash = hash_bytecode(&normal_zkevm_bytecode); - let normal_zkevm_bytecode_keccak_hash = keccak256(&normal_zkevm_bytecode); - let mut storage = get_empty_storage(); - storage.set_value( - get_known_code_key(&normal_zkevm_bytecode_hash), - u256_to_h256(U256::one()), - ); - - let precompile_contract = load_precompiles_contract(); - let call_code_oracle_function = precompile_contract.function("callCodeOracle").unwrap(); - - // Execute code oracle twice with identical VM state that only differs in that the queried bytecode - // is already decommitted the second time. The second call must consume less gas (`decommit` doesn't charge additional gas - // for already decommitted codes). 
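// Aside: the refund assertion at the end of `refunds_in_code_oracle` relies on
// the `decommit` gas cost from `CodeOracle.yul`: 4 gas per 32-byte bytecode
// word. A minimal sketch of that arithmetic (the constant name is illustrative):
const DECOMMIT_GAS_PER_WORD: u64 = 4;

fn expected_decommit_refund(bytecode_len_bytes: usize) -> u64 {
    DECOMMIT_GAS_PER_WORD * (bytecode_len_bytes as u64 / 32)
}

fn main() {
    // E.g. a 6,432-byte bytecode is 201 words, so the second (already
    // decommitted) call should be exactly 804 gas cheaper than the first.
    assert_eq!(expected_decommit_refund(6_432), 804);
}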
- let mut oracle_costs = vec![]; - for decommit in [false, true] { - let mut vm = VmTesterBuilder::new() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .with_custom_contracts(vec![ContractToDeploy::new( - precompile_contract_bytecode.clone(), - precompiles_contract_address, - )]) - .with_storage(storage.clone()) - .build(); - - vm.vm.insert_bytecodes([normal_zkevm_bytecode.as_slice()]); - - let account = &mut vm.rich_accounts[0]; - if decommit { - let (_, is_fresh) = vm.vm.inner.world_diff_mut().decommit_opcode( - &mut vm.vm.world, - &mut ((), CircuitsTracer::default()), - h256_to_u256(normal_zkevm_bytecode_hash), - ); - assert!(is_fresh); - } - - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(precompiles_contract_address), - calldata: call_code_oracle_function - .encode_input(&[ - Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), - Token::FixedBytes(normal_zkevm_bytecode_keccak_hash.to_vec()), - ]) - .unwrap(), - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "Transaction wasn't successful: {result:#?}" - ); - let log = - find_code_oracle_cost_log(precompiles_contract_address, &result.logs.storage_logs); - oracle_costs.push(log.log.value); - } - - // The refund is equal to `gasCost` parameter passed to the `decommit` opcode, which is defined as `4 * contract_length_in_words` - // in `CodeOracle.yul`. - let code_oracle_refund = h256_to_u256(oracle_costs[0]) - h256_to_u256(oracle_costs[1]); - assert_eq!( - code_oracle_refund, - (4 * (normal_zkevm_bytecode.len() / 32)).into() - ); + test_refunds_in_code_oracle::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/default_aa.rs b/core/lib/multivm/src/versions/vm_fast/tests/default_aa.rs index c2ce02d39fe..c3cfd8b29f3 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/default_aa.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/default_aa.rs @@ -1,81 +1,6 @@ -use zksync_system_constants::L2_BASE_TOKEN_ADDRESS; -use zksync_types::{ - get_code_key, get_known_code_key, get_nonce_key, - system_contracts::{DEPLOYMENT_NONCE_INCREMENT, TX_NONCE_INCREMENT}, - AccountTreeId, U256, -}; -use zksync_utils::u256_to_h256; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - vm_fast::tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::{get_balance, read_test_contract, verify_required_storage}, - }, - vm_latest::utils::fee::get_batch_base_fee, -}; +use crate::{versions::testonly::default_aa::test_default_aa_interaction, vm_fast::Vm}; #[test] -fn test_default_aa_interaction() { - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. 
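// Aside: the fee bookkeeping checked at the end of `test_default_aa_interaction`
// below, as a plain-integer sketch (u128 stands in for U256): the operator's
// balance must equal the maximal fee (gas_limit * base_fee) minus the refunded
// gas re-priced at the batch base fee.
fn operator_fee(gas_limit: u128, gas_refunded: u128, base_fee: u128) -> u128 {
    let maximal_fee = gas_limit * base_fee;
    maximal_fee - gas_refunded * base_fee
}

fn main() {
    // Illustrative numbers only: 1M gas limit, 250k refunded, base fee 100.
    assert_eq!(operator_fee(1_000_000, 250_000, 100), 75_000_000);
}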
- let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let DeployContractsTx { - tx, - bytecode_hash, - address, - } = account.get_deploy_tx(&counter, None, TxType::L2); - let maximal_fee = tx.gas_limit() * get_batch_base_fee(&vm.vm.batch_env); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - - vm.vm.get_current_execution_state(); - - // Both deployment and ordinary nonce should be incremented by one. - let account_nonce_key = get_nonce_key(&account.address); - let expected_nonce = TX_NONCE_INCREMENT + DEPLOYMENT_NONCE_INCREMENT; - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&bytecode_hash); - - // The contract should be deployed successfully. - let account_code_key = get_code_key(&address); - - let expected_slots = [ - (u256_to_h256(expected_nonce), account_nonce_key), - (u256_to_h256(U256::from(1u32)), known_codes_key), - (bytecode_hash, account_code_key), - ]; - - verify_required_storage( - &expected_slots, - &mut vm.vm.world.storage, - vm.vm.inner.world_diff().get_storage_state(), - ); - - let expected_fee = maximal_fee - - U256::from(result.refunds.gas_refunded) - * U256::from(get_batch_base_fee(&vm.vm.batch_env)); - let operator_balance = get_balance( - AccountTreeId::new(L2_BASE_TOKEN_ADDRESS), - &vm.fee_account, - &mut vm.vm.world.storage, - vm.vm.inner.world_diff().get_storage_state(), - ); - - assert_eq!( - operator_balance, expected_fee, - "Operator did not receive his fee" - ); +fn default_aa_interaction() { + test_default_aa_interaction::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_fast/tests/gas_limit.rs index 3f0a47b980e..6ba55f8e1f8 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/gas_limit.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/gas_limit.rs @@ -1,39 +1,6 @@ -use zksync_test_account::Account; -use zksync_types::{fee::Fee, Execute}; +use crate::{versions::testonly::gas_limit::test_tx_gas_limit_offset, vm_fast::Vm}; -use crate::{ - interface::{TxExecutionMode, VmInterface}, - vm_fast::tests::tester::VmTesterBuilder, - vm_latest::constants::{TX_DESCRIPTION_OFFSET, TX_GAS_LIMIT_OFFSET}, -}; - -/// Checks that `TX_GAS_LIMIT_OFFSET` constant is correct. 
#[test] -fn test_tx_gas_limit_offset() { - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let gas_limit = 9999.into(); - let tx = vm.rich_accounts[0].get_l2_tx_for_execute( - Execute { - contract_address: Some(Default::default()), - ..Default::default() - }, - Some(Fee { - gas_limit, - ..Account::default_fee() - }), - ); - - vm.vm.push_transaction(tx); - - assert!(!vm.vm.has_previous_far_calls()); - let gas_limit_from_memory = vm - .vm - .read_word_from_bootloader_heap(TX_DESCRIPTION_OFFSET + TX_GAS_LIMIT_OFFSET); - - assert_eq!(gas_limit_from_memory, gas_limit); +fn tx_gas_limit_offset() { + test_tx_gas_limit_offset::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs index 62fa82f52f2..5ec30907ed5 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs @@ -1,235 +1,22 @@ -use std::{collections::HashSet, iter}; - -use assert_matches::assert_matches; -use ethabi::Token; -use itertools::Itertools; -use zk_evm_1_3_1::zkevm_opcode_defs::decoding::{EncodingModeProduction, VmEncodingMode}; -use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; -use zksync_test_account::Account; -use zksync_types::{AccountTreeId, Address, Execute, StorageKey, H256, U256}; -use zksync_utils::{bytecode::hash_bytecode, h256_to_u256}; - use crate::{ - interface::{ - storage::ReadStorage, ExecutionResult, TxExecutionMode, VmExecutionMode, - VmExecutionResultAndLogs, VmInterface, VmInterfaceExt, - }, - versions::testonly::ContractToDeploy, - vm_fast::{ - tests::{ - tester::{TxType, VmTester, VmTesterBuilder}, - utils::{read_proxy_counter_contract, read_test_contract, BASE_SYSTEM_CONTRACTS}, - }, - vm::Vm, + versions::testonly::get_used_contracts::{ + test_get_used_contracts, test_get_used_contracts_with_far_call, + test_get_used_contracts_with_out_of_gas_far_call, }, + vm_fast::Vm, }; #[test] -fn test_get_used_contracts() { - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - assert!(known_bytecodes_without_aa_code(&vm.vm).is_empty()); - - // create and push and execute some not-empty factory deps transaction with success status - // to check that `get_decommitted_hashes()` updates - let contract_code = read_test_contract(); - let mut account = Account::random(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 0 }); - vm.vm.push_transaction(tx.tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - assert!(vm - .vm - .decommitted_hashes() - .contains(&h256_to_u256(tx.bytecode_hash))); - - // Note: `Default_AA` will be in the list of used contracts if L2 tx is used - assert_eq!( - vm.vm.decommitted_hashes().collect::>(), - known_bytecodes_without_aa_code(&vm.vm) - ); - - // create push and execute some non-empty factory deps transaction that fails - // (`known_bytecodes` will be updated but we expect `get_decommitted_hashes()` to not be updated) - - let calldata = [1, 2, 3]; - let big_calldata: Vec = calldata - .iter() - .cycle() - .take(calldata.len() * 1024) - .cloned() - .collect(); - let account2 = Account::random(); - let tx2 = account2.get_l1_tx( - Execute { - contract_address: Some(CONTRACT_DEPLOYER_ADDRESS), - calldata: 
big_calldata, - value: Default::default(), - factory_deps: vec![vec![1; 32]], - }, - 1, - ); - - vm.vm.push_transaction(tx2.clone()); - - let res2 = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(res2.result.is_failed()); - - for factory_dep in tx2.execute.factory_deps { - let hash = hash_bytecode(&factory_dep); - let hash_to_u256 = h256_to_u256(hash); - assert!(known_bytecodes_without_aa_code(&vm.vm).contains(&hash_to_u256)); - assert!(!vm.vm.decommitted_hashes().contains(&hash_to_u256)); - } -} - -fn known_bytecodes_without_aa_code(vm: &Vm) -> HashSet { - let mut known_bytecodes_without_aa_code = vm - .world - .bytecode_cache - .keys() - .cloned() - .collect::>(); - known_bytecodes_without_aa_code.remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash)); - known_bytecodes_without_aa_code -} - -/// Counter test contract bytecode inflated by appending lots of `NOP` opcodes at the end. This leads to non-trivial -/// decommitment cost (>10,000 gas). -fn inflated_counter_bytecode() -> Vec { - let mut counter_bytecode = read_test_contract(); - counter_bytecode.extend( - iter::repeat(EncodingModeProduction::nop_encoding().to_be_bytes()) - .take(10_000) - .flatten(), - ); - counter_bytecode -} - -#[derive(Debug)] -struct ProxyCounterData { - proxy_counter_address: Address, - counter_bytecode_hash: U256, -} - -fn execute_proxy_counter(gas: u32) -> (VmTester<()>, ProxyCounterData, VmExecutionResultAndLogs) { - let counter_bytecode = inflated_counter_bytecode(); - let counter_bytecode_hash = h256_to_u256(hash_bytecode(&counter_bytecode)); - let counter_address = Address::repeat_byte(0x23); - - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_custom_contracts(vec![ContractToDeploy::new( - counter_bytecode, - counter_address, - )]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let (proxy_counter_bytecode, proxy_counter_abi) = read_proxy_counter_contract(); - let account = &mut vm.rich_accounts[0]; - let deploy_tx = account.get_deploy_tx( - &proxy_counter_bytecode, - Some(&[Token::Address(counter_address)]), - TxType::L2, - ); - let (compression_result, exec_result) = vm - .vm - .execute_transaction_with_bytecode_compression(deploy_tx.tx, true); - compression_result.unwrap(); - assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); - - let decommitted_hashes = vm.vm.decommitted_hashes().collect::>(); - assert!( - !decommitted_hashes.contains(&counter_bytecode_hash), - "{decommitted_hashes:?}" - ); - - let increment = proxy_counter_abi.function("increment").unwrap(); - let increment_tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(deploy_tx.address), - calldata: increment - .encode_input(&[Token::Uint(1.into()), Token::Uint(gas.into())]) - .unwrap(), - value: 0.into(), - factory_deps: vec![], - }, - None, - ); - let (compression_result, exec_result) = vm - .vm - .execute_transaction_with_bytecode_compression(increment_tx, true); - compression_result.unwrap(); - let data = ProxyCounterData { - proxy_counter_address: deploy_tx.address, - counter_bytecode_hash, - }; - (vm, data, exec_result) +fn get_used_contracts() { + test_get_used_contracts::>(); } #[test] fn get_used_contracts_with_far_call() { - let (vm, data, exec_result) = execute_proxy_counter(100_000); - assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); - let decommitted_hashes = vm.vm.decommitted_hashes().collect::>(); - assert!( - decommitted_hashes.contains(&data.counter_bytecode_hash), - 
"{decommitted_hashes:?}" - ); + test_get_used_contracts_with_far_call::>(); } #[test] fn get_used_contracts_with_out_of_gas_far_call() { - let (mut vm, data, exec_result) = execute_proxy_counter(10_000); - assert_matches!(exec_result.result, ExecutionResult::Revert { .. }); - let decommitted_hashes = vm.vm.decommitted_hashes().collect::>(); - assert!( - decommitted_hashes.contains(&data.counter_bytecode_hash), - "{decommitted_hashes:?}" - ); - - // Execute another transaction with a successful far call and check that it's still charged for decommitment. - let account = &mut vm.rich_accounts[0]; - let (_, proxy_counter_abi) = read_proxy_counter_contract(); - let increment = proxy_counter_abi.function("increment").unwrap(); - let increment_tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(data.proxy_counter_address), - calldata: increment - .encode_input(&[Token::Uint(1.into()), Token::Uint(u64::MAX.into())]) - .unwrap(), - value: 0.into(), - factory_deps: vec![], - }, - None, - ); - let (compression_result, exec_result) = vm - .vm - .execute_transaction_with_bytecode_compression(increment_tx, true); - compression_result.unwrap(); - assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); - - let proxy_counter_cost_key = StorageKey::new( - AccountTreeId::new(data.proxy_counter_address), - H256::from_low_u64_be(1), - ); - let far_call_cost_log = exec_result - .logs - .storage_logs - .iter() - .find(|log| log.log.key == proxy_counter_cost_key) - .expect("no cost log"); - assert!( - far_call_cost_log.previous_value.is_zero(), - "{far_call_cost_log:?}" - ); - let far_call_cost = h256_to_u256(far_call_cost_log.log.value); - assert!(far_call_cost > 10_000.into(), "{far_call_cost}"); + test_get_used_contracts_with_out_of_gas_far_call::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/invalid_bytecode.rs b/core/lib/multivm/src/versions/vm_fast/tests/invalid_bytecode.rs deleted file mode 100644 index dde83d8a9f3..00000000000 --- a/core/lib/multivm/src/versions/vm_fast/tests/invalid_bytecode.rs +++ /dev/null @@ -1,120 +0,0 @@ -use zksync_types::H256; -use zksync_utils::h256_to_u256; - -use crate::vm_latest::tests::tester::VmTesterBuilder; -use crate::vm_latest::types::inputs::system_env::TxExecutionMode; -use crate::vm_latest::{HistoryEnabled, TxRevertReason}; - -// TODO this test requires a lot of hacks for bypassing the bytecode checks in the VM. -// Port it later, it's not significant. 
for now - -#[test] -fn test_invalid_bytecode() { - let mut vm_builder = VmTesterBuilder::new(HistoryEnabled) - .with_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1); - let mut storage = vm_builder.take_storage(); - let mut vm = vm_builder.build(&mut storage); - - let block_gas_per_pubdata = vm_test_env - .block_context - .context - .block_gas_price_per_pubdata(); - - let mut test_vm_with_custom_bytecode_hash = - |bytecode_hash: H256, expected_revert_reason: Option| { - let mut oracle_tools = - OracleTools::new(vm_test_env.storage_ptr.as_mut(), HistoryEnabled); - - let (encoded_tx, predefined_overhead) = get_l1_tx_with_custom_bytecode_hash( - h256_to_u256(bytecode_hash), - block_gas_per_pubdata as u32, - ); - - run_vm_with_custom_factory_deps( - &mut oracle_tools, - vm_test_env.block_context.context, - &vm_test_env.block_properties, - encoded_tx, - predefined_overhead, - expected_revert_reason, - ); - }; - - let failed_to_mark_factory_deps = |msg: &str, data: Vec| { - TxRevertReason::FailedToMarkFactoryDependencies(VmRevertReason::General { - msg: msg.to_string(), - data, - }) - }; - - // Here we provide the correctly-formatted bytecode hash of - // odd length, so it should work. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - None, - ); - - // Here we provide correctly formatted bytecode of even length, so - // it should fail. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Code length in words must be odd", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 67, 111, 100, 101, 32, 108, 101, 110, - 103, 116, 104, 32, 105, 110, 32, 119, 111, 114, 100, 115, 32, 109, 117, 115, 116, - 32, 98, 101, 32, 111, 100, 100, - ], - )), - ); - - // Here we provide incorrectly formatted bytecode of odd length, so - // it should fail. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Incorrectly formatted bytecodeHash", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, - 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, - 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - )), - ); - - // Here we provide incorrectly formatted bytecode of odd length, so - // it should fail. 
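// Aside: the bytecode-hash format rules this deleted test exercises, as a
// self-contained sketch reconstructed from its fixtures (an assumption, not a
// verbatim copy of the bootloader's check): byte 0 is the version and must be
// 1, byte 1 must be 0, and bytes 2..4 encode the length in 32-byte words,
// which must be odd.
fn is_valid_bytecode_hash(hash: &[u8; 32]) -> bool {
    let version_ok = hash[0] == 1 && hash[1] == 0;
    let len_in_words = u16::from_be_bytes([hash[2], hash[3]]);
    version_ok && len_in_words % 2 == 1
}

fn main() {
    let mut ok = [0u8; 32];
    ok[0] = 1; // correct version
    ok[3] = 1; // length of 1 word: odd
    assert!(is_valid_bytecode_hash(&ok));

    let mut even_len = ok;
    even_len[2] = 2;
    even_len[3] = 2; // 0x0202 = 514 words: even, rejected
    assert!(!is_valid_bytecode_hash(&even_len));

    let mut bad_version = ok;
    bad_version[1] = 1; // marker byte must be zero
    assert!(!is_valid_bytecode_hash(&bad_version));
}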
- test_vm_with_custom_bytecode_hash( - H256([ - 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Incorrectly formatted bytecodeHash", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, - 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, - 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - )), - ); -} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/is_write_initial.rs b/core/lib/multivm/src/versions/vm_fast/tests/is_write_initial.rs index df8d992f02f..522aa2413f6 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/is_write_initial.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/is_write_initial.rs @@ -1,46 +1,6 @@ -use zksync_types::get_nonce_key; - -use crate::{ - interface::{ - storage::ReadStorage, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt, - }, - vm_fast::tests::{ - tester::{Account, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, -}; +use crate::{versions::testonly::is_write_initial::test_is_write_initial_behaviour, vm_fast::Vm}; #[test] -fn test_is_write_initial_behaviour() { - // In this test, we check result of `is_write_initial` at different stages. - // The main idea is to check that `is_write_initial` storage uses the correct cache for initial_writes and doesn't - // messed up it with the repeated writes during the one batch execution. - - let mut account = Account::random(); - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let nonce_key = get_nonce_key(&account.address); - // Check that the next write to the nonce key will be initial. - assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); - - let contract_code = read_test_contract(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L2).tx; - - vm.vm.push_transaction(tx); - vm.vm.execute(VmExecutionMode::OneTx); - - // Check that `is_write_initial` still returns true for the nonce key. 
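// Aside: the property under test, as a minimal model (types are stand-ins for
// the real storage view): `is_write_initial` must keep answering "initial" for
// a key that was only written inside the in-flight batch, because such writes
// are not committed to the underlying storage until the batch is sealed.
use std::collections::HashSet;

struct StorageModel {
    committed_keys: HashSet<&'static str>,
}

impl StorageModel {
    fn is_write_initial(&self, key: &str) -> bool {
        !self.committed_keys.contains(key)
    }
}

fn main() {
    let storage = StorageModel { committed_keys: HashSet::new() };
    assert!(storage.is_write_initial("nonce_key")); // before the tx
    // ... the VM executes a tx that bumps the nonce in its own cache ...
    assert!(storage.is_write_initial("nonce_key")); // still initial afterwards
}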
- assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); +fn is_write_initial_behaviour() { + test_is_write_initial_behaviour::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs index 5897ec5f266..0174eeffd7e 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs @@ -1,198 +1,16 @@ -use ethabi::Token; -use zksync_contracts::l1_messenger_contract; -use zksync_system_constants::{BOOTLOADER_ADDRESS, L1_MESSENGER_ADDRESS}; -use zksync_types::{ - get_code_key, get_known_code_key, - l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, - Execute, ExecuteTransactionCommon, U256, -}; -use zksync_utils::{h256_to_u256, u256_to_h256}; - use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - utils::StorageWritesDeduplicator, - vm_fast::{ - tests::{ - tester::{TxType, VmTesterBuilder}, - utils::{read_test_contract, BASE_SYSTEM_CONTRACTS}, - }, - transaction_data::TransactionData, + versions::testonly::l1_tx_execution::{ + test_l1_tx_execution, test_l1_tx_execution_high_gas_limit, }, + vm_fast::Vm, }; #[test] -fn test_l1_tx_execution() { - // In this test, we try to execute a contract deployment from L1 - // Here instead of marking code hash via the bootloader means, we will be - // using L1->L2 communication, the same it would likely be done during the priority mode. - - // There are always at least 9 initial writes here, because we pay fees from l1: - // - `totalSupply` of ETH token - // - balance of the refund recipient - // - balance of the bootloader - // - `tx_rolling` hash - // - `gasPerPubdataByte` - // - `basePubdataSpent` - // - rolling hash of L2->L1 logs - // - transaction number in block counter - // - L2->L1 log counter in `L1Messenger` - - // TODO(PLA-537): right now we are using 5 slots instead of 9 due to 0 fee for transaction. - let basic_initial_writes = 5; - - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let contract_code = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let deploy_tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 1 }); - let tx_data: TransactionData = deploy_tx.tx.clone().into(); - - let required_l2_to_l1_logs: Vec<_> = vec![L2ToL1Log { - shard_id: 0, - is_service: true, - tx_number_in_block: 0, - sender: BOOTLOADER_ADDRESS, - key: tx_data.tx_hash(0.into()), - value: u256_to_h256(U256::from(1u32)), - }] - .into_iter() - .map(UserL2ToL1Log) - .collect(); - - vm.vm.push_transaction(deploy_tx.tx.clone()); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&deploy_tx.bytecode_hash); - - // The contract should be deployed successfully. 
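// Aside: the write-accounting model behind the `StorageWritesDeduplicator`
// assertions that follow, as a minimal sketch (types are stand-ins): within a
// batch, multiple writes to one slot collapse into a single write, which counts
// as "initial" if the slot was never written before the batch and "repeated"
// otherwise.
use std::collections::{HashMap, HashSet};

fn count_writes(
    previously_written: &HashSet<&str>,
    batch_writes: &[(&'static str, u64)],
) -> (usize, usize) {
    let mut deduplicated: HashMap<&str, u64> = HashMap::new();
    for &(slot, value) in batch_writes {
        deduplicated.insert(slot, value); // later writes overwrite earlier ones
    }
    let initial = deduplicated
        .keys()
        .filter(|slot| !previously_written.contains(*slot))
        .count();
    (initial, deduplicated.len() - initial)
}

fn main() {
    let committed: HashSet<&str> = ["counter"].into();
    let writes = [("counter", 1), ("counter", 2), ("pubdata_spent", 7)];
    // "counter" existed before the batch -> repeated; "pubdata_spent" -> initial.
    assert_eq!(count_writes(&committed, &writes), (1, 1));
}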
- let account_code_key = get_code_key(&deploy_tx.address); - - assert!(!res.result.is_failed()); - - for (expected_value, storage_location) in [ - (U256::from(1u32), known_codes_key), - (h256_to_u256(deploy_tx.bytecode_hash), account_code_key), - ] { - assert_eq!( - expected_value, - vm.vm.inner.world_diff().get_storage_state()[&( - *storage_location.address(), - h256_to_u256(*storage_location.key()) - )] - ); - } - - assert_eq!(res.logs.user_l2_to_l1_logs, required_l2_to_l1_logs); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - true, - None, - false, - TxType::L1 { serial_id: 0 }, - ); - vm.vm.push_transaction(tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - let storage_logs = res.logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - - // Tx panicked - assert_eq!(res.initial_storage_writes, basic_initial_writes); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - false, - None, - false, - TxType::L1 { serial_id: 0 }, - ); - vm.vm.push_transaction(tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); - let storage_logs = res.logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We changed one slot inside contract. - assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); - - // No repeated writes - let repeated_writes = res.repeated_storage_writes; - assert_eq!(res.repeated_storage_writes, 0); - - vm.vm.push_transaction(tx); - let storage_logs = vm.vm.execute(VmExecutionMode::OneTx).logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We do the same storage write, it will be deduplicated, so still 4 initial write and 0 repeated. - // But now the base pubdata spent has changed too. - assert_eq!(res.initial_storage_writes, basic_initial_writes + 1); - assert_eq!(res.repeated_storage_writes, repeated_writes); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - false, - Some(10.into()), - false, - TxType::L1 { serial_id: 1 }, - ); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - // Method is not payable tx should fail - assert!(result.result.is_failed(), "The transaction should fail"); - - let res = StorageWritesDeduplicator::apply_on_empty_state(&result.logs.storage_logs); - assert_eq!(res.initial_storage_writes, basic_initial_writes + 1); - assert_eq!(res.repeated_storage_writes, 1); +fn l1_tx_execution() { + test_l1_tx_execution::>(); } #[test] -fn test_l1_tx_execution_high_gas_limit() { - // In this test, we try to execute an L1->L2 transaction with a high gas limit. - // Usually priority transactions with dangerously gas limit should even pass the checks on the L1, - // however, they might pass during the transition period to the new fee model, so we check that we can safely process those. 
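// Aside: how the test forges the oversized gas limit onto the L1 tx, as a
// self-contained sketch of the same match-and-mutate pattern (these types are
// stand-ins for `ExecuteTransactionCommon` / `L1TxCommonData`):
struct L1Data {
    gas_limit: u64,
}

enum CommonData {
    L1(L1Data),
    L2,
}

fn main() {
    let mut common_data = CommonData::L1(L1Data { gas_limit: 2_000_000 });
    if let CommonData::L1(data) = &mut common_data {
        // Inflate the declared limit well beyond anything sensible.
        data.gas_limit = 300_000_000;
    } else {
        unreachable!()
    }
    let CommonData::L1(data) = &common_data else { unreachable!() };
    assert_eq!(data.gas_limit, 300_000_000);
}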
- - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - - let l1_messenger = l1_messenger_contract(); - - let contract_function = l1_messenger.function("sendToL1").unwrap(); - let params = [ - // Even a message of size 100k should not be able to be sent by a priority transaction - Token::Bytes(vec![0u8; 100_000]), - ]; - let calldata = contract_function.encode_input(¶ms).unwrap(); - - let mut tx = account.get_l1_tx( - Execute { - contract_address: Some(L1_MESSENGER_ADDRESS), - value: 0.into(), - factory_deps: vec![], - calldata, - }, - 0, - ); - - if let ExecuteTransactionCommon::L1(data) = &mut tx.common_data { - // Using some large gas limit - data.gas_limit = 300_000_000.into(); - } else { - unreachable!() - }; - - vm.vm.push_transaction(tx); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(res.result.is_failed(), "The transaction should've failed"); +fn l1_tx_execution_high_gas_limit() { + test_l1_tx_execution_high_gas_limit::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/l2_blocks.rs b/core/lib/multivm/src/versions/vm_fast/tests/l2_blocks.rs index fde94d9da6c..0823bee6cc9 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/l2_blocks.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/l2_blocks.rs @@ -1,424 +1,33 @@ -//! -//! Tests for the bootloader -//! The description for each of the tests can be found in the corresponding `.yul` file. -//! - -use zksync_system_constants::REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE; -use zksync_types::{ - block::{pack_block_info, L2BlockHasher}, - AccountTreeId, Execute, ExecuteTransactionCommon, L1BatchNumber, L1TxCommonData, L2BlockNumber, - ProtocolVersionId, StorageKey, Transaction, H160, H256, SYSTEM_CONTEXT_ADDRESS, - SYSTEM_CONTEXT_BLOCK_INFO_POSITION, SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, U256, -}; -use zksync_utils::{h256_to_u256, u256_to_h256}; - use crate::{ - interface::{ - storage::ReadStorage, ExecutionResult, Halt, L2BlockEnv, TxExecutionMode, VmExecutionMode, - VmInterface, VmInterfaceExt, - }, - versions::testonly::default_l1_batch, - vm_fast::{tests::tester::VmTesterBuilder, vm::Vm}, - vm_latest::{ - constants::{TX_OPERATOR_L2_BLOCK_INFO_OFFSET, TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO}, - utils::l2_blocks::get_l2_block_hash_key, + versions::testonly::l2_blocks::{ + test_l2_block_first_in_batch, test_l2_block_initialization_number_non_zero, + test_l2_block_initialization_timestamp, test_l2_block_new_l2_block, + test_l2_block_same_l2_block, }, + vm_fast::Vm, }; -fn get_l1_noop() -> Transaction { - Transaction { - common_data: ExecuteTransactionCommon::L1(L1TxCommonData { - sender: H160::random(), - gas_limit: U256::from(2000000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute: Execute { - contract_address: Some(H160::zero()), - calldata: vec![], - value: U256::zero(), - factory_deps: vec![], - }, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - #[test] -fn test_l2_block_initialization_timestamp() { - // This test checks that the L2 block initialization works correctly. - // Here we check that that the first block must have timestamp that is greater or equal to the timestamp - // of the current batch. 
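// Aside: the rule under test, as a minimal sketch: the first L2 block of a
// batch may not be older than the batch itself.
fn first_l2_block_timestamp_ok(batch_timestamp: u64, l2_block_timestamp: u64) -> bool {
    l2_block_timestamp >= batch_timestamp
}

fn main() {
    assert!(first_l2_block_timestamp_ok(1, 1)); // equal timestamps are allowed
    assert!(!first_l2_block_timestamp_ok(1, 0)); // what the test forces -> halt
}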
- - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - // Override the timestamp of the current L2 block to be 0. - vm.vm.bootloader_state.push_l2_block(L2BlockEnv { - number: 1, - timestamp: 0, - prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), - max_virtual_blocks_to_create: 1, - }); - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt {reason: Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())} - ); +fn l2_block_initialization_timestamp() { + test_l2_block_initialization_timestamp::>(); } #[test] -fn test_l2_block_initialization_number_non_zero() { - // This test checks that the L2 block initialization works correctly. - // Here we check that the first L2 block number can not be zero. - - let l1_batch = default_l1_batch(L1BatchNumber(1)); - let first_l2_block = L2BlockEnv { - number: 0, - timestamp: l1_batch.timestamp, - prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), - max_virtual_blocks_to_create: 1, - }; - - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - - set_manual_l2_block_info(&mut vm.vm, 0, first_l2_block); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt { - reason: Halt::FailedToSetL2Block( - "L2 block number is never expected to be zero".to_string() - ) - } - ); -} - -fn test_same_l2_block( - expected_error: Option, - override_timestamp: Option, - override_prev_block_hash: Option, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - vm.vm.push_transaction(l1_tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!res.result.is_failed()); - - let mut current_l2_block = vm.vm.batch_env.first_l2_block; - - if let Some(timestamp) = override_timestamp { - current_l2_block.timestamp = timestamp; - } - if let Some(prev_block_hash) = override_prev_block_hash { - current_l2_block.prev_block_hash = prev_block_hash; - } - - if (None, None) == (override_timestamp, override_prev_block_hash) { - current_l2_block.max_virtual_blocks_to_create = 0; - } - - vm.vm.push_transaction(l1_tx); - set_manual_l2_block_info(&mut vm.vm, 1, current_l2_block); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } +fn l2_block_initialization_number_non_zero() { + test_l2_block_initialization_number_non_zero::>(); } #[test] -fn test_l2_block_same_l2_block() { - // This test aims to test the case when there are multiple transactions inside the same L2 block. 
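// Aside: the invariants for continuing the *same* L2 block with another tx,
// sketched to mirror the three cases below (struct and error strings are taken
// from the test; the function itself is illustrative): neither the timestamp
// nor the previous-block hash may change.
struct L2BlockInfo {
    timestamp: u64,
    prev_block_hash: [u8; 32],
}

fn can_continue(current: &L2BlockInfo, proposed: &L2BlockInfo) -> Result<(), &'static str> {
    if proposed.timestamp != current.timestamp {
        return Err("The timestamp of the same L2 block must be same");
    }
    if proposed.prev_block_hash != current.prev_block_hash {
        return Err("The previous hash of the same L2 block must be same");
    }
    Ok(())
}

fn main() {
    let current = L2BlockInfo { timestamp: 42, prev_block_hash: [7; 32] };
    // Case 1: changed timestamp is rejected.
    assert!(can_continue(&current, &L2BlockInfo { timestamp: 0, prev_block_hash: [7; 32] }).is_err());
    // Case 2: changed previous hash is rejected.
    assert!(can_continue(&current, &L2BlockInfo { timestamp: 42, prev_block_hash: [0; 32] }).is_err());
    // Case 3: a faithful continuation is accepted.
    assert!(can_continue(&current, &L2BlockInfo { timestamp: 42, prev_block_hash: [7; 32] }).is_ok());
}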
- - // Case 1: Incorrect timestamp - test_same_l2_block( - Some(Halt::FailedToSetL2Block( - "The timestamp of the same L2 block must be same".to_string(), - )), - Some(0), - None, - ); - - // Case 2: Incorrect previous block hash - test_same_l2_block( - Some(Halt::FailedToSetL2Block( - "The previous hash of the same L2 block must be same".to_string(), - )), - None, - Some(H256::zero()), - ); - - // Case 3: Correct continuation of the same L2 block - test_same_l2_block(None, None, None); -} - -fn test_new_l2_block( - first_l2_block: L2BlockEnv, - overriden_second_block_number: Option, - overriden_second_block_timestamp: Option, - overriden_second_block_prev_block_hash: Option, - expected_error: Option, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - l1_batch.first_l2_block = first_l2_block; - - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - // Firstly we execute the first transaction - vm.vm.push_transaction(l1_tx.clone()); - vm.vm.execute(VmExecutionMode::OneTx); - - let mut second_l2_block = vm.vm.batch_env.first_l2_block; - second_l2_block.number += 1; - second_l2_block.timestamp += 1; - second_l2_block.prev_block_hash = vm.vm.bootloader_state.last_l2_block().get_hash(); - - if let Some(block_number) = overriden_second_block_number { - second_l2_block.number = block_number; - } - if let Some(timestamp) = overriden_second_block_timestamp { - second_l2_block.timestamp = timestamp; - } - if let Some(prev_block_hash) = overriden_second_block_prev_block_hash { - second_l2_block.prev_block_hash = prev_block_hash; - } - - vm.vm.bootloader_state.push_l2_block(second_l2_block); - - vm.vm.push_transaction(l1_tx); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } +fn l2_block_same_l2_block() { + test_l2_block_same_l2_block::>(); } #[test] -fn test_l2_block_new_l2_block() { - // This test is aimed to cover potential issue - - let correct_first_block = L2BlockEnv { - number: 1, - timestamp: 1, - prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), - max_virtual_blocks_to_create: 1, - }; - - // Case 1: Block number increasing by more than 1 - test_new_l2_block( - correct_first_block, - Some(3), - None, - None, - Some(Halt::FailedToSetL2Block( - "Invalid new L2 block number".to_string(), - )), - ); - - // Case 2: Timestamp not increasing - test_new_l2_block( - correct_first_block, - None, - Some(1), - None, - Some(Halt::FailedToSetL2Block("The timestamp of the new L2 block must be greater than the timestamp of the previous L2 block".to_string())), - ); - - // Case 3: Incorrect previous block hash - test_new_l2_block( - correct_first_block, - None, - None, - Some(H256::zero()), - Some(Halt::FailedToSetL2Block( - "The current L2 block hash is incorrect".to_string(), - )), - ); - - // Case 4: Correct new block - test_new_l2_block(correct_first_block, None, None, None, None); -} - -#[allow(clippy::too_many_arguments)] -fn test_first_in_batch( - miniblock_timestamp: u64, - miniblock_number: u32, - pending_txs_hash: H256, - batch_timestamp: u64, - new_batch_timestamp: u64, - batch_number: u32, - proposed_block: L2BlockEnv, - expected_error: Option, -) { - let mut l1_batch = 
default_l1_batch(L1BatchNumber(1)); - l1_batch.number += 1; - l1_batch.timestamp = new_batch_timestamp; - - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_tx = get_l1_noop(); - - // Setting the values provided. - let mut storage_ptr = vm.vm.world.storage.borrow_mut(); - let miniblock_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - ); - let pending_txs_hash_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, - ); - let batch_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_BLOCK_INFO_POSITION, - ); - let prev_block_hash_position = get_l2_block_hash_key(miniblock_number - 1); - - storage_ptr.set_value( - miniblock_info_slot, - u256_to_h256(pack_block_info( - miniblock_number as u64, - miniblock_timestamp, - )), - ); - storage_ptr.set_value(pending_txs_hash_slot, pending_txs_hash); - storage_ptr.set_value( - batch_info_slot, - u256_to_h256(pack_block_info(batch_number as u64, batch_timestamp)), - ); - storage_ptr.set_value( - prev_block_hash_position, - L2BlockHasher::legacy_hash(L2BlockNumber(miniblock_number - 1)), - ); - drop(storage_ptr); - - // In order to skip checks from the Rust side of the VM, we firstly use some definitely correct L2 block info. - // And then override it with the user-provided value - - let last_l2_block = vm.vm.bootloader_state.last_l2_block(); - let new_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: last_l2_block.timestamp + 1, - prev_block_hash: last_l2_block.get_hash(), - max_virtual_blocks_to_create: last_l2_block.max_virtual_blocks_to_create, - }; - - vm.vm.bootloader_state.push_l2_block(new_l2_block); - vm.vm.push_transaction(l1_tx); - set_manual_l2_block_info(&mut vm.vm, 0, proposed_block); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } +fn l2_block_new_l2_block() { + test_l2_block_new_l2_block::>(); } #[test] -fn test_l2_block_first_in_batch() { - let prev_block_hash = L2BlockHasher::legacy_hash(L2BlockNumber(0)); - let prev_block_hash = L2BlockHasher::new(L2BlockNumber(1), 1, prev_block_hash) - .finalize(ProtocolVersionId::latest()); - test_first_in_batch( - 1, - 1, - H256::zero(), - 1, - 2, - 1, - L2BlockEnv { - number: 2, - timestamp: 2, - prev_block_hash, - max_virtual_blocks_to_create: 1, - }, - None, - ); - - let prev_block_hash = L2BlockHasher::legacy_hash(L2BlockNumber(0)); - let prev_block_hash = L2BlockHasher::new(L2BlockNumber(1), 8, prev_block_hash) - .finalize(ProtocolVersionId::latest()); - test_first_in_batch( - 8, - 1, - H256::zero(), - 5, - 12, - 1, - L2BlockEnv { - number: 2, - timestamp: 9, - prev_block_hash, - max_virtual_blocks_to_create: 1, - }, - Some(Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())), - ); -} - -fn set_manual_l2_block_info( - vm: &mut Vm, - tx_number: usize, - block_info: L2BlockEnv, -) { - let fictive_miniblock_position = - TX_OPERATOR_L2_BLOCK_INFO_OFFSET + TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO * tx_number; - - vm.write_to_bootloader_heap([ - 
(fictive_miniblock_position, block_info.number.into()),
-        (fictive_miniblock_position + 1, block_info.timestamp.into()),
-        (
-            fictive_miniblock_position + 2,
-            h256_to_u256(block_info.prev_block_hash),
-        ),
-        (
-            fictive_miniblock_position + 3,
-            block_info.max_virtual_blocks_to_create.into(),
-        ),
-    ])
+fn l2_block_first_in_batch() {
+    test_l2_block_first_in_batch::<Vm<_>>();
 }
diff --git a/core/lib/multivm/src/versions/vm_fast/tests/mod.rs b/core/lib/multivm/src/versions/vm_fast/tests/mod.rs
index 730c573cdcf..b29ca6ed7f8 100644
--- a/core/lib/multivm/src/versions/vm_fast/tests/mod.rs
+++ b/core/lib/multivm/src/versions/vm_fast/tests/mod.rs
@@ -1,26 +1,164 @@
-mod block_tip;
-mod bootloader;
-mod bytecode_publishing;
-mod default_aa;
-// mod call_tracer; FIXME: requires tracers
-mod circuits;
-mod code_oracle;
-mod gas_limit;
-mod get_used_contracts;
-mod is_write_initial;
-mod l1_tx_execution;
-mod l2_blocks;
-mod nonce_holder;
-mod precompiles;
-// mod prestate_tracer; FIXME: is pre-state tracer still relevant?
-mod refunds;
-mod require_eip712;
-mod rollbacks;
-mod sekp256r1;
-mod simple_execution;
-mod storage;
-mod tester;
-mod tracing_execution_error;
-mod transfer;
-mod upgrade;
-mod utils;
+use std::{any::Any, collections::HashSet, fmt, rc::Rc};
+
+use zksync_types::{writes::StateDiffRecord, StorageKey, Transaction, H160, H256, U256};
+use zksync_utils::h256_to_u256;
+use zksync_vm2::interface::{Event, HeapId, StateInterface};
+use zksync_vm_interface::{
+    pubdata::PubdataBuilder, storage::ReadStorage, CurrentExecutionState, L2BlockEnv,
+    VmExecutionMode, VmExecutionResultAndLogs, VmInterface,
+};
+
+use super::Vm;
+use crate::{
+    interface::storage::{ImmutableStorageView, InMemoryStorage},
+    versions::testonly::TestedVm,
+    vm_fast::CircuitsTracer,
+};
+
+// mod block_tip;
+// mod bootloader;
+// mod bytecode_publishing;
+// mod circuits;
+// mod code_oracle;
+// mod default_aa;
+// mod gas_limit;
+// mod get_used_contracts;
+// mod is_write_initial;
+// mod l1_tx_execution;
+// mod l2_blocks;
+// mod nonce_holder;
+// mod precompiles;
+// mod refunds;
+// mod require_eip712;
+// mod rollbacks;
+// mod secp256r1;
+// mod simple_execution;
+// mod storage;
+// mod tracing_execution_error;
+// mod transfer;
+// mod upgrade;
+
+trait ObjectSafeEq: fmt::Debug + AsRef<dyn Any> {
+    fn eq(&self, other: &dyn ObjectSafeEq) -> bool;
+}
+
+#[derive(Debug)]
+struct BoxedEq<T: fmt::Debug + PartialEq + 'static>(T);
+
+impl<T: fmt::Debug + PartialEq + 'static> AsRef<dyn Any> for BoxedEq<T> {
+    fn as_ref(&self) -> &dyn Any {
+        &self.0
+    }
+}
+
+impl<T: fmt::Debug + PartialEq + 'static> ObjectSafeEq for BoxedEq<T> {
+    fn eq(&self, other: &dyn ObjectSafeEq) -> bool {
+        let Some(other) = other.as_ref().downcast_ref::<T>() else {
+            return false;
+        };
+        self.0 == *other
+    }
+}
+
+// TODO this doesn't include all the state of ModifiedWorld
+#[derive(Debug)]
+pub(crate) struct VmStateDump {
+    state: Box<dyn ObjectSafeEq>,
+    storage_writes: Vec<((H160, U256), U256)>,
+    events: Box<[Event]>,
+}
+
+impl PartialEq for VmStateDump {
+    fn eq(&self, other: &Self) -> bool {
+        self.state.as_ref().eq(other.state.as_ref())
+            && self.storage_writes == other.storage_writes
+            && self.events == other.events
+    }
+}
+
+impl TestedVm for Vm<ImmutableStorageView<InMemoryStorage>> {
+    type StateDump = VmStateDump;
+
+    fn dump_state(&self) -> Self::StateDump {
+        VmStateDump {
+            state: Box::new(BoxedEq(self.inner.dump_state())),
+            storage_writes: self.inner.get_storage_state().collect(),
+            events: self.inner.events().collect(),
+        }
+    }
+
+    fn gas_remaining(&mut self) -> u32 {
+        self.gas_remaining()
+    }
+
+    fn get_current_execution_state(&self) -> CurrentExecutionState {
+        self.get_current_execution_state()
+    }
+
+    fn decommitted_hashes(&self) -> HashSet<U256> {
+        self.decommitted_hashes().collect()
+    }
+
+    fn finish_batch_with_state_diffs(
+        &mut self,
+        diffs: Vec<StateDiffRecord>,
+        pubdata_builder: Rc<dyn PubdataBuilder>,
+    ) -> VmExecutionResultAndLogs {
+        self.enforce_state_diffs(diffs);
+        self.finish_batch(pubdata_builder)
+            .block_tip_execution_result
+    }
+
+    fn finish_batch_without_pubdata(&mut self) -> VmExecutionResultAndLogs {
+        self.inspect_inner(&mut Default::default(), VmExecutionMode::Batch)
+    }
+
+    fn insert_bytecodes(&mut self, bytecodes: &[&[u8]]) {
+        self.insert_bytecodes(bytecodes.iter().copied())
+    }
+
+    fn known_bytecode_hashes(&self) -> HashSet<U256> {
+        self.world.bytecode_cache.keys().copied().collect()
+    }
+
+    fn manually_decommit(&mut self, code_hash: H256) -> bool {
+        let (_, is_fresh) = self.inner.world_diff_mut().decommit_opcode(
+            &mut self.world,
+            &mut ((), CircuitsTracer::default()),
+            h256_to_u256(code_hash),
+        );
+        is_fresh
+    }
+
+    fn verify_required_bootloader_heap(&self, required_values: &[(u32, U256)]) {
+        for &(slot, expected_value) in required_values {
+            let current_value = self.inner.read_heap_u256(HeapId::FIRST, slot * 32);
+            assert_eq!(current_value, expected_value);
+        }
+    }
+
+    fn write_to_bootloader_heap(&mut self, cells: &[(usize, U256)]) {
+        self.write_to_bootloader_heap(cells.iter().copied());
+    }
+
+    fn read_storage(&mut self, key: StorageKey) -> U256 {
+        let storage_changes = self.inner.world_diff().get_storage_state();
+        let main_storage = &mut self.world.storage;
+        storage_changes
+            .get(&(*key.account().address(), h256_to_u256(*key.key())))
+            .copied()
+            .unwrap_or_else(|| h256_to_u256(main_storage.read_value(&key)))
+    }
+
+    fn last_l2_block_hash(&self) -> H256 {
+        self.bootloader_state.last_l2_block().get_hash()
+    }
+
+    fn push_l2_block_unchecked(&mut self, block: L2BlockEnv) {
+        self.bootloader_state.push_l2_block(block);
+    }
+
+    fn push_transaction_with_refund(&mut self, tx: Transaction, refund: u64) {
+        self.push_transaction_inner(tx, refund, true);
+    }
+}
diff --git a/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs b/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs
index 6d1e0f016e9..438d6aabe55 100644
--- a/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs
+++ b/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs
@@ -1,180 +1,6 @@
-use zksync_types::{Execute, ExecuteTransactionCommon, Nonce};
-
-use crate::{
-    interface::{
-        ExecutionResult, Halt, TxExecutionMode, TxRevertReason, VmExecutionMode, VmInterfaceExt,
-        VmRevertReason,
-    },
-    versions::testonly::ContractToDeploy,
-    vm_fast::tests::{
-        tester::{Account, VmTesterBuilder},
-        utils::read_nonce_holder_tester,
-    },
-};
-
-pub enum NonceHolderTestMode {
-    SetValueUnderNonce,
-    IncreaseMinNonceBy5,
-    IncreaseMinNonceTooMuch,
-    LeaveNonceUnused,
-    IncreaseMinNonceBy1,
-    SwitchToArbitraryOrdering,
-}
-
-impl From<NonceHolderTestMode> for u8 {
-    fn from(mode: NonceHolderTestMode) -> u8 {
-        match mode {
-            NonceHolderTestMode::SetValueUnderNonce => 0,
-            NonceHolderTestMode::IncreaseMinNonceBy5 => 1,
-            NonceHolderTestMode::IncreaseMinNonceTooMuch => 2,
-            NonceHolderTestMode::LeaveNonceUnused => 3,
-            NonceHolderTestMode::IncreaseMinNonceBy1 => 4,
-            NonceHolderTestMode::SwitchToArbitraryOrdering => 5,
-        }
-    }
-}
+use crate::{versions::testonly::nonce_holder::test_nonce_holder, vm_fast::Vm};
 
 #[test]
-fn test_nonce_holder() {
-    let mut account = Account::random();
-    let hex_addr = hex::encode(account.address.to_fixed_bytes());
-
-    let mut vm = VmTesterBuilder::new()
-        .with_empty_in_memory_storage()
.with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_custom_contracts(vec![ContractToDeploy::account( - read_nonce_holder_tester().to_vec(), - account.address, - )]) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let mut run_nonce_test = |nonce: u32, - test_mode: NonceHolderTestMode, - error_message: Option, - comment: &'static str| { - // In this test we have to reset VM state after each test case. Because once bootloader failed during the validation of the transaction, - // it will fail again and again. At the same time we have to keep the same storage, because we want to keep the nonce holder contract state. - // The easiest way in terms of lifetimes is to reuse `vm_builder` to achieve it. - vm.reset_state(true); - let mut transaction = account.get_l2_tx_for_execute_with_nonce( - Execute { - contract_address: Some(account.address), - calldata: vec![12], - value: Default::default(), - factory_deps: vec![], - }, - None, - Nonce(nonce), - ); - let ExecuteTransactionCommon::L2(tx_data) = &mut transaction.common_data else { - unreachable!(); - }; - tx_data.signature = vec![test_mode.into()]; - vm.vm.push_transaction_inner(transaction, 0, true); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(msg) = error_message { - let expected_error = - TxRevertReason::Halt(Halt::ValidationFailed(VmRevertReason::General { - msg, - data: vec![], - })); - let ExecutionResult::Halt { reason } = result.result else { - panic!("Expected revert, got {:?}", result.result); - }; - assert_eq!(reason.to_string(), expected_error.to_string(), "{comment}"); - } else { - assert!(!result.result.is_failed(), "{comment}: {result:?}"); - } - }; - // Test 1: trying to set value under non sequential nonce value. - run_nonce_test( - 1u32, - NonceHolderTestMode::SetValueUnderNonce, - Some("Error function_selector = 0x13595475, data = 0x13595475".to_string()), - "Allowed to set value under non sequential value", - ); - - // Test 2: increase min nonce by 1 with sequential nonce ordering: - run_nonce_test( - 0u32, - NonceHolderTestMode::IncreaseMinNonceBy1, - None, - "Failed to increment nonce by 1 for sequential account", - ); - - // Test 3: correctly set value under nonce with sequential nonce ordering: - run_nonce_test( - 1u32, - NonceHolderTestMode::SetValueUnderNonce, - None, - "Failed to set value under nonce sequential value", - ); - - // Test 5: migrate to the arbitrary nonce ordering: - run_nonce_test( - 2u32, - NonceHolderTestMode::SwitchToArbitraryOrdering, - None, - "Failed to switch to arbitrary ordering", - ); - - // Test 6: increase min nonce by 5 - run_nonce_test( - 6u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - None, - "Failed to increase min nonce by 5", - ); - - // Test 7: since the nonces in range [6,10] are no longer allowed, the - // tx with nonce 10 should not be allowed - run_nonce_test( - 10u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - Some(format!("Error function_selector = 0xe90aded4, data = 0xe90aded4000000000000000000000000{hex_addr}000000000000000000000000000000000000000000000000000000000000000a")), - "Allowed to reuse nonce below the minimal one", - ); - - // Test 8: we should be able to use nonce 13 - run_nonce_test( - 13u32, - NonceHolderTestMode::SetValueUnderNonce, - None, - "Did not allow to use unused nonce 10", - ); - - // Test 9: we should not be able to reuse nonce 13 - run_nonce_test( - 13u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - Some(format!("Error function_selector = 0xe90aded4, data = 
0xe90aded4000000000000000000000000{hex_addr}000000000000000000000000000000000000000000000000000000000000000d")), - "Allowed to reuse the same nonce twice", - ); - - // Test 10: we should be able to simply use nonce 14, while bumping the minimal nonce by 5 - run_nonce_test( - 14u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - None, - "Did not allow to use a bumped nonce", - ); - - // Test 11: Do not allow bumping nonce by too much - run_nonce_test( - 16u32, - NonceHolderTestMode::IncreaseMinNonceTooMuch, - Some("Error function_selector = 0x45ac24a6, data = 0x45ac24a600000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000040000000000000000000000".to_string()), - "Allowed for incrementing min nonce too much", - ); - - // Test 12: Do not allow not setting a nonce as used - run_nonce_test( - 16u32, - NonceHolderTestMode::LeaveNonceUnused, - Some(format!("Error function_selector = 0x1f2f8478, data = 0x1f2f8478000000000000000000000000{hex_addr}0000000000000000000000000000000000000000000000000000000000000010")), - "Allowed to leave nonce as unused", - ); +fn nonce_holder() { + test_nonce_holder::>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/precompiles.rs b/core/lib/multivm/src/versions/vm_fast/tests/precompiles.rs index b3ca1596217..ccf1463979c 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/precompiles.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/precompiles.rs @@ -1,116 +1,19 @@ -use circuit_sequencer_api_1_5_0::geometry_config::get_geometry_config; -use zksync_types::{Address, Execute}; - -use super::{tester::VmTesterBuilder, utils::read_precompiles_contract}; use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - versions::testonly::ContractToDeploy, - vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, + versions::testonly::precompiles::{test_ecrecover, test_keccak, test_sha256}, + vm_fast::Vm, }; #[test] -fn test_keccak() { - // Execute special transaction and check that at least 1000 keccak calls were made. - let contract = read_precompiles_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![ContractToDeploy::account(contract, address)]) - .build(); - - // calldata for `doKeccak(1000)`. - let keccak1000_calldata = - "370f20ac00000000000000000000000000000000000000000000000000000000000003e8"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(address), - calldata: hex::decode(keccak1000_calldata).unwrap(), - value: 0.into(), - factory_deps: vec![], - }, - None, - ); - vm.vm.push_transaction(tx); - - let exec_result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); - - let keccak_count = exec_result.statistics.circuit_statistic.keccak256 - * get_geometry_config().cycles_per_keccak256_circuit as f32; - assert!(keccak_count >= 1000.0, "{keccak_count}"); +fn keccak() { + test_keccak::>(); } #[test] -fn test_sha256() { - // Execute special transaction and check that at least 1000 `sha256` calls were made. 
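// Aside: how these precompile tests turn circuit statistics into call counts,
// as a minimal sketch: the VM reports fractional circuit usage, and multiplying
// by cycles-per-circuit recovers the number of precompile rounds. The cycle
// count below is an illustrative assumption, not the real geometry config.
fn precompile_calls(circuit_fraction: f32, cycles_per_circuit: u32) -> f32 {
    circuit_fraction * cycles_per_circuit as f32
}

fn main() {
    // E.g. a reported 0.5 of a sha256 circuit at an assumed 2,048 cycles per
    // circuit corresponds to ~1,024 hash rounds, passing the `>= 1000` check.
    assert!(precompile_calls(0.5, 2_048) >= 1000.0);
}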
- let contract = read_precompiles_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![ContractToDeploy::account(contract, address)]) - .build(); - - // calldata for `doSha256(1000)`. - let sha1000_calldata = - "5d0b4fb500000000000000000000000000000000000000000000000000000000000003e8"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(address), - calldata: hex::decode(sha1000_calldata).unwrap(), - value: 0.into(), - factory_deps: vec![], - }, - None, - ); - vm.vm.push_transaction(tx); - - let exec_result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); - - let sha_count = exec_result.statistics.circuit_statistic.sha256 - * get_geometry_config().cycles_per_sha256_circuit as f32; - assert!(sha_count >= 1000.0, "{sha_count}"); +fn sha256() { + test_sha256::<Vm<_>>(); } #[test] -fn test_ecrecover() { - // Execute simple transfer and check that exactly 1 `ecrecover` call was made (it's done during tx validation). - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(account.address), - calldata: vec![], - value: 0.into(), - factory_deps: vec![], - }, - None, - ); - vm.vm.push_transaction(tx); - - let exec_result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); - - let ecrecover_count = exec_result.statistics.circuit_statistic.ecrecover - * get_geometry_config().cycles_per_ecrecover_circuit as f32; - assert!((ecrecover_count - 1.0).abs() < 1e-4, "{ecrecover_count}"); +fn ecrecover() { + test_ecrecover::<Vm<_>>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/prestate_tracer.rs b/core/lib/multivm/src/versions/vm_fast/tests/prestate_tracer.rs deleted file mode 100644 index 63620c7d9ff..00000000000 --- a/core/lib/multivm/src/versions/vm_fast/tests/prestate_tracer.rs +++ /dev/null @@ -1,143 +0,0 @@ -use std::sync::Arc; - -use once_cell::sync::OnceCell; -use zksync_test_account::TxType; -use zksync_types::{utils::deployed_address_create, Execute, U256}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - tracers::PrestateTracer, - vm_latest::{ - constants::BATCH_COMPUTATIONAL_GAS_LIMIT, - tests::{tester::VmTesterBuilder, utils::read_simple_transfer_contract}, - HistoryEnabled, ToTracerPointer, - }, -}; - -#[test] -fn test_prestate_tracer() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - vm.deploy_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let tx1 = account.get_test_contract_transaction( - vm.test_contract.unwrap(), - false, - Default::default(), - true, - TxType::L2, - ); - vm.vm.push_transaction(tx1); - - let contract_address = vm.test_contract.unwrap(); - let prestate_tracer_result
= Arc::new(OnceCell::default()); - let prestate_tracer = PrestateTracer::new(false, prestate_tracer_result.clone()); - let tracer_ptr = prestate_tracer.into_tracer_pointer(); - vm.vm.inspect(tracer_ptr.into(), VmExecutionMode::Batch); - - let prestate_result = Arc::try_unwrap(prestate_tracer_result) - .unwrap() - .take() - .unwrap_or_default(); - - assert!(prestate_result.1.contains_key(&contract_address)); -} - -#[test] -fn test_prestate_tracer_diff_mode() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - let contract = read_simple_transfer_contract(); - let tx = vm - .deployer - .as_mut() - .expect("You have to initialize builder with deployer") - .get_deploy_tx(&contract, None, TxType::L2) - .tx; - let nonce = tx.nonce().unwrap().0.into(); - vm.vm.push_transaction(tx); - vm.vm.execute(VmExecutionMode::OneTx); - let deployed_address = deployed_address_create(vm.deployer.as_ref().unwrap().address, nonce); - vm.test_contract = Some(deployed_address); - - // Deploy a second copy of the contract to see its appearance in the pre-state - let tx2 = vm - .deployer - .as_mut() - .expect("You have to initialize builder with deployer") - .get_deploy_tx(&contract, None, TxType::L2) - .tx; - let nonce2 = tx2.nonce().unwrap().0.into(); - vm.vm.push_transaction(tx2); - vm.vm.execute(VmExecutionMode::OneTx); - let deployed_address2 = deployed_address_create(vm.deployer.as_ref().unwrap().address, nonce2); - - let account = &mut vm.rich_accounts[0]; - - //enter ether to contract to see difference in the balance post execution - let tx0 = Execute { - contract_address: vm.test_contract.unwrap(), - calldata: Default::default(), - value: U256::from(100000), - factory_deps: None, - }; - - vm.vm - .push_transaction(account.get_l2_tx_for_execute(tx0.clone(), None)); - - let tx1 = Execute { - contract_address: deployed_address2, - calldata: Default::default(), - value: U256::from(200000), - factory_deps: None, - }; - - vm.vm - .push_transaction(account.get_l2_tx_for_execute(tx1, None)); - let prestate_tracer_result = Arc::new(OnceCell::default()); - let prestate_tracer = PrestateTracer::new(true, prestate_tracer_result.clone()); - let tracer_ptr = prestate_tracer.into_tracer_pointer(); - vm.vm - .inspect(tracer_ptr.into(), VmExecutionMode::Bootloader); - - let prestate_result = Arc::try_unwrap(prestate_tracer_result) - .unwrap() - .take() - .unwrap_or_default(); - - //assert that the pre-state contains both deployed contracts with balance zero - assert!(prestate_result.0.contains_key(&deployed_address)); - assert!(prestate_result.0.contains_key(&deployed_address2)); - assert_eq!( - prestate_result.0[&deployed_address].balance, - Some(U256::zero()) - ); - assert_eq!( - prestate_result.0[&deployed_address2].balance, - Some(U256::zero()) - ); - - //assert that the post-state contains both deployed contracts with the correct balance - assert!(prestate_result.1.contains_key(&deployed_address)); - assert!(prestate_result.1.contains_key(&deployed_address2)); - assert_eq!( - prestate_result.1[&deployed_address].balance, - Some(U256::from(100000)) - ); - assert_eq!( - prestate_result.1[&deployed_address2].balance, - Some(U256::from(200000)) - ); -} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/refunds.rs b/core/lib/multivm/src/versions/vm_fast/tests/refunds.rs index 1856995149a..335cb4afb1c 100644 
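Note that `prestate_tracer.rs` is deleted outright rather than rewrapped, presumably because the fast VM does not support the legacy tracer infrastructure these tests depend on. The deleted tests also show the hand-off idiom the old harness used: tracer output travels through a shared `Arc<OnceCell<_>>` that the test unwraps after execution. A self-contained sketch of just that hand-off, with a stand-in payload type:

```rust
use std::sync::Arc;

use once_cell::sync::OnceCell;

fn main() {
    // The test keeps one handle and gives a clone to the tracer.
    let result: Arc<OnceCell<Vec<u32>>> = Arc::new(OnceCell::default());
    let tracer_handle = Arc::clone(&result);

    // Stand-in for the tracer recording its result during VM execution.
    tracer_handle.set(vec![1, 2, 3]).unwrap();
    drop(tracer_handle); // the tracer is gone once execution finishes

    // Mirrors `Arc::try_unwrap(..).unwrap().take().unwrap_or_default()`
    // in the deleted test: reclaim sole ownership, then move the value out.
    let collected = Arc::try_unwrap(result).unwrap().take().unwrap_or_default();
    assert_eq!(collected, vec![1, 2, 3]);
}
```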
--- a/core/lib/multivm/src/versions/vm_fast/tests/refunds.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/refunds.rs @@ -1,221 +1,16 @@ -use ethabi::Token; -use zksync_types::{Address, Execute, U256}; - use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - versions::testonly::ContractToDeploy, - vm_fast::tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::{read_expensive_contract, read_test_contract}, + versions::testonly::refunds::{ + test_negative_pubdata_for_transaction, test_predetermined_refunded_gas, }, + vm_fast::Vm, }; #[test] -fn test_predetermined_refunded_gas() { - // In this test, we compare the execution of the bootloader with the predefined - // refunded gas and without them - - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_batch = vm.vm.batch_env.clone(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let DeployContractsTx { - tx, - bytecode_hash: _, - address: _, - } = account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(!result.result.is_failed()); - - // If the refund provided by the operator or the final refund are the 0 - // there is no impact of the operator's refund at all and so this test does not - // make much sense. - assert!( - result.refunds.operator_suggested_refund > 0, - "The operator's refund is 0" - ); - assert!(result.refunds.gas_refunded > 0, "The final refund is 0"); - - let result_without_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_without_predefined_refunds = vm.vm.get_current_execution_state(); - assert!(!result_without_predefined_refunds.result.is_failed(),); - - // Here we want to provide the same refund from the operator and check that it's the correct one. - // We execute the whole block without refund tracer, because refund tracer will eventually override the provided refund. 
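The deleted refunds test compares full `CurrentExecutionState`s across two batch executions: one where the refund tracer computes the refund, and one where the recorded refund is fed back in via `push_transaction_inner`. Because `used_contract_hashes` is flattened from a `HashMap`, both sides are sorted before comparison. A self-contained sketch of that normalization step, using a stand-in struct with only two of the real fields:

```rust
// Stand-in for `CurrentExecutionState`, which also carries user L2-to-L1
// logs, system logs, and deduplicated storage logs.
#[derive(Debug, PartialEq)]
struct StateDigest {
    events: Vec<String>,
    used_contract_hashes: Vec<u64>,
}

fn assert_same_state(mut with_refund: StateDigest, mut without_refund: StateDigest) {
    // Hash-map iteration order is unstable, so sort before comparing.
    with_refund.used_contract_hashes.sort();
    without_refund.used_contract_hashes.sort();
    assert_eq!(with_refund, without_refund);
}
```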
- // But the overall result should be the same - - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - vm.vm - .push_transaction_inner(tx.clone(), result.refunds.gas_refunded, true); - - let result_with_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_with_predefined_refunds = vm.vm.get_current_execution_state(); - - assert!(!result_with_predefined_refunds.result.is_failed()); - - // We need to sort these lists as those are flattened from HashMaps - current_state_with_predefined_refunds - .used_contract_hashes - .sort(); - current_state_without_predefined_refunds - .used_contract_hashes - .sort(); - - assert_eq!( - current_state_with_predefined_refunds.events, - current_state_without_predefined_refunds.events - ); - - assert_eq!( - current_state_with_predefined_refunds.user_l2_to_l1_logs, - current_state_without_predefined_refunds.user_l2_to_l1_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.system_logs, - current_state_without_predefined_refunds.system_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.deduplicated_storage_logs, - current_state_without_predefined_refunds.deduplicated_storage_logs - ); - assert_eq!( - current_state_with_predefined_refunds.used_contract_hashes, - current_state_without_predefined_refunds.used_contract_hashes - ); - - // In this test we put the different refund from the operator. - // We still can't use the refund tracer, because it will override the refund. - // But we can check that the logs and events have changed. - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let changed_operator_suggested_refund = result.refunds.gas_refunded + 1000; - vm.vm - .push_transaction_inner(tx, changed_operator_suggested_refund, true); - let result = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_with_changed_predefined_refunds = vm.vm.get_current_execution_state(); - - assert!(!result.result.is_failed()); - current_state_with_changed_predefined_refunds - .used_contract_hashes - .sort(); - current_state_without_predefined_refunds - .used_contract_hashes - .sort(); - - assert_eq!( - current_state_with_changed_predefined_refunds.events.len(), - current_state_without_predefined_refunds.events.len() - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.events, - current_state_without_predefined_refunds.events - ); - - assert_eq!( - current_state_with_changed_predefined_refunds.user_l2_to_l1_logs, - current_state_without_predefined_refunds.user_l2_to_l1_logs - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.system_logs, - current_state_without_predefined_refunds.system_logs - ); - - assert_eq!( - current_state_with_changed_predefined_refunds - .deduplicated_storage_logs - .len(), - current_state_without_predefined_refunds - .deduplicated_storage_logs - .len() - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.deduplicated_storage_logs, - current_state_without_predefined_refunds.deduplicated_storage_logs - ); - assert_eq!( - current_state_with_changed_predefined_refunds.used_contract_hashes, - current_state_without_predefined_refunds.used_contract_hashes - ); +fn predetermined_refunded_gas() { + 
test_predetermined_refunded_gas::<Vm<_>>(); } #[test] fn negative_pubdata_for_transaction() { - let expensive_contract_address = Address::random(); - let (expensive_contract_bytecode, expensive_contract) = read_expensive_contract(); - let expensive_function = expensive_contract.function("expensive").unwrap(); - let cleanup_function = expensive_contract.function("cleanUp").unwrap(); - - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .with_custom_contracts(vec![ContractToDeploy::new( - expensive_contract_bytecode, - expensive_contract_address, - )]) - .build(); - - let expensive_tx = vm.rich_accounts[0].get_l2_tx_for_execute( - Execute { - contract_address: Some(expensive_contract_address), - calldata: expensive_function - .encode_input(&[Token::Uint(10.into())]) - .unwrap(), - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - vm.vm.push_transaction(expensive_tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "Transaction wasn't successful: {result:#?}" - ); - - // This transaction cleans all initial writes in the contract, thus having negative `pubdata` impact. - let clean_up_tx = vm.rich_accounts[0].get_l2_tx_for_execute( - Execute { - contract_address: Some(expensive_contract_address), - calldata: cleanup_function.encode_input(&[]).unwrap(), - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - vm.vm.push_transaction(clean_up_tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "Transaction wasn't successful: {result:#?}" - ); - assert!(result.refunds.operator_suggested_refund > 0); - assert_eq!( - result.refunds.gas_refunded, - result.refunds.operator_suggested_refund - ); + test_negative_pubdata_for_transaction::<Vm<_>>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs index e119cea0114..22e4ebf258c 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs @@ -1,176 +1,6 @@ -use ethabi::Token; -use zksync_eth_signer::{EthereumSigner, TransactionParameters}; -use zksync_system_constants::L2_BASE_TOKEN_ADDRESS; -use zksync_types::{ - fee::Fee, l2::L2Tx, transaction_request::TransactionRequest, - utils::storage_key_for_standard_token_balance, AccountTreeId, Address, Eip712Domain, Execute, - L2ChainId, Nonce, Transaction, U256, -}; -use zksync_utils::h256_to_u256; +use crate::{versions::testonly::require_eip712::test_require_eip712, vm_fast::Vm}; -use crate::{ - interface::{ - storage::ReadStorage, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt, - }, - versions::testonly::ContractToDeploy, - vm_fast::tests::{ - tester::{Account, VmTester, VmTesterBuilder}, - utils::read_many_owners_custom_account_contract, - }, -}; - -impl VmTester<()> { - pub(crate) fn get_eth_balance(&mut self, address: Address) -> U256 { - let key = storage_key_for_standard_token_balance( - AccountTreeId::new(L2_BASE_TOKEN_ADDRESS), - &address, - ); - self.vm - .inner - .world_diff() - .get_storage_state() - .get(&(L2_BASE_TOKEN_ADDRESS, h256_to_u256(*key.key()))) - .copied() - .unwrap_or_else(|| h256_to_u256(self.vm.world.storage.read_value(&key))) - } -} - -/// This test deploys 'buggy' account abstraction code, and then tries accessing it both with legacy -/// and EIP712 transactions.
-/// Currently we support both, but in the future, we should allow only EIP712 transactions to access the AA accounts. -#[tokio::test] -async fn test_require_eip712() { - // Use 3 accounts: - // - `private_address` - EOA account, where we have the key - // - `account_address` - AA account, where the contract is deployed - // - beneficiary - an EOA account, where we'll try to transfer the tokens. - let account_abstraction = Account::random(); - let mut private_account = Account::random(); - let beneficiary = Account::random(); - - let (bytecode, contract) = read_many_owners_custom_account_contract(); - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_custom_contracts(vec![ContractToDeploy::account( - bytecode, - account_abstraction.address, - )]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account_abstraction.clone(), private_account.clone()]) - .build(); - - assert_eq!(vm.get_eth_balance(beneficiary.address), U256::from(0)); - - let chain_id: u32 = 270; - - // First, let's set the owners of the AA account to the `private_address`. - // (so that messages signed by `private_address`, are authorized to act on behalf of the AA account). - let set_owners_function = contract.function("setOwners").unwrap(); - let encoded_input = set_owners_function - .encode_input(&[Token::Array(vec![Token::Address(private_account.address)])]) - .unwrap(); - - let tx = private_account.get_l2_tx_for_execute( - Execute { - contract_address: Some(account_abstraction.address), - calldata: encoded_input, - value: Default::default(), - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - let private_account_balance = vm.get_eth_balance(private_account.address); - - // And now let's do the transfer from the 'account abstraction' to 'beneficiary' (using 'legacy' transaction). - // Normally this would not work - unless the operator is malicious. - let aa_raw_tx = TransactionParameters { - nonce: U256::from(0), - to: Some(beneficiary.address), - gas: U256::from(100000000), - gas_price: Some(U256::from(10000000)), - value: U256::from(888000088), - data: vec![], - chain_id: 270, - transaction_type: None, - access_list: None, - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - max_fee_per_blob_gas: None, - blob_versioned_hashes: None, - }; - - let aa_tx = private_account.sign_legacy_tx(aa_raw_tx).await; - let (tx_request, hash) = TransactionRequest::from_bytes(&aa_tx, L2ChainId::from(270)).unwrap(); - - let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000).unwrap(); - l2_tx.set_input(aa_tx, hash); - // Pretend that operator is malicious and sets the initiator to the AA account. - l2_tx.common_data.initiator_address = account_abstraction.address; - let transaction: Transaction = l2_tx.into(); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(888000088) - ); - // Make sure that the tokens were transferred from the AA account. 
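The balance assertions that follow all go through the deleted `get_eth_balance` helper above, which consults the VM's uncommitted write set first and only then falls back to the backing storage. A self-contained sketch of that two-level lookup, with stand-in types rather than the crate's actual API:

```rust
use std::collections::HashMap;

struct World {
    // Stand-in for `world_diff().get_storage_state()`: writes made by the
    // current batch that have not been committed to storage yet.
    pending_writes: HashMap<u64, u128>,
    // Stand-in for the underlying `ReadStorage` implementation.
    committed: HashMap<u64, u128>,
}

impl World {
    fn eth_balance(&self, slot: u64) -> u128 {
        // Pending writes shadow committed values; missing slots read as zero.
        self.pending_writes
            .get(&slot)
            .copied()
            .unwrap_or_else(|| self.committed.get(&slot).copied().unwrap_or(0))
    }
}
```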
- assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); - - // // Now send the 'classic' EIP712 transaction - let tx_712 = L2Tx::new( - Some(beneficiary.address), - vec![], - Nonce(1), - Fee { - gas_limit: U256::from(1000000000), - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - gas_per_pubdata_limit: U256::from(1000000000), - }, - account_abstraction.address, - U256::from(28374938), - vec![], - Default::default(), - ); - - let mut transaction_request: TransactionRequest = tx_712.into(); - transaction_request.chain_id = Some(chain_id.into()); - - let domain = Eip712Domain::new(L2ChainId::from(chain_id)); - let signature = private_account - .get_pk_signer() - .sign_typed_data(&domain, &transaction_request) - .await - .unwrap(); - let encoded_tx = transaction_request.get_signed_bytes(&signature).unwrap(); - - let (aa_txn_request, aa_hash) = - TransactionRequest::from_bytes(&encoded_tx, L2ChainId::from(chain_id)).unwrap(); - - let mut l2_tx = L2Tx::from_request(aa_txn_request, 100000).unwrap(); - l2_tx.set_input(encoded_tx, aa_hash); - - let transaction: Transaction = l2_tx.into(); - vm.vm.push_transaction(transaction); - vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(916375026) - ); - assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); +#[test] +fn require_eip712() { + test_require_eip712::<Vm<_>>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs index 548bf8daadf..e8af23fa1e9 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs @@ -1,173 +1,21 @@ -use ethabi::Token; -use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams}; -use zksync_types::{Execute, Nonce, U256}; - use crate::{ - interface::TxExecutionMode, - vm_fast::tests::{ - tester::{DeployContractsTx, TransactionTestInfo, TxModifier, TxType, VmTesterBuilder}, - utils::read_test_contract, + versions::testonly::rollbacks::{ + test_rollback_in_call_mode, test_vm_loadnext_rollbacks, test_vm_rollbacks, }, + vm_fast::Vm, }; #[test] -fn test_vm_rollbacks() { - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let mut account = vm.rich_accounts[0].clone(); - let counter = read_test_contract(); - let tx_0 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_1 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_2 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(tx_0.clone(), false), - TransactionTestInfo::new_processed(tx_1.clone(), false), - TransactionTestInfo::new_processed(tx_2.clone(), false), - ]); - - // reset vm - vm.reset_with_empty_storage(); - - let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignatureLength.into()), - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongMagicValue.into()), - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignature.into()), - // The correct nonce is 0, this tx will fail - TransactionTestInfo::new_rejected( - tx_2.clone(),
TxModifier::WrongNonce(tx_2.nonce().unwrap(), Nonce(0)).into(), - ), - // This tx will succeed - TransactionTestInfo::new_processed(tx_0.clone(), false), - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected( - tx_0.clone(), - TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), - ), - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected( - tx_2.clone(), - TxModifier::WrongNonce(tx_2.nonce().unwrap(), Nonce(1)).into(), - ), - // This tx will succeed - TransactionTestInfo::new_processed(tx_1, false), - // The correct nonce is 2, this tx will fail - TransactionTestInfo::new_rejected( - tx_0.clone(), - TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), - ), - // This tx will succeed - TransactionTestInfo::new_processed(tx_2.clone(), false), - // This tx will fail - TransactionTestInfo::new_rejected( - tx_2.clone(), - TxModifier::NonceReused(tx_2.initiator_account(), tx_2.nonce().unwrap()).into(), - ), - TransactionTestInfo::new_rejected( - tx_0.clone(), - TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), - ), - ]); - - pretty_assertions::assert_eq!(result_without_rollbacks, result_with_rollbacks); +fn vm_rollbacks() { + test_vm_rollbacks::<Vm<_>>(); } #[test] -fn test_vm_loadnext_rollbacks() { - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let mut account = vm.rich_accounts[0].clone(); - - let loadnext_contract = get_loadnext_contract(); - let loadnext_constructor_data = &[Token::Uint(U256::from(100))]; - let DeployContractsTx { - tx: loadnext_deploy_tx, - address, - .. - } = account.get_deploy_tx_with_factory_deps( - &loadnext_contract.bytecode, - Some(loadnext_constructor_data), - loadnext_contract.factory_deps.clone(), - TxType::L2, - ); - - let loadnext_tx_1 = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(address), - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: vec![], - }, - None, - ); - - let loadnext_tx_2 = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(address), - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: vec![], - }, - None, - ); - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_1.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), false), - ]); - - // reset vm - vm.reset_with_empty_storage(); - - let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_1.clone(), true), - TransactionTestInfo::new_rejected( - loadnext_deploy_tx.clone(), - TxModifier::NonceReused( - loadnext_deploy_tx.initiator_account(), - loadnext_deploy_tx.nonce().unwrap(), - ) - .into(), - ), - TransactionTestInfo::new_processed(loadnext_tx_1, false), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), -
TransactionTestInfo::new_rejected( - loadnext_deploy_tx.clone(), - TxModifier::NonceReused( - loadnext_deploy_tx.initiator_account(), - loadnext_deploy_tx.nonce().unwrap(), - ) - .into(), - ), - TransactionTestInfo::new_processed(loadnext_tx_2, false), - ]); +fn vm_loadnext_rollbacks() { + test_vm_loadnext_rollbacks::<Vm<_>>(); +} - assert_eq!(result_without_rollbacks, result_with_rollbacks); +#[test] +fn rollback_in_call_mode() { + test_rollback_in_call_mode::<Vm<_>>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/secp256r1.rs b/core/lib/multivm/src/versions/vm_fast/tests/secp256r1.rs new file mode 100644 index 00000000000..d9661c7f713 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_fast/tests/secp256r1.rs @@ -0,0 +1,6 @@ +use crate::{versions::testonly::secp256r1::test_secp256r1, vm_fast::Vm}; + +#[test] +fn secp256r1() { + test_secp256r1::<Vm<_>>(); +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/simple_execution.rs b/core/lib/multivm/src/versions/vm_fast/tests/simple_execution.rs index 8c916a541e2..4fe33d237e9 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/simple_execution.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/simple_execution.rs @@ -1,80 +1,14 @@ -use assert_matches::assert_matches; - use crate::{ - interface::{ExecutionResult, VmExecutionMode, VmInterface, VmInterfaceExt}, - vm_fast::tests::tester::{TxType, VmTesterBuilder}, + versions::testonly::simple_execution::{test_estimate_fee, test_simple_execute}, + vm_fast::Vm, }; #[test] fn estimate_fee() { - let mut vm_tester = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - vm_tester.deploy_test_contract(); - let account = &mut vm_tester.rich_accounts[0]; - - let tx = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L2, - ); - - vm_tester.vm.push_transaction(tx); - - let result = vm_tester.vm.execute(VmExecutionMode::OneTx); - assert_matches!(result.result, ExecutionResult::Success { .. }); + test_estimate_fee::<Vm<_>>(); } #[test] fn simple_execute() { - let mut vm_tester = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - vm_tester.deploy_test_contract(); - - let account = &mut vm_tester.rich_accounts[0]; - - let tx1 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - - let tx2 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - true, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - - let tx3 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - let vm = &mut vm_tester.vm; - vm.push_transaction(tx1); - vm.push_transaction(tx2); - vm.push_transaction(tx3); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Success { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Revert { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Success { .. }); - let block_tip = vm.execute(VmExecutionMode::Batch); - assert_matches!(block_tip.result, ExecutionResult::Success { ..
}); + test_simple_execute::<Vm<_>>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/storage.rs b/core/lib/multivm/src/versions/vm_fast/tests/storage.rs index 2cfadb640e7..54a38814d3b 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/storage.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/storage.rs @@ -1,133 +1,14 @@ -use ethabi::Token; -use zksync_contracts::{load_contract, read_bytecode}; -use zksync_types::{Address, Execute, U256}; - use crate::{ - interface::{ - TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, - }, - versions::testonly::ContractToDeploy, - vm_fast::tests::tester::VmTesterBuilder, + versions::testonly::storage::{test_storage_behavior, test_transient_storage_behavior}, + vm_fast::Vm, }; -fn test_storage(first_tx_calldata: Vec<u8>, second_tx_calldata: Vec<u8>) -> u32 { - let bytecode = read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/storage/storage.sol/StorageTester.json", - ); - - let test_contract_address = Address::random(); - - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_random_rich_accounts(1) - .with_custom_contracts(vec![ContractToDeploy::new(bytecode, test_contract_address)]) - .build(); - - let account = &mut vm.rich_accounts[0]; - - let tx1 = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(test_contract_address), - calldata: first_tx_calldata, - value: 0.into(), - factory_deps: vec![], - }, - None, - ); - - let tx2 = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(test_contract_address), - calldata: second_tx_calldata, - value: 0.into(), - factory_deps: vec![], - }, - None, - ); - - vm.vm.make_snapshot(); - vm.vm.push_transaction(tx1); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "First tx failed"); - vm.vm.pop_snapshot_no_rollback(); - - // We rollback once because transient storage and rollbacks are a tricky combination. - vm.vm.make_snapshot(); - vm.vm.push_transaction(tx2.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Second tx failed"); - vm.vm.rollback_to_the_latest_snapshot(); - - vm.vm.make_snapshot(); - vm.vm.push_transaction(tx2); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Second tx failed on second run"); - - result.statistics.pubdata_published -} - -fn test_storage_one_tx(second_tx_calldata: Vec<u8>) -> u32 { - test_storage(vec![], second_tx_calldata) -} - #[test] -fn test_storage_behavior() { - let contract = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/storage/storage.sol/StorageTester.json", - ); - - // In all of the tests below we provide the first tx to ensure that the tracers will not include - // the statistics from the start of the bootloader and will only include those for the transaction itself.
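Each pubdata assertion below subtracts a baseline run, so fixed bootloader costs cancel out and only the pubdata attributable to the measured transaction remains. A minimal sketch of the idiom; the 65-byte delta matches the `simpleWrite` assertion in this diff, while the baseline value is hypothetical:

```rust
// Subtract a no-op baseline so each assertion sees only the transaction's
// own pubdata contribution.
fn pubdata_delta(with_tx: u32, baseline: u32) -> u32 {
    with_tx - baseline
}

#[test]
fn delta_excludes_fixed_costs() {
    let base_pubdata = 100; // hypothetical pubdata for an empty transaction
    let simple_write_pubdata = base_pubdata + 65; // one initial storage write
    assert_eq!(pubdata_delta(simple_write_pubdata, base_pubdata), 65);
}
```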
- - let base_pubdata = test_storage_one_tx(vec![]); - let simple_test_pubdata = test_storage_one_tx( - contract - .function("simpleWrite") - .unwrap() - .encode_input(&[]) - .unwrap(), - ); - let resetting_write_pubdata = test_storage_one_tx( - contract - .function("resettingWrite") - .unwrap() - .encode_input(&[]) - .unwrap(), - ); - let resetting_write_via_revert_pubdata = test_storage_one_tx( - contract - .function("resettingWriteViaRevert") - .unwrap() - .encode_input(&[]) - .unwrap(), - ); - - assert_eq!(simple_test_pubdata - base_pubdata, 65); - assert_eq!(resetting_write_pubdata - base_pubdata, 34); - assert_eq!(resetting_write_via_revert_pubdata - base_pubdata, 34); +fn storage_behavior() { + test_storage_behavior::<Vm<_>>(); } #[test] -fn test_transient_storage_behavior() { - let contract = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/storage/storage.sol/StorageTester.json", - ); - - let first_tstore_test = contract - .function("testTransientStore") - .unwrap() - .encode_input(&[]) - .unwrap(); - // Second transaction checks that, as expected, the transient storage is cleared after the first transaction. - let second_tstore_test = contract - .function("assertTValue") - .unwrap() - .encode_input(&[Token::Uint(U256::zero())]) - .unwrap(); - - test_storage(first_tstore_test, second_tstore_test); +fn transient_storage_behavior() { + test_transient_storage_behavior::<Vm<_>>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/tester/mod.rs b/core/lib/multivm/src/versions/vm_fast/tests/tester/mod.rs deleted file mode 100644 index 212e569d510..00000000000 --- a/core/lib/multivm/src/versions/vm_fast/tests/tester/mod.rs +++ /dev/null @@ -1,6 +0,0 @@ -pub(crate) use transaction_test_info::{ExpectedError, TransactionTestInfo, TxModifier}; -pub(crate) use vm_tester::{get_empty_storage, VmTester, VmTesterBuilder}; -pub(crate) use zksync_test_account::{Account, DeployContractsTx, TxType}; - -mod transaction_test_info; -mod vm_tester; diff --git a/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs b/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs deleted file mode 100644 index 5cc9ead8b54..00000000000 --- a/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs +++ /dev/null @@ -1,242 +0,0 @@ -use std::fmt; - -use zksync_types::{ExecuteTransactionCommon, Transaction, H160, U256}; -use zksync_vm2::interface::{Event, StateInterface}; - -use super::VmTester; -use crate::{ - interface::{ - storage::ReadStorage, CurrentExecutionState, ExecutionResult, Halt, TxRevertReason, - VmExecutionMode, VmExecutionResultAndLogs, VmInterface, VmInterfaceExt, - VmInterfaceHistoryEnabled, VmRevertReason, - }, - vm_fast::Vm, -}; - -#[derive(Debug, Clone)] -pub(crate) enum TxModifier { - WrongSignatureLength, - WrongSignature, - WrongMagicValue, - WrongNonce(Nonce, Nonce), - NonceReused(H160, Nonce), -} - -#[derive(Debug, Clone)] -pub(crate) enum TxExpectedResult { - Rejected { error: ExpectedError }, - Processed { rollback: bool }, -} - -#[derive(Debug, Clone)] -pub(crate) struct TransactionTestInfo { - tx: Transaction, - result: TxExpectedResult, -} - -#[derive(Debug, Clone)] -pub(crate) struct ExpectedError { - pub(crate) revert_reason: TxRevertReason, - pub(crate) modifier: Option<TxModifier>, -} - -impl From<TxModifier> for ExpectedError { - fn from(value: TxModifier) -> Self { - let revert_reason = match value { - TxModifier::WrongSignatureLength => { - Halt::ValidationFailed(VmRevertReason::Unknown { - function_selector: vec![144, 240, 73,
201], - data: vec![144, 240, 73, 201, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 45], - }) - } - TxModifier::WrongSignature => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Account validation returned invalid magic value. Most often this means that the signature is incorrect".to_string(), - data: vec![], - }) - } - TxModifier::WrongMagicValue => { - Halt::ValidationFailed(VmRevertReason::Unknown { - function_selector: vec![144, 240, 73, 201], - data: vec![144, 240, 73, 201, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1], - }) - - } - TxModifier::WrongNonce(expected, actual) => { - let function_selector = vec![98, 106, 222, 48]; - let expected_nonce_bytes = expected.0.to_be_bytes().to_vec(); - let actual_nonce_bytes = actual.0.to_be_bytes().to_vec(); - // padding is 28 because an address takes up 4 bytes and we need it to fill a 32 byte field - let nonce_padding = vec![0u8; 28]; - let data = [function_selector.clone(), nonce_padding.clone(), expected_nonce_bytes, nonce_padding.clone(), actual_nonce_bytes].concat(); - Halt::ValidationFailed(VmRevertReason::Unknown { - function_selector, - data - }) - } - TxModifier::NonceReused(addr, nonce) => { - let function_selector = vec![233, 10, 222, 212]; - let addr = addr.as_bytes().to_vec(); - // padding is 12 because an address takes up 20 bytes and we need it to fill a 32 byte field - let addr_padding = vec![0u8; 12]; - // padding is 28 because an address takes up 4 bytes and we need it to fill a 32 byte field - let nonce_padding = vec![0u8; 28]; - let data = [function_selector.clone(), addr_padding, addr, nonce_padding, nonce.0.to_be_bytes().to_vec()].concat(); - Halt::ValidationFailed(VmRevertReason::Unknown { - function_selector, - data, - }) - } - }; - - ExpectedError { - revert_reason: TxRevertReason::Halt(revert_reason), - modifier: Some(value), - } - } -} - -impl TransactionTestInfo { - pub(crate) fn new_rejected( - mut transaction: Transaction, - expected_error: ExpectedError, - ) -> Self { - transaction.common_data = match transaction.common_data { - ExecuteTransactionCommon::L2(mut data) => { - if let Some(modifier) = &expected_error.modifier { - match modifier { - TxModifier::WrongSignatureLength => { - data.signature = data.signature[..data.signature.len() - 20].to_vec() - } - TxModifier::WrongSignature => data.signature = vec![27u8; 65], - TxModifier::WrongMagicValue => data.signature = vec![1u8; 65], - TxModifier::WrongNonce(_, _) => { - // Do not need to modify signature for nonce error - } - TxModifier::NonceReused(_, _) => { - // Do not need to modify signature for nonce error - } - } - } - ExecuteTransactionCommon::L2(data) - } - _ => panic!("L1 transactions are not supported"), - }; - - Self { - tx: transaction, - result: TxExpectedResult::Rejected { - error: expected_error, - }, - } - } - - pub(crate) fn new_processed(transaction: Transaction, should_be_rollbacked: bool) -> Self { - Self { - tx: transaction, - result: TxExpectedResult::Processed { - rollback: should_be_rollbacked, - }, - } - } - - fn verify_result(&self, result: &VmExecutionResultAndLogs) { - match &self.result { - TxExpectedResult::Rejected { error } => match &result.result { - ExecutionResult::Success { .. 
} => { - panic!("Transaction should be reverted {:?}", self.tx.nonce()) - } - ExecutionResult::Revert { output } => match &error.revert_reason { - TxRevertReason::TxReverted(expected) => { - assert_eq!(output, expected) - } - _ => { - panic!("Error types mismatch"); - } - }, - ExecutionResult::Halt { reason } => match &error.revert_reason { - TxRevertReason::Halt(expected) => { - assert_eq!(reason, expected) - } - _ => { - panic!("Error types mismatch"); - } - }, - }, - TxExpectedResult::Processed { .. } => { - assert!(!result.result.is_failed()); - } - } - } - - fn should_rollback(&self) -> bool { - match &self.result { - TxExpectedResult::Rejected { .. } => true, - TxExpectedResult::Processed { rollback } => *rollback, - } - } -} - -// TODO this doesn't include all the state of ModifiedWorld -#[derive(Debug)] -struct VmStateDump<S> { - state: S, - storage_writes: Vec<((H160, U256), U256)>, - events: Box<[Event]>, -} - -impl<S: PartialEq> PartialEq for VmStateDump<S> { - fn eq(&self, other: &Self) -> bool { - self.state == other.state - && self.storage_writes == other.storage_writes - && self.events == other.events - } -} - -impl<S: ReadStorage> Vm<S> { - fn dump_state(&self) -> VmStateDump<impl PartialEq + fmt::Debug> { - VmStateDump { - state: self.inner.dump_state(), - storage_writes: self.inner.get_storage_state().collect(), - events: self.inner.events().collect(), - } - } -} - -impl VmTester<()> { - pub(crate) fn execute_and_verify_txs( - &mut self, - txs: &[TransactionTestInfo], - ) -> CurrentExecutionState { - for tx_test_info in txs { - self.execute_tx_and_verify(tx_test_info.clone()); - } - self.vm.execute(VmExecutionMode::Batch); - let mut state = self.vm.get_current_execution_state(); - state.used_contract_hashes.sort(); - state - } - - pub(crate) fn execute_tx_and_verify( - &mut self, - tx_test_info: TransactionTestInfo, - ) -> VmExecutionResultAndLogs { - self.vm.make_snapshot(); - let inner_state_before = self.vm.dump_state(); - self.vm.push_transaction(tx_test_info.tx.clone()); - let result = self.vm.execute(VmExecutionMode::OneTx); - tx_test_info.verify_result(&result); - if tx_test_info.should_rollback() { - self.vm.rollback_to_the_latest_snapshot(); - let inner_state_after = self.vm.dump_state(); - pretty_assertions::assert_eq!( - inner_state_before, - inner_state_after, - "Inner state before and after rollback should be equal" - ); - } else { - self.vm.pop_snapshot_no_rollback(); - } - result - } -} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/tester/vm_tester.rs b/core/lib/multivm/src/versions/vm_fast/tests/tester/vm_tester.rs deleted file mode 100644 index 9549b32c4f1..00000000000 --- a/core/lib/multivm/src/versions/vm_fast/tests/tester/vm_tester.rs +++ /dev/null @@ -1,231 +0,0 @@ -use std::{cell::RefCell, rc::Rc}; - -use zksync_contracts::BaseSystemContracts; -use zksync_test_account::{Account, TxType}; -use zksync_types::{ - block::L2BlockHasher, utils::deployed_address_create, AccountTreeId, Address, L1BatchNumber, - L2BlockNumber, Nonce, StorageKey, -}; -use zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; -use zksync_vm2::{interface::Tracer, WorldDiff}; - -use crate::{ - interface::{ - storage::{InMemoryStorage, StoragePtr}, - L1BatchEnv, L2Block, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmInterface, - }, - versions::{ - testonly::{default_l1_batch, default_system_env, make_account_rich, ContractToDeploy}, - vm_fast::{tests::utils::read_test_contract, vm::Vm}, - }, - vm_latest::utils::l2_blocks::load_last_l2_block, -}; - -pub(crate) struct VmTester<Tr> { - pub(crate) vm: Vm<StoragePtr<InMemoryStorage>, Tr>, - pub(crate) storage: StoragePtr<InMemoryStorage>, - pub(crate) deployer: Option<Account>, - pub(crate) test_contract: Option<Address>, - pub(crate) fee_account: Address, - pub(crate) rich_accounts: Vec<Account>, - pub(crate) custom_contracts: Vec<ContractToDeploy>, -} - -impl<Tr: Tracer + Default> VmTester<Tr> { - pub(crate) fn deploy_test_contract(&mut self) { - let contract = read_test_contract(); - let tx = self - .deployer - .as_mut() - .expect("You have to initialize builder with deployer") - .get_deploy_tx(&contract, None, TxType::L2) - .tx; - let nonce = tx.nonce().unwrap().0.into(); - self.vm.push_transaction(tx); - self.vm.inspect(&mut Tr::default(), VmExecutionMode::OneTx); - let deployed_address = - deployed_address_create(self.deployer.as_ref().unwrap().address, nonce); - self.test_contract = Some(deployed_address); - } - - pub(crate) fn reset_with_empty_storage(&mut self) { - self.storage = Rc::new(RefCell::new(get_empty_storage())); - *self.vm.inner.world_diff_mut() = WorldDiff::default(); - self.reset_state(false); - } - - /// Reset the state of the VM to the initial state. - /// If `use_latest_l2_block` is true, then the VM will use the latest L2 block from storage, - /// otherwise it will use the first L2 block of l1 batch env - pub(crate) fn reset_state(&mut self, use_latest_l2_block: bool) { - for account in self.rich_accounts.iter_mut() { - account.nonce = Nonce(0); - make_account_rich(&mut self.storage.borrow_mut(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(&mut self.storage.borrow_mut(), deployer); - } - - if !self.custom_contracts.is_empty() { - println!("Inserting custom contracts is not yet supported") - // `insert_contracts(&mut self.storage, &self.custom_contracts);` - } - - let storage = self.storage.clone(); - { - let mut storage = storage.borrow_mut(); - // Commit pending storage changes (old VM versions commit them on successful execution) - for (&(address, slot), &value) in self.vm.inner.world_diff().get_storage_state() { - let key = StorageKey::new(AccountTreeId::new(address), u256_to_h256(slot)); - storage.set_value(key, u256_to_h256(value)); - } - } - - let mut l1_batch = self.vm.batch_env.clone(); - if use_latest_l2_block { - let last_l2_block = load_last_l2_block(&storage).unwrap_or(L2Block { - number: 0, - timestamp: 0, - hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), - }); - l1_batch.first_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: std::cmp::max(last_l2_block.timestamp + 1, l1_batch.timestamp), - prev_block_hash: last_l2_block.hash, - max_virtual_blocks_to_create: 1, - }; - } - - let vm = Vm::custom(l1_batch, self.vm.system_env.clone(), storage); - - if self.test_contract.is_some() { - self.deploy_test_contract(); - } - self.vm = vm; - } -} - -pub(crate) struct VmTesterBuilder { - storage: Option<InMemoryStorage>, - l1_batch_env: Option<L1BatchEnv>, - system_env: SystemEnv, - deployer: Option<Account>, - rich_accounts: Vec<Account>, - custom_contracts: Vec<ContractToDeploy>, -} - -impl Clone for VmTesterBuilder { - fn clone(&self) -> Self { - Self { - storage: None, - l1_batch_env: self.l1_batch_env.clone(), - system_env: self.system_env.clone(), - deployer: self.deployer.clone(), - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - } - } -} - -impl VmTesterBuilder { - pub(crate) fn new() -> Self { - Self { - storage: None, - l1_batch_env: None, - system_env: default_system_env(), - deployer: None, - rich_accounts: vec![], - custom_contracts: vec![], - } - } - - pub(crate) fn with_l1_batch_env(mut self, l1_batch_env: L1BatchEnv) -> Self { - self.l1_batch_env = Some(l1_batch_env); - self - } - - pub(crate) fn with_storage(mut self, storage: InMemoryStorage) -> Self { - self.storage = Some(storage); - self - } - - pub(crate) fn with_base_system_smart_contracts( - mut self, - base_system_smart_contracts: BaseSystemContracts, - ) -> Self { - self.system_env.base_system_smart_contracts = base_system_smart_contracts; - self - } - - pub(crate) fn with_bootloader_gas_limit(mut self, gas_limit: u32) -> Self { - self.system_env.bootloader_gas_limit = gas_limit; - self - } - - pub(crate) fn with_execution_mode(mut self, execution_mode: TxExecutionMode) -> Self { - self.system_env.execution_mode = execution_mode; - self - } - - pub(crate) fn with_empty_in_memory_storage(mut self) -> Self { - self.storage = Some(get_empty_storage()); - self - } - - pub(crate) fn with_random_rich_accounts(mut self, number: u32) -> Self { - for _ in 0..number { - let account = Account::random(); - self.rich_accounts.push(account); - } - self - } - - pub(crate) fn with_rich_accounts(mut self, accounts: Vec<Account>) -> Self { - self.rich_accounts.extend(accounts); - self - } - - pub(crate) fn with_deployer(mut self) -> Self { - let deployer = Account::random(); - self.deployer = Some(deployer); - self - } - - pub(crate) fn with_custom_contracts(mut self, contracts: Vec<ContractToDeploy>) -> Self { - self.custom_contracts = contracts; - self - } - - pub(crate) fn build(self) -> VmTester<()> { - let l1_batch_env = self - .l1_batch_env - .unwrap_or_else(|| default_l1_batch(L1BatchNumber(1))); - - let mut raw_storage = self.storage.unwrap_or_else(get_empty_storage); - ContractToDeploy::insert_all(&self.custom_contracts, &mut raw_storage); - let storage_ptr = Rc::new(RefCell::new(raw_storage)); - for account in self.rich_accounts.iter() { - make_account_rich(&mut storage_ptr.borrow_mut(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(&mut storage_ptr.borrow_mut(), deployer); - } - - let fee_account = l1_batch_env.fee_account; - let vm = Vm::custom(l1_batch_env, self.system_env, storage_ptr.clone()); - - VmTester { - vm, - storage: storage_ptr, - deployer: self.deployer, - test_contract: None, - fee_account, - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - } - } -} - -pub(crate) fn get_empty_storage() -> InMemoryStorage { - InMemoryStorage::with_system_contracts(hash_bytecode) -} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/tracing_execution_error.rs b/core/lib/multivm/src/versions/vm_fast/tests/tracing_execution_error.rs index 89f0fa23620..b3f5b4b33bc 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/tracing_execution_error.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/tracing_execution_error.rs @@ -1,55 +1,8 @@ -use zksync_types::{Execute, H160}; - use crate::{ - interface::{TxExecutionMode, TxRevertReason, VmRevertReason}, - versions::testonly::ContractToDeploy, - vm_fast::tests::{ - tester::{ExpectedError, TransactionTestInfo, VmTesterBuilder}, - utils::{get_execute_error_calldata, read_error_contract, BASE_SYSTEM_CONTRACTS}, - }, + versions::testonly::tracing_execution_error::test_tracing_of_execution_errors, vm_fast::Vm, }; #[test] -fn test_tracing_of_execution_errors() { - let contract_address = H160::random(); - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_custom_contracts(vec![ContractToDeploy::new( - read_error_contract(), - contract_address, - )]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - - let tx =
account.get_l2_tx_for_execute( - Execute { - contract_address: Some(contract_address), - calldata: get_execute_error_calldata(), - value: Default::default(), - factory_deps: vec![], - }, - None, - ); - - vm.execute_tx_and_verify(TransactionTestInfo::new_rejected( - tx, - ExpectedError { - revert_reason: TxRevertReason::TxReverted(VmRevertReason::General { - msg: "short".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 115, 104, 111, 114, 116, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, - ], - }), - modifier: None, - }, - )); +fn tracing_of_execution_errors() { + test_tracing_of_execution_errors::<Vm<_>>(); } diff --git a/core/lib/multivm/src/versions/vm_fast/tests/transfer.rs b/core/lib/multivm/src/versions/vm_fast/tests/transfer.rs index ef510546f11..57c2c3e2c34 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/transfer.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/transfer.rs @@ -1,215 +1,16 @@ -use ethabi::Token; -use zksync_contracts::{load_contract, read_bytecode}; -use zksync_system_constants::L2_BASE_TOKEN_ADDRESS; -use zksync_types::{utils::storage_key_for_eth_balance, AccountTreeId, Address, Execute, U256}; -use zksync_utils::u256_to_h256; - use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - versions::testonly::ContractToDeploy, - vm_fast::tests::{ - tester::{get_empty_storage, VmTesterBuilder}, - utils::get_balance, + versions::testonly::transfer::{ + test_reentrancy_protection_send_and_transfer, test_send_and_transfer, }, + vm_fast::Vm, }; -enum TestOptions { - Send(U256), - Transfer(U256), -} - -fn test_send_or_transfer(test_option: TestOptions) { - let test_bytecode = read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json", - ); - let recipient_bytecode = read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/Recipient.json", - ); - let test_abi = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json", - ); - - let test_contract_address = Address::random(); - let recipient_address = Address::random(); - - let (value, calldata) = match test_option { - TestOptions::Send(value) => ( - value, - test_abi - .function("send") - .unwrap() - .encode_input(&[Token::Address(recipient_address), Token::Uint(value)]) - .unwrap(), - ), - TestOptions::Transfer(value) => ( - value, - test_abi - .function("transfer") - .unwrap() - .encode_input(&[Token::Address(recipient_address), Token::Uint(value)]) - .unwrap(), - ), - }; - - let mut storage = get_empty_storage(); - storage.set_value( - storage_key_for_eth_balance(&test_contract_address), - u256_to_h256(value), - ); - - let mut vm = VmTesterBuilder::new() - .with_storage(storage) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_random_rich_accounts(1) - .with_custom_contracts(vec![ - ContractToDeploy::new(test_bytecode, test_contract_address), - ContractToDeploy::new(recipient_bytecode, recipient_address), - ]) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(test_contract_address), - calldata, - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx); - let tx_result =
vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !tx_result.result.is_failed(), - "Transaction wasn't successful" - ); - - let batch_result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!batch_result.result.is_failed(), "Batch wasn't successful"); - - let new_recipient_balance = get_balance( - AccountTreeId::new(L2_BASE_TOKEN_ADDRESS), - &recipient_address, - &mut vm.vm.world.storage, - vm.vm.inner.world_diff().get_storage_state(), - ); - - assert_eq!(new_recipient_balance, value); -} - #[test] -fn test_send_and_transfer() { - test_send_or_transfer(TestOptions::Send(U256::zero())); - test_send_or_transfer(TestOptions::Send(U256::from(10).pow(18.into()))); - test_send_or_transfer(TestOptions::Transfer(U256::zero())); - test_send_or_transfer(TestOptions::Transfer(U256::from(10).pow(18.into()))); -} - -fn test_reentrancy_protection_send_or_transfer(test_option: TestOptions) { - let test_bytecode = read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json", - ); - let reentrant_recipient_bytecode = read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/ReentrantRecipient.json", - ); - let test_abi = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json", - ); - let reentrant_recipient_abi = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/ReentrantRecipient.json", - ); - - let test_contract_address = Address::random(); - let reentrant_recipient_address = Address::random(); - - let (value, calldata) = match test_option { - TestOptions::Send(value) => ( - value, - test_abi - .function("send") - .unwrap() - .encode_input(&[ - Token::Address(reentrant_recipient_address), - Token::Uint(value), - ]) - .unwrap(), - ), - TestOptions::Transfer(value) => ( - value, - test_abi - .function("transfer") - .unwrap() - .encode_input(&[ - Token::Address(reentrant_recipient_address), - Token::Uint(value), - ]) - .unwrap(), - ), - }; - - let mut vm = VmTesterBuilder::new() - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_random_rich_accounts(1) - .with_custom_contracts(vec![ - ContractToDeploy::new(test_bytecode, test_contract_address), - ContractToDeploy::new(reentrant_recipient_bytecode, reentrant_recipient_address), - ]) - .build(); - - // First transaction, the job of which is to warm up the slots for balance of the recipient as well as its storage variable. 
-    let account = &mut vm.rich_accounts[0];
-    let tx1 = account.get_l2_tx_for_execute(
-        Execute {
-            contract_address: Some(reentrant_recipient_address),
-            calldata: reentrant_recipient_abi
-                .function("setX")
-                .unwrap()
-                .encode_input(&[])
-                .unwrap(),
-            value: U256::from(1),
-            factory_deps: vec![],
-        },
-        None,
-    );
-
-    vm.vm.push_transaction(tx1);
-    let tx1_result = vm.vm.execute(VmExecutionMode::OneTx);
-    assert!(
-        !tx1_result.result.is_failed(),
-        "Transaction 1 wasn't successful"
-    );
-
-    let tx2 = account.get_l2_tx_for_execute(
-        Execute {
-            contract_address: Some(test_contract_address),
-            calldata,
-            value,
-            factory_deps: vec![],
-        },
-        None,
-    );
-
-    vm.vm.push_transaction(tx2);
-    let tx2_result = vm.vm.execute(VmExecutionMode::OneTx);
-    assert!(
-        tx2_result.result.is_failed(),
-        "Transaction 2 should have failed, but it succeeded"
-    );
-
-    let batch_result = vm.vm.execute(VmExecutionMode::Batch);
-    assert!(!batch_result.result.is_failed(), "Batch wasn't successful");
+fn send_and_transfer() {
+    test_send_and_transfer::<Vm<_>>();
 }
 
 #[test]
-fn test_reentrancy_protection_send_and_transfer() {
-    test_reentrancy_protection_send_or_transfer(TestOptions::Send(U256::zero()));
-    test_reentrancy_protection_send_or_transfer(TestOptions::Send(U256::from(10).pow(18.into())));
-    test_reentrancy_protection_send_or_transfer(TestOptions::Transfer(U256::zero()));
-    test_reentrancy_protection_send_or_transfer(TestOptions::Transfer(
-        U256::from(10).pow(18.into()),
-    ));
+fn reentrancy_protection_send_and_transfer() {
+    test_reentrancy_protection_send_and_transfer::<Vm<_>>();
 }
diff --git a/core/lib/multivm/src/versions/vm_fast/tests/upgrade.rs b/core/lib/multivm/src/versions/vm_fast/tests/upgrade.rs
index ba4863f7c45..4e4533c6868 100644
--- a/core/lib/multivm/src/versions/vm_fast/tests/upgrade.rs
+++ b/core/lib/multivm/src/versions/vm_fast/tests/upgrade.rs
@@ -1,343 +1,21 @@
-use zksync_contracts::{deployer_contract, load_sys_contract, read_bytecode};
-use zksync_test_account::TxType;
-use zksync_types::{
-    ethabi::{Contract, Token},
-    get_code_key, get_known_code_key,
-    protocol_upgrade::ProtocolUpgradeTxCommonData,
-    Address, Execute, ExecuteTransactionCommon, Transaction, COMPLEX_UPGRADER_ADDRESS,
-    CONTRACT_DEPLOYER_ADDRESS, CONTRACT_FORCE_DEPLOYER_ADDRESS, H160, H256,
-    REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, U256,
-};
-use zksync_utils::{bytecode::hash_bytecode, u256_to_h256};
-
 use crate::{
-    interface::{
-        ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt,
-        VmInterfaceHistoryEnabled,
-    },
-    vm_fast::tests::{
-        tester::VmTesterBuilder,
-        utils::{
-            get_complex_upgrade_abi, read_complex_upgrade, read_test_contract,
-            verify_required_storage,
-        },
+    versions::testonly::upgrade::{
+        test_complex_upgrader, test_force_deploy_upgrade, test_protocol_upgrade_is_first,
     },
+    vm_fast::Vm,
 };
 
-/// In this test we ensure that the requirements for protocol upgrade transactions are enforced by the bootloader:
-/// - This transaction must be the only one in block
-/// - If present, this transaction must be the first one in block
 #[test]
-fn test_protocol_upgrade_is_first() {
-    let mut vm = VmTesterBuilder::new()
-        .with_empty_in_memory_storage()
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_random_rich_accounts(1)
-        .build();
-
-    let bytecode_hash = hash_bytecode(&read_test_contract());
-    vm.storage
-        .borrow_mut()
-        .set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into()));
-
-    // Here we just use some random transaction of protocol upgrade type:
-    let protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment {
-        // The bytecode hash to put on an address
-        bytecode_hash,
-        // The address on which to deploy the bytecode hash to
-        address: H160::random(),
-        // Whether to run the constructor on the force deployment
-        call_constructor: false,
-        // The value with which to initialize a contract
-        value: U256::zero(),
-        // The constructor calldata
-        input: vec![],
-    }]);
-
-    // Another random upgrade transaction
-    let another_protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment {
-        // The bytecode hash to put on an address
-        bytecode_hash,
-        // The address on which to deploy the bytecode hash to
-        address: H160::random(),
-        // Whether to run the constructor on the force deployment
-        call_constructor: false,
-        // The value with which to initialize a contract
-        value: U256::zero(),
-        // The constructor calldata
-        input: vec![],
-    }]);
-
-    let normal_l1_transaction = vm.rich_accounts[0]
-        .get_deploy_tx(&read_test_contract(), None, TxType::L1 { serial_id: 0 })
-        .tx;
-
-    let expected_error =
-        Halt::UnexpectedVMBehavior("Assertion error: Protocol upgrade tx not first".to_string());
-
-    vm.vm.make_snapshot();
-    // Test 1: there must be only one system transaction in block
-    vm.vm.push_transaction(protocol_upgrade_transaction.clone());
-    vm.vm.push_transaction(normal_l1_transaction.clone());
-    vm.vm.push_transaction(another_protocol_upgrade_transaction);
-
-    vm.vm.execute(VmExecutionMode::OneTx);
-    vm.vm.execute(VmExecutionMode::OneTx);
-    let result = vm.vm.execute(VmExecutionMode::OneTx);
-    assert_eq!(
-        result.result,
-        ExecutionResult::Halt {
-            reason: expected_error.clone()
-        }
-    );
-
-    // Test 2: the protocol upgrade tx must be the first one in block
-    vm.vm.rollback_to_the_latest_snapshot();
-    vm.vm.make_snapshot();
-    vm.vm.push_transaction(normal_l1_transaction.clone());
-    vm.vm.push_transaction(protocol_upgrade_transaction.clone());
-
-    vm.vm.execute(VmExecutionMode::OneTx);
-    let result = vm.vm.execute(VmExecutionMode::OneTx);
-    assert_eq!(
-        result.result,
-        ExecutionResult::Halt {
-            reason: expected_error
-        }
-    );
-
-    vm.vm.rollback_to_the_latest_snapshot();
-    vm.vm.make_snapshot();
-    vm.vm.push_transaction(protocol_upgrade_transaction);
-    vm.vm.push_transaction(normal_l1_transaction);
-
-    vm.vm.execute(VmExecutionMode::OneTx);
-    let result = vm.vm.execute(VmExecutionMode::OneTx);
-    assert!(!result.result.is_failed());
+fn protocol_upgrade_is_first() {
+    test_protocol_upgrade_is_first::<Vm<_>>();
 }
 
-/// In this test we try to test how force deployments could be done via protocol upgrade transactions.
 #[test]
-fn test_force_deploy_upgrade() {
-    let mut vm = VmTesterBuilder::new()
-        .with_empty_in_memory_storage()
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_random_rich_accounts(1)
-        .build();
-
-    let storage_view = vm.storage.clone();
-    let bytecode_hash = hash_bytecode(&read_test_contract());
-
-    let known_code_key = get_known_code_key(&bytecode_hash);
-    // It is generally expected that all the keys will be set as known prior to the protocol upgrade.
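// ("Known" refers to the KnownCodesStorage system contract: the ContractDeployer is
// expected to reject force deployments whose bytecode hash has not been marked as
// known, which is why the marker is written below.)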
-    storage_view
-        .borrow_mut()
-        .set_value(known_code_key, u256_to_h256(1.into()));
-    drop(storage_view);
-
-    let address_to_deploy = H160::random();
-    // Here we just use some random transaction of protocol upgrade type:
-    let transaction = get_forced_deploy_tx(&[ForceDeployment {
-        // The bytecode hash to put on an address
-        bytecode_hash,
-        // The address on which to deploy the bytecode hash to
-        address: address_to_deploy,
-        // Whether to run the constructor on the force deployment
-        call_constructor: false,
-        // The value with which to initialize a contract
-        value: U256::zero(),
-        // The constructor calldata
-        input: vec![],
-    }]);
-
-    vm.vm.push_transaction(transaction);
-
-    let result = vm.vm.execute(VmExecutionMode::OneTx);
-    assert!(
-        !result.result.is_failed(),
-        "The force upgrade was not successful"
-    );
-
-    let expected_slots = [(bytecode_hash, get_code_key(&address_to_deploy))];
-
-    // Verify that the bytecode has been set correctly
-    verify_required_storage(
-        &expected_slots,
-        &mut *vm.storage.borrow_mut(),
-        vm.vm.inner.world_diff().get_storage_state(),
-    );
+fn force_deploy_upgrade() {
+    test_force_deploy_upgrade::<Vm<_>>();
 }
 
-/// Here we show how the work with the complex upgrader could be done
 #[test]
-fn test_complex_upgrader() {
-    let mut vm = VmTesterBuilder::new()
-        .with_empty_in_memory_storage()
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_random_rich_accounts(1)
-        .build();
-
-    let bytecode_hash = hash_bytecode(&read_complex_upgrade());
-    let msg_sender_test_hash = hash_bytecode(&read_msg_sender_test());
-
-    // Let's assume that the bytecode for the implementation of the complex upgrade
-    // is already deployed in some address in user space
-    let upgrade_impl = H160::random();
-    let account_code_key = get_code_key(&upgrade_impl);
-
-    {
-        let mut storage = vm.storage.borrow_mut();
-        storage.set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into()));
-        storage.set_value(
-            get_known_code_key(&msg_sender_test_hash),
-            u256_to_h256(1.into()),
-        );
-        storage.set_value(account_code_key, bytecode_hash);
-        storage.store_factory_dep(bytecode_hash, read_complex_upgrade());
-        storage.store_factory_dep(msg_sender_test_hash, read_msg_sender_test());
-    }
-
-    let address_to_deploy1 = H160::random();
-    let address_to_deploy2 = H160::random();
-
-    let transaction = get_complex_upgrade_tx(
-        upgrade_impl,
-        address_to_deploy1,
-        address_to_deploy2,
-        bytecode_hash,
-    );
-
-    vm.vm.push_transaction(transaction);
-    let result = vm.vm.execute(VmExecutionMode::OneTx);
-    assert!(
-        !result.result.is_failed(),
-        "The force upgrade was not successful"
-    );
-
-    let expected_slots = [
-        (bytecode_hash, get_code_key(&address_to_deploy1)),
-        (bytecode_hash, get_code_key(&address_to_deploy2)),
-    ];
-
-    // Verify that the bytecode has been set correctly
-    verify_required_storage(
-        &expected_slots,
-        &mut *vm.storage.borrow_mut(),
-        vm.vm.inner.world_diff().get_storage_state(),
-    );
-}
-
-#[derive(Debug, Clone)]
-struct ForceDeployment {
-    // The bytecode hash to put on an address
-    bytecode_hash: H256,
-    // The address on which to deploy the bytecode hash to
-    address: Address,
-    // Whether to run the constructor on the force deployment
-    call_constructor: bool,
-    // The value with which to initialize a contract
-    value: U256,
-    // The constructor calldata
-    input: Vec<u8>,
-}
-
-fn get_forced_deploy_tx(deployment: &[ForceDeployment]) -> Transaction {
-    let deployer = deployer_contract();
-    let contract_function = deployer.function("forceDeployOnAddresses").unwrap();
-
-    let encoded_deployments: Vec<_> = deployment
-        .iter()
-        .map(|deployment| {
-            Token::Tuple(vec![
-                Token::FixedBytes(deployment.bytecode_hash.as_bytes().to_vec()),
-                Token::Address(deployment.address),
-                Token::Bool(deployment.call_constructor),
-                Token::Uint(deployment.value),
-                Token::Bytes(deployment.input.clone()),
-            ])
-        })
-        .collect();
-
-    let params = [Token::Array(encoded_deployments)];
-
-    let calldata = contract_function
-        .encode_input(&params)
-        .expect("failed to encode parameters");
-
-    let execute = Execute {
-        contract_address: Some(CONTRACT_DEPLOYER_ADDRESS),
-        calldata,
-        factory_deps: vec![],
-        value: U256::zero(),
-    };
-
-    Transaction {
-        common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData {
-            sender: CONTRACT_FORCE_DEPLOYER_ADDRESS,
-            gas_limit: U256::from(200_000_000u32),
-            gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(),
-            ..Default::default()
-        }),
-        execute,
-        received_timestamp_ms: 0,
-        raw_bytes: None,
-    }
-}
-
-// Returns the transaction that performs a complex protocol upgrade.
-// The first param is the address of the implementation of the complex upgrade
-// in user-space, while the next 3 params are params of the implementation itself
-// For the explanation for the parameters, please refer to:
-// etc/contracts-test-data/complex-upgrade/complex-upgrade.sol
-fn get_complex_upgrade_tx(
-    implementation_address: Address,
-    address1: Address,
-    address2: Address,
-    bytecode_hash: H256,
-) -> Transaction {
-    let impl_contract = get_complex_upgrade_abi();
-    let impl_function = impl_contract.function("someComplexUpgrade").unwrap();
-    let impl_calldata = impl_function
-        .encode_input(&[
-            Token::Address(address1),
-            Token::Address(address2),
-            Token::FixedBytes(bytecode_hash.as_bytes().to_vec()),
-        ])
-        .unwrap();
-
-    let complex_upgrader = get_complex_upgrader_abi();
-    let upgrade_function = complex_upgrader.function("upgrade").unwrap();
-    let complex_upgrader_calldata = upgrade_function
-        .encode_input(&[
-            Token::Address(implementation_address),
-            Token::Bytes(impl_calldata),
-        ])
-        .unwrap();
-
-    let execute = Execute {
-        contract_address: Some(COMPLEX_UPGRADER_ADDRESS),
-        calldata: complex_upgrader_calldata,
-        factory_deps: vec![],
-        value: U256::zero(),
-    };
-
-    Transaction {
-        common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData {
-            sender: CONTRACT_FORCE_DEPLOYER_ADDRESS,
-            gas_limit: U256::from(200_000_000u32),
-            gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(),
-            ..Default::default()
-        }),
-        execute,
-        received_timestamp_ms: 0,
-        raw_bytes: None,
-    }
-}
-
-fn read_msg_sender_test() -> Vec<u8> {
-    read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/msg-sender.sol/MsgSenderTest.json")
-}
-
-fn get_complex_upgrader_abi() -> Contract {
-    load_sys_contract("ComplexUpgrader")
+fn complex_upgrader() {
+    test_complex_upgrader::<Vm<_>>();
 }
diff --git a/core/lib/multivm/src/versions/vm_fast/tests/utils.rs b/core/lib/multivm/src/versions/vm_fast/tests/utils.rs
deleted file mode 100644
index 5ab5aa0dec9..00000000000
--- a/core/lib/multivm/src/versions/vm_fast/tests/utils.rs
+++ /dev/null
@@ -1,137 +0,0 @@
-use std::collections::BTreeMap;
-
-use ethabi::Contract;
-use once_cell::sync::Lazy;
-use zksync_contracts::{
-    load_contract, read_bytecode, read_zbin_bytecode, BaseSystemContracts, SystemContractCode,
-};
-use zksync_types::{
-    utils::storage_key_for_standard_token_balance, AccountTreeId, Address, StorageKey, H160, H256,
-    U256,
-};
-use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256};
-use zksync_vm2::interface::{HeapId, StateInterface};
-
-use crate::interface::storage::ReadStorage;
-
-pub(crate) static BASE_SYSTEM_CONTRACTS: Lazy<BaseSystemContracts> =
-    Lazy::new(BaseSystemContracts::load_from_disk);
-
-pub(crate) fn verify_required_memory(
-    state: &impl StateInterface,
-    required_values: Vec<(U256, HeapId, u32)>,
-) {
-    for (required_value, memory_page, cell) in required_values {
-        let current_value = state.read_heap_u256(memory_page, cell * 32);
-        assert_eq!(current_value, required_value);
-    }
-}
-
-pub(crate) fn verify_required_storage(
-    required_values: &[(H256, StorageKey)],
-    main_storage: &mut impl ReadStorage,
-    storage_changes: &BTreeMap<(H160, U256), U256>,
-) {
-    for &(required_value, key) in required_values {
-        let current_value = storage_changes
-            .get(&(*key.account().address(), h256_to_u256(*key.key())))
-            .copied()
-            .unwrap_or_else(|| h256_to_u256(main_storage.read_value(&key)));
-
-        assert_eq!(
-            u256_to_h256(current_value),
-            required_value,
-            "Invalid value at key {key:?}"
-        );
-    }
-}
-
-pub(crate) fn get_balance(
-    token_id: AccountTreeId,
-    account: &Address,
-    main_storage: &mut impl ReadStorage,
-    storage_changes: &BTreeMap<(H160, U256), U256>,
-) -> U256 {
-    let key = storage_key_for_standard_token_balance(token_id, account);
-
-    storage_changes
-        .get(&(*key.account().address(), h256_to_u256(*key.key())))
-        .copied()
-        .unwrap_or_else(|| h256_to_u256(main_storage.read_value(&key)))
-}
-
-pub(crate) fn read_test_contract() -> Vec<u8> {
-    read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json")
-}
-
-pub(crate) fn get_bootloader(test: &str) -> SystemContractCode {
-    let bootloader_code = read_zbin_bytecode(format!(
-        "contracts/system-contracts/bootloader/tests/artifacts/{}.yul.zbin",
-        test
-    ));
-
-    let bootloader_hash = hash_bytecode(&bootloader_code);
-    SystemContractCode {
-        code: bytes_to_be_words(bootloader_code),
-        hash: bootloader_hash,
-    }
-}
-
-pub(crate) fn read_error_contract() -> Vec<u8> {
-    read_bytecode(
-        "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json",
-    )
-}
-
-pub(crate) fn get_execute_error_calldata() -> Vec<u8> {
-    let test_contract = load_contract(
-        "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json",
-    );
-
-    let function = test_contract.function("require_short").unwrap();
-
-    function
-        .encode_input(&[])
-        .expect("failed to encode parameters")
-}
-
-pub(crate) fn read_many_owners_custom_account_contract() -> (Vec<u8>, Contract) {
-    let path = "etc/contracts-test-data/artifacts-zk/contracts/custom-account/many-owners-custom-account.sol/ManyOwnersCustomAccount.json";
-    (read_bytecode(path), load_contract(path))
-}
-
-pub(crate) fn read_precompiles_contract() -> Vec<u8> {
-    read_bytecode(
-        "etc/contracts-test-data/artifacts-zk/contracts/precompiles/precompiles.sol/Precompiles.json",
-    )
-}
-
-pub(crate) fn load_precompiles_contract() -> Contract {
-    load_contract(
-        "etc/contracts-test-data/artifacts-zk/contracts/precompiles/precompiles.sol/Precompiles.json",
-    )
-}
-
-pub(crate) fn read_nonce_holder_tester() -> Vec<u8> {
-    read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/custom-account/nonce-holder-test.sol/NonceHolderTest.json")
-}
-
-pub(crate) fn read_complex_upgrade() -> Vec<u8> {
-    read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json")
-}
-
-pub(crate) fn get_complex_upgrade_abi() -> Contract {
-    load_contract(
-        "etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json"
-    )
-}
-
-pub(crate) fn read_expensive_contract() -> (Vec<u8>, Contract) {
-    const PATH: &str =
-        "etc/contracts-test-data/artifacts-zk/contracts/expensive/expensive.sol/Expensive.json";
-    (read_bytecode(PATH), load_contract(PATH))
-}
-
-pub(crate) fn read_proxy_counter_contract() -> (Vec<u8>, Contract) {
-    const PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/counter/proxy_counter.sol/ProxyCounter.json";
-    (read_bytecode(PATH), load_contract(PATH))
-}
diff --git a/core/lib/multivm/src/versions/vm_fast/vm.rs b/core/lib/multivm/src/versions/vm_fast/vm.rs
index 41e37c0d0ba..435b6529c9e 100644
--- a/core/lib/multivm/src/versions/vm_fast/vm.rs
+++ b/core/lib/multivm/src/versions/vm_fast/vm.rs
@@ -1,6 +1,8 @@
-use std::{collections::HashMap, fmt, mem};
+use std::{collections::HashMap, fmt, mem, rc::Rc};
 
-use zk_evm_1_5_0::zkevm_opcode_defs::system_params::INITIAL_FRAME_FORMAL_EH_LOCATION;
+use zk_evm_1_5_0::{
+    aux_structures::LogQuery, zkevm_opcode_defs::system_params::INITIAL_FRAME_FORMAL_EH_LOCATION,
+};
 use zksync_contracts::SystemContractCode;
 use zksync_types::{
     l1::is_l1_tx_type,
@@ -11,14 +13,15 @@ use zksync_types::{
         BYTES_PER_ENUMERATION_INDEX,
     },
     AccountTreeId, StorageKey, StorageLog, StorageLogKind, StorageLogWithPreviousValue,
-    BOOTLOADER_ADDRESS, H160, H256, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS,
+    Transaction, BOOTLOADER_ADDRESS, H160, H256, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS,
     L2_BASE_TOKEN_ADDRESS, U256,
 };
 use zksync_utils::{bytecode::hash_bytecode, h256_to_u256, u256_to_h256};
 use zksync_vm2::{
     interface::{CallframeInterface, HeapId, StateInterface, Tracer},
-    ExecutionEnd, FatPointer, Program, Settings, VirtualMachine,
+    ExecutionEnd, FatPointer, Program, Settings, StorageSlot, VirtualMachine,
 };
+use zksync_vm_interface::{pubdata::PubdataBuilder, InspectExecutionMode};
 
 use super::{
     bootloader_state::{BootloaderState, BootloaderStateSnapshot},
@@ -33,11 +36,12 @@ use crate::{
     interface::{
         storage::{ImmutableStorageView, ReadStorage, StoragePtr, StorageView},
         BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState,
-        ExecutionResult, FinishedL1Batch, Halt, L1BatchEnv, L2BlockEnv, Refunds, SystemEnv,
-        TxRevertReason, VmEvent, VmExecutionLogs, VmExecutionMode, VmExecutionResultAndLogs,
-        VmExecutionStatistics, VmFactory, VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics,
-        VmRevertReason, VmTrackingContracts,
+        ExecutionResult, FinishedL1Batch, Halt, L1BatchEnv, L2BlockEnv, PushTransactionResult,
+        Refunds, SystemEnv, TxRevertReason, VmEvent, VmExecutionLogs, VmExecutionMode,
+        VmExecutionResultAndLogs, VmExecutionStatistics, VmFactory, VmInterface,
+        VmInterfaceHistoryEnabled, VmRevertReason, VmTrackingContracts,
     },
+    is_supported_by_fast_vm,
    utils::events::extract_l2tol1logs_from_l1_messenger,
    vm_fast::{
        bootloader_state::utils::{apply_l2_block, apply_pubdata_to_memory},
@@ -58,6 +62,31 @@ const VM_VERSION: MultiVMSubversion = MultiVMSubversion::IncreasedBootloaderMemo
 
 type FullTracer<Tr> = (Tr, CircuitsTracer);
 
+#[derive(Debug)]
+struct VmRunResult {
+    execution_result: ExecutionResult,
+    /// `true` if VM execution has terminated (as opposed to being stopped on a hook, e.g. when executing a single transaction
+    /// in a batch). Used for `execution_result == Revert { .. }` to understand whether VM logs should be reverted.
+ execution_ended: bool, + refunds: Refunds, + /// This value is used in stats. It's defined in the old VM as the latest value used when computing refunds (see the refunds tracer for `vm_latest`). + /// This is **not** equal to the pubdata diff before and after VM execution; e.g., when executing a batch tip, + /// `pubdata_published` is always 0 (since no refunds are computed). + pubdata_published: u32, +} + +impl VmRunResult { + fn should_ignore_vm_logs(&self) -> bool { + match &self.execution_result { + ExecutionResult::Success { .. } => false, + ExecutionResult::Halt { .. } => true, + // Logs generated during reverts should only be ignored if the revert has reached the root (bootloader) call frame, + // which is only possible with `TxExecutionMode::EthCall`. + ExecutionResult::Revert { .. } => self.execution_ended, + } + } +} + /// Fast VM wrapper. /// /// The wrapper is parametric by the storage and tracer types. Besides the [`Tracer`] trait, a tracer must have `'static` lifetime @@ -75,8 +104,14 @@ pub struct Vm { enforced_state_diffs: Option>, } -impl Vm { +impl Vm { pub fn custom(batch_env: L1BatchEnv, system_env: SystemEnv, storage: S) -> Self { + assert!( + is_supported_by_fast_vm(system_env.version), + "Protocol version {:?} is not supported by fast VM", + system_env.version + ); + let default_aa_code_hash = system_env .base_system_smart_contracts .default_aa @@ -140,32 +175,35 @@ impl Vm { execution_mode: VmExecutionMode, tracer: &mut (Tr, CircuitsTracer), track_refunds: bool, - ) -> (ExecutionResult, Refunds) { + ) -> VmRunResult { let mut refunds = Refunds { gas_refunded: 0, operator_suggested_refund: 0, }; let mut last_tx_result = None; let mut pubdata_before = self.inner.pubdata() as u32; + let mut pubdata_published = 0; - let result = loop { + let (execution_result, execution_ended) = loop { let hook = match self.inner.run(&mut self.world, tracer) { ExecutionEnd::SuspendedOnHook(hook) => hook, - ExecutionEnd::ProgramFinished(output) => break ExecutionResult::Success { output }, + ExecutionEnd::ProgramFinished(output) => { + break (ExecutionResult::Success { output }, true); + } ExecutionEnd::Reverted(output) => { - break match TxRevertReason::parse_error(&output) { + let result = match TxRevertReason::parse_error(&output) { TxRevertReason::TxReverted(output) => ExecutionResult::Revert { output }, TxRevertReason::Halt(reason) => ExecutionResult::Halt { reason }, - } + }; + break (result, true); } ExecutionEnd::Panicked => { - break ExecutionResult::Halt { - reason: if self.gas_remaining() == 0 { - Halt::BootloaderOutOfGas - } else { - Halt::VMPanic - }, - } + let reason = if self.gas_remaining() == 0 { + Halt::BootloaderOutOfGas + } else { + Halt::VMPanic + }; + break (ExecutionResult::Halt { reason }, true); } }; @@ -175,7 +213,7 @@ impl Vm { } Hook::TxHasEnded => { if let VmExecutionMode::OneTx = execution_mode { - break last_tx_result.take().unwrap(); + break (last_tx_result.take().unwrap(), false); } } Hook::AskOperatorForRefund => { @@ -192,7 +230,8 @@ impl Vm { ) .as_u64(); - let pubdata_published = self.inner.pubdata() as u32; + let pubdata_after = self.inner.pubdata() as u32; + pubdata_published = pubdata_after.saturating_sub(pubdata_before); refunds.operator_suggested_refund = compute_refund( &self.batch_env, @@ -200,7 +239,7 @@ impl Vm { gas_spent_on_pubdata.as_u64(), tx_gas_limit, gas_per_pubdata_byte.low_u32(), - pubdata_published.saturating_sub(pubdata_before), + pubdata_published, self.bootloader_state .last_l2_block() .txs @@ -209,7 +248,7 @@ impl Vm { .hash, 
); - pubdata_before = pubdata_published; + pubdata_before = pubdata_after; let refund_value = refunds.operator_suggested_refund; self.write_to_bootloader_heap([( OPERATOR_REFUNDS_OFFSET + current_tx_index, @@ -305,7 +344,12 @@ impl Vm { } }; - (result, refunds) + VmRunResult { + execution_result, + execution_ended, + refunds, + pubdata_published, + } } fn get_hook_params(&self) -> [U256; 3] { @@ -419,10 +463,10 @@ impl Vm { } // FIXME: restore this function once fast vm is enabled - // #[cfg(test)] - // pub(super) fn enforce_state_diffs(&mut self, diffs: Vec) { - // self.enforced_state_diffs = Some(diffs); - // } + #[cfg(test)] + pub(super) fn enforce_state_diffs(&mut self, diffs: Vec) { + self.enforced_state_diffs = Some(diffs); + } fn compute_state_diffs(&mut self) -> Vec { #[cfg(test)] @@ -431,24 +475,24 @@ impl Vm { } let storage = &mut self.world.storage; - let diffs = self.inner.world_diff().get_storage_changes().map( - move |((address, key), (initial_value, final_value))| { - let storage_key = StorageKey::new(AccountTreeId::new(address), u256_to_h256(key)); - StateDiffRecord { - address, - key, - derived_key: - zk_evm_1_5_0::aux_structures::LogQuery::derive_final_address_for_params( - &address, &key, - ), - enumeration_index: storage - .get_enumeration_index(&storage_key) - .unwrap_or_default(), - initial_value: initial_value.unwrap_or_default(), - final_value, - } - }, - ); + let diffs = + self.inner + .world_diff() + .get_storage_changes() + .map(move |((address, key), change)| { + let storage_key = + StorageKey::new(AccountTreeId::new(address), u256_to_h256(key)); + StateDiffRecord { + address, + key, + derived_key: LogQuery::derive_final_address_for_params(&address, &key), + enumeration_index: storage + .get_enumeration_index(&storage_key) + .unwrap_or_default(), + initial_value: change.before, + final_value: change.after, + } + }); diffs .filter(|diff| diff.address != L1_MESSENGER_ADDRESS) .collect() @@ -478,9 +522,9 @@ impl Vm { events, deduplicated_storage_logs: world_diff .get_storage_changes() - .map(|((address, key), (_, value))| StorageLog { + .map(|((address, key), change)| StorageLog { key: StorageKey::new(AccountTreeId::new(address), u256_to_h256(key)), - value: u256_to_h256(value), + value: u256_to_h256(change.after), kind: StorageLogKind::RepeatedWrite, // Initialness doesn't matter here }) .collect(), @@ -491,33 +535,10 @@ impl Vm { pubdata_costs: world_diff.pubdata_costs().to_vec(), } } -} - -impl VmFactory> for Vm, Tr> -where - S: ReadStorage, - Tr: Tracer + Default + 'static, -{ - fn new( - batch_env: L1BatchEnv, - system_env: SystemEnv, - storage: StoragePtr>, - ) -> Self { - let storage = ImmutableStorageView::new(storage); - Self::custom(batch_env, system_env, storage) - } -} -impl VmInterface for Vm { - type TracerDispatcher = Tr; - - fn push_transaction(&mut self, tx: zksync_types::Transaction) { - self.push_transaction_inner(tx, 0, true); - } - - fn inspect( + pub(crate) fn inspect_inner( &mut self, - tracer: &mut Self::TracerDispatcher, + tracer: &mut Tr, execution_mode: VmExecutionMode, ) -> VmExecutionResultAndLogs { let mut track_refunds = false; @@ -528,18 +549,19 @@ impl VmInterface for Vm { } let start = self.inner.world_diff().snapshot(); - let pubdata_before = self.inner.pubdata(); let gas_before = self.gas_remaining(); let mut full_tracer = (mem::take(tracer), CircuitsTracer::default()); - let (result, refunds) = self.run(execution_mode, &mut full_tracer, track_refunds); + let result = self.run(execution_mode, &mut full_tracer, track_refunds); 
*tracer = full_tracer.0; // place the tracer back - let ignore_world_diff = matches!(execution_mode, VmExecutionMode::OneTx) - && matches!(result, ExecutionResult::Halt { .. }); + let ignore_world_diff = + matches!(execution_mode, VmExecutionMode::OneTx) && result.should_ignore_vm_logs(); // If the execution is halted, the VM changes are expected to be rolled back by the caller. // Earlier VMs return empty execution logs in this case, so we follow this behavior. + // Likewise, if a revert has reached the bootloader frame (possible with `TxExecutionMode::EthCall`; otherwise, the bootloader catches reverts), + // old VMs revert all logs; the new VM doesn't do that automatically, so we recreate this behavior here. let logs = if ignore_world_diff { VmExecutionLogs::default() } else { @@ -557,7 +579,7 @@ impl VmInterface for Vm { StorageLogKind::RepeatedWrite }, }, - previous_value: u256_to_h256(change.before.unwrap_or_default()), + previous_value: u256_to_h256(change.before), }) .collect(); let events = merge_events( @@ -585,26 +607,65 @@ impl VmInterface for Vm { } }; - let pubdata_after = self.inner.pubdata(); let gas_remaining = self.gas_remaining(); + let gas_used = gas_before - gas_remaining; + VmExecutionResultAndLogs { - result, + result: result.execution_result, logs, // TODO (PLA-936): Fill statistics; investigate whether they should be zeroed on `Halt` statistics: VmExecutionStatistics { + gas_used: gas_used.into(), + gas_remaining, + computational_gas_used: gas_used, // since 1.5.0, this always has the same value as `gas_used` + pubdata_published: result.pubdata_published, + circuit_statistic: full_tracer.1.circuit_statistic(), contracts_used: 0, cycles_used: 0, - gas_used: (gas_before - gas_remaining).into(), - gas_remaining, - computational_gas_used: 0, total_log_queries: 0, - pubdata_published: (pubdata_after - pubdata_before).max(0) as u32, - circuit_statistic: full_tracer.1.circuit_statistic(), }, - refunds, + refunds: result.refunds, + new_known_factory_deps: None, + } + } +} + +impl VmFactory> for Vm, Tr> +where + S: ReadStorage, + Tr: Tracer + Default + 'static, +{ + fn new( + batch_env: L1BatchEnv, + system_env: SystemEnv, + storage: StoragePtr>, + ) -> Self { + let storage = ImmutableStorageView::new(storage); + Self::custom(batch_env, system_env, storage) + } +} + +impl VmInterface for Vm { + type TracerDispatcher = Tr; + + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult<'_> { + self.push_transaction_inner(tx, 0, true); + PushTransactionResult { + compressed_bytecodes: self + .bootloader_state + .get_last_tx_compressed_bytecodes() + .into(), } } + fn inspect( + &mut self, + tracer: &mut Self::TracerDispatcher, + execution_mode: InspectExecutionMode, + ) -> VmExecutionResultAndLogs { + self.inspect_inner(tracer, execution_mode.into()) + } + fn inspect_transaction_with_bytecode_compression( &mut self, tracer: &mut Self::TracerDispatcher, @@ -612,7 +673,7 @@ impl VmInterface for Vm { with_compression: bool, ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { self.push_transaction_inner(tx, 0, with_compression); - let result = self.inspect(tracer, VmExecutionMode::OneTx); + let result = self.inspect(tracer, InspectExecutionMode::OneTx); let compression_result = if self.has_unpublished_bytecodes() { Err(BytecodeCompressionError::BytecodeCompressionFailed) @@ -629,12 +690,8 @@ impl VmInterface for Vm { self.bootloader_state.start_new_l2_block(l2_block_env) } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - todo!("Unused during 
batch execution") - } - - fn finish_batch(&mut self) -> FinishedL1Batch { - let result = self.inspect(&mut Tr::default(), VmExecutionMode::Batch); + fn finish_batch(&mut self, _pubdata_builder: Rc) -> FinishedL1Batch { + let result = self.inspect_inner(&mut Tr::default(), VmExecutionMode::Batch); let execution_state = self.get_current_execution_state(); let bootloader_memory = self.bootloader_state.bootloader_memory(); FinishedL1Batch { @@ -745,20 +802,27 @@ impl World { } impl zksync_vm2::StorageInterface for World { - fn read_storage(&mut self, contract: H160, key: U256) -> Option { + fn read_storage(&mut self, contract: H160, key: U256) -> StorageSlot { let key = &StorageKey::new(AccountTreeId::new(contract), u256_to_h256(key)); - if self.storage.is_write_initial(key) { - None - } else { - Some(self.storage.read_value(key).as_bytes().into()) + let value = U256::from_big_endian(self.storage.read_value(key).as_bytes()); + // `is_write_initial` value can be true even if the slot has previously been written to / has non-zero value! + // This can happen during oneshot execution (i.e., executing a single transaction) since it emulates + // execution starting in the middle of a batch in the general case. Hence, a slot that was first written to in the batch + // must still be considered an initial write by the refund logic. + let is_write_initial = self.storage.is_write_initial(key); + StorageSlot { + value, + is_write_initial, } } - fn cost_of_writing_storage(&mut self, initial_value: Option, new_value: U256) -> u32 { - let is_initial = initial_value.is_none(); - let initial_value = initial_value.unwrap_or_default(); + fn read_storage_value(&mut self, contract: H160, key: U256) -> U256 { + let key = &StorageKey::new(AccountTreeId::new(contract), u256_to_h256(key)); + U256::from_big_endian(self.storage.read_value(key).as_bytes()) + } - if initial_value == new_value { + fn cost_of_writing_storage(&mut self, slot: StorageSlot, new_value: U256) -> u32 { + if slot.value == new_value { return 0; } @@ -772,10 +836,9 @@ impl zksync_vm2::StorageInterface for World { // For value compression, we use a metadata byte which holds the length of the value and the operation from the // previous state to the new state, and the compressed value. The maximum for this is 33 bytes. // Total bytes for initial writes then becomes 65 bytes and repeated writes becomes 38 bytes. 
-        let compressed_value_size =
-            compress_with_best_strategy(initial_value, new_value).len() as u32;
+        let compressed_value_size = compress_with_best_strategy(slot.value, new_value).len() as u32;
 
-        if is_initial {
+        if slot.is_write_initial {
             (BYTES_PER_DERIVED_KEY as u32) + compressed_value_size
         } else {
             (BYTES_PER_ENUMERATION_INDEX as u32) + compressed_value_size
diff --git a/core/lib/multivm/src/versions/vm_latest/bootloader_state/state.rs b/core/lib/multivm/src/versions/vm_latest/bootloader_state/state.rs
index 122704c24b9..2085bbaba31 100644
--- a/core/lib/multivm/src/versions/vm_latest/bootloader_state/state.rs
+++ b/core/lib/multivm/src/versions/vm_latest/bootloader_state/state.rs
@@ -1,14 +1,15 @@
 use std::cmp::Ordering;
 
 use once_cell::sync::OnceCell;
-use zksync_types::{commitment::PubdataParams, L2ChainId, U256};
+use zksync_types::{L2ChainId, ProtocolVersionId, U256};
+use zksync_vm_interface::pubdata::PubdataBuilder;
 
-use super::{
-    tx::BootloaderTx,
-    utils::{apply_pubdata_to_memory, get_encoded_pubdata},
-};
+use super::{tx::BootloaderTx, utils::apply_pubdata_to_memory};
 use crate::{
-    interface::{BootloaderMemory, CompressedBytecodeInfo, L2BlockEnv, TxExecutionMode},
+    interface::{
+        pubdata::PubdataInput, BootloaderMemory, CompressedBytecodeInfo, L2BlockEnv,
+        TxExecutionMode,
+    },
     vm_latest::{
         bootloader_state::{
             l2_block::BootloaderL2Block,
@@ -16,9 +17,8 @@ use crate::{
             utils::{apply_l2_block, apply_tx_to_memory},
         },
         constants::TX_DESCRIPTION_OFFSET,
-        types::internals::{PubdataInput, TransactionData},
+        types::internals::TransactionData,
         utils::l2_blocks::assert_next_block,
-        MultiVMSubversion,
     },
 };
 
@@ -49,10 +49,8 @@ pub struct BootloaderState {
     free_tx_offset: usize,
     /// Information about the pubdata that will be needed to supply to the L1Messenger
     pubdata_information: OnceCell<PubdataInput>,
-    /// Params related to how the pubdata should be processed by the bootloader in the batch
-    pubdata_params: PubdataParams,
-    /// VM subversion
-    subversion: MultiVMSubversion,
+    /// Protocol version.
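+    /// Used both to pick the bootloader memory layout for pubdata (pre- vs post-gateway,
+    /// see `apply_pubdata_to_memory`) and as an argument for the pubdata builder.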
+    protocol_version: ProtocolVersionId,
 }
 
 impl BootloaderState {
@@ -60,8 +58,7 @@
         execution_mode: TxExecutionMode,
         initial_memory: BootloaderMemory,
         first_l2_block: L2BlockEnv,
-        pubdata_params: PubdataParams,
-        subversion: MultiVMSubversion,
+        protocol_version: ProtocolVersionId,
     ) -> Self {
         let l2_block = BootloaderL2Block::new(first_l2_block, 0);
         Self {
@@ -72,8 +69,7 @@ impl BootloaderState {
             execution_mode,
             free_tx_offset: 0,
             pubdata_information: Default::default(),
-            pubdata_params,
-            subversion,
+            protocol_version,
         }
     }
 
@@ -154,22 +150,13 @@ impl BootloaderState {
             .expect("Pubdata information is not set")
     }
 
-    pub(crate) fn get_encoded_pubdata(&self) -> Vec<u8> {
+    pub(crate) fn settlement_layer_pubdata(&self, pubdata_builder: &dyn PubdataBuilder) -> Vec<u8> {
         let pubdata_information = self
             .pubdata_information
             .get()
-            .expect("Pubdata information is not set")
-            .clone();
+            .expect("Pubdata information is not set");
 
-        match self.subversion {
-            MultiVMSubversion::SmallBootloaderMemory
-            | MultiVMSubversion::IncreasedBootloaderMemory => {
-                pubdata_information.build_pubdata_legacy(false)
-            }
-            MultiVMSubversion::Gateway => {
-                get_encoded_pubdata(pubdata_information, self.pubdata_params, false)
-            }
-        }
+        pubdata_builder.settlement_layer_pubdata(pubdata_information, self.protocol_version)
     }
 
     fn last_mut_l2_block(&mut self) -> &mut BootloaderL2Block {
@@ -177,7 +164,10 @@ impl BootloaderState {
     }
 
     /// Apply all bootloader transactions to the initial memory
-    pub(crate) fn bootloader_memory(&self) -> BootloaderMemory {
+    pub(crate) fn bootloader_memory(
+        &self,
+        pubdata_builder: &dyn PubdataBuilder,
+    ) -> BootloaderMemory {
         let mut initial_memory = self.initial_memory.clone();
         let mut offset = 0;
         let mut compressed_bytecodes_offset = 0;
@@ -205,15 +195,14 @@ impl BootloaderState {
 
         let pubdata_information = self
             .pubdata_information
-            .clone()
-            .into_inner()
+            .get()
             .expect("Empty pubdata information");
 
         apply_pubdata_to_memory(
            &mut initial_memory,
+            pubdata_builder,
             pubdata_information,
-            self.pubdata_params,
-            self.subversion,
+            self.protocol_version,
         );
         initial_memory
     }
@@ -328,11 +317,7 @@ impl BootloaderState {
         }
     }
 
-    pub(crate) fn get_pubdata_params(&self) -> PubdataParams {
-        self.pubdata_params
-    }
-
-    pub(crate) fn get_vm_subversion(&self) -> MultiVMSubversion {
-        self.subversion
+    pub(crate) fn protocol_version(&self) -> ProtocolVersionId {
+        self.protocol_version
     }
 }
diff --git a/core/lib/multivm/src/versions/vm_latest/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_latest/bootloader_state/utils.rs
index 09085402b0d..c409bda35c1 100644
--- a/core/lib/multivm/src/versions/vm_latest/bootloader_state/utils.rs
+++ b/core/lib/multivm/src/versions/vm_latest/bootloader_state/utils.rs
@@ -1,12 +1,12 @@
-use zksync_types::{
-    commitment::{L1BatchCommitmentMode, PubdataParams},
-    ethabi, U256,
-};
+use zksync_types::{ethabi, ProtocolVersionId, U256};
 use zksync_utils::{bytes_to_be_words, h256_to_u256};
 
 use super::tx::BootloaderTx;
 use crate::{
-    interface::{BootloaderMemory, CompressedBytecodeInfo, TxExecutionMode},
+    interface::{
+        pubdata::{PubdataBuilder, PubdataInput},
+        BootloaderMemory, CompressedBytecodeInfo, TxExecutionMode,
+    },
     utils::bytecode,
     vm_latest::{
         bootloader_state::l2_block::BootloaderL2Block,
@@ -17,11 +17,6 @@ use crate::{
         TX_DESCRIPTION_OFFSET, TX_OPERATOR_L2_BLOCK_INFO_OFFSET,
         TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO, TX_OVERHEAD_OFFSET, TX_TRUSTED_GAS_LIMIT_OFFSET,
     },
-    types::internals::{
-        pubdata::{PubdataBuilder, RollupPubdataBuilder, ValidiumPubdataBuilder},
-        PubdataInput,
-    },
-    MultiVMSubversion,
     },
 };
 
@@ -131,70 +126,60 @@ fn apply_l2_block_inner(
     ])
 }
 
-pub(crate) fn get_encoded_pubdata(
-    pubdata_information: PubdataInput,
-    pubdata_params: PubdataParams,
-    l2_version: bool,
+fn bootloader_memory_input(
+    pubdata_builder: &dyn PubdataBuilder,
+    input: &PubdataInput,
+    protocol_version: ProtocolVersionId,
 ) -> Vec<u8> {
-    let pubdata_bytes: Vec<u8> = if pubdata_params.pubdata_type == L1BatchCommitmentMode::Rollup {
-        RollupPubdataBuilder::new().build_pubdata(pubdata_information, l2_version)
-    } else {
-        ValidiumPubdataBuilder::new().build_pubdata(pubdata_information, l2_version)
-    };
+    let l2_da_validator_address = pubdata_builder.l2_da_validator();
+    let operator_input = pubdata_builder.l1_messenger_operator_input(input, protocol_version);
 
-    if l2_version {
-        ethabi::encode(&[
-            ethabi::Token::Address(pubdata_params.l2_da_validator_address),
-            ethabi::Token::Bytes(pubdata_bytes),
-        ])
-        .to_vec()
-    } else {
-        pubdata_bytes
-    }
+    ethabi::encode(&[
+        ethabi::Token::Address(l2_da_validator_address),
+        ethabi::Token::Bytes(operator_input),
+    ])
 }
 
 pub(crate) fn apply_pubdata_to_memory(
     memory: &mut BootloaderMemory,
-    pubdata_information: PubdataInput,
-    pubdata_params: PubdataParams,
-    subversion: MultiVMSubversion,
+    pubdata_builder: &dyn PubdataBuilder,
+    pubdata_information: &PubdataInput,
+    protocol_version: ProtocolVersionId,
 ) {
-    let (l1_messenger_pubdata_start_slot, pubdata) = match subversion {
-        MultiVMSubversion::SmallBootloaderMemory | MultiVMSubversion::IncreasedBootloaderMemory => {
-            // Skipping two slots as they will be filled by the bootloader itself:
-            // - One slot is for the selector of the call to the L1Messenger.
-            // - The other slot is for the 0x20 offset for the calldata.
-            let l1_messenger_pubdata_start_slot = OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_OFFSET + 2;
-
-            // Need to skip first word as it represents array offset
-            // while bootloader expects only [len || data]
-            let pubdata = ethabi::encode(&[ethabi::Token::Bytes(
-                pubdata_information.build_pubdata_legacy(true),
-            )])[32..]
-            .to_vec();
-
-            assert!(
-                pubdata.len() / 32 <= OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_SLOTS - 2,
-                "The encoded pubdata is too big"
-            );
-
-            (l1_messenger_pubdata_start_slot, pubdata)
-        }
-        MultiVMSubversion::Gateway => {
-            // Skipping the first slot as it will be filled by the bootloader itself:
-            // It is for the selector of the call to the L1Messenger.
-            let l1_messenger_pubdata_start_slot = OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_OFFSET + 1;
-
-            let pubdata = get_encoded_pubdata(pubdata_information, pubdata_params, true);
-
-            assert!(
-                // Note that unlike the previous version, the difference is `1`, since now it also includes the offset
-                pubdata.len() / 32 < OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_SLOTS,
-                "The encoded pubdata is too big"
-            );
-
-            (l1_messenger_pubdata_start_slot, pubdata)
-        }
+    let (l1_messenger_pubdata_start_slot, pubdata) = if protocol_version.is_pre_gateway() {
+        // Skipping two slots as they will be filled by the bootloader itself:
+        // - One slot is for the selector of the call to the L1Messenger.
+        // - The other slot is for the 0x20 offset for the calldata.
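+        // In other words, the full calldata assembled by the bootloader is
+        //   [4-byte selector | 32-byte offset word (0x20) | 32-byte length | data ...],
+        // and the memory written here holds only the `[length | data]` tail, starting
+        // two slots past the offset (the encoding's first word is stripped below).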
+ let l1_messenger_pubdata_start_slot = OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_OFFSET + 2; + + // Need to skip first word as it represents array offset + // while bootloader expects only [len || data] + let pubdata = ethabi::encode(&[ethabi::Token::Bytes( + pubdata_builder.l1_messenger_operator_input(pubdata_information, protocol_version), + )])[32..] + .to_vec(); + + assert!( + pubdata.len() / 32 <= OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_SLOTS - 2, + "The encoded pubdata is too big" + ); + + (l1_messenger_pubdata_start_slot, pubdata) + } else { + // Skipping the first slot as it will be filled by the bootloader itself: + // It is for the selector of the call to the L1Messenger. + let l1_messenger_pubdata_start_slot = OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_OFFSET + 1; + + let pubdata = + bootloader_memory_input(pubdata_builder, pubdata_information, protocol_version); + + assert!( + // Note that unlike the previous version, the difference is `1`, since now it also includes the offset + pubdata.len() / 32 < OPERATOR_PROVIDED_L1_MESSENGER_PUBDATA_SLOTS, + "The encoded pubdata is too big" + ); + + (l1_messenger_pubdata_start_slot, pubdata) }; pubdata @@ -227,8 +212,8 @@ pub(super) fn assemble_tx_meta(execution_mode: TxExecutionMode, execute_tx: bool // Set 0 byte (execution mode) output[0] = match execution_mode { TxExecutionMode::VerifyExecute => 0x00, - TxExecutionMode::EstimateFee { .. } => 0x00, - TxExecutionMode::EthCall { .. } => 0x02, + TxExecutionMode::EstimateFee => 0x00, + TxExecutionMode::EthCall => 0x02, }; // Set 31 byte (marker for tx execution) diff --git a/core/lib/multivm/src/versions/vm_latest/implementation/execution.rs b/core/lib/multivm/src/versions/vm_latest/implementation/execution.rs index 2f23bfb89f0..d9331720ce2 100644 --- a/core/lib/multivm/src/versions/vm_latest/implementation/execution.rs +++ b/core/lib/multivm/src/versions/vm_latest/implementation/execution.rs @@ -14,6 +14,7 @@ use crate::{ circuits_capacity::circuit_statistic_from_cycles, dispatcher::TracerDispatcher, DefaultExecutionTracer, PubdataTracer, RefundsTracer, }, + utils::extract_bytecodes_marked_as_known, vm::Vm, }, HistoryMode, @@ -55,6 +56,10 @@ impl Vm { .then_some(RefundsTracer::new(self.batch_env.clone(), self.subversion)); let mut tx_tracer: DefaultExecutionTracer = DefaultExecutionTracer::new( self.system_env.default_validation_computational_gas_limit, + self.system_env + .base_system_smart_contracts + .evm_emulator + .is_some(), execution_mode, mem::take(dispatcher), self.storage.clone(), @@ -64,7 +69,7 @@ impl Vm { self.batch_env.clone(), execution_mode, self.subversion, - self.system_env.version, + None, )) }), self.subversion, @@ -96,6 +101,8 @@ impl Vm { circuit_statistic_from_cycles(tx_tracer.circuits_tracer.statistics), ); let result = tx_tracer.result_tracer.into_result(); + let factory_deps_marked_as_known = extract_bytecodes_marked_as_known(&logs.events); + let new_known_factory_deps = self.decommit_bytecodes(&factory_deps_marked_as_known); *dispatcher = tx_tracer.dispatcher; let result = VmExecutionResultAndLogs { @@ -103,6 +110,7 @@ impl Vm { logs, statistics, refunds, + new_known_factory_deps: Some(new_known_factory_deps), }; (stop_reason, result) diff --git a/core/lib/multivm/src/versions/vm_latest/implementation/statistics.rs b/core/lib/multivm/src/versions/vm_latest/implementation/statistics.rs index 34c1e1f81da..c1cf1504356 100644 --- a/core/lib/multivm/src/versions/vm_latest/implementation/statistics.rs +++ b/core/lib/multivm/src/versions/vm_latest/implementation/statistics.rs 
@@ -51,7 +51,7 @@ impl Vm { } /// Returns the info about all oracles' sizes. - pub(crate) fn record_vm_memory_metrics_inner(&self) -> VmMemoryMetrics { + pub(crate) fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { VmMemoryMetrics { event_sink_inner: self.state.event_sink.get_size(), event_sink_history: self.state.event_sink.get_history_size(), diff --git a/core/lib/multivm/src/versions/vm_latest/implementation/tx.rs b/core/lib/multivm/src/versions/vm_latest/implementation/tx.rs index 98d71efa00f..6dd73866adf 100644 --- a/core/lib/multivm/src/versions/vm_latest/implementation/tx.rs +++ b/core/lib/multivm/src/versions/vm_latest/implementation/tx.rs @@ -59,7 +59,12 @@ impl Vm { tx: Transaction, with_compression: bool, ) { - let tx: TransactionData = tx.into(); + let use_evm_emulator = self + .system_env + .base_system_smart_contracts + .evm_emulator + .is_some(); + let tx = TransactionData::new(tx, use_evm_emulator); let overhead = tx.overhead_gas(); self.push_raw_transaction(tx, overhead, 0, with_compression); } diff --git a/core/lib/multivm/src/versions/vm_latest/old_vm/oracles/decommitter.rs b/core/lib/multivm/src/versions/vm_latest/old_vm/oracles/decommitter.rs index 0315aa38327..d91fbfdb24d 100644 --- a/core/lib/multivm/src/versions/vm_latest/old_vm/oracles/decommitter.rs +++ b/core/lib/multivm/src/versions/vm_latest/old_vm/oracles/decommitter.rs @@ -5,9 +5,7 @@ use zk_evm_1_5_0::{ aux_structures::{ DecommittmentQuery, MemoryIndex, MemoryLocation, MemoryPage, MemoryQuery, Timestamp, }, - zkevm_opcode_defs::{ - ContractCodeSha256, VersionedHashDef, VersionedHashHeader, VersionedHashNormalizedPreimage, - }, + zkevm_opcode_defs::{VersionedHashHeader, VersionedHashNormalizedPreimage}, }; use zksync_types::{H256, U256}; use zksync_utils::{bytes_to_be_words, h256_to_u256, u256_to_h256}; @@ -166,8 +164,8 @@ impl DecommittmentProcess _monotonic_cycle_counter: u32, mut partial_query: DecommittmentQuery, ) -> anyhow::Result { - let (stored_hash, length) = stored_hash_from_query(&partial_query); - partial_query.decommitted_length = length; + let versioned_hash = VersionedCodeHash::from_query(&partial_query); + let stored_hash = versioned_hash.to_stored_hash(); if let Some(memory_page) = self .decommitted_code_hashes @@ -178,10 +176,10 @@ impl DecommittmentProcess { partial_query.is_fresh = false; partial_query.memory_page = MemoryPage(memory_page); + partial_query.decommitted_length = versioned_hash.get_preimage_length() as u16; Ok(partial_query) } else { - partial_query.is_fresh = true; if self .decommitted_code_hashes .inner() @@ -190,7 +188,9 @@ impl DecommittmentProcess { self.decommitted_code_hashes .insert(stored_hash, None, partial_query.timestamp); - } + }; + partial_query.is_fresh = true; + partial_query.decommitted_length = versioned_hash.get_preimage_length() as u16; Ok(partial_query) } @@ -204,11 +204,10 @@ impl DecommittmentProcess memory: &mut M, ) -> anyhow::Result>> { assert!(partial_query.is_fresh); - self.decommitment_requests.push((), partial_query.timestamp); - let stored_hash = stored_hash_from_query(&partial_query).0; - + let versioned_hash = VersionedCodeHash::from_query(&partial_query); + let stored_hash = versioned_hash.to_stored_hash(); // We are fetching a fresh bytecode that we didn't read before. 
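// (Cache hits never reach this point: `prepare_to_decommit` above returns early for
// hashes already present in `decommitted_code_hashes`, reusing their memory page.)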
let values = self.get_bytecode(stored_hash, partial_query.timestamp); let page_to_use = partial_query.memory_page; @@ -251,28 +250,49 @@ impl DecommittmentProcess } } -fn concat_header_and_preimage( - header: VersionedHashHeader, - normalized_preimage: VersionedHashNormalizedPreimage, -) -> [u8; 32] { - let mut buffer = [0u8; 32]; +#[derive(Debug)] +// TODO: consider moving this to the zk-evm crate +enum VersionedCodeHash { + ZkEVM(VersionedHashHeader, VersionedHashNormalizedPreimage), + Evm(VersionedHashHeader, VersionedHashNormalizedPreimage), +} - buffer[0..4].copy_from_slice(&header.0); - buffer[4..32].copy_from_slice(&normalized_preimage.0); +impl VersionedCodeHash { + fn from_query(query: &DecommittmentQuery) -> Self { + match query.header.0[0] { + 1 => Self::ZkEVM(query.header, query.normalized_preimage), + 2 => Self::Evm(query.header, query.normalized_preimage), + _ => panic!("Unsupported hash version"), + } + } - buffer -} + /// Returns the hash in the format it is stored in the DB. + fn to_stored_hash(&self) -> U256 { + let (header, preimage) = match self { + Self::ZkEVM(header, preimage) => (header, preimage), + Self::Evm(header, preimage) => (header, preimage), + }; -/// For a given decommitment query, returns a pair of the stored hash as U256 and the length of the preimage in 32-byte words. -fn stored_hash_from_query(partial_query: &DecommittmentQuery) -> (U256, u16) { - let full_hash = - concat_header_and_preimage(partial_query.header, partial_query.normalized_preimage); + let mut hash = [0u8; 32]; + hash[0..4].copy_from_slice(&header.0); + hash[4..32].copy_from_slice(&preimage.0); - let versioned_hash = - ContractCodeSha256::try_deserialize(full_hash).expect("Invalid ContractCodeSha256 hash"); + // Hash[1] is used in both of the versions to denote whether the bytecode is being constructed. + // We ignore this param. + hash[1] = 0; - let stored_hash = H256(ContractCodeSha256::serialize_to_stored(versioned_hash).unwrap()); - let length = versioned_hash.code_length_in_words; + h256_to_u256(H256(hash)) + } - (h256_to_u256(stored_hash), length) + fn get_preimage_length(&self) -> u32 { + // In zkEVM the hash[2..3] denotes the length of the preimage in words, while + // in EVM the hash[2..3] denotes the length of the preimage in bytes. 
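+        // Worked example: a zkEVM header of [0x01, 0x00, 0x00, 0x21] encodes
+        // 0x0021 = 33 words, i.e. 33 * 32 = 1056 bytes, while an EVM header of
+        // [0x02, 0x00, 0x04, 0x00] encodes 0x0400 = 1024 bytes directly.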
+ match self { + Self::ZkEVM(header, _) => { + let length_in_words = header.0[2] as u32 * 256 + header.0[3] as u32; + length_in_words * 32 + } + Self::Evm(header, _) => header.0[2] as u32 * 256 + header.0[3] as u32, + } + } } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/block_tip.rs b/core/lib/multivm/src/versions/vm_latest/tests/block_tip.rs index e4948f18030..df4a36f2d3d 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/block_tip.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/block_tip.rs @@ -1,430 +1,9 @@ -use std::borrow::BorrowMut; - -use ethabi::Token; -use itertools::Itertools; -use zk_evm_1_5_0::aux_structures::Timestamp; -use zksync_contracts::load_sys_contract; -use zksync_system_constants::{ - CONTRACT_FORCE_DEPLOYER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS, -}; -use zksync_types::{ - commitment::SerializeCommitment, fee_model::BatchFeeInput, get_code_key, - l2_to_l1_log::L2ToL1Log, writes::StateDiffRecord, Address, Execute, ProtocolVersionId, H256, - U256, -}; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use super::utils::{get_complex_upgrade_abi, read_complex_upgrade}; use crate::{ - interface::{L1BatchEnv, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - vm_latest::{ - constants::{ - BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD, - BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD, BOOTLOADER_BATCH_TIP_OVERHEAD, - MAX_VM_PUBDATA_PER_BATCH, - }, - tests::tester::{ - default_l1_batch, get_empty_storage, InMemoryStorageView, VmTesterBuilder, - }, - tracers::PubdataTracer, - HistoryEnabled, TracerDispatcher, - }, + versions::testonly::block_tip::test_dry_run_upper_bound, + vm_latest::{HistoryEnabled, Vm}, }; -#[derive(Debug, Clone, Default)] -struct L1MessengerTestData { - l2_to_l1_logs: usize, - messages: Vec>, - bytecodes: Vec>, - state_diffs: Vec, -} - -struct MimicCallInfo { - to: Address, - who_to_mimic: Address, - data: Vec, -} - -const CALLS_PER_TX: usize = 1_000; -fn populate_mimic_calls(data: L1MessengerTestData) -> Vec> { - let complex_upgrade = get_complex_upgrade_abi(); - let l1_messenger = load_sys_contract("L1Messenger"); - - let logs_mimic_calls = (0..data.l2_to_l1_logs).map(|_| MimicCallInfo { - to: L1_MESSENGER_ADDRESS, - who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, - data: l1_messenger - .function("sendL2ToL1Log") - .unwrap() - .encode_input(&[ - Token::Bool(false), - Token::FixedBytes(H256::random().0.to_vec()), - Token::FixedBytes(H256::random().0.to_vec()), - ]) - .unwrap(), - }); - let messages_mimic_calls = data.messages.iter().map(|message| MimicCallInfo { - to: L1_MESSENGER_ADDRESS, - who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, - data: l1_messenger - .function("sendToL1") - .unwrap() - .encode_input(&[Token::Bytes(message.clone())]) - .unwrap(), - }); - let bytecodes_mimic_calls = data.bytecodes.iter().map(|bytecode| MimicCallInfo { - to: L1_MESSENGER_ADDRESS, - who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, - data: l1_messenger - .function("requestBytecodeL1Publication") - .unwrap() - .encode_input(&[Token::FixedBytes(hash_bytecode(bytecode).0.to_vec())]) - .unwrap(), - }); - - let encoded_calls = logs_mimic_calls - .chain(messages_mimic_calls) - .chain(bytecodes_mimic_calls) - .map(|call| { - Token::Tuple(vec![ - Token::Address(call.to), - Token::Address(call.who_to_mimic), - Token::Bytes(call.data), - ]) - }) - .chunks(CALLS_PER_TX) - .into_iter() - .map(|chunk| { - complex_upgrade - .function("mimicCalls") - .unwrap() - 
.encode_input(&[Token::Array(chunk.collect_vec())]) - .unwrap() - }) - .collect_vec(); - - encoded_calls -} - -struct TestStatistics { - pub max_used_gas: u32, - pub circuit_statistics: u64, - pub execution_metrics_size: u64, -} - -struct StatisticsTagged { - pub statistics: TestStatistics, - pub tag: String, -} - -fn execute_test(test_data: L1MessengerTestData) -> TestStatistics { - let mut storage = get_empty_storage(); - let complex_upgrade_code = read_complex_upgrade(); - - // For this test we'll just put the bytecode onto the force deployer address - storage.borrow_mut().set_value( - get_code_key(&CONTRACT_FORCE_DEPLOYER_ADDRESS), - hash_bytecode(&complex_upgrade_code), - ); - storage - .borrow_mut() - .store_factory_dep(hash_bytecode(&complex_upgrade_code), complex_upgrade_code); - - // We are measuring computational cost, so prices for pubdata don't matter, while they artificially dilute - // the gas limit - - let batch_env = L1BatchEnv { - fee_input: BatchFeeInput::pubdata_independent(100_000, 100_000, 100_000), - ..default_l1_batch(zksync_types::L1BatchNumber(1)) - }; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_storage(storage) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .with_l1_batch_env(batch_env) - .build(); - - let bytecodes = test_data - .bytecodes - .iter() - .map(|bytecode| { - let hash = hash_bytecode(bytecode); - let words = bytes_to_be_words(bytecode.clone()); - (h256_to_u256(hash), words) - }) - .collect(); - vm.vm - .state - .decommittment_processor - .populate(bytecodes, Timestamp(0)); - - let txs_data = populate_mimic_calls(test_data.clone()); - let account = &mut vm.rich_accounts[0]; - - for (i, data) in txs_data.into_iter().enumerate() { - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(CONTRACT_FORCE_DEPLOYER_ADDRESS), - calldata: data, - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "Transaction {i} wasn't successful for input: {:#?}", - test_data - ); - } - - // Now we count how much ergs were spent at the end of the batch - // It is assumed that the top level frame is the bootloader - - let ergs_before = vm.vm.state.local_state.callstack.current.ergs_remaining; - - // We ensure that indeed the provided state diffs are used - let pubdata_tracer = PubdataTracer::::new_with_forced_state_diffs( - vm.vm.batch_env.clone(), - VmExecutionMode::Batch, - test_data.state_diffs.clone(), - crate::vm_latest::MultiVMSubversion::latest(), - ProtocolVersionId::Version25, - ); - - let result = vm.vm.inspect_inner( - &mut TracerDispatcher::default(), - VmExecutionMode::Batch, - Some(pubdata_tracer), - ); - - assert!( - !result.result.is_failed(), - "Batch wasn't successful for input: {:?}", - test_data - ); - - let ergs_after = vm.vm.state.local_state.callstack.current.ergs_remaining; - - assert_eq!( - (ergs_before - ergs_after) as u64, - result.statistics.gas_used - ); - - TestStatistics { - max_used_gas: ergs_before - ergs_after, - circuit_statistics: result.statistics.circuit_statistic.total() as u64, - execution_metrics_size: result.get_execution_metrics(None).size() as u64, - } -} - -fn generate_state_diffs( - repeated_writes: bool, - small_diff: bool, - number_of_state_diffs: usize, -) -> Vec { - (0..number_of_state_diffs) - .map(|i| { - let address = Address::from_low_u64_be(i as u64); - let key = U256::from(i); - let enumeration_index 
= if repeated_writes { i + 1 } else { 0 }; - - let (initial_value, final_value) = if small_diff { - // As small as it gets, one byte to denote zeroing out the value - (U256::from(1), U256::from(0)) - } else { - // As large as it gets - (U256::from(0), U256::from(2).pow(255.into())) - }; - - StateDiffRecord { - address, - key, - derived_key: u256_to_h256(i.into()).0, - enumeration_index: enumeration_index as u64, - initial_value, - final_value, - } - }) - .collect() -} - -// A valid zkEVM bytecode has odd number of 32 byte words -fn get_valid_bytecode_length(length: usize) -> usize { - // Firstly ensure that the length is divisible by 32 - let length_padded_to_32 = if length % 32 == 0 { - length - } else { - length + 32 - (length % 32) - }; - - // Then we ensure that the number returned by division by 32 is odd - if length_padded_to_32 % 64 == 0 { - length_padded_to_32 + 32 - } else { - length_padded_to_32 - } -} - #[test] -fn test_dry_run_upper_bound() { - // Some of the pubdata is consumed by constant fields (such as length of messages, number of logs, etc.). - // While this leaves some room for error, at the end of the test we require that the `BOOTLOADER_BATCH_TIP_OVERHEAD` - // is sufficient with a very large margin, so it is okay to ignore 1% of possible pubdata. - const MAX_EFFECTIVE_PUBDATA_PER_BATCH: usize = - (MAX_VM_PUBDATA_PER_BATCH as f64 * 0.99) as usize; - - // We are re-using the `ComplexUpgrade` contract as it already has the `mimicCall` functionality. - // To get the upper bound, we'll try to do the following: - // 1. Max number of logs. - // 2. Lots of small L2->L1 messages / one large L2->L1 message. - // 3. Lots of small bytecodes / one large bytecode. - // 4. Lots of storage slot updates. - - let statistics = vec![ - // max logs - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - l2_to_l1_logs: MAX_EFFECTIVE_PUBDATA_PER_BATCH / L2ToL1Log::SERIALIZED_SIZE, - ..Default::default() - }), - tag: "max_logs".to_string(), - }, - // max messages - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each L2->L1 message is accompanied by a Log + its length, which is a 4 byte number, - // so the max number of pubdata is bound by it - messages: vec![ - vec![0; 0]; - MAX_EFFECTIVE_PUBDATA_PER_BATCH / (L2ToL1Log::SERIALIZED_SIZE + 4) - ], - ..Default::default() - }), - tag: "max_messages".to_string(), - }, - // long message - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each L2->L1 message is accompanied by a Log, so the max number of pubdata is bound by it - messages: vec![vec![0; MAX_EFFECTIVE_PUBDATA_PER_BATCH]; 1], - ..Default::default() - }), - tag: "long_message".to_string(), - }, - // max bytecodes - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each bytecode must be at least 32 bytes long. 
- // Each uncompressed bytecode is accompanied by its length, which is a 4 byte number - bytecodes: vec![vec![0; 32]; MAX_EFFECTIVE_PUBDATA_PER_BATCH / (32 + 4)], - ..Default::default() - }), - tag: "max_bytecodes".to_string(), - }, - // long bytecode - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - bytecodes: vec![ - vec![0; get_valid_bytecode_length(MAX_EFFECTIVE_PUBDATA_PER_BATCH)]; - 1 - ], - ..Default::default() - }), - tag: "long_bytecode".to_string(), - }, - // lots of small repeated writes - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // In theory each state diff can require only 5 bytes to be published (enum index + 4 bytes for the key) - state_diffs: generate_state_diffs(true, true, MAX_EFFECTIVE_PUBDATA_PER_BATCH / 5), - ..Default::default() - }), - tag: "small_repeated_writes".to_string(), - }, - // lots of big repeated writes - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each big repeated write will approximately require 4 bytes for key + 1 byte for encoding type + 32 bytes for value - state_diffs: generate_state_diffs( - true, - false, - MAX_EFFECTIVE_PUBDATA_PER_BATCH / 37, - ), - ..Default::default() - }), - tag: "big_repeated_writes".to_string(), - }, - // lots of small initial writes - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each small initial write will take at least 32 bytes for derived key + 1 bytes encoding zeroing out - state_diffs: generate_state_diffs( - false, - true, - MAX_EFFECTIVE_PUBDATA_PER_BATCH / 33, - ), - ..Default::default() - }), - tag: "small_initial_writes".to_string(), - }, - // lots of large initial writes - StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each big write will take at least 32 bytes for derived key + 1 byte for encoding type + 32 bytes for value - state_diffs: generate_state_diffs( - false, - false, - MAX_EFFECTIVE_PUBDATA_PER_BATCH / 65, - ), - ..Default::default() - }), - tag: "big_initial_writes".to_string(), - }, - ]; - - // We use 2x overhead for the batch tip compared to the worst estimated scenario. 
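// Illustrative sketch: worked examples for the `get_valid_bytecode_length` helper
// removed above. A valid zkEVM bytecode must span a whole number of 32-byte words,
// and that word count must be odd; `demo_valid_bytecode_lengths` is a hypothetical
// name, not part of the repository.
fn demo_valid_bytecode_lengths() {
    // 33 bytes -> padded to 64 bytes = 2 words (even) -> bumped to 96 bytes = 3 words.
    assert_eq!(get_valid_bytecode_length(33), 96);
    // 100 bytes -> padded to 128 bytes = 4 words (even) -> bumped to 160 bytes = 5 words.
    assert_eq!(get_valid_bytecode_length(100), 160);
    // 32 bytes is exactly 1 word, which is already odd, so it is returned unchanged.
    assert_eq!(get_valid_bytecode_length(32), 32);
}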
- let max_used_gas = statistics - .iter() - .map(|s| (s.statistics.max_used_gas, s.tag.clone())) - .max() - .unwrap(); - assert!( - max_used_gas.0 * 3 / 2 <= BOOTLOADER_BATCH_TIP_OVERHEAD, - "BOOTLOADER_BATCH_TIP_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_OVERHEAD = {}", - max_used_gas.1, - max_used_gas.0, - BOOTLOADER_BATCH_TIP_OVERHEAD - ); - - let circuit_statistics = statistics - .iter() - .map(|s| (s.statistics.circuit_statistics, s.tag.clone())) - .max() - .unwrap(); - assert!( - circuit_statistics.0 * 3 / 2 <= BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD as u64, - "BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD = {}", - circuit_statistics.1, - circuit_statistics.0, - BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD - ); - - let execution_metrics_size = statistics - .iter() - .map(|s| (s.statistics.execution_metrics_size, s.tag.clone())) - .max() - .unwrap(); - assert!( - execution_metrics_size.0 * 3 / 2 <= BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD as u64, - "BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD = {}", - execution_metrics_size.1, - execution_metrics_size.0, - BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD - ); +fn dry_run_upper_bound() { + test_dry_run_upper_bound::<Vm<_, HistoryEnabled>>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/bootloader.rs b/core/lib/multivm/src/versions/vm_latest/tests/bootloader.rs index 9d23f658cb8..22239a6c1e3 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/bootloader.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/bootloader.rs @@ -1,57 +1,14 @@ -use assert_matches::assert_matches; -use zksync_types::U256; - use crate::{ - interface::{ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterfaceExt}, - vm_latest::{ - constants::BOOTLOADER_HEAP_PAGE, - tests::{ - tester::VmTesterBuilder, - utils::{get_bootloader, verify_required_memory, BASE_SYSTEM_CONTRACTS}, - }, - HistoryEnabled, - }, + versions::testonly::bootloader::{test_bootloader_out_of_gas, test_dummy_bootloader}, + vm_latest::{HistoryEnabled, Vm}, }; #[test] -fn test_dummy_bootloader() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!result.result.is_failed()); - - let correct_first_cell = U256::from_str_radix("123123123", 16).unwrap(); - verify_required_memory( - &vm.vm.state, - vec![(correct_first_cell, BOOTLOADER_HEAP_PAGE, 0)], - ); +fn dummy_bootloader() { + test_dummy_bootloader::<Vm<_, HistoryEnabled>>(); } #[test] -fn test_bootloader_out_of_gas() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_bootloader_gas_limit(10) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let res = vm.vm.execute(VmExecutionMode::Batch); - - assert_matches!( - res.result, - ExecutionResult::Halt { - reason: Halt::BootloaderOutOfGas - } - ); +fn bootloader_out_of_gas() { + test_bootloader_out_of_gas::<Vm<_, HistoryEnabled>>(); } diff --git
a/core/lib/multivm/src/versions/vm_latest/tests/bytecode_publishing.rs b/core/lib/multivm/src/versions/vm_latest/tests/bytecode_publishing.rs index 2ed9948af81..e0727fbed89 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/bytecode_publishing.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/bytecode_publishing.rs @@ -1,41 +1,9 @@ use crate::{ - interface::{TxExecutionMode, VmEvent, VmExecutionMode, VmInterface, VmInterfaceExt}, - utils::bytecode, - vm_latest::{ - tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - HistoryEnabled, - }, + versions::testonly::bytecode_publishing::test_bytecode_publishing, + vm_latest::{HistoryEnabled, Vm}, }; #[test] -fn test_bytecode_publishing() { - // In this test, we aim to ensure that the contents of the compressed bytecodes - // are included as part of the L2->L1 long messages - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let compressed_bytecode = bytecode::compress(counter.clone()).unwrap().compressed; - - let DeployContractsTx { tx, .. } = account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - - let state = vm.vm.get_current_execution_state(); - let long_messages = VmEvent::extract_long_l2_to_l1_messages(&state.events); - assert!( - long_messages.contains(&compressed_bytecode), - "Bytecode not published" - ); +fn bytecode_publishing() { + test_bytecode_publishing::<Vm<_, HistoryEnabled>>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/call_tracer.rs b/core/lib/multivm/src/versions/vm_latest/tests/call_tracer.rs index e7f26b7faf8..b502ea50b1a 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/call_tracer.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/call_tracer.rs @@ -3,17 +3,14 @@ use std::sync::Arc; use once_cell::sync::OnceCell; use zksync_types::{Address, Execute}; +use super::TestedLatestVm; use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + interface::{InspectExecutionMode, TxExecutionMode, VmInterface}, tracers::CallTracer, - vm_latest::{ - constants::BATCH_COMPUTATIONAL_GAS_LIMIT, - tests::{ - tester::VmTesterBuilder, - utils::{read_max_depth_contract, read_test_contract}, - }, - HistoryEnabled, ToTracerPointer, + versions::testonly::{ + read_max_depth_contract, read_test_contract, ContractToDeploy, VmTesterBuilder, }, + vm_latest::{constants::BATCH_COMPUTATIONAL_GAS_LIMIT, ToTracerPointer}, }; // This test is ultra slow, so it's ignored by default.
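// Illustrative sketch of the delegation pattern this diff applies across the test
// files: version-agnostic test bodies live in `versions::testonly` as functions
// generic over the tested VM (the `TestedVm` bound is assumed here), and each VM
// version keeps a one-line `#[test]` wrapper. `test_something` and `something`
// are hypothetical names standing in for any shared test.
fn test_something<VM: TestedVm>() {
    // ...build a tester via `VmTesterBuilder` and run version-independent assertions...
}

// In `vm_latest/tests/...`, the wrapper only picks the concrete VM type:
#[test]
fn something() {
    test_something::<Vm<_, HistoryEnabled>>();
}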
@@ -22,14 +19,13 @@ use crate::{ fn test_max_depth() { let contarct = read_max_depth_contract(); let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) + let mut vm = VmTesterBuilder::new() .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() + .with_rich_accounts(1) .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contarct, address, true)]) - .build(); + .with_custom_contracts(vec![ContractToDeploy::account(contarct, address)]) + .build::<TestedLatestVm>(); let account = &mut vm.rich_accounts[0]; let tx = account.get_l2_tx_for_execute( @@ -47,23 +43,22 @@ fn test_max_depth() { vm.vm.push_transaction(tx); let res = vm .vm - .inspect(&mut call_tracer.into(), VmExecutionMode::OneTx); + .inspect(&mut call_tracer.into(), InspectExecutionMode::OneTx); assert!(result.get().is_some()); assert!(res.result.is_failed()); } #[test] fn test_basic_behavior() { - let contarct = read_test_contract(); + let contract = read_test_contract(); let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) + let mut vm = VmTesterBuilder::new() .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() + .with_rich_accounts(1) .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contarct, address, true)]) - .build(); + .with_custom_contracts(vec![ContractToDeploy::account(contract, address)]) + .build::<TestedLatestVm>(); let increment_by_6_calldata = "7cf5dab00000000000000000000000000000000000000000000000000000000000000006"; @@ -84,7 +79,7 @@ fn test_basic_behavior() { vm.vm.push_transaction(tx); let res = vm .vm - .inspect(&mut call_tracer.into(), VmExecutionMode::OneTx); + .inspect(&mut call_tracer.into(), InspectExecutionMode::OneTx); let call_tracer_result = result.get().unwrap(); diff --git a/core/lib/multivm/src/versions/vm_latest/tests/circuits.rs b/core/lib/multivm/src/versions/vm_latest/tests/circuits.rs index c3c6816cbd8..690af7d2a35 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/circuits.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/circuits.rs @@ -1,76 +1,9 @@ -use zksync_types::{Address, Execute, U256}; - use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_latest::{ - constants::BATCH_COMPUTATIONAL_GAS_LIMIT, tests::tester::VmTesterBuilder, HistoryEnabled, - }, + versions::testonly::circuits::test_circuits, + vm_latest::{HistoryEnabled, Vm}, }; -// Checks that estimated number of circuits for simple transfer doesn't differ much -// from hardcoded expected value. #[test] -fn test_circuits() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(Address::random()), - calldata: Vec::new(), - value: U256::from(1u8), - factory_deps: vec![], - }, - None, - ); - vm.vm.push_transaction(tx); - let res = vm - .vm - .inspect(&mut Default::default(), VmExecutionMode::OneTx); - - let s = res.statistics.circuit_statistic; - // Check `circuit_statistic`.
- const EXPECTED: [f32; 13] = [ - 1.34935, 0.15026, 1.66666, 0.00315, 1.0594, 0.00058, 0.00348, 0.00076, 0.11945, 0.14285, - 0.0, 0.0, 0.0, - ]; - let actual = [ - (s.main_vm, "main_vm"), - (s.ram_permutation, "ram_permutation"), - (s.storage_application, "storage_application"), - (s.storage_sorter, "storage_sorter"), - (s.code_decommitter, "code_decommitter"), - (s.code_decommitter_sorter, "code_decommitter_sorter"), - (s.log_demuxer, "log_demuxer"), - (s.events_sorter, "events_sorter"), - (s.keccak256, "keccak256"), - (s.ecrecover, "ecrecover"), - (s.sha256, "sha256"), - (s.secp256k1_verify, "secp256k1_verify"), - (s.transient_storage_checker, "transient_storage_checker"), - ]; - for ((actual, name), expected) in actual.iter().zip(EXPECTED) { - if expected == 0.0 { - assert_eq!( - *actual, expected, - "Check failed for {}, expected {}, actual {}", - name, expected, actual - ); - } else { - let diff = (actual - expected) / expected; - assert!( - diff.abs() < 0.1, - "Check failed for {}, expected {}, actual {}", - name, - expected, - actual - ); - } - } +fn circuits() { + test_circuits::<Vm<_, HistoryEnabled>>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/code_oracle.rs b/core/lib/multivm/src/versions/vm_latest/tests/code_oracle.rs index b15ef7fde2b..e50e2aafcbf 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/code_oracle.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/code_oracle.rs @@ -1,282 +1,21 @@ -use ethabi::Token; -use zk_evm_1_5_0::{ - aux_structures::{MemoryPage, Timestamp}, - zkevm_opcode_defs::{ContractCodeSha256Format, VersionedHashLen32}, -}; -use zksync_types::{ - get_known_code_key, web3::keccak256, Address, Execute, StorageLogWithPreviousValue, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - vm_latest::{ - tests::{ - tester::{get_empty_storage, VmTesterBuilder}, - utils::{load_precompiles_contract, read_precompiles_contract, read_test_contract}, - }, - HistoryEnabled, + versions::testonly::code_oracle::{ + test_code_oracle, test_code_oracle_big_bytecode, test_refunds_in_code_oracle, }, + vm_latest::{HistoryEnabled, Vm}, }; -fn generate_large_bytecode() -> Vec<u8> { - // This is the maximal possible size of a zkEVM bytecode - vec![2u8; ((1 << 16) - 1) * 32] -} #[test] -fn test_code_oracle() { - let precompiles_contract_address = Address::random(); - let precompile_contract_bytecode = read_precompiles_contract(); - - // Filling the zkevm bytecode - let normal_zkevm_bytecode = read_test_contract(); - let normal_zkevm_bytecode_hash = hash_bytecode(&normal_zkevm_bytecode); - let normal_zkevm_bytecode_keccak_hash = keccak256(&normal_zkevm_bytecode); - let mut storage = get_empty_storage(); - storage.set_value( - get_known_code_key(&normal_zkevm_bytecode_hash), - u256_to_h256(U256::one()), - ); - - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests.
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .with_custom_contracts(vec![( - precompile_contract_bytecode, - precompiles_contract_address, - false, - )]) - .with_storage(storage) - .build(); - - let precompile_contract = load_precompiles_contract(); - let call_code_oracle_function = precompile_contract.function("callCodeOracle").unwrap(); - - vm.vm.state.decommittment_processor.populate( - vec![( - h256_to_u256(normal_zkevm_bytecode_hash), - bytes_to_be_words(normal_zkevm_bytecode), - )], - Timestamp(0), - ); - - let account = &mut vm.rich_accounts[0]; - - // Firstly, let's ensure that the contract works. - let tx1 = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(precompiles_contract_address), - calldata: call_code_oracle_function - .encode_input(&[ - Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), - Token::FixedBytes(normal_zkevm_bytecode_keccak_hash.to_vec()), - ]) - .unwrap(), - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx1); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "Transaction wasn't successful: {result:#?}" - ); - - // Now, we ask for the same bytecode. We use to partially check whether the memory page with - // the decommitted bytecode gets erased (it shouldn't). - let tx2 = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(precompiles_contract_address), - calldata: call_code_oracle_function - .encode_input(&[ - Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), - Token::FixedBytes(normal_zkevm_bytecode_keccak_hash.to_vec()), - ]) - .unwrap(), - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - vm.vm.push_transaction(tx2); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "Transaction wasn't successful: {result:#?}" - ); -} - -fn find_code_oracle_cost_log( - precompiles_contract_address: Address, - logs: &[StorageLogWithPreviousValue], -) -> &StorageLogWithPreviousValue { - logs.iter() - .find(|log| { - *log.log.key.address() == precompiles_contract_address && log.log.key.key().is_zero() - }) - .expect("no code oracle cost log") +fn code_oracle() { + test_code_oracle::>(); } #[test] -fn test_code_oracle_big_bytecode() { - let precompiles_contract_address = Address::random(); - let precompile_contract_bytecode = read_precompiles_contract(); - - let big_zkevm_bytecode = generate_large_bytecode(); - let big_zkevm_bytecode_hash = hash_bytecode(&big_zkevm_bytecode); - let big_zkevm_bytecode_keccak_hash = keccak256(&big_zkevm_bytecode); - - let mut storage = get_empty_storage(); - storage.set_value( - get_known_code_key(&big_zkevm_bytecode_hash), - u256_to_h256(U256::one()), - ); - - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. 
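// Illustrative arithmetic behind `generate_large_bytecode` above: the bytecode
// length in 32-byte words has to be odd and, presumably because the length-in-words
// field of the versioned bytecode hash is two bytes wide, fit into u16; 2^16 - 1 =
// 65_535 satisfies both, so the largest bytecode is 65_535 * 32 = 2_097_120 bytes
// (roughly 2 MiB). The constant names are hypothetical.
const MAX_BYTECODE_WORDS: usize = (1 << 16) - 1; // 65_535 words
const MAX_BYTECODE_BYTES: usize = MAX_BYTECODE_WORDS * 32; // 2_097_120 bytes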
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .with_custom_contracts(vec![( - precompile_contract_bytecode, - precompiles_contract_address, - false, - )]) - .with_storage(storage) - .build(); - - let precompile_contract = load_precompiles_contract(); - let call_code_oracle_function = precompile_contract.function("callCodeOracle").unwrap(); - - vm.vm.state.decommittment_processor.populate( - vec![( - h256_to_u256(big_zkevm_bytecode_hash), - bytes_to_be_words(big_zkevm_bytecode), - )], - Timestamp(0), - ); - - let account = &mut vm.rich_accounts[0]; - - // Firstly, let's ensure that the contract works. - let tx1 = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(precompiles_contract_address), - calldata: call_code_oracle_function - .encode_input(&[ - Token::FixedBytes(big_zkevm_bytecode_hash.0.to_vec()), - Token::FixedBytes(big_zkevm_bytecode_keccak_hash.to_vec()), - ]) - .unwrap(), - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx1); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); +fn code_oracle_big_bytecode() { + test_code_oracle_big_bytecode::>(); } #[test] fn refunds_in_code_oracle() { - let precompiles_contract_address = Address::random(); - let precompile_contract_bytecode = read_precompiles_contract(); - - let normal_zkevm_bytecode = read_test_contract(); - let normal_zkevm_bytecode_hash = hash_bytecode(&normal_zkevm_bytecode); - let normal_zkevm_bytecode_keccak_hash = keccak256(&normal_zkevm_bytecode); - let normal_zkevm_bytecode_words = bytes_to_be_words(normal_zkevm_bytecode); - let mut storage = get_empty_storage(); - storage.set_value( - get_known_code_key(&normal_zkevm_bytecode_hash), - u256_to_h256(U256::one()), - ); - - let precompile_contract = load_precompiles_contract(); - let call_code_oracle_function = precompile_contract.function("callCodeOracle").unwrap(); - - // Execute code oracle twice with identical VM state that only differs in that the queried bytecode - // is already decommitted the second time. The second call must consume less gas (`decommit` doesn't charge additional gas - // for already decommitted codes). 
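// Illustrative instance of the refund rule asserted below; only the
// `4 * contract_length_in_words` formula comes from `CodeOracle.yul`, the numbers
// and the helper name are made up. For a 10_016-byte bytecode (313 words), the
// fresh `decommit` charges 4 * 313 = 1_252 ergs, while the repeated call is fully
// refunded, so the two recorded costs must differ by exactly 1_252.
fn expected_code_oracle_refund(contract_length_in_words: u64) -> u64 {
    4 * contract_length_in_words
}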
- let mut oracle_costs = vec![]; - for decommit in [false, true] { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .with_custom_contracts(vec![( - precompile_contract_bytecode.clone(), - precompiles_contract_address, - false, - )]) - .with_storage(storage.clone()) - .build(); - - vm.vm.state.decommittment_processor.populate( - vec![( - h256_to_u256(normal_zkevm_bytecode_hash), - normal_zkevm_bytecode_words.clone(), - )], - Timestamp(0), - ); - - let account = &mut vm.rich_accounts[0]; - if decommit { - let (header, normalized_preimage) = - ContractCodeSha256Format::normalize_for_decommitment(&normal_zkevm_bytecode_hash.0); - let query = vm - .vm - .state - .prepare_to_decommit( - 0, - header, - normalized_preimage, - MemoryPage(123), - Timestamp(0), - ) - .unwrap(); - - assert!(query.is_fresh); - vm.vm.state.execute_decommit(0, query).unwrap(); - } - - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(precompiles_contract_address), - calldata: call_code_oracle_function - .encode_input(&[ - Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), - Token::FixedBytes(normal_zkevm_bytecode_keccak_hash.to_vec()), - ]) - .unwrap(), - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - let log = - find_code_oracle_cost_log(precompiles_contract_address, &result.logs.storage_logs); - oracle_costs.push(log.log.value); - } - - // The refund is equal to `gasCost` parameter passed to the `decommit` opcode, which is defined as `4 * contract_length_in_words` - // in `CodeOracle.yul`. - let code_oracle_refund = h256_to_u256(oracle_costs[0]) - h256_to_u256(oracle_costs[1]); - assert_eq!( - code_oracle_refund, - (4 * normal_zkevm_bytecode_words.len()).into() - ); + test_refunds_in_code_oracle::>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/default_aa.rs b/core/lib/multivm/src/versions/vm_latest/tests/default_aa.rs index ad00bbb1574..3d0e21c2466 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/default_aa.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/default_aa.rs @@ -1,86 +1,9 @@ -use zksync_system_constants::L2_BASE_TOKEN_ADDRESS; -use zksync_types::{ - get_code_key, get_known_code_key, get_nonce_key, - system_contracts::{DEPLOYMENT_NONCE_INCREMENT, TX_NONCE_INCREMENT}, - AccountTreeId, U256, -}; -use zksync_utils::u256_to_h256; - use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - vm_latest::{ - tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::{get_balance, read_test_contract, verify_required_storage}, - }, - utils::fee::get_batch_base_fee, - HistoryEnabled, - }, + versions::testonly::default_aa::test_default_aa_interaction, + vm_latest::{HistoryEnabled, Vm}, }; #[test] -fn test_default_aa_interaction() { - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. 
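// Illustrative version of the fee bookkeeping that the removed assertions below
// encode, with made-up numbers (the real test reads `get_batch_base_fee` and the
// VM's refund counter): the operator should receive (gas_limit - gas_refunded) * base_fee.
fn operator_fee_example() {
    let (gas_limit, base_fee, gas_refunded) = (1_000_000_u64, 250_u64, 400_000_u64);
    let maximal_fee = gas_limit * base_fee;
    let expected_fee = maximal_fee - gas_refunded * base_fee;
    assert_eq!(expected_fee, (gas_limit - gas_refunded) * base_fee); // 150_000_000
}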
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let DeployContractsTx { - tx, - bytecode_hash, - address, - } = account.get_deploy_tx(&counter, None, TxType::L2); - let maximal_fee = tx.gas_limit() * get_batch_base_fee(&vm.vm.batch_env); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - let batch_result = vm.vm.execute(VmExecutionMode::Batch); - if batch_result.result.is_failed() { - panic!("Batch execution failed: {:?}", batch_result.result); - } - assert!( - !batch_result.result.is_failed(), - "Transaction wasn't successful" - ); - - vm.vm.get_current_execution_state(); - - // Both deployment and ordinary nonce should be incremented by one. - let account_nonce_key = get_nonce_key(&account.address); - let expected_nonce = TX_NONCE_INCREMENT + DEPLOYMENT_NONCE_INCREMENT; - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&bytecode_hash); - - // The contract should be deployed successfully. - let account_code_key = get_code_key(&address); - - let expected_slots = vec![ - (u256_to_h256(expected_nonce), account_nonce_key), - (u256_to_h256(U256::from(1u32)), known_codes_key), - (bytecode_hash, account_code_key), - ]; - - verify_required_storage(&vm.vm.state, expected_slots); - - let expected_fee = maximal_fee - - U256::from(result.refunds.gas_refunded) - * U256::from(get_batch_base_fee(&vm.vm.batch_env)); - let operator_balance = get_balance( - AccountTreeId::new(L2_BASE_TOKEN_ADDRESS), - &vm.fee_account, - vm.vm.state.storage.storage.get_ptr(), - ); - - assert_eq!( - operator_balance, expected_fee, - "Operator did not receive his fee" - ); +fn default_aa_interaction() { + test_default_aa_interaction::>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/evm_emulator.rs b/core/lib/multivm/src/versions/vm_latest/tests/evm_emulator.rs new file mode 100644 index 00000000000..4d6e77aed51 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_latest/tests/evm_emulator.rs @@ -0,0 +1,507 @@ +use std::collections::HashMap; + +use ethabi::Token; +use test_casing::{test_casing, Product}; +use zksync_contracts::{load_contract, read_bytecode, SystemContractCode}; +use zksync_system_constants::{ + CONTRACT_DEPLOYER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, L2_BASE_TOKEN_ADDRESS, +}; +use zksync_test_account::TxType; +use zksync_types::{ + get_code_key, get_known_code_key, + utils::{key_for_eth_balance, storage_key_for_eth_balance}, + AccountTreeId, Address, Execute, StorageKey, H256, U256, +}; +use zksync_utils::{ + be_words_to_bytes, + bytecode::{hash_bytecode, hash_evm_bytecode}, + bytes_to_be_words, h256_to_u256, +}; + +use super::TestedLatestVm; +use crate::{ + interface::{ + storage::InMemoryStorage, TxExecutionMode, VmExecutionResultAndLogs, VmInterfaceExt, + }, + versions::testonly::{default_system_env, VmTester, VmTesterBuilder}, +}; + +const MOCK_DEPLOYER_PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/mock-evm/mock-evm.sol/MockContractDeployer.json"; +const MOCK_KNOWN_CODE_STORAGE_PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/mock-evm/mock-evm.sol/MockKnownCodeStorage.json"; +const MOCK_EMULATOR_PATH: &str = + 
"etc/contracts-test-data/artifacts-zk/contracts/mock-evm/mock-evm.sol/MockEvmEmulator.json"; +const RECURSIVE_CONTRACT_PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/mock-evm/mock-evm.sol/NativeRecursiveContract.json"; +const INCREMENTING_CONTRACT_PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/mock-evm/mock-evm.sol/IncrementingContract.json"; + +fn override_system_contracts(storage: &mut InMemoryStorage) { + let mock_deployer = read_bytecode(MOCK_DEPLOYER_PATH); + let mock_deployer_hash = hash_bytecode(&mock_deployer); + let mock_known_code_storage = read_bytecode(MOCK_KNOWN_CODE_STORAGE_PATH); + let mock_known_code_storage_hash = hash_bytecode(&mock_known_code_storage); + + storage.set_value(get_code_key(&CONTRACT_DEPLOYER_ADDRESS), mock_deployer_hash); + storage.set_value( + get_known_code_key(&mock_deployer_hash), + H256::from_low_u64_be(1), + ); + storage.set_value( + get_code_key(&KNOWN_CODES_STORAGE_ADDRESS), + mock_known_code_storage_hash, + ); + storage.set_value( + get_known_code_key(&mock_known_code_storage_hash), + H256::from_low_u64_be(1), + ); + storage.store_factory_dep(mock_deployer_hash, mock_deployer); + storage.store_factory_dep(mock_known_code_storage_hash, mock_known_code_storage); +} + +#[derive(Debug)] +struct EvmTestBuilder { + deploy_emulator: bool, + storage: InMemoryStorage, + evm_contract_addresses: Vec
, +} + +impl EvmTestBuilder { + fn new(deploy_emulator: bool, evm_contract_address: Address) -> Self { + Self { + deploy_emulator, + storage: InMemoryStorage::with_system_contracts(hash_bytecode), + evm_contract_addresses: vec![evm_contract_address], + } + } + + fn with_mock_deployer(mut self) -> Self { + override_system_contracts(&mut self.storage); + self + } + + fn with_evm_address(mut self, address: Address) -> Self { + self.evm_contract_addresses.push(address); + self + } + + fn build(self) -> VmTester { + let mock_emulator = read_bytecode(MOCK_EMULATOR_PATH); + let mut storage = self.storage; + let mut system_env = default_system_env(); + if self.deploy_emulator { + let evm_bytecode: Vec<_> = (0..32).collect(); + let evm_bytecode_hash = hash_evm_bytecode(&evm_bytecode); + storage.set_value( + get_known_code_key(&evm_bytecode_hash), + H256::from_low_u64_be(1), + ); + for evm_address in self.evm_contract_addresses { + storage.set_value(get_code_key(&evm_address), evm_bytecode_hash); + } + + system_env.base_system_smart_contracts.evm_emulator = Some(SystemContractCode { + hash: hash_bytecode(&mock_emulator), + code: bytes_to_be_words(mock_emulator), + }); + } else { + let emulator_hash = hash_bytecode(&mock_emulator); + storage.set_value(get_known_code_key(&emulator_hash), H256::from_low_u64_be(1)); + storage.store_factory_dep(emulator_hash, mock_emulator); + + for evm_address in self.evm_contract_addresses { + storage.set_value(get_code_key(&evm_address), emulator_hash); + // Set `isUserSpace` in the emulator storage to `true`, so that it skips emulator-specific checks + storage.set_value( + StorageKey::new(AccountTreeId::new(evm_address), H256::zero()), + H256::from_low_u64_be(1), + ); + } + } + + VmTesterBuilder::new() + .with_system_env(system_env) + .with_storage(storage) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .build() + } +} + +#[test] +fn tracing_evm_contract_deployment() { + let mut storage = InMemoryStorage::with_system_contracts(hash_bytecode); + override_system_contracts(&mut storage); + + let mut system_env = default_system_env(); + // The EVM emulator will not be accessed, so we set it to a dummy value. + system_env.base_system_smart_contracts.evm_emulator = + Some(system_env.base_system_smart_contracts.default_aa.clone()); + let mut vm = VmTesterBuilder::new() + .with_system_env(system_env) + .with_storage(storage) + .with_execution_mode(TxExecutionMode::VerifyExecute) + .with_rich_accounts(1) + .build::(); + let account = &mut vm.rich_accounts[0]; + + let args = [Token::Bytes((0..32).collect())]; + let evm_bytecode = ethabi::encode(&args); + let expected_bytecode_hash = hash_evm_bytecode(&evm_bytecode); + let execute = Execute::for_deploy(expected_bytecode_hash, vec![0; 32], &args); + let deploy_tx = account.get_l2_tx_for_execute(execute, None); + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(deploy_tx, true); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); + + // Check that the surrogate EVM bytecode was added to the decommitter. 
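// (Aside on the check below: `hash_evm_bytecode` presumably produces a versioned
// hash in a format distinct from the native `hash_bytecode` one, which is how the
// decommitter can store and look up the surrogate EVM bytecode under its own hash.)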
+ let known_bytecodes = vm.vm.state.decommittment_processor.known_bytecodes.inner(); + let known_evm_bytecode = + be_words_to_bytes(&known_bytecodes[&h256_to_u256(expected_bytecode_hash)]); + assert_eq!(known_evm_bytecode, evm_bytecode); + + let new_known_factory_deps = vm_result.new_known_factory_deps.unwrap(); + assert_eq!(new_known_factory_deps.len(), 2); // the deployed EraVM contract + EVM contract + assert_eq!( + new_known_factory_deps[&expected_bytecode_hash], + evm_bytecode + ); +} + +#[test] +fn mock_emulator_basics() { + let called_address = Address::repeat_byte(0x23); + let mut vm = EvmTestBuilder::new(true, called_address).build(); + let account = &mut vm.rich_accounts[0]; + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(called_address), + calldata: vec![], + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(tx, true); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); +} + +const RECIPIENT_ADDRESS: Address = Address::repeat_byte(0x12); + +/// `deploy_emulator = false` here and below tests the mock emulator as an ordinary contract (i.e., sanity-checks its logic). +#[test_casing(2, [false, true])] +#[test] +fn mock_emulator_with_payment(deploy_emulator: bool) { + let mock_emulator_abi = load_contract(MOCK_EMULATOR_PATH); + let mut vm = EvmTestBuilder::new(deploy_emulator, RECIPIENT_ADDRESS).build(); + + let mut current_balance = U256::zero(); + for i in 1_u64..=5 { + let transferred_value = (1_000_000_000 * i).into(); + let vm_result = test_payment( + &mut vm, + &mock_emulator_abi, + &mut current_balance, + transferred_value, + ); + + let balance_storage_logs = vm_result.logs.storage_logs.iter().filter_map(|log| { + (*log.log.key.address() == L2_BASE_TOKEN_ADDRESS) + .then_some((*log.log.key.key(), h256_to_u256(log.log.value))) + }); + let balances: HashMap<_, _> = balance_storage_logs.collect(); + assert_eq!( + balances[&key_for_eth_balance(&RECIPIENT_ADDRESS)], + current_balance + ); + } +} + +fn test_payment( + vm: &mut VmTester, + mock_emulator_abi: ðabi::Contract, + balance: &mut U256, + transferred_value: U256, +) -> VmExecutionResultAndLogs { + *balance += transferred_value; + let test_payment_fn = mock_emulator_abi.function("testPayment").unwrap(); + let account = &mut vm.rich_accounts[0]; + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(RECIPIENT_ADDRESS), + calldata: test_payment_fn + .encode_input(&[Token::Uint(transferred_value), Token::Uint(*balance)]) + .unwrap(), + value: transferred_value, + factory_deps: vec![], + }, + None, + ); + + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(tx, true); + assert!(!vm_result.result.is_failed(), "{vm_result:?}"); + vm_result +} + +#[test_casing(4, Product(([false, true], [false, true])))] +#[test] +fn mock_emulator_with_recursion(deploy_emulator: bool, is_external: bool) { + let mock_emulator_abi = load_contract(MOCK_EMULATOR_PATH); + let recipient_address = Address::repeat_byte(0x12); + let mut vm = EvmTestBuilder::new(deploy_emulator, recipient_address).build(); + let account = &mut vm.rich_accounts[0]; + + let test_recursion_fn = mock_emulator_abi + .function(if is_external { + "testExternalRecursion" + } else { + "testRecursion" + }) + .unwrap(); + let mut expected_value = U256::one(); + let depth = 50_u32; + for i in 2..=depth { + expected_value *= i; + } + + let factory_deps = if is_external { + 
vec![read_bytecode(RECURSIVE_CONTRACT_PATH)] + } else { + vec![] + }; + let tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(recipient_address), + calldata: test_recursion_fn + .encode_input(&[Token::Uint(depth.into()), Token::Uint(expected_value)]) + .unwrap(), + value: 0.into(), + factory_deps, + }, + None, + ); + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(tx, true); + assert!(!vm_result.result.is_failed(), "{vm_result:?}"); +} + +#[test] +fn calling_to_mock_emulator_from_native_contract() { + let recipient_address = Address::repeat_byte(0x12); + let mut vm = EvmTestBuilder::new(true, recipient_address).build(); + let account = &mut vm.rich_accounts[0]; + + // Deploy a native contract. + let native_contract = read_bytecode(RECURSIVE_CONTRACT_PATH); + let native_contract_abi = load_contract(RECURSIVE_CONTRACT_PATH); + let deploy_tx = account.get_deploy_tx( + &native_contract, + Some(&[Token::Address(recipient_address)]), + TxType::L2, + ); + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(deploy_tx.tx, true); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); + + // Call from the native contract to the EVM emulator. + let test_fn = native_contract_abi.function("recurse").unwrap(); + let test_tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(deploy_tx.address), + calldata: test_fn.encode_input(&[Token::Uint(50.into())]).unwrap(), + value: Default::default(), + factory_deps: vec![], + }, + None, + ); + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(test_tx, true); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); +} + +#[test] +fn mock_emulator_with_deployment() { + let contract_address = Address::repeat_byte(0xaa); + let mut vm = EvmTestBuilder::new(true, contract_address) + .with_mock_deployer() + .build(); + let account = &mut vm.rich_accounts[0]; + + let mock_emulator_abi = load_contract(MOCK_EMULATOR_PATH); + let new_evm_bytecode = vec![0xfe; 96]; + let new_evm_bytecode_hash = hash_evm_bytecode(&new_evm_bytecode); + + let test_fn = mock_emulator_abi.function("testDeploymentAndCall").unwrap(); + let test_tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(contract_address), + calldata: test_fn + .encode_input(&[ + Token::FixedBytes(new_evm_bytecode_hash.0.into()), + Token::Bytes(new_evm_bytecode.clone()), + ]) + .unwrap(), + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(test_tx, true); + assert!(!vm_result.result.is_failed(), "{vm_result:?}"); + + let factory_deps = vm_result.new_known_factory_deps.unwrap(); + assert_eq!( + factory_deps, + HashMap::from([(new_evm_bytecode_hash, new_evm_bytecode)]) + ); +} + +#[test] +fn mock_emulator_with_delegate_call() { + let evm_contract_address = Address::repeat_byte(0xaa); + let other_evm_contract_address = Address::repeat_byte(0xbb); + let mut builder = EvmTestBuilder::new(true, evm_contract_address); + builder.storage.set_value( + storage_key_for_eth_balance(&evm_contract_address), + H256::from_low_u64_be(1_000_000), + ); + builder.storage.set_value( + storage_key_for_eth_balance(&other_evm_contract_address), + H256::from_low_u64_be(2_000_000), + ); + let mut vm = builder.with_evm_address(other_evm_contract_address).build(); + let account = &mut vm.rich_accounts[0]; + + // Deploy a native contract. 
+ let native_contract = read_bytecode(INCREMENTING_CONTRACT_PATH); + let native_contract_abi = load_contract(INCREMENTING_CONTRACT_PATH); + let deploy_tx = account.get_deploy_tx(&native_contract, None, TxType::L2); + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(deploy_tx.tx, true); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); + + let test_fn = native_contract_abi.function("testDelegateCall").unwrap(); + // Delegate to the native contract from EVM. + test_delegate_call(&mut vm, test_fn, evm_contract_address, deploy_tx.address); + // Delegate to EVM from the native contract. + test_delegate_call(&mut vm, test_fn, deploy_tx.address, evm_contract_address); + // Delegate to EVM from EVM. + test_delegate_call( + &mut vm, + test_fn, + evm_contract_address, + other_evm_contract_address, + ); +} + +fn test_delegate_call( + vm: &mut VmTester, + test_fn: ðabi::Function, + from: Address, + to: Address, +) { + let account = &mut vm.rich_accounts[0]; + let test_tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(from), + calldata: test_fn.encode_input(&[Token::Address(to)]).unwrap(), + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(test_tx, true); + assert!(!vm_result.result.is_failed(), "{vm_result:?}"); +} + +#[test] +fn mock_emulator_with_static_call() { + let evm_contract_address = Address::repeat_byte(0xaa); + let other_evm_contract_address = Address::repeat_byte(0xbb); + let mut builder = EvmTestBuilder::new(true, evm_contract_address); + builder.storage.set_value( + storage_key_for_eth_balance(&evm_contract_address), + H256::from_low_u64_be(1_000_000), + ); + builder.storage.set_value( + storage_key_for_eth_balance(&other_evm_contract_address), + H256::from_low_u64_be(2_000_000), + ); + // Set differing read values for tested contracts. The slot index is defined in the contract. + let value_slot = H256::from_low_u64_be(0x123); + builder.storage.set_value( + StorageKey::new(AccountTreeId::new(evm_contract_address), value_slot), + H256::from_low_u64_be(100), + ); + builder.storage.set_value( + StorageKey::new(AccountTreeId::new(other_evm_contract_address), value_slot), + H256::from_low_u64_be(200), + ); + let mut vm = builder.with_evm_address(other_evm_contract_address).build(); + let account = &mut vm.rich_accounts[0]; + + // Deploy a native contract. + let native_contract = read_bytecode(INCREMENTING_CONTRACT_PATH); + let native_contract_abi = load_contract(INCREMENTING_CONTRACT_PATH); + let deploy_tx = account.get_deploy_tx(&native_contract, None, TxType::L2); + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(deploy_tx.tx, true); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); + + let test_fn = native_contract_abi.function("testStaticCall").unwrap(); + // Call to the native contract from EVM. + test_static_call(&mut vm, test_fn, evm_contract_address, deploy_tx.address, 0); + // Call to EVM from the native contract. + test_static_call( + &mut vm, + test_fn, + deploy_tx.address, + evm_contract_address, + 100, + ); + // Call to EVM from EVM. 
+ test_static_call( + &mut vm, + test_fn, + evm_contract_address, + other_evm_contract_address, + 200, + ); +} + +fn test_static_call( + vm: &mut VmTester<TestedLatestVm>, + test_fn: &ethabi::Function, + from: Address, + to: Address, + expected_value: u64, +) { + let account = &mut vm.rich_accounts[0]; + let test_tx = account.get_l2_tx_for_execute( + Execute { + contract_address: Some(from), + calldata: test_fn + .encode_input(&[Token::Address(to), Token::Uint(expected_value.into())]) + .unwrap(), + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + let (_, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(test_tx, true); + assert!(!vm_result.result.is_failed(), "{vm_result:?}"); +} diff --git a/core/lib/multivm/src/versions/vm_latest/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_latest/tests/gas_limit.rs index cc9aac5bb91..5aa7ab9e9c7 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/gas_limit.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/gas_limit.rs @@ -1,46 +1,9 @@ -use zksync_test_account::Account; -use zksync_types::{fee::Fee, Execute}; - use crate::{ - interface::{TxExecutionMode, VmInterface}, - vm_latest::{ - constants::{BOOTLOADER_HEAP_PAGE, TX_DESCRIPTION_OFFSET, TX_GAS_LIMIT_OFFSET}, - tests::tester::VmTesterBuilder, - HistoryDisabled, - }, + versions::testonly::gas_limit::test_tx_gas_limit_offset, + vm_latest::{HistoryEnabled, Vm}, }; -/// Checks that `TX_GAS_LIMIT_OFFSET` constant is correct. #[test] -fn test_tx_gas_limit_offset() { - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let gas_limit = 9999.into(); - let tx = vm.rich_accounts[0].get_l2_tx_for_execute( - Execute { - contract_address: Some(Default::default()), - ..Default::default() - }, - Some(Fee { - gas_limit, - ..Account::default_fee() - }), - ); - - vm.vm.push_transaction(tx); - - let gas_limit_from_memory = vm - .vm - .state - .memory - .read_slot( - BOOTLOADER_HEAP_PAGE as usize, - TX_DESCRIPTION_OFFSET + TX_GAS_LIMIT_OFFSET, - ) - .value; - assert_eq!(gas_limit_from_memory, gas_limit); +fn tx_gas_limit_offset() { + test_tx_gas_limit_offset::<Vm<_, HistoryEnabled>>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs index ef19717a627..7f39915f2b6 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs @@ -1,247 +1,22 @@ -use std::{ - collections::{HashMap, HashSet}, - iter, - str::FromStr, -}; - -use assert_matches::assert_matches; -use ethabi::Token; -use itertools::Itertools; -use zk_evm_1_3_1::zkevm_opcode_defs::decoding::{EncodingModeProduction, VmEncodingMode}; -use zk_evm_1_5_0::{ - abstractions::DecommittmentProcessor, - aux_structures::{DecommittmentQuery, MemoryPage, Timestamp}, - zkevm_opcode_defs::{VersionedHashHeader, VersionedHashNormalizedPreimage}, -}; -use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; -use zksync_test_account::Account; -use zksync_types::{Address, Execute, U256}; -use zksync_utils::{bytecode::hash_bytecode, h256_to_u256}; -use zksync_vm_interface::VmExecutionResultAndLogs; - use crate::{ - interface::{ - storage::WriteStorage, ExecutionResult, TxExecutionMode, VmExecutionMode, VmInterface, - VmInterfaceExt, + versions::testonly::get_used_contracts::{ test_get_used_contracts, test_get_used_contracts_with_far_call, +
test_get_used_contracts_with_out_of_gas_far_call, }, - vm_latest::{ - tests::{ - tester::{TxType, VmTester, VmTesterBuilder}, - utils::{read_proxy_counter_contract, read_test_contract, BASE_SYSTEM_CONTRACTS}, - }, - HistoryDisabled, Vm, - }, - HistoryMode, + vm_latest::{HistoryEnabled, Vm}, }; #[test] -fn test_get_used_contracts() { - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - assert!(known_bytecodes_without_aa_code(&vm.vm).is_empty()); - - // create and push and execute some not-empty factory deps transaction with success status - // to check that `get_used_contracts()` updates - let contract_code = read_test_contract(); - let mut account = Account::random(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 0 }); - vm.vm.push_transaction(tx.tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - assert!(vm - .vm - .get_used_contracts() - .contains(&h256_to_u256(tx.bytecode_hash))); - - // Note: `Default_AA` will be in the list of used contracts if L2 tx is used - assert_eq!( - vm.vm - .get_used_contracts() - .into_iter() - .collect::>(), - known_bytecodes_without_aa_code(&vm.vm) - .keys() - .cloned() - .collect::>() - ); - - // create push and execute some non-empty factory deps transaction that fails - // (`known_bytecodes` will be updated but we expect `get_used_contracts()` to not be updated) - - let calldata = [1, 2, 3]; - let big_calldata: Vec = calldata - .iter() - .cycle() - .take(calldata.len() * 1024) - .cloned() - .collect(); - let account2 = Account::random(); - let tx2 = account2.get_l1_tx( - Execute { - contract_address: Some(CONTRACT_DEPLOYER_ADDRESS), - calldata: big_calldata, - value: Default::default(), - factory_deps: vec![vec![1; 32]], - }, - 1, - ); - - vm.vm.push_transaction(tx2.clone()); - - let res2 = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(res2.result.is_failed()); - - for factory_dep in tx2.execute.factory_deps { - let hash = hash_bytecode(&factory_dep); - let hash_to_u256 = h256_to_u256(hash); - assert!(known_bytecodes_without_aa_code(&vm.vm) - .keys() - .contains(&hash_to_u256)); - assert!(!vm.vm.get_used_contracts().contains(&hash_to_u256)); - } -} - -#[test] -fn test_contract_is_used_right_after_prepare_to_decommit() { - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - assert!(vm.vm.get_used_contracts().is_empty()); - - let bytecode_hash = - U256::from_str("0x100067ff3124f394104ab03481f7923f0bc4029a2aa9d41cc1d848c81257185") - .unwrap(); - vm.vm - .state - .decommittment_processor - .populate(vec![(bytecode_hash, vec![])], Timestamp(0)); - - let header = hex::decode("0100067f").unwrap(); - let normalized_preimage = - hex::decode("f3124f394104ab03481f7923f0bc4029a2aa9d41cc1d848c81257185").unwrap(); - vm.vm - .state - .decommittment_processor - .prepare_to_decommit( - 0, - DecommittmentQuery { - header: VersionedHashHeader(header.try_into().unwrap()), - normalized_preimage: VersionedHashNormalizedPreimage( - normalized_preimage.try_into().unwrap(), - ), - timestamp: Timestamp(0), - memory_page: MemoryPage(0), - decommitted_length: 0, - is_fresh: false, - }, - ) - .unwrap(); - - assert_eq!(vm.vm.get_used_contracts(), vec![bytecode_hash]); -} - -fn known_bytecodes_without_aa_code( - vm: &Vm, -) -> HashMap> { - let mut known_bytecodes_without_aa_code = vm - 
.state - .decommittment_processor - .known_bytecodes - .inner() - .clone(); - known_bytecodes_without_aa_code - .remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash)) - .unwrap(); - known_bytecodes_without_aa_code -} - -/// Counter test contract bytecode inflated by appending lots of `NOP` opcodes at the end. This leads to non-trivial -/// decommitment cost (>10,000 gas). -fn inflated_counter_bytecode() -> Vec { - let mut counter_bytecode = read_test_contract(); - counter_bytecode.extend( - iter::repeat(EncodingModeProduction::nop_encoding().to_be_bytes()) - .take(10_000) - .flatten(), - ); - counter_bytecode -} - -fn execute_proxy_counter(gas: u32) -> (VmTester, U256, VmExecutionResultAndLogs) { - let counter_bytecode = inflated_counter_bytecode(); - let counter_bytecode_hash = h256_to_u256(hash_bytecode(&counter_bytecode)); - let counter_address = Address::repeat_byte(0x23); - - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_custom_contracts(vec![(counter_bytecode, counter_address, false)]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let (proxy_counter_bytecode, proxy_counter_abi) = read_proxy_counter_contract(); - let account = &mut vm.rich_accounts[0]; - let deploy_tx = account.get_deploy_tx( - &proxy_counter_bytecode, - Some(&[Token::Address(counter_address)]), - TxType::L2, - ); - let (compression_result, exec_result) = vm - .vm - .execute_transaction_with_bytecode_compression(deploy_tx.tx, true); - compression_result.unwrap(); - assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); - - let decommitted_hashes = vm.vm.get_used_contracts(); - assert!( - !decommitted_hashes.contains(&counter_bytecode_hash), - "{decommitted_hashes:?}" - ); - - let increment = proxy_counter_abi.function("increment").unwrap(); - let increment_tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(deploy_tx.address), - calldata: increment - .encode_input(&[Token::Uint(1.into()), Token::Uint(gas.into())]) - .unwrap(), - value: 0.into(), - factory_deps: vec![], - }, - None, - ); - let (compression_result, exec_result) = vm - .vm - .execute_transaction_with_bytecode_compression(increment_tx, true); - compression_result.unwrap(); - (vm, counter_bytecode_hash, exec_result) +fn get_used_contracts() { + test_get_used_contracts::>(); } #[test] fn get_used_contracts_with_far_call() { - let (vm, counter_bytecode_hash, exec_result) = execute_proxy_counter(100_000); - assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); - let decommitted_hashes = vm.vm.get_used_contracts(); - assert!( - decommitted_hashes.contains(&counter_bytecode_hash), - "{decommitted_hashes:?}" - ); + test_get_used_contracts_with_far_call::>(); } #[test] fn get_used_contracts_with_out_of_gas_far_call() { - let (vm, counter_bytecode_hash, exec_result) = execute_proxy_counter(10_000); - assert_matches!(exec_result.result, ExecutionResult::Revert { .. 
}); - let decommitted_hashes = vm.vm.get_used_contracts(); - assert!( - decommitted_hashes.contains(&counter_bytecode_hash), - "{decommitted_hashes:?}" - ); + test_get_used_contracts_with_out_of_gas_far_call::<Vm<_, HistoryEnabled>>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/is_write_initial.rs b/core/lib/multivm/src/versions/vm_latest/tests/is_write_initial.rs index 8206cfa9be6..193fc586079 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/is_write_initial.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/is_write_initial.rs @@ -1,49 +1,9 @@ -use zksync_types::get_nonce_key; - use crate::{ - interface::{ - storage::ReadStorage, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt, - }, - vm_latest::{ - tests::{ - tester::{Account, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - HistoryDisabled, - }, + versions::testonly::is_write_initial::test_is_write_initial_behaviour, + vm_latest::{HistoryEnabled, Vm}, }; #[test] -fn test_is_write_initial_behaviour() { - // In this test, we check result of `is_write_initial` at different stages. - // The main idea is to check that `is_write_initial` storage uses the correct cache for initial_writes and doesn't - // messed up it with the repeated writes during the one batch execution. - - let mut account = Account::random();  - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let nonce_key = get_nonce_key(&account.address); - // Check that the next write to the nonce key will be initial. - assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); - - let contract_code = read_test_contract(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L2).tx; - - vm.vm.push_transaction(tx); - vm.vm.execute(VmExecutionMode::OneTx); - - // Check that `is_write_initial` still returns true for the nonce key.
- assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); +fn is_write_initial_behaviour() { + test_is_write_initial_behaviour::>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/l1_messenger.rs b/core/lib/multivm/src/versions/vm_latest/tests/l1_messenger.rs index f8e4934050b..fcb718c7349 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/l1_messenger.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/l1_messenger.rs @@ -1,158 +1,156 @@ -use ethabi::Token; -use zksync_contracts::l1_messenger_contract; -use zksync_types::{web3::keccak256, Execute, L1_MESSENGER_ADDRESS, U256}; -use zksync_utils::{address_to_h256, u256_to_h256}; -use zksync_vm_interface::VmInterfaceExt; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_latest::{ - constants::ZK_SYNC_BYTES_PER_BLOB, - tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - types::internals::{ - pubdata::{PubdataBuilder, RollupPubdataBuilder}, - PubdataInput, - }, - HistoryEnabled, - }, -}; - -pub(crate) const L2_DA_VALIDATOR_OUTPUT_HASH_KEY: usize = 5; -pub(crate) const USED_L2_DA_VALIDATOR_ADDRESS_KEY: usize = 6; - -pub(crate) fn encoded_uncompressed_state_diffs(input: &PubdataInput) -> Vec { - let mut result = vec![]; - for state_diff in input.state_diffs.iter() { - result.extend(state_diff.encode_padded()); - } - result -} - -pub fn compose_header_for_l1_commit_rollup(input: PubdataInput) -> Vec { - // The preimage under the hash `l2DAValidatorOutputHash` is expected to be in the following format: - // - First 32 bytes are the hash of the uncompressed state diff. - // - Then, there is a 32-byte hash of the full pubdata. - // - Then, there is the 1-byte number of blobs published. - // - Then, there are linear hashes of the published blobs, 32 bytes each. - - let mut full_header = vec![]; - - let uncompressed_state_diffs = encoded_uncompressed_state_diffs(&input); - let uncompressed_state_diffs_hash = keccak256(&uncompressed_state_diffs); - full_header.extend(uncompressed_state_diffs_hash); - - let mut full_pubdata = RollupPubdataBuilder::new().build_pubdata(input, false); - let full_pubdata_hash = keccak256(&full_pubdata); - full_header.extend(full_pubdata_hash); - - // Now, we need to calculate the linear hashes of the blobs. - // Firstly, let's pad the pubdata to the size of the blob. - if full_pubdata.len() % ZK_SYNC_BYTES_PER_BLOB != 0 { - let padding = - vec![0u8; ZK_SYNC_BYTES_PER_BLOB - full_pubdata.len() % ZK_SYNC_BYTES_PER_BLOB]; - full_pubdata.extend(padding); - } - full_header.push((full_pubdata.len() / ZK_SYNC_BYTES_PER_BLOB) as u8); - - full_pubdata - .chunks(ZK_SYNC_BYTES_PER_BLOB) - .for_each(|chunk| { - full_header.extend(keccak256(chunk)); - }); - - full_header -} - -#[test] -fn test_publish_and_clear_state() { - // In this test, we check whether the L2 DA output hash is as expected. - // We will publish 320kb worth of pubdata. - // It should produce 3 blobs. - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - - // Firstly, deploy tx. It should publish the bytecode of the "test contract" - let counter = read_test_contract(); - - let DeployContractsTx { tx, .. } = account.get_deploy_tx(&counter, None, TxType::L2); - // We do not use compression here, to have the bytecode published in full. 
- vm.vm.push_transaction_with_compression(tx, false); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - // Then, we call the l1 messenger to also send an L2->L1 message. - let l1_messenger_contract = l1_messenger_contract(); - let encoded_data = l1_messenger_contract - .function("sendToL1") - .unwrap() - .encode_input(&[Token::Bytes(vec![])]) - .unwrap(); - - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(L1_MESSENGER_ADDRESS), - calldata: encoded_data, - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - let batch_result = vm.vm.execute(VmExecutionMode::Batch); - if batch_result.result.is_failed() { - panic!("Batch execution failed: {:?}", batch_result.result); - } - assert!( - !batch_result.result.is_failed(), - "Transaction wasn't successful" - ); - let pubdata_input = vm.vm.bootloader_state.get_pubdata_information().clone(); - - // Just to double check that the test makes sense. - assert!(!pubdata_input.user_logs.is_empty()); - assert!(!pubdata_input.l2_to_l1_messages.is_empty()); - assert!(!pubdata_input.published_bytecodes.is_empty()); - assert!(!pubdata_input.state_diffs.is_empty()); - - let expected_header: Vec = compose_header_for_l1_commit_rollup(pubdata_input); - - let l2_da_validator_output_hash = batch_result - .logs - .system_l2_to_l1_logs - .iter() - .find(|log| log.0.key == u256_to_h256(L2_DA_VALIDATOR_OUTPUT_HASH_KEY.into())) - .unwrap() - .0 - .value; - - assert_eq!( - l2_da_validator_output_hash, - keccak256(&expected_header).into() - ); - - let l2_used_da_validator_address = batch_result - .logs - .system_l2_to_l1_logs - .iter() - .find(|log| log.0.key == u256_to_h256(USED_L2_DA_VALIDATOR_ADDRESS_KEY.into())) - .unwrap() - .0 - .value; - - assert_eq!( - l2_used_da_validator_address, - address_to_h256(&vm.vm.system_env.pubdata_params.l2_da_validator_address) - ); -} +// // TODO: move to shared tests +// +// use ethabi::Token; +// use zksync_contracts::l1_messenger_contract; +// use zksync_types::{web3::keccak256, Execute, L1_MESSENGER_ADDRESS, U256}; +// use zksync_utils::{address_to_h256, u256_to_h256}; +// +// use crate::{ +// interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt, pubdata::{PubdataInput, PubdataBuilder}}, +// vm_latest::{ +// constants::ZK_SYNC_BYTES_PER_BLOB, +// tests::{ +// tester::{DeployContractsTx, TxType, VmTesterBuilder}, +// utils::read_test_contract, +// }, +// HistoryEnabled, +// }, +// pubdata_builders::RollupPubdataBuilder +// }; +// +// pub(crate) const L2_DA_VALIDATOR_OUTPUT_HASH_KEY: usize = 5; +// pub(crate) const USED_L2_DA_VALIDATOR_ADDRESS_KEY: usize = 6; +// +// pub(crate) fn encoded_uncompressed_state_diffs(input: &PubdataInput) -> Vec { +// let mut result = vec![]; +// for state_diff in input.state_diffs.iter() { +// result.extend(state_diff.encode_padded()); +// } +// result +// } +// +// pub fn compose_header_for_l1_commit_rollup(input: PubdataInput) -> Vec { +// // The preimage under the hash `l2DAValidatorOutputHash` is expected to be in the following format: +// // - First 32 bytes are the hash of the uncompressed state diff. +// // - Then, there is a 32-byte hash of the full pubdata. +// // - Then, there is the 1-byte number of blobs published. +// // - Then, there are linear hashes of the published blobs, 32 bytes each. 
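Aside: the header layout described in the commented-out lines above is easy to sanity-check with arithmetic. A minimal sketch; the blob size here (126_976 bytes, i.e. 4096 field elements of 31 bytes) is only an assumed value for `ZK_SYNC_BYTES_PER_BLOB` from the surrounding code. The commented-out implementation continues below.

    const BYTES_PER_BLOB: usize = 126_976; // assumed value, see lead-in

    fn expected_header_len(pubdata_len: usize) -> usize {
        let blobs = pubdata_len.div_ceil(BYTES_PER_BLOB); // pad, then split into blobs
        32 // hash of the uncompressed state diffs
            + 32 // hash of the full pubdata
            + 1 // number of blobs, as a single byte
            + 32 * blobs // one linear hash per published blob
    }

    fn main() {
        // ~320 KB of pubdata spans 3 blobs, matching the test's expectation.
        assert_eq!(expected_header_len(320 * 1024), 32 + 32 + 1 + 3 * 32);
    }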
+// +// let mut full_header = vec![]; +// +// let uncompressed_state_diffs = encoded_uncompressed_state_diffs(&input); +// let uncompressed_state_diffs_hash = keccak256(&uncompressed_state_diffs); +// full_header.extend(uncompressed_state_diffs_hash); +// +// let mut full_pubdata = RollupPubdataBuilder::new().build_pubdata(input, false); +// let full_pubdata_hash = keccak256(&full_pubdata); +// full_header.extend(full_pubdata_hash); +// +// // Now, we need to calculate the linear hashes of the blobs. +// // Firstly, let's pad the pubdata to the size of the blob. +// if full_pubdata.len() % ZK_SYNC_BYTES_PER_BLOB != 0 { +// let padding = +// vec![0u8; ZK_SYNC_BYTES_PER_BLOB - full_pubdata.len() % ZK_SYNC_BYTES_PER_BLOB]; +// full_pubdata.extend(padding); +// } +// full_header.push((full_pubdata.len() / ZK_SYNC_BYTES_PER_BLOB) as u8); +// +// full_pubdata +// .chunks(ZK_SYNC_BYTES_PER_BLOB) +// .for_each(|chunk| { +// full_header.extend(keccak256(chunk)); +// }); +// +// full_header +// } +// +// #[test] +// fn test_publish_and_clear_state() { +// // In this test, we check whether the L2 DA output hash is as expected. +// // We will publish 320kb worth of pubdata. +// // It should produce 3 blobs. +// +// let mut vm = VmTesterBuilder::new(HistoryEnabled) +// .with_empty_in_memory_storage() +// .with_execution_mode(TxExecutionMode::VerifyExecute) +// .with_random_rich_accounts(1) +// .build(); +// +// let account = &mut vm.rich_accounts[0]; +// +// // Firstly, deploy tx. It should publish the bytecode of the "test contract" +// let counter = read_test_contract(); +// +// let DeployContractsTx { tx, .. } = account.get_deploy_tx(&counter, None, TxType::L2); +// // We do not use compression here, to have the bytecode published in full. +// vm.vm.push_transaction_with_compression(tx, false); +// let result = vm.vm.execute(VmExecutionMode::OneTx); +// assert!(!result.result.is_failed(), "Transaction wasn't successful"); +// +// // Then, we call the l1 messenger to also send an L2->L1 message. +// let l1_messenger_contract = l1_messenger_contract(); +// let encoded_data = l1_messenger_contract +// .function("sendToL1") +// .unwrap() +// .encode_input(&[Token::Bytes(vec![])]) +// .unwrap(); +// +// let tx = account.get_l2_tx_for_execute( +// Execute { +// contract_address: Some(L1_MESSENGER_ADDRESS), +// calldata: encoded_data, +// value: U256::zero(), +// factory_deps: vec![], +// }, +// None, +// ); +// vm.vm.push_transaction(tx); +// let result = vm.vm.execute(VmExecutionMode::OneTx); +// assert!(!result.result.is_failed(), "Transaction wasn't successful"); +// +// let batch_result = vm.vm.execute(VmExecutionMode::Batch); +// if batch_result.result.is_failed() { +// panic!("Batch execution failed: {:?}", batch_result.result); +// } +// assert!( +// !batch_result.result.is_failed(), +// "Transaction wasn't successful" +// ); +// let pubdata_input = vm.vm.bootloader_state.get_pubdata_information().clone(); +// +// // Just to double check that the test makes sense. 
+// assert!(!pubdata_input.user_logs.is_empty()); +// assert!(!pubdata_input.l2_to_l1_messages.is_empty()); +// assert!(!pubdata_input.published_bytecodes.is_empty()); +// assert!(!pubdata_input.state_diffs.is_empty()); +// +// let expected_header: Vec = compose_header_for_l1_commit_rollup(pubdata_input); +// +// let l2_da_validator_output_hash = batch_result +// .logs +// .system_l2_to_l1_logs +// .iter() +// .find(|log| log.0.key == u256_to_h256(L2_DA_VALIDATOR_OUTPUT_HASH_KEY.into())) +// .unwrap() +// .0 +// .value; +// +// assert_eq!( +// l2_da_validator_output_hash, +// keccak256(&expected_header).into() +// ); +// +// let l2_used_da_validator_address = batch_result +// .logs +// .system_l2_to_l1_logs +// .iter() +// .find(|log| log.0.key == u256_to_h256(USED_L2_DA_VALIDATOR_ADDRESS_KEY.into())) +// .unwrap() +// .0 +// .value; +// +// assert_eq!( +// l2_used_da_validator_address, +// address_to_h256(&vm.vm.system_env.pubdata_params.l2_da_validator_address) +// ); +// } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs index b424567aab0..4b7429c2829 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs @@ -1,195 +1,16 @@ -use ethabi::Token; -use zksync_contracts::l1_messenger_contract; -use zksync_system_constants::{BOOTLOADER_ADDRESS, L1_MESSENGER_ADDRESS}; -use zksync_test_account::Account; -use zksync_types::{ - get_code_key, get_known_code_key, - l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, - Execute, ExecuteTransactionCommon, K256PrivateKey, U256, -}; -use zksync_utils::u256_to_h256; - use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - utils::StorageWritesDeduplicator, - vm_latest::{ - tests::{ - tester::{TxType, VmTesterBuilder}, - utils::{read_test_contract, verify_required_storage, BASE_SYSTEM_CONTRACTS}, - }, - types::internals::TransactionData, - HistoryEnabled, + versions::testonly::l1_tx_execution::{ + test_l1_tx_execution, test_l1_tx_execution_high_gas_limit, }, + vm_latest::{HistoryEnabled, Vm}, }; #[test] -fn test_l1_tx_execution() { - // In this test, we try to execute a contract deployment from L1 - // Here instead of marking code hash via the bootloader means, we will be - // using L1->L2 communication, the same it would likely be done during the priority mode. - - // There are always at least 9 initial writes here, because we pay fees from l1: - // - `totalSupply` of ETH token - // - balance of the refund recipient - // - balance of the bootloader - // - `tx_rolling` hash - // - `gasPerPubdataByte` - // - `basePubdataSpent` - // - rolling hash of L2->L1 logs - // - transaction number in block counter - // - L2->L1 log counter in `L1Messenger` - - // TODO(PLA-537): right now we are using 5 slots instead of 9 due to 0 fee for transaction. 
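Aside: since several assertions in this removed test compare deltas against a baseline of system bookkeeping writes, the arithmetic is worth making explicit. A toy restatement (the constants mirror the comment above; the names are illustrative, not real codebase constants); the `basic_initial_writes` binding follows right after.

    const SYSTEM_INITIAL_WRITES_WITH_FEES: usize = 9; // the 9 slots enumerated above
    const SYSTEM_INITIAL_WRITES_ZERO_FEE: usize = 5; // current zero-fee path (PLA-537)

    // Subtracting the baseline leaves only the writes caused by the contract itself.
    fn contract_writes(total_initial_writes: usize) -> usize {
        total_initial_writes - SYSTEM_INITIAL_WRITES_ZERO_FEE
    }

    fn main() {
        assert_eq!(SYSTEM_INITIAL_WRITES_WITH_FEES - SYSTEM_INITIAL_WRITES_ZERO_FEE, 4);
        // A tx that touches one fresh slot reports 6 initial writes in total.
        assert_eq!(contract_writes(6), 1);
    }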
- let basic_initial_writes = 5; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let contract_code = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let deploy_tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 1 }); - let tx_data: TransactionData = deploy_tx.tx.clone().into(); - - let required_l2_to_l1_logs: Vec<_> = vec![L2ToL1Log { - shard_id: 0, - is_service: true, - tx_number_in_block: 0, - sender: BOOTLOADER_ADDRESS, - key: tx_data.tx_hash(0.into()), - value: u256_to_h256(U256::from(1u32)), - }] - .into_iter() - .map(UserL2ToL1Log) - .collect(); - - vm.vm.push_transaction(deploy_tx.tx.clone()); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&deploy_tx.bytecode_hash); - - // The contract should be deployed successfully. - let account_code_key = get_code_key(&deploy_tx.address); - - let expected_slots = vec![ - (u256_to_h256(U256::from(1u32)), known_codes_key), - (deploy_tx.bytecode_hash, account_code_key), - ]; - assert!(!res.result.is_failed()); - - verify_required_storage(&vm.vm.state, expected_slots); - - assert_eq!(res.logs.user_l2_to_l1_logs, required_l2_to_l1_logs); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - true, - None, - false, - TxType::L1 { serial_id: 0 }, - ); - vm.vm.push_transaction(tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - let storage_logs = res.logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - - // Tx panicked - assert_eq!(res.initial_storage_writes, basic_initial_writes); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - false, - None, - false, - TxType::L1 { serial_id: 0 }, - ); - vm.vm.push_transaction(tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); - let storage_logs = res.logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We changed one slot inside contract. - assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); - - // No repeated writes - let repeated_writes = res.repeated_storage_writes; - assert_eq!(res.repeated_storage_writes, 0); - - vm.vm.push_transaction(tx); - let storage_logs = vm.vm.execute(VmExecutionMode::OneTx).logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We do the same storage write, it will be deduplicated, so still 4 initial write and 0 repeated. - // But now the base pubdata spent has changed too. 
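Aside: a conceptual sketch of the counting that `StorageWritesDeduplicator` performs for this test (simplified types, not the real implementation); the assertions on these counts continue below.

    use std::collections::HashSet;

    // Writes to the same key within a batch collapse into one, which lands in
    // the "initial" bucket if the key was never written before the batch and
    // in "repeated" otherwise.
    fn count_writes(writes: &[(u32, u64)], written_before: &HashSet<u32>) -> (usize, usize) {
        let mut seen = HashSet::new();
        let (mut initial, mut repeated) = (0, 0);
        for &(key, _value) in writes {
            if !seen.insert(key) {
                continue; // deduplicated within the batch
            }
            if written_before.contains(&key) {
                repeated += 1;
            } else {
                initial += 1;
            }
        }
        (initial, repeated)
    }

    fn main() {
        let written_before: HashSet<u32> = HashSet::new();
        // Two writes to the same fresh key still count as one initial write.
        assert_eq!(count_writes(&[(7, 1), (7, 2)], &written_before), (1, 0));
    }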
- assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); - assert_eq!(res.repeated_storage_writes, repeated_writes); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - false, - Some(10.into()), - false, - TxType::L1 { serial_id: 1 }, - ); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - // Method is not payable tx should fail - assert!(result.result.is_failed(), "The transaction should fail"); - - let res = StorageWritesDeduplicator::apply_on_empty_state(&result.logs.storage_logs); - // There are only basic initial writes - assert_eq!(res.initial_storage_writes - basic_initial_writes, 2); +fn l1_tx_execution() { + test_l1_tx_execution::>(); } #[test] -fn test_l1_tx_execution_high_gas_limit() { - // In this test, we try to execute an L1->L2 transaction with a high gas limit. - // Usually priority transactions with dangerously gas limit should even pass the checks on the L1, - // however, they might pass during the transition period to the new fee model, so we check that we can safely process those. - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![Account::new( - K256PrivateKey::from_bytes([0xad; 32].into()).unwrap(), - )]) - .build(); - - let account = &mut vm.rich_accounts[0]; - - let l1_messenger = l1_messenger_contract(); - - let contract_function = l1_messenger.function("sendToL1").unwrap(); - let params = [ - // Even a message of size 100k should not be able to be sent by a priority transaction - Token::Bytes(vec![0u8; 100_000]), - ]; - let calldata = contract_function.encode_input(¶ms).unwrap(); - - let mut tx = account.get_l1_tx( - Execute { - contract_address: Some(L1_MESSENGER_ADDRESS), - value: 0.into(), - factory_deps: vec![], - calldata, - }, - 0, - ); - - if let ExecuteTransactionCommon::L1(data) = &mut tx.common_data { - // Using some large gas limit - data.gas_limit = 300_000_000.into(); - } else { - unreachable!() - }; - - vm.vm.push_transaction(tx); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(res.result.is_failed(), "The transaction should've failed"); +fn l1_tx_execution_high_gas_limit() { + test_l1_tx_execution_high_gas_limit::>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/l2_blocks.rs b/core/lib/multivm/src/versions/vm_latest/tests/l2_blocks.rs index 1b5c3db59f7..82003b4a6ab 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/l2_blocks.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/l2_blocks.rs @@ -1,433 +1,33 @@ -//! -//! Tests for the bootloader -//! The description for each of the tests can be found in the corresponding `.yul` file. -//! 
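Note: the two wrapper functions above show the pattern this whole diff applies file by file: version-specific tests shrink to thin `#[test]` wrappers over shared, VM-generic test bodies. A minimal, self-contained sketch of the idea (the real `TestedVm` trait in `versions::testonly` has a much larger surface than this stand-in).

    // Shared test bodies are generic over a VM handle...
    trait TestedVm {
        fn execute_one_tx(&mut self) -> bool; // did the tx succeed?
    }

    fn test_some_behaviour<VM: TestedVm + Default>() {
        let mut vm = VM::default();
        assert!(vm.execute_one_tx());
    }

    // ...and each VM version only picks the concrete type.
    #[derive(Default)]
    struct LatestVm; // stand-in for Vm<StorageView<InMemoryStorage>, HistoryEnabled>

    impl TestedVm for LatestVm {
        fn execute_one_tx(&mut self) -> bool {
            true // stand-in for pushing and executing a real transaction
        }
    }

    #[test]
    fn some_behaviour() {
        test_some_behaviour::<LatestVm>();
    }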
- -use zk_evm_1_5_0::aux_structures::Timestamp; -use zksync_system_constants::REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE; -use zksync_types::{ - block::{pack_block_info, L2BlockHasher}, - AccountTreeId, Execute, ExecuteTransactionCommon, L1BatchNumber, L1TxCommonData, L2BlockNumber, - ProtocolVersionId, StorageKey, Transaction, H160, H256, SYSTEM_CONTEXT_ADDRESS, - SYSTEM_CONTEXT_BLOCK_INFO_POSITION, SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, U256, -}; -use zksync_utils::{h256_to_u256, u256_to_h256}; - use crate::{ - interface::{ - storage::WriteStorage, ExecutionResult, Halt, L2BlockEnv, TxExecutionMode, VmExecutionMode, - VmInterface, VmInterfaceExt, - }, - vm_latest::{ - constants::{ - BOOTLOADER_HEAP_PAGE, TX_OPERATOR_L2_BLOCK_INFO_OFFSET, - TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO, - }, - tests::tester::{default_l1_batch, VmTesterBuilder}, - utils::l2_blocks::get_l2_block_hash_key, - HistoryEnabled, Vm, + versions::testonly::l2_blocks::{ + test_l2_block_first_in_batch, test_l2_block_initialization_number_non_zero, + test_l2_block_initialization_timestamp, test_l2_block_new_l2_block, + test_l2_block_same_l2_block, }, - HistoryMode, + vm_latest::{HistoryEnabled, Vm}, }; -fn get_l1_noop() -> Transaction { - Transaction { - common_data: ExecuteTransactionCommon::L1(L1TxCommonData { - sender: H160::random(), - gas_limit: U256::from(2000000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute: Execute::default(), - received_timestamp_ms: 0, - raw_bytes: None, - } -} - #[test] -fn test_l2_block_initialization_timestamp() { - // This test checks that the L2 block initialization works correctly. - // Here we check that that the first block must have timestamp that is greater or equal to the timestamp - // of the current batch. - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - // Override the timestamp of the current miniblock to be 0. - vm.vm.bootloader_state.push_l2_block(L2BlockEnv { - number: 1, - timestamp: 0, - prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), - max_virtual_blocks_to_create: 1, - }); - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt {reason: Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())} - ); +fn l2_block_initialization_timestamp() { + test_l2_block_initialization_timestamp::>(); } #[test] -fn test_l2_block_initialization_number_non_zero() { - // This test checks that the L2 block initialization works correctly. - // Here we check that the first miniblock number can not be zero. 
- - let l1_batch = default_l1_batch(L1BatchNumber(1)); - let first_l2_block = L2BlockEnv { - number: 0, - timestamp: l1_batch.timestamp, - prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), - max_virtual_blocks_to_create: 1, - }; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 0, first_l2_block, timestamp); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt { - reason: Halt::FailedToSetL2Block( - "L2 block number is never expected to be zero".to_string() - ) - } - ); -} - -fn test_same_l2_block( - expected_error: Option, - override_timestamp: Option, - override_prev_block_hash: Option, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - vm.vm.push_transaction(l1_tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!res.result.is_failed()); - - let mut current_l2_block = vm.vm.batch_env.first_l2_block; - - if let Some(timestamp) = override_timestamp { - current_l2_block.timestamp = timestamp; - } - if let Some(prev_block_hash) = override_prev_block_hash { - current_l2_block.prev_block_hash = prev_block_hash; - } - - if (None, None) == (override_timestamp, override_prev_block_hash) { - current_l2_block.max_virtual_blocks_to_create = 0; - } - - vm.vm.push_transaction(l1_tx); - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 1, current_l2_block, timestamp); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } +fn l2_block_initialization_number_non_zero() { + test_l2_block_initialization_number_non_zero::>(); } #[test] -fn test_l2_block_same_l2_block() { - // This test aims to test the case when there are multiple transactions inside the same L2 block. 
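Aside: the same-block rules this removed test exercises can be restated compactly (hypothetical types; the error strings are the ones asserted in the cases below, which follow right after).

    #[derive(Clone, Copy)]
    struct L2BlockInfo {
        timestamp: u64,
        prev_hash: [u8; 32],
    }

    fn check_same_block(current: L2BlockInfo, proposed: L2BlockInfo) -> Result<(), &'static str> {
        if proposed.timestamp != current.timestamp {
            return Err("The timestamp of the same L2 block must be same");
        }
        if proposed.prev_hash != current.prev_hash {
            return Err("The previous hash of the same L2 block must be same");
        }
        Ok(()) // same timestamp and previous hash: a valid continuation
    }

    fn main() {
        let block = L2BlockInfo { timestamp: 100, prev_hash: [0; 32] };
        let mut proposed = block;
        proposed.timestamp = 0;
        assert!(check_same_block(block, proposed).is_err());
        assert!(check_same_block(block, block).is_ok());
    }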
- - // Case 1: Incorrect timestamp - test_same_l2_block( - Some(Halt::FailedToSetL2Block( - "The timestamp of the same L2 block must be same".to_string(), - )), - Some(0), - None, - ); - - // Case 2: Incorrect previous block hash - test_same_l2_block( - Some(Halt::FailedToSetL2Block( - "The previous hash of the same L2 block must be same".to_string(), - )), - None, - Some(H256::zero()), - ); - - // Case 3: Correct continuation of the same L2 block - test_same_l2_block(None, None, None); -} - -fn test_new_l2_block( - first_l2_block: L2BlockEnv, - overriden_second_block_number: Option, - overriden_second_block_timestamp: Option, - overriden_second_block_prev_block_hash: Option, - expected_error: Option, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - l1_batch.first_l2_block = first_l2_block; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - // Firstly we execute the first transaction - vm.vm.push_transaction(l1_tx.clone()); - vm.vm.execute(VmExecutionMode::OneTx); - - let mut second_l2_block = vm.vm.batch_env.first_l2_block; - second_l2_block.number += 1; - second_l2_block.timestamp += 1; - second_l2_block.prev_block_hash = vm.vm.bootloader_state.last_l2_block().get_hash(); - - if let Some(block_number) = overriden_second_block_number { - second_l2_block.number = block_number; - } - if let Some(timestamp) = overriden_second_block_timestamp { - second_l2_block.timestamp = timestamp; - } - if let Some(prev_block_hash) = overriden_second_block_prev_block_hash { - second_l2_block.prev_block_hash = prev_block_hash; - } - - vm.vm.bootloader_state.push_l2_block(second_l2_block); - - vm.vm.push_transaction(l1_tx); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } +fn l2_block_same_l2_block() { + test_l2_block_same_l2_block::>(); } #[test] -fn test_l2_block_new_l2_block() { - // This test is aimed to cover potential issue - - let correct_first_block = L2BlockEnv { - number: 1, - timestamp: 1, - prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), - max_virtual_blocks_to_create: 1, - }; - - // Case 1: Block number increasing by more than 1 - test_new_l2_block( - correct_first_block, - Some(3), - None, - None, - Some(Halt::FailedToSetL2Block( - "Invalid new L2 block number".to_string(), - )), - ); - - // Case 2: Timestamp not increasing - test_new_l2_block( - correct_first_block, - None, - Some(1), - None, - Some(Halt::FailedToSetL2Block("The timestamp of the new L2 block must be greater than the timestamp of the previous L2 block".to_string())), - ); - - // Case 3: Incorrect previous block hash - test_new_l2_block( - correct_first_block, - None, - None, - Some(H256::zero()), - Some(Halt::FailedToSetL2Block( - "The current L2 block hash is incorrect".to_string(), - )), - ); - - // Case 4: Correct new block - test_new_l2_block(correct_first_block, None, None, None, None); -} - -#[allow(clippy::too_many_arguments)] -fn test_first_in_batch( - miniblock_timestamp: u64, - miniblock_number: u32, - pending_txs_hash: H256, - batch_timestamp: u64, - new_batch_timestamp: u64, - batch_number: u32, - proposed_block: L2BlockEnv, - expected_error: Option, -) { - let 
mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.number += 1; - l1_batch.timestamp = new_batch_timestamp; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_tx = get_l1_noop(); - - // Setting the values provided. - let storage_ptr = vm.vm.state.storage.storage.get_ptr(); - let miniblock_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - ); - let pending_txs_hash_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, - ); - let batch_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_BLOCK_INFO_POSITION, - ); - let prev_block_hash_position = get_l2_block_hash_key(miniblock_number - 1); - - storage_ptr.borrow_mut().set_value( - miniblock_info_slot, - u256_to_h256(pack_block_info( - miniblock_number as u64, - miniblock_timestamp, - )), - ); - storage_ptr - .borrow_mut() - .set_value(pending_txs_hash_slot, pending_txs_hash); - storage_ptr.borrow_mut().set_value( - batch_info_slot, - u256_to_h256(pack_block_info(batch_number as u64, batch_timestamp)), - ); - storage_ptr.borrow_mut().set_value( - prev_block_hash_position, - L2BlockHasher::legacy_hash(L2BlockNumber(miniblock_number - 1)), - ); - - // In order to skip checks from the Rust side of the VM, we firstly use some definitely correct L2 block info. - // And then override it with the user-provided value - - let last_l2_block = vm.vm.bootloader_state.last_l2_block(); - let new_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: last_l2_block.timestamp + 1, - prev_block_hash: last_l2_block.get_hash(), - max_virtual_blocks_to_create: last_l2_block.max_virtual_blocks_to_create, - }; - - vm.vm.bootloader_state.push_l2_block(new_l2_block); - vm.vm.push_transaction(l1_tx); - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 0, proposed_block, timestamp); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } +fn l2_block_new_l2_block() { + test_l2_block_new_l2_block::>(); } #[test] -fn test_l2_block_first_in_batch() { - let prev_block_hash = L2BlockHasher::legacy_hash(L2BlockNumber(0)); - let prev_block_hash = L2BlockHasher::new(L2BlockNumber(1), 1, prev_block_hash) - .finalize(ProtocolVersionId::latest()); - test_first_in_batch( - 1, - 1, - H256::zero(), - 1, - 2, - 1, - L2BlockEnv { - number: 2, - timestamp: 2, - prev_block_hash, - max_virtual_blocks_to_create: 1, - }, - None, - ); - - let prev_block_hash = L2BlockHasher::legacy_hash(L2BlockNumber(0)); - let prev_block_hash = L2BlockHasher::new(L2BlockNumber(1), 8, prev_block_hash) - .finalize(ProtocolVersionId::latest()); - test_first_in_batch( - 8, - 1, - H256::zero(), - 5, - 12, - 1, - L2BlockEnv { - number: 2, - timestamp: 9, - prev_block_hash, - max_virtual_blocks_to_create: 1, - }, - Some(Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())), - ); -} - -fn set_manual_l2_block_info( - vm: &mut Vm, - tx_number: usize, - block_info: L2BlockEnv, - timestamp: Timestamp, -) { - let 
fictive_miniblock_position = - TX_OPERATOR_L2_BLOCK_INFO_OFFSET + TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO * tx_number; - - vm.state.memory.populate_page( - BOOTLOADER_HEAP_PAGE as usize, - vec![ - (fictive_miniblock_position, block_info.number.into()), - (fictive_miniblock_position + 1, block_info.timestamp.into()), - ( - fictive_miniblock_position + 2, - h256_to_u256(block_info.prev_block_hash), - ), - ( - fictive_miniblock_position + 3, - block_info.max_virtual_blocks_to_create.into(), - ), - ], - timestamp, - ) +fn l2_block_first_in_batch() { + test_l2_block_first_in_batch::>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/migration.rs b/core/lib/multivm/src/versions/vm_latest/tests/migration.rs deleted file mode 100644 index 5b8da255180..00000000000 --- a/core/lib/multivm/src/versions/vm_latest/tests/migration.rs +++ /dev/null @@ -1,51 +0,0 @@ -use zksync_types::{get_code_key, H256, SYSTEM_CONTEXT_ADDRESS}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - vm_latest::{ - tests::{ - tester::{get_empty_storage, DeployContractsTx, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - HistoryEnabled, - }, -}; - -/// This test checks that the new bootloader will work fine even if the previous system context contract is not -/// compatible with it, i.e. the bootloader will upgrade it before starting any transaction. -#[test] -fn test_migration_for_system_context_aa_interaction() { - let mut storage = get_empty_storage(); - // We will set the system context bytecode to zero. - storage.set_value(get_code_key(&SYSTEM_CONTEXT_ADDRESS), H256::zero()); - - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_storage(storage) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - // Now, we will just proceed with standard transaction execution. - // The bootloader should be able to update system context regardless of whether - // the upgrade transaction is there or not. - let account = &mut vm.rich_accounts[0]; - let counter = read_test_contract(); - let DeployContractsTx { tx, .. 
} = account.get_deploy_tx(&counter, None, TxType::L2);
-
-    vm.vm.push_transaction(tx);
-    let result = vm.vm.execute(VmExecutionMode::OneTx);
-    assert!(
-        !result.result.is_failed(),
-        "Transaction wasn't successful {:#?}",
-        result.result
-    );
-
-    let batch_result = vm.vm.execute(VmExecutionMode::Batch);
-    assert!(
-        !batch_result.result.is_failed(),
-        "Batch transaction wasn't successful {:#?}",
-        batch_result.result
-    );
-}
diff --git a/core/lib/multivm/src/versions/vm_latest/tests/mod.rs b/core/lib/multivm/src/versions/vm_latest/tests/mod.rs
index cc370f3906e..9d75aba9208 100644
--- a/core/lib/multivm/src/versions/vm_latest/tests/mod.rs
+++ b/core/lib/multivm/src/versions/vm_latest/tests/mod.rs
@@ -1,3 +1,34 @@
+use std::{
+    collections::{HashMap, HashSet},
+    rc::Rc,
+};
+
+use zk_evm_1_5_0::{
+    aux_structures::{MemoryPage, Timestamp},
+    vm_state::VmLocalState,
+    zkevm_opcode_defs::{ContractCodeSha256Format, VersionedHashLen32},
+};
+use zksync_types::{writes::StateDiffRecord, StorageKey, StorageValue, Transaction, H256, U256};
+use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256};
+use zksync_vm_interface::pubdata::PubdataBuilder;
+
+use super::{HistoryEnabled, Vm};
+use crate::{
+    interface::{
+        storage::{InMemoryStorage, ReadStorage, StorageView, WriteStorage},
+        CurrentExecutionState, L2BlockEnv, VmExecutionMode, VmExecutionResultAndLogs,
+    },
+    versions::testonly::{filter_out_base_system_contracts, TestedVm},
+    vm_latest::{
+        constants::BOOTLOADER_HEAP_PAGE,
+        old_vm::{event_sink::InMemoryEventSink, history_recorder::HistoryRecorder},
+        tracers::PubdataTracer,
+        types::internals::TransactionData,
+        utils::logs::StorageLogQuery,
+        AppDataFrameManagerWithHistory, HistoryMode, SimpleMemory, TracerDispatcher,
+    },
+};
+
 mod bootloader;
 mod default_aa;
 // TODO - fix this test
@@ -8,6 +39,7 @@ mod call_tracer;
 mod circuits;
 mod code_oracle;
 mod constants;
+mod evm_emulator;
 mod gas_limit;
 mod get_used_contracts;
 mod is_write_initial;
@@ -20,11 +52,254 @@ mod prestate_tracer;
 mod refunds;
 mod require_eip712;
 mod rollbacks;
-mod sekp256r1;
+mod secp256r1;
 mod simple_execution;
 mod storage;
-mod tester;
 mod tracing_execution_error;
 mod transfer;
 mod upgrade;
-mod utils;
+
+type TestedLatestVm = Vm<StorageView<InMemoryStorage>, HistoryEnabled>;
+
+impl TestedVm for TestedLatestVm {
+    type StateDump = VmInstanceInnerState<HistoryEnabled>;
+
+    fn dump_state(&self) -> Self::StateDump {
+        self.dump_inner_state()
+    }
+
+    fn gas_remaining(&mut self) -> u32 {
+        self.state.local_state.callstack.current.ergs_remaining
+    }
+
+    fn get_current_execution_state(&self) -> CurrentExecutionState {
+        self.get_current_execution_state()
+    }
+
+    fn decommitted_hashes(&self) -> HashSet<U256> {
+        self.get_used_contracts().into_iter().collect()
+    }
+
+    fn finish_batch_with_state_diffs(
+        &mut self,
+        diffs: Vec<StateDiffRecord>,
+        pubdata_builder: Rc<dyn PubdataBuilder>,
+    ) -> VmExecutionResultAndLogs {
+        let pubdata_tracer = PubdataTracer::new_with_forced_state_diffs(
+            self.batch_env.clone(),
+            VmExecutionMode::Batch,
+            diffs,
+            crate::vm_latest::MultiVMSubversion::latest(),
+            Some(pubdata_builder),
+        );
+        self.inspect_inner(
+            &mut TracerDispatcher::default(),
+            VmExecutionMode::Batch,
+            Some(pubdata_tracer),
+        )
+    }
+
+    fn finish_batch_without_pubdata(&mut self) -> VmExecutionResultAndLogs {
+        self.inspect_inner(
+            &mut TracerDispatcher::default(),
+            VmExecutionMode::Batch,
+            None,
+        )
+    }
+
+    fn insert_bytecodes(&mut self, bytecodes: &[&[u8]]) {
+        let bytecodes = bytecodes
+            .iter()
+            .map(|&bytecode| {
+                let hash = hash_bytecode(bytecode);
+                let words = bytes_to_be_words(bytecode.to_vec());
+                (h256_to_u256(hash), words)
+            })
+            .collect();
+        self.state
+            .decommittment_processor
+            .populate(bytecodes, Timestamp(0));
+    }
+
+    fn known_bytecode_hashes(&self) -> HashSet<U256> {
+        let mut bytecode_hashes: HashSet<_> = self
+            .state
+            .decommittment_processor
+            .known_bytecodes
+            .inner()
+            .keys()
+            .copied()
+            .collect();
+        filter_out_base_system_contracts(&mut bytecode_hashes);
+        bytecode_hashes
+    }
+
+    fn manually_decommit(&mut self, code_hash: H256) -> bool {
+        let (header, normalized_preimage) =
+            ContractCodeSha256Format::normalize_for_decommitment(&code_hash.0);
+        let query = self
+            .state
+            .prepare_to_decommit(
+                0,
+                header,
+                normalized_preimage,
+                MemoryPage(123),
+                Timestamp(0),
+            )
+            .unwrap();
+        self.state.execute_decommit(0, query).unwrap();
+        query.is_fresh
+    }
+
+    fn verify_required_bootloader_heap(&self, cells: &[(u32, U256)]) {
+        for &(slot, required_value) in cells {
+            let current_value = self
+                .state
+                .memory
+                .read_slot(BOOTLOADER_HEAP_PAGE as usize, slot as usize)
+                .value;
+            assert_eq!(current_value, required_value);
+        }
+    }
+
+    fn write_to_bootloader_heap(&mut self, cells: &[(usize, U256)]) {
+        let timestamp = Timestamp(self.state.local_state.timestamp);
+        self.state
+            .memory
+            .populate_page(BOOTLOADER_HEAP_PAGE as usize, cells.to_vec(), timestamp)
+    }
+
+    fn read_storage(&mut self, key: StorageKey) -> U256 {
+        self.state.storage.storage.read_from_storage(&key)
+    }
+
+    fn last_l2_block_hash(&self) -> H256 {
+        self.bootloader_state.last_l2_block().get_hash()
+    }
+
+    fn push_l2_block_unchecked(&mut self, block: L2BlockEnv) {
+        self.bootloader_state.push_l2_block(block);
+    }
+
+    fn push_transaction_with_refund(&mut self, tx: Transaction, refund: u64) {
+        let tx = TransactionData::new(tx, false);
+        let overhead = tx.overhead_gas();
+        self.push_raw_transaction(tx, overhead, refund, true)
+    }
+}
+
+#[derive(Clone, Debug)]
+pub(crate) struct ModifiedKeysMap(HashMap<StorageKey, StorageValue>);
+
+impl ModifiedKeysMap {
+    fn new<S: ReadStorage>(storage: &mut StorageView<S>) -> Self {
+        let mut modified_keys = storage.modified_storage_keys().clone();
+        let inner = storage.inner_mut();
+        // Remove modified keys that were set to the same value (e.g., due to a rollback).
+        modified_keys.retain(|key, value| inner.read_value(key) != *value);
+        Self(modified_keys)
+    }
+}
+
+// We consider hashmaps to be equal even if there is a key
+// that is not present in one but has zero value in another.
+impl PartialEq for ModifiedKeysMap {
+    fn eq(&self, other: &Self) -> bool {
+        for (key, value) in &self.0 {
+            if *value != other.0.get(key).copied().unwrap_or_default() {
+                return false;
+            }
+        }
+        for (key, value) in &other.0 {
+            if *value != self.0.get(key).copied().unwrap_or_default() {
+                return false;
+            }
+        }
+        true
+    }
+}
+
+#[derive(Clone, PartialEq, Debug)]
+pub(crate) struct DecommitterTestInnerState<H: HistoryMode> {
+    /// There is no way to "truly" compare the storage pointer,
+    /// so we just compare the modified keys. This is reasonable enough.
+    pub(crate) modified_storage_keys: ModifiedKeysMap,
+    pub(crate) known_bytecodes: HistoryRecorder<HashMap<U256, Vec<U256>>, H>,
+    pub(crate) decommitted_code_hashes: HistoryRecorder<HashMap<U256, Option<u32>>, HistoryEnabled>,
+}
+
+#[derive(Clone, PartialEq, Debug)]
+pub(crate) struct StorageOracleInnerState<H: HistoryMode> {
+    /// There is no way to "truly" compare the storage pointer,
+    /// so we just compare the modified keys. This is reasonable enough.
+    pub(crate) modified_storage_keys: ModifiedKeysMap,
+    pub(crate) frames_stack: AppDataFrameManagerWithHistory<Box<StorageLogQuery>, H>,
+    pub(crate) paid_changes: HistoryRecorder<HashMap<StorageKey, u32>, H>,
+    pub(crate) initial_values: HistoryRecorder<HashMap<StorageKey, U256>, H>,
+    pub(crate) returned_io_refunds: HistoryRecorder<Vec<u32>, H>,
+    pub(crate) returned_pubdata_costs: HistoryRecorder<Vec<i32>, H>,
+}
+
+#[derive(Clone, PartialEq, Debug)]
+pub(crate) struct PrecompileProcessorTestInnerState<H: HistoryMode> {
+    pub(crate) timestamp_history: HistoryRecorder<Vec<Timestamp>, H>,
+}
+
+/// A struct that encapsulates the state of the VM's oracles
+/// The state is to be used in tests.
+#[derive(Clone, PartialEq, Debug)]
+pub(crate) struct VmInstanceInnerState<H: HistoryMode> {
+    event_sink: InMemoryEventSink<H>,
+    precompile_processor_state: PrecompileProcessorTestInnerState<H>,
+    memory: SimpleMemory<H>,
+    decommitter_state: DecommitterTestInnerState<H>,
+    storage_oracle_state: StorageOracleInnerState<H>,
+    local_state: VmLocalState,
+}
+
+impl<S: ReadStorage, H: HistoryMode> Vm<StorageView<S>, H> {
+    // Dump inner state of the VM.
+    pub(crate) fn dump_inner_state(&self) -> VmInstanceInnerState<H> {
+        let event_sink = self.state.event_sink.clone();
+        let precompile_processor_state = PrecompileProcessorTestInnerState {
+            timestamp_history: self.state.precompiles_processor.timestamp_history.clone(),
+        };
+        let memory = self.state.memory.clone();
+        let decommitter_state = DecommitterTestInnerState {
+            modified_storage_keys: ModifiedKeysMap::new(
+                &mut self
+                    .state
+                    .decommittment_processor
+                    .get_storage()
+                    .borrow_mut(),
+            ),
+            known_bytecodes: self.state.decommittment_processor.known_bytecodes.clone(),
+            decommitted_code_hashes: self
+                .state
+                .decommittment_processor
+                .get_decommitted_code_hashes_with_history()
+                .clone(),
+        };
+
+        let storage_oracle_state = StorageOracleInnerState {
+            modified_storage_keys: ModifiedKeysMap::new(
+                &mut self.state.storage.storage.get_ptr().borrow_mut(),
+            ),
+            frames_stack: self.state.storage.storage_frames_stack.clone(),
+            paid_changes: self.state.storage.paid_changes.clone(),
+            initial_values: self.state.storage.initial_values.clone(),
+            returned_io_refunds: self.state.storage.returned_io_refunds.clone(),
+            returned_pubdata_costs: self.state.storage.returned_pubdata_costs.clone(),
+        };
+        let local_state = self.state.local_state.clone();
+
+        VmInstanceInnerState {
+            event_sink,
+            precompile_processor_state,
+            memory,
+            decommitter_state,
+            storage_oracle_state,
+            local_state,
+        }
+    }
+}
diff --git a/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs b/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs
index 15c678ba953..c7ea3242d4a 100644
--- a/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs
+++ b/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs
@@ -1,191 +1,9 @@
-use zksync_types::{Execute, Nonce};
-
 use crate::{
-    interface::{
-        ExecutionResult, Halt, TxExecutionMode, TxRevertReason, VmExecutionMode, VmInterfaceExt,
-        VmRevertReason,
-    },
-    vm_latest::{
-        tests::{
-            tester::{Account, VmTesterBuilder},
-            utils::read_nonce_holder_tester,
-        },
-        types::internals::TransactionData,
-        HistoryEnabled,
-    },
+    versions::testonly::nonce_holder::test_nonce_holder,
+    vm_latest::{HistoryEnabled, Vm},
 };
 
-pub enum NonceHolderTestMode {
-    SetValueUnderNonce,
-    IncreaseMinNonceBy5,
-    IncreaseMinNonceTooMuch,
-    LeaveNonceUnused,
-    IncreaseMinNonceBy1,
-    SwitchToArbitraryOrdering,
-}
-
-impl From<NonceHolderTestMode> for u8 {
-    fn from(mode: NonceHolderTestMode) -> u8 {
-        match mode {
-            NonceHolderTestMode::SetValueUnderNonce => 0,
-            NonceHolderTestMode::IncreaseMinNonceBy5 => 1,
-            NonceHolderTestMode::IncreaseMinNonceTooMuch
=> 2, - NonceHolderTestMode::LeaveNonceUnused => 3, - NonceHolderTestMode::IncreaseMinNonceBy1 => 4, - NonceHolderTestMode::SwitchToArbitraryOrdering => 5, - } - } -} - #[test] -fn test_nonce_holder() { - let mut account = Account::random(); - // let hex_addr = hex::encode(account.address.to_fixed_bytes()); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_custom_contracts(vec![( - read_nonce_holder_tester().to_vec(), - account.address, - true, - )]) - .with_rich_accounts(vec![account.clone()]) - .build(); - - // TODO - let mut _run_nonce_test = |nonce: u32, - test_mode: NonceHolderTestMode, - error_message: Option, - comment: &'static str| { - // In this test we have to reset VM state after each test case. Because once bootloader failed during the validation of the transaction, - // it will fail again and again. At the same time we have to keep the same storage, because we want to keep the nonce holder contract state. - // The easiest way in terms of lifetimes is to reuse `vm_builder` to achieve it. - vm.reset_state(true); - let mut transaction_data: TransactionData = account - .get_l2_tx_for_execute_with_nonce( - Execute { - contract_address: Some(account.address), - calldata: vec![12], - value: Default::default(), - factory_deps: vec![], - }, - None, - Nonce(nonce), - ) - .into(); - - transaction_data.signature = vec![test_mode.into()]; - vm.vm.push_raw_transaction(transaction_data, 0, 0, true); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(msg) = error_message { - let expected_error = - TxRevertReason::Halt(Halt::ValidationFailed(VmRevertReason::General { - msg, - data: vec![], - })); - let ExecutionResult::Halt { reason } = result.result else { - panic!("Expected revert, got {:?}", result.result); - }; - assert_eq!( - reason.to_string(), - expected_error.to_string(), - "{}", - comment - ); - } else { - assert!(!result.result.is_failed(), "{}", comment); - } - }; - // TODO reenable. - // // Test 1: trying to set value under non sequential nonce value. 
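Aside: the signature-byte trick used by the closure above is worth spelling out: the custom account skips real signature checks in this test, so a single byte of the `signature` field can smuggle the desired NonceHolder test mode into the contract. A runnable toy version, trimmed to two modes; the commented-out cases resume below.

    #[derive(Clone, Copy)]
    enum NonceHolderTestMode {
        SetValueUnderNonce,
        IncreaseMinNonceBy5,
    }

    impl From<NonceHolderTestMode> for u8 {
        fn from(mode: NonceHolderTestMode) -> u8 {
            match mode {
                NonceHolderTestMode::SetValueUnderNonce => 0,
                NonceHolderTestMode::IncreaseMinNonceBy5 => 1,
            }
        }
    }

    fn main() {
        // The one-byte "signature" carries the test mode, nothing else.
        let signature = vec![u8::from(NonceHolderTestMode::IncreaseMinNonceBy5)];
        assert_eq!(signature, vec![1]);
    }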
- // run_nonce_test( - // 1u32, - // NonceHolderTestMode::SetValueUnderNonce, - // Some("Error function_selector = 0x13595475, data = 0x13595475".to_string()), - // "Allowed to set value under non sequential value", - // ); - - // // Test 2: increase min nonce by 1 with sequential nonce ordering: - // run_nonce_test( - // 0u32, - // NonceHolderTestMode::IncreaseMinNonceBy1, - // None, - // "Failed to increment nonce by 1 for sequential account", - // ); - - // // Test 3: correctly set value under nonce with sequential nonce ordering: - // run_nonce_test( - // 1u32, - // NonceHolderTestMode::SetValueUnderNonce, - // None, - // "Failed to set value under nonce sequential value", - // ); - - // // Test 5: migrate to the arbitrary nonce ordering: - // run_nonce_test( - // 2u32, - // NonceHolderTestMode::SwitchToArbitraryOrdering, - // None, - // "Failed to switch to arbitrary ordering", - // ); - - // // Test 6: increase min nonce by 5 - // run_nonce_test( - // 6u32, - // NonceHolderTestMode::IncreaseMinNonceBy5, - // None, - // "Failed to increase min nonce by 5", - // ); - - // // Test 7: since the nonces in range [6,10] are no longer allowed, the - // // tx with nonce 10 should not be allowed - // run_nonce_test( - // 10u32, - // NonceHolderTestMode::IncreaseMinNonceBy5, - // Some(format!("Error function_selector = 0xe90aded4, data = 0xe90aded4000000000000000000000000{hex_addr}000000000000000000000000000000000000000000000000000000000000000a")), - // "Allowed to reuse nonce below the minimal one", - // ); - - // // Test 8: we should be able to use nonce 13 - // run_nonce_test( - // 13u32, - // NonceHolderTestMode::SetValueUnderNonce, - // None, - // "Did not allow to use unused nonce 10", - // ); - - // // Test 9: we should not be able to reuse nonce 13 - // run_nonce_test( - // 13u32, - // NonceHolderTestMode::IncreaseMinNonceBy5, - // Some(format!("Error function_selector = 0xe90aded4, data = 0xe90aded4000000000000000000000000{hex_addr}000000000000000000000000000000000000000000000000000000000000000d")), - // "Allowed to reuse the same nonce twice", - // ); - - // // Test 10: we should be able to simply use nonce 14, while bumping the minimal nonce by 5 - // run_nonce_test( - // 14u32, - // NonceHolderTestMode::IncreaseMinNonceBy5, - // None, - // "Did not allow to use a bumped nonce", - // ); - - // // Test 11: Do not allow bumping nonce by too much - // run_nonce_test( - // 16u32, - // NonceHolderTestMode::IncreaseMinNonceTooMuch, - // Some("Error function_selector = 0x45ac24a6, data = 0x45ac24a600000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000040000000000000000000000".to_string()), - // "Allowed for incrementing min nonce too much", - // ); - - // // Test 12: Do not allow not setting a nonce as used - // run_nonce_test( - // 16u32, - // NonceHolderTestMode::LeaveNonceUnused, - // Some(format!("Error function_selector = 0x1f2f8478, data = 0x1f2f8478000000000000000000000000{hex_addr}0000000000000000000000000000000000000000000000000000000000000010")), - // "Allowed to leave nonce as unused", - // ); +fn nonce_holder() { + test_nonce_holder::>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/precompiles.rs b/core/lib/multivm/src/versions/vm_latest/tests/precompiles.rs index 110b14146c7..7ef45721ea5 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/precompiles.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/precompiles.rs @@ -1,142 +1,19 @@ -use 
zk_evm_1_5_0::zk_evm_abstractions::precompiles::PrecompileAddress; -use zksync_types::{Address, Execute}; - use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_latest::{ - constants::BATCH_COMPUTATIONAL_GAS_LIMIT, - tests::{tester::VmTesterBuilder, utils::read_precompiles_contract}, - HistoryEnabled, - }, + versions::testonly::precompiles::{test_ecrecover, test_keccak, test_sha256}, + vm_latest::{HistoryEnabled, Vm}, }; #[test] -fn test_keccak() { - // Execute special transaction and check that at least 1000 keccak calls were made. - let contract = read_precompiles_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contract, address, true)]) - .build(); - - // calldata for `doKeccak(1000)`. - let keccak1000_calldata = - "370f20ac00000000000000000000000000000000000000000000000000000000000003e8"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(address), - calldata: hex::decode(keccak1000_calldata).unwrap(), - value: Default::default(), - factory_deps: vec![], - }, - None, - ); - vm.vm.push_transaction(tx); - let _ = vm - .vm - .inspect(&mut Default::default(), VmExecutionMode::OneTx); - - let keccak_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::Keccak256) - .count(); - - assert!(keccak_count >= 1000); +fn keccak() { + test_keccak::>(); } #[test] -fn test_sha256() { - // Execute special transaction and check that at least 1000 `sha256` calls were made. - let contract = read_precompiles_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contract, address, true)]) - .build(); - - // calldata for `doSha256(1000)`. - let sha1000_calldata = - "5d0b4fb500000000000000000000000000000000000000000000000000000000000003e8"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(address), - calldata: hex::decode(sha1000_calldata).unwrap(), - value: Default::default(), - factory_deps: vec![], - }, - None, - ); - vm.vm.push_transaction(tx); - let _ = vm - .vm - .inspect(&mut Default::default(), VmExecutionMode::OneTx); - - let sha_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::SHA256) - .count(); - - assert!(sha_count >= 1000); +fn sha256() { + test_sha256::>(); } #[test] -fn test_ecrecover() { - // Execute simple transfer and check that exactly 1 `ecrecover` call was made (it's done during tx validation). 
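Aside: the keccak and sha256 assertions above, and the ecrecover one below, all reduce to filtering a cycle history by precompile address. A conceptual sketch with an assumed shape for that history (the real type lives in the VM's precompiles processor); the removed ecrecover test body continues right after.

    #[derive(PartialEq)]
    enum PrecompileAddress {
        Keccak256,
        Sha256,
        Ecrecover,
    }

    fn count_calls(history: &[(PrecompileAddress, usize)], which: PrecompileAddress) -> usize {
        history.iter().filter(|(addr, _)| *addr == which).count()
    }

    fn main() {
        let history = vec![
            (PrecompileAddress::Ecrecover, 1),
            (PrecompileAddress::Keccak256, 10),
            (PrecompileAddress::Sha256, 10),
        ];
        // A plain transfer validates exactly one signature, hence one ecrecover call.
        assert_eq!(count_calls(&history, PrecompileAddress::Ecrecover), 1);
    }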
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(account.address), - calldata: Vec::new(), - value: Default::default(), - factory_deps: vec![], - }, - None, - ); - vm.vm.push_transaction(tx); - let _ = vm - .vm - .inspect(&mut Default::default(), VmExecutionMode::OneTx); - - let ecrecover_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::Ecrecover) - .count(); - - assert_eq!(ecrecover_count, 1); +fn ecrecover() { + test_ecrecover::>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/prestate_tracer.rs b/core/lib/multivm/src/versions/vm_latest/tests/prestate_tracer.rs index 230b1d0ad87..7028f7a8971 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/prestate_tracer.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/prestate_tracer.rs @@ -4,25 +4,22 @@ use once_cell::sync::OnceCell; use zksync_test_account::TxType; use zksync_types::{utils::deployed_address_create, Execute, U256}; +use super::TestedLatestVm; use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, + interface::{InspectExecutionMode, TxExecutionMode, VmInterface, VmInterfaceExt}, tracers::PrestateTracer, - vm_latest::{ - constants::BATCH_COMPUTATIONAL_GAS_LIMIT, - tests::{tester::VmTesterBuilder, utils::read_simple_transfer_contract}, - HistoryEnabled, ToTracerPointer, - }, + versions::testonly::{read_simple_transfer_contract, VmTesterBuilder}, + vm_latest::{constants::BATCH_COMPUTATIONAL_GAS_LIMIT, ToTracerPointer}, }; #[test] fn test_prestate_tracer() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) + let mut vm = VmTesterBuilder::new() .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() + .with_rich_accounts(1) .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); + .build::(); vm.deploy_test_contract(); let account = &mut vm.rich_accounts[0]; @@ -41,7 +38,7 @@ fn test_prestate_tracer() { let prestate_tracer = PrestateTracer::new(false, prestate_tracer_result.clone()); let tracer_ptr = prestate_tracer.into_tracer_pointer(); vm.vm - .inspect(&mut tracer_ptr.into(), VmExecutionMode::Batch); + .inspect(&mut tracer_ptr.into(), InspectExecutionMode::OneTx); let prestate_result = Arc::try_unwrap(prestate_tracer_result) .unwrap() @@ -53,37 +50,27 @@ fn test_prestate_tracer() { #[test] fn test_prestate_tracer_diff_mode() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) + let mut vm = VmTesterBuilder::new() .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() + .with_rich_accounts(1) .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); + .build::(); let contract = read_simple_transfer_contract(); - let tx = vm - .deployer - .as_mut() - .expect("You have to initialize builder with deployer") - .get_deploy_tx(&contract, None, TxType::L2) - .tx; + let account = &mut vm.rich_accounts[0]; + let tx = account.get_deploy_tx(&contract, None, TxType::L2).tx; let nonce = tx.nonce().unwrap().0.into(); vm.vm.push_transaction(tx); - 
vm.vm.execute(VmExecutionMode::OneTx); - let deployed_address = deployed_address_create(vm.deployer.as_ref().unwrap().address, nonce); + vm.vm.execute(InspectExecutionMode::OneTx); + let deployed_address = deployed_address_create(account.address, nonce); vm.test_contract = Some(deployed_address); // Deploy a second copy of the contract to see its appearance in the pre-state - let tx2 = vm - .deployer - .as_mut() - .expect("You have to initialize builder with deployer") - .get_deploy_tx(&contract, None, TxType::L2) - .tx; + let tx2 = account.get_deploy_tx(&contract, None, TxType::L2).tx; let nonce2 = tx2.nonce().unwrap().0.into(); vm.vm.push_transaction(tx2); - vm.vm.execute(VmExecutionMode::OneTx); - let deployed_address2 = deployed_address_create(vm.deployer.as_ref().unwrap().address, nonce2); + vm.vm.execute(InspectExecutionMode::OneTx); + let deployed_address2 = deployed_address_create(account.address, nonce2); let account = &mut vm.rich_accounts[0]; @@ -111,7 +98,7 @@ fn test_prestate_tracer_diff_mode() { let prestate_tracer = PrestateTracer::new(true, prestate_tracer_result.clone()); let tracer_ptr = prestate_tracer.into_tracer_pointer(); vm.vm - .inspect(&mut tracer_ptr.into(), VmExecutionMode::Bootloader); + .inspect(&mut tracer_ptr.into(), InspectExecutionMode::Bootloader); let prestate_result = Arc::try_unwrap(prestate_tracer_result) .unwrap() diff --git a/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs b/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs index e494a45f35b..dfbec170682 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs @@ -1,234 +1,16 @@ -use ethabi::Token; -use zksync_types::{Address, Execute, U256}; - use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, - vm_latest::{ - tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::{read_expensive_contract, read_test_contract}, - }, - types::internals::TransactionData, - HistoryEnabled, + versions::testonly::refunds::{ + test_negative_pubdata_for_transaction, test_predetermined_refunded_gas, }, + vm_latest::{HistoryEnabled, Vm}, }; #[test] -fn test_predetermined_refunded_gas() { - // In this test, we compare the execution of the bootloader with the predefined - // refunded gas and without them - - // We need to provide the same DA validator to ensure the same logs - let rollup_da_validator = Address::random(); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .with_rollup_pubdata_params(Some(rollup_da_validator)) - .build(); - let l1_batch = vm.vm.batch_env.clone(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let DeployContractsTx { - tx, - bytecode_hash: _, - address: _, - } = account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(!result.result.is_failed()); - - // If the refund provided by the operator or the final refund are the 0 - // there is no impact of the operator's refund at all and so this test does not - // make much sense. 
- assert!( - result.refunds.operator_suggested_refund > 0, - "The operator's refund is 0" - ); - assert!(result.refunds.gas_refunded > 0, "The final refund is 0"); - - let result_without_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_without_predefined_refunds = vm.vm.get_current_execution_state(); - assert!(!result_without_predefined_refunds.result.is_failed(),); - - // Here we want to provide the same refund from the operator and check that it's the correct one. - // We execute the whole block without refund tracer, because refund tracer will eventually override the provided refund. - // But the overall result should be the same - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .with_rollup_pubdata_params(Some(rollup_da_validator)) - .build(); - - let tx: TransactionData = tx.into(); - // Overhead - let overhead = tx.overhead_gas(); - vm.vm - .push_raw_transaction(tx.clone(), overhead, result.refunds.gas_refunded, true); - - let result_with_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_with_predefined_refunds = vm.vm.get_current_execution_state(); - - assert!(!result_with_predefined_refunds.result.is_failed()); - - // We need to sort these lists as those are flattened from HashMaps - current_state_with_predefined_refunds - .used_contract_hashes - .sort(); - current_state_without_predefined_refunds - .used_contract_hashes - .sort(); - - assert_eq!( - current_state_with_predefined_refunds.events, - current_state_without_predefined_refunds.events - ); - - assert_eq!( - current_state_with_predefined_refunds.user_l2_to_l1_logs, - current_state_without_predefined_refunds.user_l2_to_l1_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.system_logs, - current_state_without_predefined_refunds.system_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.deduplicated_storage_logs, - current_state_without_predefined_refunds.deduplicated_storage_logs - ); - assert_eq!( - current_state_with_predefined_refunds.used_contract_hashes, - current_state_without_predefined_refunds.used_contract_hashes - ); - - // In this test we put the different refund from the operator. - // We still can't use the refund tracer, because it will override the refund. - // But we can check that the logs and events have changed. 
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_l1_batch_env(l1_batch)
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_rich_accounts(vec![account.clone()])
-        .with_rollup_pubdata_params(Some(rollup_da_validator))
-        .build();
-
-    let changed_operator_suggested_refund = result.refunds.gas_refunded + 1000;
-    vm.vm
-        .push_raw_transaction(tx, overhead, changed_operator_suggested_refund, true);
-    let result = vm.vm.execute(VmExecutionMode::Batch);
-    let mut current_state_with_changed_predefined_refunds = vm.vm.get_current_execution_state();
-
-    assert!(!result.result.is_failed());
-    current_state_with_changed_predefined_refunds
-        .used_contract_hashes
-        .sort();
-    current_state_without_predefined_refunds
-        .used_contract_hashes
-        .sort();
-
-    assert_eq!(
-        current_state_with_changed_predefined_refunds.events.len(),
-        current_state_without_predefined_refunds.events.len()
-    );
-
-    assert_ne!(
-        current_state_with_changed_predefined_refunds.events,
-        current_state_without_predefined_refunds.events
-    );
-
-    assert_eq!(
-        current_state_with_changed_predefined_refunds.user_l2_to_l1_logs,
-        current_state_without_predefined_refunds.user_l2_to_l1_logs
-    );
-
-    assert_ne!(
-        current_state_with_changed_predefined_refunds.system_logs,
-        current_state_without_predefined_refunds.system_logs
-    );
-
-    assert_eq!(
-        current_state_with_changed_predefined_refunds
-            .deduplicated_storage_logs
-            .len(),
-        current_state_without_predefined_refunds
-            .deduplicated_storage_logs
-            .len()
-    );
-
-    assert_ne!(
-        current_state_with_changed_predefined_refunds.deduplicated_storage_logs,
-        current_state_without_predefined_refunds.deduplicated_storage_logs
-    );
-    assert_eq!(
-        current_state_with_changed_predefined_refunds.used_contract_hashes,
-        current_state_without_predefined_refunds.used_contract_hashes
-    );
+fn predetermined_refunded_gas() {
+    test_predetermined_refunded_gas::<Vm<_, HistoryEnabled>>();
 }
 
 #[test]
 fn negative_pubdata_for_transaction() {
-    let expensive_contract_address = Address::random();
-    let (expensive_contract_bytecode, expensive_contract) = read_expensive_contract();
-    let expensive_function = expensive_contract.function("expensive").unwrap();
-    let cleanup_function = expensive_contract.function("cleanUp").unwrap();
-
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_random_rich_accounts(1)
-        .with_custom_contracts(vec![(
-            expensive_contract_bytecode,
-            expensive_contract_address,
-            false,
-        )])
-        .build();
-
-    let expensive_tx = vm.rich_accounts[0].get_l2_tx_for_execute(
-        Execute {
-            contract_address: Some(expensive_contract_address),
-            calldata: expensive_function
-                .encode_input(&[Token::Uint(10.into())])
-                .unwrap(),
-            value: U256::zero(),
-            factory_deps: vec![],
-        },
-        None,
-    );
-    vm.vm.push_transaction(expensive_tx);
-    let result = vm.vm.execute(VmExecutionMode::OneTx);
-    assert!(
-        !result.result.is_failed(),
-        "Transaction wasn't successful: {result:#?}"
-    );
-
-    // This transaction cleans all initial writes in the contract, thus having negative `pubdata` impact.
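Some context on why the pubdata of the cleanup transaction below can go negative: the first transaction paid to publish the contract's initial writes, and cleanUp resets those slots to their initial values, so the batch carries less pubdata after the transaction than before it. Illustrative arithmetic only; the byte counts are invented and the real accounting lives in the bootloader:

let batch_pubdata_before_tx: i64 = 120; // hypothetical bytes already paid for
let batch_pubdata_after_tx: i64 = 54;   // cleanup removed earlier initial writes
let tx_pubdata_delta = batch_pubdata_after_tx - batch_pubdata_before_tx;
assert!(tx_pubdata_delta < 0); // negative impact: gas reserved for pubdata is refunded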
-    let clean_up_tx = vm.rich_accounts[0].get_l2_tx_for_execute(
-        Execute {
-            contract_address: Some(expensive_contract_address),
-            calldata: cleanup_function.encode_input(&[]).unwrap(),
-            value: U256::zero(),
-            factory_deps: vec![],
-        },
-        None,
-    );
-    vm.vm.push_transaction(clean_up_tx);
-    let result = vm.vm.execute(VmExecutionMode::OneTx);
-    assert!(
-        !result.result.is_failed(),
-        "Transaction wasn't successful: {result:#?}"
-    );
-    assert!(result.refunds.operator_suggested_refund > 0);
-    assert_eq!(
-        result.refunds.gas_refunded,
-        result.refunds.operator_suggested_refund
-    );
+    test_negative_pubdata_for_transaction::<Vm<_, HistoryEnabled>>();
 }
diff --git a/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs
index cdd71354c8d..470ddb28699 100644
--- a/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs
+++ b/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs
@@ -1,167 +1,9 @@
-use ethabi::Token;
-use zksync_eth_signer::{EthereumSigner, TransactionParameters};
-use zksync_system_constants::L2_BASE_TOKEN_ADDRESS;
-use zksync_types::{
-    fee::Fee, l2::L2Tx, transaction_request::TransactionRequest,
-    utils::storage_key_for_standard_token_balance, AccountTreeId, Address, Eip712Domain, Execute,
-    L2ChainId, Nonce, Transaction, U256,
-};
-
 use crate::{
-    interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt},
-    vm_latest::{
-        tests::{
-            tester::{Account, VmTester, VmTesterBuilder},
-            utils::read_many_owners_custom_account_contract,
-        },
-        HistoryDisabled,
-    },
+    versions::testonly::require_eip712::test_require_eip712,
+    vm_latest::{HistoryEnabled, Vm},
 };
 
-impl VmTester<HistoryDisabled> {
-    pub(crate) fn get_eth_balance(&mut self, address: Address) -> U256 {
-        let key = storage_key_for_standard_token_balance(
-            AccountTreeId::new(L2_BASE_TOKEN_ADDRESS),
-            &address,
-        );
-        self.vm.state.storage.storage.read_from_storage(&key)
-    }
-}
-
-// TODO refactor this test it use too much internal details of the VM
-#[tokio::test]
-/// This test deploys 'buggy' account abstraction code, and then tries accessing it both with legacy
-/// and EIP712 transactions.
-/// Currently we support both, but in the future, we should allow only EIP712 transactions to access the AA accounts.
-async fn test_require_eip712() {
-    // Use 3 accounts:
-    //  - `private_address` - EOA account, where we have the key
-    //  - `account_address` - AA account, where the contract is deployed
-    //  - beneficiary - an EOA account, where we'll try to transfer the tokens.
-    let account_abstraction = Account::random();
-    let mut private_account = Account::random();
-    let beneficiary = Account::random();
-
-    let (bytecode, contract) = read_many_owners_custom_account_contract();
-    let mut vm = VmTesterBuilder::new(HistoryDisabled)
-        .with_empty_in_memory_storage()
-        .with_custom_contracts(vec![(bytecode, account_abstraction.address, true)])
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_rich_accounts(vec![account_abstraction.clone(), private_account.clone()])
-        .build();
-
-    assert_eq!(vm.get_eth_balance(beneficiary.address), U256::from(0));
-
-    let chain_id: u32 = 270;
-
-    // First, let's set the owners of the AA account to the `private_address`.
-    // (so that messages signed by `private_address`, are authorized to act on behalf of the AA account).
- let set_owners_function = contract.function("setOwners").unwrap(); - let encoded_input = set_owners_function - .encode_input(&[Token::Array(vec![Token::Address(private_account.address)])]) - .unwrap(); - - let tx = private_account.get_l2_tx_for_execute( - Execute { - contract_address: Some(account_abstraction.address), - calldata: encoded_input, - value: Default::default(), - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - let private_account_balance = vm.get_eth_balance(private_account.address); - - // And now let's do the transfer from the 'account abstraction' to 'beneficiary' (using 'legacy' transaction). - // Normally this would not work - unless the operator is malicious. - let aa_raw_tx = TransactionParameters { - nonce: U256::from(0), - to: Some(beneficiary.address), - gas: U256::from(100000000), - gas_price: Some(U256::from(10000000)), - value: U256::from(888000088), - data: vec![], - chain_id: 270, - transaction_type: None, - access_list: None, - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - max_fee_per_blob_gas: None, - blob_versioned_hashes: None, - }; - - let aa_tx = private_account.sign_legacy_tx(aa_raw_tx).await; - let (tx_request, hash) = TransactionRequest::from_bytes(&aa_tx, L2ChainId::from(270)).unwrap(); - - let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000).unwrap(); - l2_tx.set_input(aa_tx, hash); - // Pretend that operator is malicious and sets the initiator to the AA account. - l2_tx.common_data.initiator_address = account_abstraction.address; - let transaction: Transaction = l2_tx.into(); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(888000088) - ); - // Make sure that the tokens were transferred from the AA account. 
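The contrast the removed test is driving at, condensed from the code around this point (both key lines are taken verbatim from it): the legacy path only proves that some key signed some bytes, so a malicious operator can re-point the initiator at the AA account, while the EIP-712 path signs typed data bound to the chain and account, which the AA validation logic can actually verify.

// Legacy tx: the operator, not the signature, decides who the initiator is.
l2_tx.common_data.initiator_address = account_abstraction.address;

// EIP-712 tx: the typed-data signature itself authorizes the AA account.
let domain = Eip712Domain::new(L2ChainId::from(chain_id));
let signature = private_account
    .get_pk_signer()
    .sign_typed_data(&domain, &transaction_request)
    .await
    .unwrap();

The assertion that follows checks the flip side: the private account's own balance is untouched, because the AA account funded the transfer.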
-    assert_eq!(
-        private_account_balance,
-        vm.get_eth_balance(private_account.address)
-    );
-
-    // // Now send the 'classic' EIP712 transaction
-    let tx_712 = L2Tx::new(
-        Some(beneficiary.address),
-        vec![],
-        Nonce(1),
-        Fee {
-            gas_limit: U256::from(1000000000),
-            max_fee_per_gas: U256::from(1000000000),
-            max_priority_fee_per_gas: U256::from(1000000000),
-            gas_per_pubdata_limit: U256::from(1000000000),
-        },
-        account_abstraction.address,
-        U256::from(28374938),
-        vec![],
-        Default::default(),
-    );
-
-    let mut transaction_request: TransactionRequest = tx_712.into();
-    transaction_request.chain_id = Some(chain_id.into());
-
-    let domain = Eip712Domain::new(L2ChainId::from(chain_id));
-    let signature = private_account
-        .get_pk_signer()
-        .sign_typed_data(&domain, &transaction_request)
-        .await
-        .unwrap();
-    let encoded_tx = transaction_request.get_signed_bytes(&signature).unwrap();
-
-    let (aa_txn_request, aa_hash) =
-        TransactionRequest::from_bytes(&encoded_tx, L2ChainId::from(chain_id)).unwrap();
-
-    let mut l2_tx = L2Tx::from_request(aa_txn_request, 100000).unwrap();
-    l2_tx.set_input(encoded_tx, aa_hash);
-
-    let transaction: Transaction = l2_tx.into();
-    vm.vm.push_transaction(transaction);
-    vm.vm.execute(VmExecutionMode::OneTx);
-
-    assert_eq!(
-        vm.get_eth_balance(beneficiary.address),
-        U256::from(916375026)
-    );
-    assert_eq!(
-        private_account_balance,
-        vm.get_eth_balance(private_account.address)
-    );
+#[test]
+fn require_eip712() {
+    test_require_eip712::<Vm<_, HistoryEnabled>>();
 }
diff --git a/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs
index a850053619b..de674498427 100644
--- a/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs
+++ b/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs
@@ -1,182 +1,36 @@
 use ethabi::Token;
 use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams};
+use zksync_test_account::{DeployContractsTx, TxType};
 use zksync_types::{get_nonce_key, U256};
+use zksync_vm_interface::InspectExecutionMode;
 
+use super::TestedLatestVm;
 use crate::{
     interface::{
         storage::WriteStorage,
         tracer::{TracerExecutionStatus, TracerExecutionStopReason},
-        TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled,
+        TxExecutionMode, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled,
     },
     tracers::dynamic::vm_1_5_0::DynTracer,
+    versions::testonly::{
+        rollbacks::{test_rollback_in_call_mode, test_vm_loadnext_rollbacks, test_vm_rollbacks},
+        VmTesterBuilder,
+    },
     vm_latest::{
-        tests::tester::{DeployContractsTx, TxType, VmTesterBuilder},
-        types::internals::ZkSyncVmState,
-        BootloaderState, HistoryEnabled, HistoryMode, SimpleMemory, ToTracerPointer, VmTracer,
+        types::internals::ZkSyncVmState, BootloaderState, HistoryEnabled, HistoryMode,
+        SimpleMemory, ToTracerPointer, Vm, VmTracer,
     },
 };
 
-// #[test]
-// fn test_vm_rollbacks() {
-//     let mut vm = VmTesterBuilder::new(HistoryEnabled)
-//         .with_empty_in_memory_storage()
-//         .with_execution_mode(TxExecutionMode::VerifyExecute)
-//         .with_random_rich_accounts(1)
-//         .build();
-
-//     let mut account = vm.rich_accounts[0].clone();
-//     let counter = read_test_contract();
-//     let tx_0 = account.get_deploy_tx(&counter, None, TxType::L2).tx;
-//     let tx_1 = account.get_deploy_tx(&counter, None, TxType::L2).tx;
-//     let tx_2 = account.get_deploy_tx(&counter, None, TxType::L2).tx;
-
-//     let result_without_rollbacks = vm.execute_and_verify_txs(&vec![
-//
TransactionTestInfo::new_processed(tx_0.clone(), false), -// TransactionTestInfo::new_processed(tx_1.clone(), false), -// TransactionTestInfo::new_processed(tx_2.clone(), false), -// ]); - -// // reset vm -// vm.reset_with_empty_storage(); - -// let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ -// TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignatureLength.into()), -// TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongMagicValue.into()), -// TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignature.into()), -// // The correct nonce is 0, this tx will fail -// TransactionTestInfo::new_rejected( -// tx_2.clone(), -// TxModifier::WrongNonce(tx_2.nonce().unwrap(), Nonce(0)).into(), -// ), -// // This tx will succeed -// TransactionTestInfo::new_processed(tx_0.clone(), false), -// // The correct nonce is 1, this tx will fail -// TransactionTestInfo::new_rejected( -// tx_0.clone(), -// TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), -// ), -// // The correct nonce is 1, this tx will fail -// TransactionTestInfo::new_rejected( -// tx_2.clone(), -// TxModifier::WrongNonce(tx_2.nonce().unwrap(), Nonce(1)).into(), -// ), -// // This tx will succeed -// TransactionTestInfo::new_processed(tx_1, false), -// // The correct nonce is 2, this tx will fail -// TransactionTestInfo::new_rejected( -// tx_0.clone(), -// TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), -// ), -// // This tx will succeed -// TransactionTestInfo::new_processed(tx_2.clone(), false), -// // This tx will fail -// TransactionTestInfo::new_rejected( -// tx_2.clone(), -// TxModifier::NonceReused(tx_2.initiator_account(), tx_2.nonce().unwrap()).into(), -// ), -// TransactionTestInfo::new_rejected( -// tx_0.clone(), -// TxModifier::NonceReused(tx_0.initiator_account(), tx_0.nonce().unwrap()).into(), -// ), -// ]); - -// // assert_eq!(result_without_rollbacks, result_with_rollbacks); -// } - -// #[test] -// fn test_vm_loadnext_rollbacks() { -// let mut vm = VmTesterBuilder::new(HistoryEnabled) -// .with_empty_in_memory_storage() -// .with_execution_mode(TxExecutionMode::VerifyExecute) -// .with_random_rich_accounts(1) -// .build(); -// let mut account = vm.rich_accounts[0].clone(); - -// let loadnext_contract = get_loadnext_contract(); -// let loadnext_constructor_data = &[Token::Uint(U256::from(100))]; -// let DeployContractsTx { -// tx: loadnext_deploy_tx, -// address, -// .. 
-//     } = account.get_deploy_tx_with_factory_deps(
-//         &loadnext_contract.bytecode,
-//         Some(loadnext_constructor_data),
-//         loadnext_contract.factory_deps.clone(),
-//         TxType::L2,
-//     );
-
-//     let loadnext_tx_1 = account.get_l2_tx_for_execute(
-//         Execute {
-//             contract_address: Some(address),
-//             calldata: LoadnextContractExecutionParams {
-//                 reads: 100,
-//                 writes: 100,
-//                 events: 100,
-//                 hashes: 500,
-//                 recursive_calls: 10,
-//                 deploys: 60,
-//             }
-//             .to_bytes(),
-//             value: Default::default(),
-//             factory_deps: vec![],
-//         },
-//         None,
-//     );
-
-//     let loadnext_tx_2 = account.get_l2_tx_for_execute(
-//         Execute {
-//             contract_address: Some(address),
-//             calldata: LoadnextContractExecutionParams {
-//                 reads: 100,
-//                 writes: 100,
-//                 events: 100,
-//                 hashes: 500,
-//                 recursive_calls: 10,
-//                 deploys: 60,
-//             }
-//             .to_bytes(),
-//             value: Default::default(),
-//             factory_deps: vec![],
-//         },
-//         None,
-//     );
-
-//     // let result_without_rollbacks = vm.execute_and_verify_txs(&vec![
-//     //     TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false),
-//     //     TransactionTestInfo::new_processed(loadnext_tx_1.clone(), false),
-//     //     TransactionTestInfo::new_processed(loadnext_tx_2.clone(), false),
-//     // ]);
-
-//     // TODO: reset vm
-//     // vm.reset_with_empty_storage();
-
-//     // let result_with_rollbacks = vm.execute_and_verify_txs(&vec![
-//     //     TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false),
-//     //     TransactionTestInfo::new_processed(loadnext_tx_1.clone(), true),
-//     //     TransactionTestInfo::new_rejected(
-//     //         loadnext_deploy_tx.clone(),
-//     //         TxModifier::NonceReused(
-//     //             loadnext_deploy_tx.initiator_account(),
-//     //             loadnext_deploy_tx.nonce().unwrap(),
-//     //         )
-//     //         .into(),
-//     //     ),
-//     //     TransactionTestInfo::new_processed(loadnext_tx_1, false),
-//     //     TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true),
-//     //     TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true),
-//     //     TransactionTestInfo::new_rejected(
-//     //         loadnext_deploy_tx.clone(),
-//     //         TxModifier::NonceReused(
-//     //             loadnext_deploy_tx.initiator_account(),
-//     //             loadnext_deploy_tx.nonce().unwrap(),
-//     //         )
-//     //         .into(),
-//     //     ),
-//     //     TransactionTestInfo::new_processed(loadnext_tx_2, false),
-//     // ]);
+#[test]
+fn vm_rollbacks() {
+    test_vm_rollbacks::<Vm<_, HistoryEnabled>>();
+}
 
-//     // assert_eq!(result_without_rollbacks, result_with_rollbacks);
-// }
+#[test]
+fn vm_loadnext_rollbacks() {
+    test_vm_loadnext_rollbacks::<Vm<_, HistoryEnabled>>();
+}
 
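The hunks below keep vm_latest's bespoke layered-rollback test, whose MaxRecursionTracer implementation is elided as diff context. A sketch of what such a tracer plausibly looks like against the vm_latest tracer API, reconstructed from the imports in this file; the actual implementation may differ:

impl<S, H: HistoryMode> DynTracer<S, SimpleMemory<H>> for MaxRecursionTracer {}

impl<S: WriteStorage, H: HistoryMode> VmTracer<S, H> for MaxRecursionTracer {
    fn finish_cycle(
        &mut self,
        state: &mut ZkSyncVmState<S, H>,
        _bootloader_state: &mut BootloaderState,
    ) -> TracerExecutionStatus {
        // Once the call stack exceeds the configured depth, ask the VM to stop;
        // the test then verifies that everything the aborted tx did is rolled back.
        if state.local_state.callstack.depth() > self.max_recursion_depth {
            TracerExecutionStatus::Stop(TracerExecutionStopReason::Finish)
        } else {
            TracerExecutionStatus::Continue
        }
    }
}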
 // Testing tracer that does not allow the recursion to go deeper than a certain limit
 struct MaxRecursionTracer {
@@ -208,11 +62,11 @@ fn test_layered_rollback() {
     // This test checks that the layered rollbacks work correctly, i.e.
     // the rollback by the operator will always revert all the changes
 
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
+    let mut vm = VmTesterBuilder::new()
         .with_empty_in_memory_storage()
         .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_random_rich_accounts(1)
-        .build();
+        .with_rich_accounts(1)
+        .build::<TestedLatestVm>();
 
     let account = &mut vm.rich_accounts[0];
     let loadnext_contract = get_loadnext_contract().bytecode;
@@ -227,7 +81,7 @@ fn test_layered_rollback() {
         TxType::L2,
     );
     vm.vm.push_transaction(deploy_tx);
-    let deployment_res = vm.vm.execute(VmExecutionMode::OneTx);
+    let deployment_res = vm.vm.execute(InspectExecutionMode::OneTx);
     assert!(!deployment_res.result.is_failed(), "transaction failed");
 
     let loadnext_transaction = account.get_loadnext_transaction(
@@ -254,7 +108,8 @@ fn test_layered_rollback() {
         max_recursion_depth: 15,
     }
     .into_tracer_pointer();
-    vm.vm.inspect(&mut tracer.into(), VmExecutionMode::OneTx);
+    vm.vm
+        .inspect(&mut tracer.into(), InspectExecutionMode::OneTx);
 
     let nonce_val2 = vm
         .vm
@@ -281,6 +136,11 @@ fn test_layered_rollback() {
     );
     vm.vm.push_transaction(loadnext_transaction);
-    let result = vm.vm.execute(VmExecutionMode::OneTx);
+    let result = vm.vm.execute(InspectExecutionMode::OneTx);
     assert!(!result.result.is_failed(), "transaction must not fail");
 }
+
+#[test]
+fn rollback_in_call_mode() {
+    test_rollback_in_call_mode::<Vm<_, HistoryEnabled>>();
+}
diff --git a/core/lib/multivm/src/versions/vm_latest/tests/secp256r1.rs b/core/lib/multivm/src/versions/vm_latest/tests/secp256r1.rs
new file mode 100644
index 00000000000..11534a26ded
--- /dev/null
+++ b/core/lib/multivm/src/versions/vm_latest/tests/secp256r1.rs
@@ -0,0 +1,9 @@
+use crate::{
+    versions::testonly::secp256r1::test_secp256r1,
+    vm_latest::{HistoryEnabled, Vm},
+};
+
+#[test]
+fn secp256r1() {
+    test_secp256r1::<Vm<_, HistoryEnabled>>();
+}
diff --git a/core/lib/multivm/src/versions/vm_latest/tests/sekp256r1.rs b/core/lib/multivm/src/versions/vm_latest/tests/sekp256r1.rs
deleted file mode 100644
index 93be9506a3b..00000000000
--- a/core/lib/multivm/src/versions/vm_latest/tests/sekp256r1.rs
+++ /dev/null
@@ -1,74 +0,0 @@
-use zk_evm_1_5_0::zkevm_opcode_defs::p256;
-use zksync_system_constants::P256VERIFY_PRECOMPILE_ADDRESS;
-use zksync_types::{web3::keccak256, Execute, H256, U256};
-use zksync_utils::h256_to_u256;
-
-use crate::{
-    interface::{ExecutionResult, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt},
-    vm_latest::{tests::tester::VmTesterBuilder, HistoryEnabled},
-};
-
-#[test]
-fn test_sekp256r1() {
-    // In this test, we aim to test whether a simple account interaction (without any fee logic)
-    // will work. The account will try to deploy a simple contract from integration tests.
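The deleted file below (its sekp256r1 name was a typo; the replacement above fixes it to secp256r1) drives the P256VERIFY precompile directly. The input/output convention it exercises, distilled from the removed code:

// P256VERIFY calldata: five 32-byte words, digest ‖ r ‖ s ‖ x ‖ y.
let calldata: Vec<u8> = [digest, encoded_r, encoded_s, x, y].concat();
assert_eq!(calldata.len(), 5 * 32);
// On success the precompile returns a single 32-byte word equal to 1, which
// the test checks via `h256_to_u256(output) == U256::from(1u32)`.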
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_execution_mode(TxExecutionMode::EthCall) - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - - // The digest, secret key and public key were copied from the following test suit: `https://github.com/hyperledger/besu/blob/b6a6402be90339367d5bcabcd1cfd60df4832465/crypto/algorithms/src/test/java/org/hyperledger/besu/crypto/SECP256R1Test.java#L36` - let sk = p256::SecretKey::from_slice( - &hex::decode("519b423d715f8b581f4fa8ee59f4771a5b44c8130b4e3eacca54a56dda72b464").unwrap(), - ) - .unwrap(); - let sk = p256::ecdsa::SigningKey::from(sk); - - let digest = keccak256(&hex::decode("5905238877c77421f73e43ee3da6f2d9e2ccad5fc942dcec0cbd25482935faaf416983fe165b1a045ee2bcd2e6dca3bdf46c4310a7461f9a37960ca672d3feb5473e253605fb1ddfd28065b53cb5858a8ad28175bf9bd386a5e471ea7a65c17cc934a9d791e91491eb3754d03799790fe2d308d16146d5c9b0d0debd97d79ce8").unwrap()); - let public_key_encoded = hex::decode("1ccbe91c075fc7f4f033bfa248db8fccd3565de94bbfb12f3c59ff46c271bf83ce4014c68811f9a21a1fdb2c0e6113e06db7ca93b7404e78dc7ccd5ca89a4ca9").unwrap(); - - let (sig, _) = sk.sign_prehash_recoverable(&digest).unwrap(); - let (r, s) = sig.split_bytes(); - - let mut encoded_r = [0u8; 32]; - encoded_r.copy_from_slice(&r); - - let mut encoded_s = [0u8; 32]; - encoded_s.copy_from_slice(&s); - - let mut x = [0u8; 32]; - x.copy_from_slice(&public_key_encoded[0..32]); - - let mut y = [0u8; 32]; - y.copy_from_slice(&public_key_encoded[32..64]); - - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(P256VERIFY_PRECOMPILE_ADDRESS), - calldata: [digest, encoded_r, encoded_s, x, y].concat(), - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx); - - let execution_result = vm.vm.execute(VmExecutionMode::Batch); - - let ExecutionResult::Success { output } = execution_result.result else { - panic!("batch failed") - }; - - let output = H256::from_slice(&output); - - assert_eq!( - h256_to_u256(output), - U256::from(1u32), - "verification was not successful" - ); -} diff --git a/core/lib/multivm/src/versions/vm_latest/tests/simple_execution.rs b/core/lib/multivm/src/versions/vm_latest/tests/simple_execution.rs index cd020ee9f96..29072e66b1e 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/simple_execution.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/simple_execution.rs @@ -1,83 +1,14 @@ -use assert_matches::assert_matches; - use crate::{ - interface::{ExecutionResult, VmExecutionMode, VmInterface, VmInterfaceExt}, - vm_latest::{ - tests::tester::{TxType, VmTesterBuilder}, - HistoryDisabled, - }, + versions::testonly::simple_execution::{test_estimate_fee, test_simple_execute}, + vm_latest::{HistoryEnabled, Vm}, }; #[test] fn estimate_fee() { - let mut vm_tester = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - vm_tester.deploy_test_contract(); - let account = &mut vm_tester.rich_accounts[0]; - - let tx = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L2, - ); - - vm_tester.vm.push_transaction(tx); - - let result = vm_tester.vm.execute(VmExecutionMode::OneTx); - assert_matches!(result.result, ExecutionResult::Success { .. 
});
+    test_estimate_fee::<Vm<_, HistoryEnabled>>();
 }
 
 #[test]
 fn simple_execute() {
-    let mut vm_tester = VmTesterBuilder::new(HistoryDisabled)
-        .with_empty_in_memory_storage()
-        .with_deployer()
-        .with_random_rich_accounts(1)
-        .build();
-
-    vm_tester.deploy_test_contract();
-
-    let account = &mut vm_tester.rich_accounts[0];
-
-    let tx1 = account.get_test_contract_transaction(
-        vm_tester.test_contract.unwrap(),
-        false,
-        Default::default(),
-        false,
-        TxType::L1 { serial_id: 1 },
-    );
-
-    let tx2 = account.get_test_contract_transaction(
-        vm_tester.test_contract.unwrap(),
-        true,
-        Default::default(),
-        false,
-        TxType::L1 { serial_id: 1 },
-    );
-
-    let tx3 = account.get_test_contract_transaction(
-        vm_tester.test_contract.unwrap(),
-        false,
-        Default::default(),
-        false,
-        TxType::L1 { serial_id: 1 },
-    );
-    let vm = &mut vm_tester.vm;
-    vm.push_transaction(tx1);
-    vm.push_transaction(tx2);
-    vm.push_transaction(tx3);
-    let tx = vm.execute(VmExecutionMode::OneTx);
-    assert_matches!(tx.result, ExecutionResult::Success { .. });
-    let tx = vm.execute(VmExecutionMode::OneTx);
-    assert_matches!(tx.result, ExecutionResult::Revert { .. });
-    let tx = vm.execute(VmExecutionMode::OneTx);
-    assert_matches!(tx.result, ExecutionResult::Success { .. });
-    let block_tip = vm.execute(VmExecutionMode::Batch);
-    assert_matches!(block_tip.result, ExecutionResult::Success { .. });
+    test_simple_execute::<Vm<_, HistoryEnabled>>();
 }
diff --git a/core/lib/multivm/src/versions/vm_latest/tests/storage.rs b/core/lib/multivm/src/versions/vm_latest/tests/storage.rs
index 126d174a646..4cb03875a0f 100644
--- a/core/lib/multivm/src/versions/vm_latest/tests/storage.rs
+++ b/core/lib/multivm/src/versions/vm_latest/tests/storage.rs
@@ -1,188 +1,14 @@
-use ethabi::Token;
-use zksync_contracts::{load_contract, read_bytecode};
-use zksync_test_account::Account;
-use zksync_types::{fee::Fee, Address, Execute, U256};
-
 use crate::{
-    interface::{
-        TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled,
-    },
-    vm_latest::{tests::tester::VmTesterBuilder, HistoryEnabled},
+    versions::testonly::storage::{test_storage_behavior, test_transient_storage_behavior},
+    vm_latest::{HistoryEnabled, Vm},
 };
 
-#[derive(Debug, Default)]
-
-struct TestTxInfo {
-    calldata: Vec<u8>,
-    fee_overrides: Option<Fee>,
-    should_fail: bool,
-}
-
-fn test_storage(txs: Vec<TestTxInfo>) -> u32 {
-    let bytecode = read_bytecode(
-        "etc/contracts-test-data/artifacts-zk/contracts/storage/storage.sol/StorageTester.json",
-    );
-
-    let test_contract_address = Address::random();
-
-    // In this test, we aim to test whether a simple account interaction (without any fee logic)
-    // will work. The account will try to deploy a simple contract from integration tests.
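How the deleted harness below measures pubdata: test_storage returns the pubdata_published statistic of the last transaction it ran, and test_storage_one_tx subtracts a no-op baseline so that bootloader start-up costs cancel out. Usage, condensed from the removed assertions; simple_write_calldata is a hypothetical binding for the encoded simpleWrite call:

let base_pubdata = test_storage_one_tx(vec![]);
let simple_write_pubdata = test_storage_one_tx(simple_write_calldata);
// A fresh initial write publishes the derived key plus the value (65 bytes in
// the removed test); a write that ends up reset publishes a compressed 34 bytes.
assert_eq!(simple_write_pubdata - base_pubdata, 65);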
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_deployer()
-        .with_random_rich_accounts(txs.len() as u32)
-        .with_custom_contracts(vec![(bytecode, test_contract_address, false)])
-        .build();
-
-    let mut last_result = None;
-
-    for (id, tx) in txs.into_iter().enumerate() {
-        let TestTxInfo {
-            calldata,
-            fee_overrides,
-            should_fail,
-        } = tx;
-
-        let account = &mut vm.rich_accounts[id];
-
-        vm.vm.make_snapshot();
-
-        let tx = account.get_l2_tx_for_execute(
-            Execute {
-                contract_address: Some(test_contract_address),
-                calldata,
-                value: 0.into(),
-                factory_deps: vec![],
-            },
-            fee_overrides,
-        );
-
-        vm.vm.push_transaction(tx);
-        let result = vm.vm.execute(VmExecutionMode::OneTx);
-        if should_fail {
-            assert!(result.result.is_failed(), "Transaction should fail");
-            vm.vm.rollback_to_the_latest_snapshot();
-        } else {
-            assert!(!result.result.is_failed(), "Transaction should not fail");
-            vm.vm.pop_snapshot_no_rollback();
-        }
-
-        last_result = Some(result);
-    }
-
-    last_result.unwrap().statistics.pubdata_published
-}
-
-fn test_storage_one_tx(second_tx_calldata: Vec<u8>) -> u32 {
-    test_storage(vec![
-        TestTxInfo::default(),
-        TestTxInfo {
-            calldata: second_tx_calldata,
-            fee_overrides: None,
-            should_fail: false,
-        },
-    ])
-}
-
-#[test]
-fn test_storage_behavior() {
-    let contract = load_contract(
-        "etc/contracts-test-data/artifacts-zk/contracts/storage/storage.sol/StorageTester.json",
-    );
-
-    // In all of the tests below we provide the first tx to ensure that the tracers will not include
-    // the statistics from the start of the bootloader and will only include those for the transaction itself.
-
-    let base_pubdata = test_storage_one_tx(vec![]);
-    let simple_test_pubdata = test_storage_one_tx(
-        contract
-            .function("simpleWrite")
-            .unwrap()
-            .encode_input(&[])
-            .unwrap(),
-    );
-    let resetting_write_pubdata = test_storage_one_tx(
-        contract
-            .function("resettingWrite")
-            .unwrap()
-            .encode_input(&[])
-            .unwrap(),
-    );
-    let resetting_write_via_revert_pubdata = test_storage_one_tx(
-        contract
-            .function("resettingWriteViaRevert")
-            .unwrap()
-            .encode_input(&[])
-            .unwrap(),
-    );
-
-    assert_eq!(simple_test_pubdata - base_pubdata, 65);
-    assert_eq!(resetting_write_pubdata - base_pubdata, 34);
-    assert_eq!(resetting_write_via_revert_pubdata - base_pubdata, 34);
-}
-
 #[test]
-fn test_transient_storage_behavior() {
-    let contract = load_contract(
-        "etc/contracts-test-data/artifacts-zk/contracts/storage/storage.sol/StorageTester.json",
-    );
-
-    let first_tstore_test = contract
-        .function("testTransientStore")
-        .unwrap()
-        .encode_input(&[])
-        .unwrap();
-    // Second transaction checks that, as expected, the transient storage is cleared after the first transaction.
-    let second_tstore_test = contract
-        .function("assertTValue")
-        .unwrap()
-        .encode_input(&[Token::Uint(U256::zero())])
-        .unwrap();
-
-    test_storage(vec![
-        TestTxInfo {
-            calldata: first_tstore_test,
-            ..TestTxInfo::default()
-        },
-        TestTxInfo {
-            calldata: second_tstore_test,
-            ..TestTxInfo::default()
-        },
-    ]);
+fn storage_behavior() {
+    test_storage_behavior::<Vm<_, HistoryEnabled>>();
 }
 
 #[test]
-fn test_transient_storage_behavior_panic() {
-    let contract = load_contract(
-        "etc/contracts-test-data/artifacts-zk/contracts/storage/storage.sol/StorageTester.json",
-    );
-
-    let basic_tstore_test = contract
-        .function("tStoreAndRevert")
-        .unwrap()
-        .encode_input(&[Token::Uint(U256::one()), Token::Bool(false)])
-        .unwrap();
-
-    let small_fee = Fee {
-        // Something very-very small to make the validation fail
-        gas_limit: 10_000.into(),
-        ..Account::default_fee()
-    };
-
-    test_storage(vec![
-        TestTxInfo {
-            calldata: basic_tstore_test.clone(),
-            ..TestTxInfo::default()
-        },
-        TestTxInfo {
-            fee_overrides: Some(small_fee),
-            should_fail: true,
-            ..TestTxInfo::default()
-        },
-        TestTxInfo {
-            calldata: basic_tstore_test,
-            ..TestTxInfo::default()
-        },
-    ]);
+fn transient_storage_behavior() {
+    test_transient_storage_behavior::<Vm<_, HistoryEnabled>>();
 }
diff --git a/core/lib/multivm/src/versions/vm_latest/tests/tester/inner_state.rs b/core/lib/multivm/src/versions/vm_latest/tests/tester/inner_state.rs
deleted file mode 100644
index c0ef52afaa5..00000000000
--- a/core/lib/multivm/src/versions/vm_latest/tests/tester/inner_state.rs
+++ /dev/null
@@ -1,131 +0,0 @@
-use std::collections::HashMap;
-
-use zk_evm_1_5_0::{aux_structures::Timestamp, vm_state::VmLocalState};
-use zksync_types::{StorageKey, StorageValue, U256};
-
-use crate::{
-    interface::storage::WriteStorage,
-    vm_latest::{
-        old_vm::{
-            event_sink::InMemoryEventSink,
-            history_recorder::{AppDataFrameManagerWithHistory, HistoryRecorder},
-        },
-        utils::logs::StorageLogQuery,
-        HistoryEnabled, HistoryMode, SimpleMemory, Vm,
-    },
-    HistoryMode as CommonHistoryMode,
-};
-
-#[derive(Clone, Debug)]
-pub(crate) struct ModifiedKeysMap(HashMap<StorageKey, StorageValue>);
-
-// We consider hashmaps to be equal even if there is a key
-// that is not present in one but has zero value in another.
-impl PartialEq for ModifiedKeysMap {
-    fn eq(&self, other: &Self) -> bool {
-        for (key, value) in self.0.iter() {
-            if *value != other.0.get(key).cloned().unwrap_or_default() {
-                return false;
-            }
-        }
-        for (key, value) in other.0.iter() {
-            if *value != self.0.get(key).cloned().unwrap_or_default() {
-                return false;
-            }
-        }
-        true
-    }
-}
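Why ModifiedKeysMap above needs a custom PartialEq: a slot explicitly written to zero and a slot never touched must compare equal, which derived map equality gets wrong. A self-contained toy model of the same comparator:

use std::collections::HashMap;

// Zero-normalized equality over u64 maps, mirroring ModifiedKeysMap::eq above.
fn zero_normalized_eq(a: &HashMap<u64, u64>, b: &HashMap<u64, u64>) -> bool {
    a.iter().all(|(k, v)| *v == b.get(k).copied().unwrap_or_default())
        && b.iter().all(|(k, v)| *v == a.get(k).copied().unwrap_or_default())
}

fn main() {
    let mut written_to_zero = HashMap::new();
    written_to_zero.insert(1_u64, 0_u64); // slot explicitly set to zero
    let untouched: HashMap<u64, u64> = HashMap::new();
    assert_ne!(written_to_zero, untouched); // derived equality distinguishes them
    assert!(zero_normalized_eq(&written_to_zero, &untouched)); // the comparator does not
}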
-
-#[derive(Clone, PartialEq, Debug)]
-pub(crate) struct DecommitterTestInnerState<H: HistoryMode> {
-    /// There is no way to "truly" compare the storage pointer,
-    /// so we just compare the modified keys. This is reasonable enough.
-    pub(crate) modified_storage_keys: ModifiedKeysMap,
-    pub(crate) known_bytecodes: HistoryRecorder<HashMap<U256, Vec<U256>>, H>,
-    pub(crate) decommitted_code_hashes: HistoryRecorder<HashMap<U256, Option<u32>>, HistoryEnabled>,
-}
-
-#[derive(Clone, PartialEq, Debug)]
-pub(crate) struct StorageOracleInnerState<H: HistoryMode> {
-    /// There is no way to "truly" compare the storage pointer,
-    /// so we just compare the modified keys. This is reasonable enough.
-    pub(crate) modified_storage_keys: ModifiedKeysMap,
-
-    pub(crate) frames_stack: AppDataFrameManagerWithHistory<Box<StorageLogQuery>, H>,
-
-    pub(crate) paid_changes: HistoryRecorder<HashMap<StorageKey, u32>, H>,
-    pub(crate) initial_values: HistoryRecorder<HashMap<StorageKey, U256>, H>,
-    pub(crate) returned_io_refunds: HistoryRecorder<Vec<u32>, H>,
-    pub(crate) returned_pubdata_costs: HistoryRecorder<Vec<i32>, H>,
-}
-
-#[derive(Clone, PartialEq, Debug)]
-pub(crate) struct PrecompileProcessorTestInnerState<H: HistoryMode> {
-    pub(crate) timestamp_history: HistoryRecorder<Vec<Timestamp>, H>,
-}
-
-/// A struct that encapsulates the state of the VM's oracles
-/// The state is to be used in tests.
-#[derive(Clone, PartialEq, Debug)]
-pub(crate) struct VmInstanceInnerState<H: HistoryMode> {
-    event_sink: InMemoryEventSink<H>,
-    precompile_processor_state: PrecompileProcessorTestInnerState<H>,
-    memory: SimpleMemory<H>,
-    decommitter_state: DecommitterTestInnerState<H>,
-    storage_oracle_state: StorageOracleInnerState<H>,
-    local_state: VmLocalState,
-}
-
-impl<S: WriteStorage, H: CommonHistoryMode> Vm<S, H> {
-    // Dump inner state of the VM.
-    pub(crate) fn dump_inner_state(&self) -> VmInstanceInnerState<H::Vm1_5_0> {
-        let event_sink = self.state.event_sink.clone();
-        let precompile_processor_state = PrecompileProcessorTestInnerState {
-            timestamp_history: self.state.precompiles_processor.timestamp_history.clone(),
-        };
-        let memory = self.state.memory.clone();
-        let decommitter_state = DecommitterTestInnerState {
-            modified_storage_keys: ModifiedKeysMap(
-                self.state
-                    .decommittment_processor
-                    .get_storage()
-                    .borrow()
-                    .modified_storage_keys()
-                    .clone(),
-            ),
-            known_bytecodes: self.state.decommittment_processor.known_bytecodes.clone(),
-            decommitted_code_hashes: self
-                .state
-                .decommittment_processor
-                .get_decommitted_code_hashes_with_history()
-                .clone(),
-        };
-        let storage_oracle_state = StorageOracleInnerState {
-            modified_storage_keys: ModifiedKeysMap(
-                self.state
-                    .storage
-                    .storage
-                    .get_ptr()
-                    .borrow()
-                    .modified_storage_keys()
-                    .clone(),
-            ),
-            frames_stack: self.state.storage.storage_frames_stack.clone(),
-            paid_changes: self.state.storage.paid_changes.clone(),
-            initial_values: self.state.storage.initial_values.clone(),
-            returned_io_refunds: self.state.storage.returned_io_refunds.clone(),
-            returned_pubdata_costs: self.state.storage.returned_pubdata_costs.clone(),
-        };
-        let local_state = self.state.local_state.clone();
-
-        VmInstanceInnerState {
-            event_sink,
-            precompile_processor_state,
-            memory,
-            decommitter_state,
-            storage_oracle_state,
-            local_state,
-        }
-    }
-}
diff --git a/core/lib/multivm/src/versions/vm_latest/tests/tester/mod.rs b/core/lib/multivm/src/versions/vm_latest/tests/tester/mod.rs
deleted file mode 100644
index d55d1fd6a69..00000000000
--- a/core/lib/multivm/src/versions/vm_latest/tests/tester/mod.rs
+++ /dev/null
@@ -1,9 +0,0 @@
-pub(crate) use transaction_test_info::{ExpectedError, TransactionTestInfo};
-pub(crate) use vm_tester::{
-    default_l1_batch, get_empty_storage, InMemoryStorageView, VmTester, VmTesterBuilder,
-};
-pub(crate) use zksync_test_account::{Account, DeployContractsTx, TxType};
-
-mod inner_state;
-mod transaction_test_info;
-mod vm_tester;
diff --git a/core/lib/multivm/src/versions/vm_latest/tests/tester/vm_tester.rs b/core/lib/multivm/src/versions/vm_latest/tests/tester/vm_tester.rs
deleted file mode 100644
index 0f6e13877bf..00000000000
--- a/core/lib/multivm/src/versions/vm_latest/tests/tester/vm_tester.rs
+++ /dev/null
@@ -1,335 +0,0 @@
-use std::marker::PhantomData;
-
-use zksync_contracts::{l2_rollup_da_validator_bytecode, BaseSystemContracts};
-use zksync_types::{
-    block::L2BlockHasher,
-    commitment::{L1BatchCommitmentMode, PubdataParams},
-    fee_model::BatchFeeInput,
-    get_code_key, get_is_account_key,
-    helpers::unix_timestamp_ms,
-    utils::{deployed_address_create, storage_key_for_eth_balance},
-    Address, L1BatchNumber, L2BlockNumber, L2ChainId, Nonce, ProtocolVersionId, U256,
-};
-use zksync_utils::{bytecode::hash_bytecode, u256_to_h256};
-
-use crate::{
-    interface::{
-        storage::{InMemoryStorage, StoragePtr, StorageView, WriteStorage},
-        L1BatchEnv, L2Block, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmFactory,
-        VmInterface, VmInterfaceExt,
-    },
-    vm_latest::{
-        constants::BATCH_COMPUTATIONAL_GAS_LIMIT,
-        tests::{
-            tester::{Account, TxType},
-            utils::read_test_contract,
-        },
-        utils::l2_blocks::load_last_l2_block,
-        Vm,
-    },
-    HistoryMode,
-};
-
-pub(crate) type InMemoryStorageView = StorageView<InMemoryStorage>;
-
-pub(crate) struct VmTester<H: HistoryMode> {
-    pub(crate) vm: Vm<InMemoryStorageView, H>,
-    pub(crate) storage: StoragePtr<InMemoryStorageView>,
-    pub(crate) fee_account: Address,
-    pub(crate) deployer: Option<Account>,
-    pub(crate) test_contract: Option<Address>,
-    pub(crate) rich_accounts: Vec<Account>,
-    pub(crate) custom_contracts: Vec<ContractsToDeploy>,
-    _phantom: std::marker::PhantomData<H>,
-}
-
-impl<H: HistoryMode> VmTester<H> {
-    pub(crate) fn deploy_test_contract(&mut self) {
-        let contract = read_test_contract();
-        let tx = self
-            .deployer
-            .as_mut()
-            .expect("You have to initialize builder with deployer")
-            .get_deploy_tx(&contract, None, TxType::L2)
-            .tx;
-        let nonce = tx.nonce().unwrap().0.into();
-        self.vm.push_transaction(tx);
-        self.vm.execute(VmExecutionMode::OneTx);
-        let deployed_address =
-            deployed_address_create(self.deployer.as_ref().unwrap().address, nonce);
-        self.test_contract = Some(deployed_address);
-    }
-
-    // pub(crate) fn reset_with_empty_storage(&mut self) {
-    //     self.storage = StorageView::new(get_empty_storage()).to_rc_ptr();
-    //     self.reset_state(false);
-    // }
-
-    /// Reset the state of the VM to the initial state.
-    /// If `use_latest_l2_block` is true, then the VM will use the latest L2 block from storage,
-    /// otherwise it will use the first L2 block of l1 batch env
-    pub(crate) fn reset_state(&mut self, use_latest_l2_block: bool) {
-        for account in self.rich_accounts.iter_mut() {
-            account.nonce = Nonce(0);
-            make_account_rich(self.storage.clone(), account);
-        }
-        if let Some(deployer) = &self.deployer {
-            make_account_rich(self.storage.clone(), deployer);
-        }
-
-        if !self.custom_contracts.is_empty() {
-            println!("Inserting custom contracts is not yet supported")
-            // `insert_contracts(&mut self.storage, &self.custom_contracts);`
-        }
-
-        let mut l1_batch = self.vm.batch_env.clone();
-        if use_latest_l2_block {
-            let last_l2_block = load_last_l2_block(&self.storage).unwrap_or(L2Block {
-                number: 0,
-                timestamp: 0,
-                hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)),
-            });
-            l1_batch.first_l2_block = L2BlockEnv {
-                number: last_l2_block.number + 1,
-                timestamp: std::cmp::max(last_l2_block.timestamp + 1, l1_batch.timestamp),
-                prev_block_hash: last_l2_block.hash,
-                max_virtual_blocks_to_create: 1,
-            };
-        }
-
-        let vm = Vm::new(l1_batch, self.vm.system_env.clone(), self.storage.clone());
-
-        if self.test_contract.is_some() {
-            self.deploy_test_contract();
-        }
-
-        self.vm = vm;
-    }
-}
-
-pub(crate) type ContractsToDeploy = (Vec<u8>, Address, bool);
-
-pub(crate) struct VmTesterBuilder<H: HistoryMode> {
-    storage: Option<InMemoryStorage>,
-    l1_batch_env: Option<L1BatchEnv>,
-    system_env: SystemEnv,
-    deployer: Option<Account>,
-    rich_accounts: Vec<Account>,
-    custom_contracts: Vec<ContractsToDeploy>,
-    pubdata_params: Option<PubdataParams>,
-    _phantom: PhantomData<H>,
-}
-
-impl<H: HistoryMode> Clone for VmTesterBuilder<H> {
-    fn clone(&self) -> Self {
-        Self {
-            storage: None,
-            l1_batch_env: self.l1_batch_env.clone(),
-            system_env: self.system_env.clone(),
-            deployer: self.deployer.clone(),
-            rich_accounts: self.rich_accounts.clone(),
-            custom_contracts: self.custom_contracts.clone(),
-            pubdata_params: self.pubdata_params,
-            _phantom: PhantomData,
-        }
-    }
-}
-
-#[allow(dead_code)]
-impl<H: HistoryMode> VmTesterBuilder<H> {
-    pub(crate) fn new(_: H) -> Self {
-        Self {
-            storage: None,
-            l1_batch_env: None,
-            system_env: SystemEnv {
-                zk_porter_available: false,
-                version: ProtocolVersionId::latest(),
-                base_system_smart_contracts: BaseSystemContracts::playground(),
-                bootloader_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT,
-                execution_mode: TxExecutionMode::VerifyExecute,
-                default_validation_computational_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT,
-                chain_id: L2ChainId::from(270),
-                pubdata_params: Default::default(),
-            },
-            deployer: None,
-            rich_accounts: vec![],
-            custom_contracts: vec![],
-            _phantom: PhantomData,
-            pubdata_params: None,
-        }
-    }
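For reference while reading the rest of this deleted file: the builder is meant to be consumed in one fluent chain, and deploy_test_contract (defined above) is the step that fills test_contract afterwards. A typical composition, taken from the tests this diff removes:

let mut vm_tester = VmTesterBuilder::new(HistoryEnabled)
    .with_empty_in_memory_storage()
    .with_execution_mode(TxExecutionMode::VerifyExecute)
    .with_deployer()
    .with_random_rich_accounts(1)
    .build();
// Deploys the counter test contract and records its address in `test_contract`.
vm_tester.deploy_test_contract();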
-
-    pub(crate) fn with_l1_batch_env(mut self, l1_batch_env: L1BatchEnv) -> Self {
-        self.l1_batch_env = Some(l1_batch_env);
-        self
-    }
-
-    pub(crate) fn with_system_env(mut self, system_env: SystemEnv) -> Self {
-        self.system_env = system_env;
-        self
-    }
-
-    pub(crate) fn with_storage(mut self, storage: InMemoryStorage) -> Self {
-        self.storage = Some(storage);
-        self
-    }
-
-    pub(crate) fn with_base_system_smart_contracts(
-        mut self,
-        base_system_smart_contracts: BaseSystemContracts,
-    ) -> Self {
-        self.system_env.base_system_smart_contracts = base_system_smart_contracts;
-        self
-    }
-
-    pub(crate) fn with_custom_pubdata_params(mut self, pubdata_params: PubdataParams) -> Self {
-        self.pubdata_params = Some(pubdata_params);
-        self
-    }
-
-    pub(crate) fn with_rollup_pubdata_params(mut self, fixed_address: Option<Address>) -> Self {
-        // We choose some random address to put the L2 DA validator to.
-        let l2_da_validator_address = fixed_address.unwrap_or_else(Address::random);
-
-        let bytecode = l2_rollup_da_validator_bytecode();
-
-        self.pubdata_params = Some(PubdataParams {
-            l2_da_validator_address,
-            pubdata_type: L1BatchCommitmentMode::Rollup,
-        });
-
-        self.custom_contracts
-            .push((bytecode, l2_da_validator_address, false));
-
-        self
-    }
-
-    pub(crate) fn with_validium_pubdata_params(self) -> Self {
-        todo!()
-    }
-
-    pub(crate) fn with_bootloader_gas_limit(mut self, gas_limit: u32) -> Self {
-        self.system_env.bootloader_gas_limit = gas_limit;
-        self
-    }
-
-    pub(crate) fn with_execution_mode(mut self, execution_mode: TxExecutionMode) -> Self {
-        self.system_env.execution_mode = execution_mode;
-        self
-    }
-
-    pub(crate) fn with_empty_in_memory_storage(mut self) -> Self {
-        self.storage = Some(get_empty_storage());
-        self
-    }
-
-    pub(crate) fn with_random_rich_accounts(mut self, number: u32) -> Self {
-        for _ in 0..number {
-            let account = Account::random();
-            self.rich_accounts.push(account);
-        }
-        self
-    }
-
-    pub(crate) fn with_rich_accounts(mut self, accounts: Vec<Account>) -> Self {
-        self.rich_accounts.extend(accounts);
-        self
-    }
-
-    pub(crate) fn with_deployer(mut self) -> Self {
-        let deployer = Account::random();
-        self.deployer = Some(deployer);
-        self
-    }
-
-    pub(crate) fn with_custom_contracts(mut self, contracts: Vec<ContractsToDeploy>) -> Self {
-        self.custom_contracts = contracts;
-        self
-    }
-
-    pub(crate) fn build(mut self) -> VmTester<H> {
-        if self.pubdata_params.is_none() {
-            self = self.with_rollup_pubdata_params(None);
-        }
-        self.system_env.pubdata_params = self.pubdata_params.unwrap();
-
-        let l1_batch_env = self
-            .l1_batch_env
-            .unwrap_or_else(|| default_l1_batch(L1BatchNumber(1)));
-
-        let mut raw_storage = self.storage.unwrap_or_else(get_empty_storage);
-        insert_contracts(&mut raw_storage, &self.custom_contracts);
-        let storage_ptr = StorageView::new(raw_storage).to_rc_ptr();
-        for account in self.rich_accounts.iter() {
-            make_account_rich(storage_ptr.clone(), account);
-        }
-        if let Some(deployer) = &self.deployer {
-            make_account_rich(storage_ptr.clone(), deployer);
-        }
-        let fee_account = l1_batch_env.fee_account;
-
-        let vm = Vm::new(l1_batch_env, self.system_env, storage_ptr.clone());
-
-        VmTester {
-            vm,
-            storage: storage_ptr,
-            fee_account,
-            deployer: self.deployer,
-            test_contract: None,
-            rich_accounts: self.rich_accounts.clone(),
-            custom_contracts: self.custom_contracts.clone(),
-            _phantom: PhantomData,
-        }
-    }
-}
-
-pub(crate) fn default_l1_batch(number: L1BatchNumber) -> L1BatchEnv {
-    let timestamp = unix_timestamp_ms();
-    L1BatchEnv {
-        previous_batch_hash: None,
-        number,
-        timestamp,
-        fee_input: BatchFeeInput::l1_pegged(
-            50_000_000_000, // 50 gwei
-            250_000_000,    // 0.25 gwei
-        ),
-        fee_account: Address::random(),
-        enforced_base_fee: None,
-        first_l2_block: L2BlockEnv {
-            number: 1,
-            timestamp,
-            prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)),
-            max_virtual_blocks_to_create: 100,
-        },
-    }
-}
-
-pub(crate) fn make_account_rich(storage: StoragePtr<InMemoryStorageView>, account: &Account) {
-    let key = storage_key_for_eth_balance(&account.address);
-    storage
-        .as_ref()
-        .borrow_mut()
-        .set_value(key, u256_to_h256(U256::from(10u64.pow(19))));
-}
-
-pub(crate) fn get_empty_storage() -> InMemoryStorage {
-    InMemoryStorage::with_system_contracts(hash_bytecode)
-}
-
-// Inserts the contracts into the test environment, bypassing the
-// deployer system contract. Besides the reference to storage
-// it accepts a `contracts` tuple of information about the contract
-// and whether or not it is an account.
-fn insert_contracts(raw_storage: &mut InMemoryStorage, contracts: &[ContractsToDeploy]) {
-    for (contract, address, is_account) in contracts {
-        let deployer_code_key = get_code_key(address);
-        raw_storage.set_value(deployer_code_key, hash_bytecode(contract));
-
-        if *is_account {
-            let is_account_key = get_is_account_key(address);
-            raw_storage.set_value(is_account_key, u256_to_h256(1_u32.into()));
-        }
-
-        raw_storage.store_factory_dep(hash_bytecode(contract), contract.clone());
-    }
-}
diff --git a/core/lib/multivm/src/versions/vm_latest/tests/tracing_execution_error.rs b/core/lib/multivm/src/versions/vm_latest/tests/tracing_execution_error.rs
index 2db37881352..a2cd6af6211 100644
--- a/core/lib/multivm/src/versions/vm_latest/tests/tracing_execution_error.rs
+++ b/core/lib/multivm/src/versions/vm_latest/tests/tracing_execution_error.rs
@@ -1,54 +1,9 @@
-use zksync_types::{Execute, H160};
-
 use crate::{
-    interface::{TxExecutionMode, TxRevertReason, VmRevertReason},
-    vm_latest::{
-        tests::{
-            tester::{ExpectedError, TransactionTestInfo, VmTesterBuilder},
-            utils::{get_execute_error_calldata, read_error_contract, BASE_SYSTEM_CONTRACTS},
-        },
-        HistoryEnabled,
-    },
+    versions::testonly::tracing_execution_error::test_tracing_of_execution_errors,
+    vm_latest::{HistoryEnabled, Vm},
 };
 
 #[test]
-fn test_tracing_of_execution_errors() {
-    let contract_address = H160::random();
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone())
-        .with_custom_contracts(vec![(read_error_contract(), contract_address, false)])
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_deployer()
-        .with_random_rich_accounts(1)
-        .build();
-
-    let account = &mut vm.rich_accounts[0];
-
-    let tx = account.get_l2_tx_for_execute(
-        Execute {
-            contract_address: Some(contract_address),
-            calldata: get_execute_error_calldata(),
-            value: Default::default(),
-            factory_deps: vec![],
-        },
-        None,
-    );
-
-    vm.execute_tx_and_verify(TransactionTestInfo::new_rejected(
-        tx,
-        ExpectedError {
-            revert_reason: TxRevertReason::TxReverted(VmRevertReason::General {
-                msg: "short".to_string(),
-                data: vec![
-                    8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 115, 104, 111, 114, 116,
-                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                    0,
-                ],
-            }),
-            modifier: None,
-        },
-    ));
+fn tracing_of_execution_errors() {
+    test_tracing_of_execution_errors::<Vm<_, HistoryEnabled>>();
 }
diff --git a/core/lib/multivm/src/versions/vm_latest/tests/transfer.rs b/core/lib/multivm/src/versions/vm_latest/tests/transfer.rs
index 2c380623636..f37ebe6a3fb 100644
--- a/core/lib/multivm/src/versions/vm_latest/tests/transfer.rs
+++ b/core/lib/multivm/src/versions/vm_latest/tests/transfer.rs
@@ -1,220 +1,16 @@
-use ethabi::Token;
-use zksync_contracts::{load_contract, read_bytecode};
-use zksync_system_constants::L2_BASE_TOKEN_ADDRESS;
-use zksync_types::{utils::storage_key_for_eth_balance, AccountTreeId, Address, Execute, U256};
-use zksync_utils::u256_to_h256;
-
 use crate::{
-    interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt},
-    vm_latest::{
-        tests::{
-            tester::{get_empty_storage, VmTesterBuilder},
-            utils::get_balance,
-
}, - HistoryEnabled, + versions::testonly::transfer::{ + test_reentrancy_protection_send_and_transfer, test_send_and_transfer, }, + vm_latest::{HistoryEnabled, Vm}, }; -enum TestOptions { - Send(U256), - Transfer(U256), -} - -fn test_send_or_transfer(test_option: TestOptions) { - let test_bytecode = read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json", - ); - let recipeint_bytecode = read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/Recipient.json", - ); - let test_abi = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json", - ); - - let test_contract_address = Address::random(); - let recipient_address = Address::random(); - - let (value, calldata) = match test_option { - TestOptions::Send(value) => ( - value, - test_abi - .function("send") - .unwrap() - .encode_input(&[Token::Address(recipient_address), Token::Uint(value)]) - .unwrap(), - ), - TestOptions::Transfer(value) => ( - value, - test_abi - .function("transfer") - .unwrap() - .encode_input(&[Token::Address(recipient_address), Token::Uint(value)]) - .unwrap(), - ), - }; - - let mut storage = get_empty_storage(); - storage.set_value( - storage_key_for_eth_balance(&test_contract_address), - u256_to_h256(value), - ); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_storage(storage) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_random_rich_accounts(1) - .with_custom_contracts(vec![ - (test_bytecode, test_contract_address, false), - (recipeint_bytecode, recipient_address, false), - ]) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Some(test_contract_address), - calldata, - value: U256::zero(), - factory_deps: vec![], - }, - None, - ); - - vm.vm.push_transaction(tx); - let tx_result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !tx_result.result.is_failed(), - "Transaction wasn't successful" - ); - - let batch_result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!batch_result.result.is_failed(), "Batch wasn't successful"); - - let new_recipient_balance = get_balance( - AccountTreeId::new(L2_BASE_TOKEN_ADDRESS), - &recipient_address, - vm.vm.state.storage.storage.get_ptr(), - ); - - assert_eq!(new_recipient_balance, value); -} - #[test] -fn test_send_and_transfer() { - test_send_or_transfer(TestOptions::Send(U256::zero())); - test_send_or_transfer(TestOptions::Send(U256::from(10).pow(18.into()))); - test_send_or_transfer(TestOptions::Transfer(U256::zero())); - test_send_or_transfer(TestOptions::Transfer(U256::from(10).pow(18.into()))); -} - -fn test_reentrancy_protection_send_or_transfer(test_option: TestOptions) { - let test_bytecode = read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json", - ); - let reentrant_recipeint_bytecode = read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/ReentrantRecipient.json", - ); - let test_abi = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json", - ); - let reentrant_recipient_abi = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/ReentrantRecipient.json", - ); - - let test_contract_address = Address::random(); - let reentrant_recipeint_address = Address::random(); - - let (value, calldata) = match test_option { - TestOptions::Send(value) => ( - 
value,
-            test_abi
-                .function("send")
-                .unwrap()
-                .encode_input(&[
-                    Token::Address(reentrant_recipeint_address),
-                    Token::Uint(value),
-                ])
-                .unwrap(),
-        ),
-        TestOptions::Transfer(value) => (
-            value,
-            test_abi
-                .function("transfer")
-                .unwrap()
-                .encode_input(&[
-                    Token::Address(reentrant_recipeint_address),
-                    Token::Uint(value),
-                ])
-                .unwrap(),
-        ),
-    };
-
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_deployer()
-        .with_random_rich_accounts(1)
-        .with_custom_contracts(vec![
-            (test_bytecode, test_contract_address, false),
-            (
-                reentrant_recipeint_bytecode,
-                reentrant_recipeint_address,
-                false,
-            ),
-        ])
-        .build();
-
-    // First transaction, the job of which is to warm up the slots for balance of the recipient as well as its storage variable.
-    let account = &mut vm.rich_accounts[0];
-    let tx1 = account.get_l2_tx_for_execute(
-        Execute {
-            contract_address: Some(reentrant_recipeint_address),
-            calldata: reentrant_recipient_abi
-                .function("setX")
-                .unwrap()
-                .encode_input(&[])
-                .unwrap(),
-            value: U256::from(1),
-            factory_deps: vec![],
-        },
-        None,
-    );
-
-    vm.vm.push_transaction(tx1);
-    let tx1_result = vm.vm.execute(VmExecutionMode::OneTx);
-    assert!(
-        !tx1_result.result.is_failed(),
-        "Transaction 1 wasn't successful"
-    );
-
-    let tx2 = account.get_l2_tx_for_execute(
-        Execute {
-            contract_address: Some(test_contract_address),
-            calldata,
-            value,
-            factory_deps: vec![],
-        },
-        None,
-    );
-
-    vm.vm.push_transaction(tx2);
-    let tx2_result = vm.vm.execute(VmExecutionMode::OneTx);
-    assert!(
-        tx2_result.result.is_failed(),
-        "Transaction 2 should have failed, but it succeeded"
-    );
-
-    let batch_result = vm.vm.execute(VmExecutionMode::Batch);
-    assert!(!batch_result.result.is_failed(), "Batch wasn't successful");
+fn send_and_transfer() {
+    test_send_and_transfer::<Vm<_, HistoryEnabled>>();
 }
 
 #[test]
-fn test_reentrancy_protection_send_and_transfer() {
-    test_reentrancy_protection_send_or_transfer(TestOptions::Send(U256::zero()));
-    test_reentrancy_protection_send_or_transfer(TestOptions::Send(U256::from(10).pow(18.into())));
-    test_reentrancy_protection_send_or_transfer(TestOptions::Transfer(U256::zero()));
-    test_reentrancy_protection_send_or_transfer(TestOptions::Transfer(
-        U256::from(10).pow(18.into()),
-    ));
+fn reentrancy_protection_send_and_transfer() {
+    test_reentrancy_protection_send_and_transfer::<Vm<_, HistoryEnabled>>();
 }
diff --git a/core/lib/multivm/src/versions/vm_latest/tests/upgrade.rs b/core/lib/multivm/src/versions/vm_latest/tests/upgrade.rs
index d85a504de40..9889e26e4d2 100644
--- a/core/lib/multivm/src/versions/vm_latest/tests/upgrade.rs
+++ b/core/lib/multivm/src/versions/vm_latest/tests/upgrade.rs
@@ -1,354 +1,21 @@
-use zk_evm_1_5_0::aux_structures::Timestamp;
-use zksync_contracts::{deployer_contract, load_sys_contract, read_bytecode};
-use zksync_test_account::TxType;
-use zksync_types::{
-    ethabi::{Contract, Token},
-    get_code_key, get_known_code_key,
-    protocol_upgrade::ProtocolUpgradeTxCommonData,
-    Address, Execute, ExecuteTransactionCommon, Transaction, COMPLEX_UPGRADER_ADDRESS,
-    CONTRACT_DEPLOYER_ADDRESS, CONTRACT_FORCE_DEPLOYER_ADDRESS, H160, H256,
-    REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, U256,
-};
-use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256};
-
-use super::utils::{get_complex_upgrade_abi, read_test_contract};
 use crate::{
-    interface::{
-        storage::WriteStorage, ExecutionResult, Halt, TxExecutionMode, VmExecutionMode,
-
VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, - }, - vm_latest::{ - tests::{ - tester::VmTesterBuilder, - utils::{read_complex_upgrade, verify_required_storage}, - }, - HistoryEnabled, + versions::testonly::upgrade::{ + test_complex_upgrader, test_force_deploy_upgrade, test_protocol_upgrade_is_first, }, + vm_latest::{HistoryEnabled, Vm}, }; -/// In this test we ensure that the requirements for protocol upgrade transactions are enforced by the bootloader: -/// - This transaction must be the only one in block -/// - If present, this transaction must be the first one in block #[test] -fn test_protocol_upgrade_is_first() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let bytecode_hash = hash_bytecode(&read_test_contract()); - vm.vm - .storage - .borrow_mut() - .set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); - - // Here we just use some random transaction of protocol upgrade type: - let protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecode hash to - address: H160::random(), - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - // Another random upgrade transaction - let another_protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecode hash to - address: H160::random(), - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - let normal_l1_transaction = vm.rich_accounts[0] - .get_deploy_tx(&read_test_contract(), None, TxType::L1 { serial_id: 0 }) - .tx; - - let expected_error = - Halt::UnexpectedVMBehavior("Assertion error: Protocol upgrade tx not first".to_string()); - - vm.vm.make_snapshot(); - // Test 1: there must be only one system transaction in block - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(another_protocol_upgrade_transaction); - - vm.vm.execute(VmExecutionMode::OneTx); - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error.clone() - } - ); - - // Test 2: the protocol upgrade tx must be the first one in block - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error - } - ); - - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(protocol_upgrade_transaction); - vm.vm.push_transaction(normal_l1_transaction); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); +fn protocol_upgrade_is_first() { + 
test_protocol_upgrade_is_first::>(); } -/// In this test we try to test how force deployments could be done via protocol upgrade transactions. #[test] -fn test_force_deploy_upgrade() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - let bytecode_hash = hash_bytecode(&read_test_contract()); - - let known_code_key = get_known_code_key(&bytecode_hash); - // It is generally expected that all the keys will be set as known prior to the protocol upgrade. - storage_view - .borrow_mut() - .set_value(known_code_key, u256_to_h256(1.into())); - drop(storage_view); - - let address_to_deploy = H160::random(); - // Here we just use some random transaction of protocol upgrade type: - let transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecode hash to - address: address_to_deploy, - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - vm.vm.push_transaction(transaction); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = vec![(bytecode_hash, get_code_key(&address_to_deploy))]; - - // Verify that the bytecode has been set correctly - verify_required_storage(&vm.vm.state, expected_slots); +fn force_deploy_upgrade() { + test_force_deploy_upgrade::>(); } -/// Here we show how the work with the complex upgrader could be done #[test] -fn test_complex_upgrader() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - - let bytecode_hash = hash_bytecode(&read_complex_upgrade()); - let msg_sender_test_hash = hash_bytecode(&read_msg_sender_test()); - - // Let's assume that the bytecode for the implementation of the complex upgrade - // is already deployed in some address in user space - let upgrade_impl = H160::random(); - let account_code_key = get_code_key(&upgrade_impl); - - storage_view - .borrow_mut() - .set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); - storage_view.borrow_mut().set_value( - get_known_code_key(&msg_sender_test_hash), - u256_to_h256(1.into()), - ); - storage_view - .borrow_mut() - .set_value(account_code_key, bytecode_hash); - drop(storage_view); - - vm.vm.state.decommittment_processor.populate( - vec![ - ( - h256_to_u256(bytecode_hash), - bytes_to_be_words(read_complex_upgrade()), - ), - ( - h256_to_u256(msg_sender_test_hash), - bytes_to_be_words(read_msg_sender_test()), - ), - ], - Timestamp(0), - ); - - let address_to_deploy1 = H160::random(); - let address_to_deploy2 = H160::random(); - - let transaction = get_complex_upgrade_tx( - upgrade_impl, - address_to_deploy1, - address_to_deploy2, - bytecode_hash, - ); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = vec![ - (bytecode_hash, get_code_key(&address_to_deploy1)), - (bytecode_hash, get_code_key(&address_to_deploy2)), - ]; - - 
// Verify that the bytecode has been set correctly - verify_required_storage(&vm.vm.state, expected_slots); -} - -#[derive(Debug, Clone)] -struct ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash: H256, - // The address on which to deploy the bytecode hash to - address: Address, - // Whether to run the constructor on the force deployment - call_constructor: bool, - // The value with which to initialize a contract - value: U256, - // The constructor calldata - input: Vec, -} - -fn get_forced_deploy_tx(deployment: &[ForceDeployment]) -> Transaction { - let deployer = deployer_contract(); - let contract_function = deployer.function("forceDeployOnAddresses").unwrap(); - - let encoded_deployments: Vec<_> = deployment - .iter() - .map(|deployment| { - Token::Tuple(vec![ - Token::FixedBytes(deployment.bytecode_hash.as_bytes().to_vec()), - Token::Address(deployment.address), - Token::Bool(deployment.call_constructor), - Token::Uint(deployment.value), - Token::Bytes(deployment.input.clone()), - ]) - }) - .collect(); - - let params = [Token::Array(encoded_deployments)]; - - let calldata = contract_function - .encode_input(¶ms) - .expect("failed to encode parameters"); - - let execute = Execute { - contract_address: Some(CONTRACT_DEPLOYER_ADDRESS), - calldata, - factory_deps: vec![], - value: U256::zero(), - }; - - Transaction { - common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, - gas_limit: U256::from(200_000_000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -// Returns the transaction that performs a complex protocol upgrade. -// The first param is the address of the implementation of the complex upgrade -// in user-space, while the next 3 params are params of the implementation itself -// For the explanation for the parameters, please refer to: -// etc/contracts-test-data/complex-upgrade/complex-upgrade.sol -fn get_complex_upgrade_tx( - implementation_address: Address, - address1: Address, - address2: Address, - bytecode_hash: H256, -) -> Transaction { - let impl_contract = get_complex_upgrade_abi(); - let impl_function = impl_contract.function("someComplexUpgrade").unwrap(); - let impl_calldata = impl_function - .encode_input(&[ - Token::Address(address1), - Token::Address(address2), - Token::FixedBytes(bytecode_hash.as_bytes().to_vec()), - ]) - .unwrap(); - - let complex_upgrader = get_complex_upgrader_abi(); - let upgrade_function = complex_upgrader.function("upgrade").unwrap(); - let complex_upgrader_calldata = upgrade_function - .encode_input(&[ - Token::Address(implementation_address), - Token::Bytes(impl_calldata), - ]) - .unwrap(); - - let execute = Execute { - contract_address: Some(COMPLEX_UPGRADER_ADDRESS), - calldata: complex_upgrader_calldata, - factory_deps: vec![], - value: U256::zero(), - }; - - Transaction { - common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, - gas_limit: U256::from(200_000_000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -fn read_msg_sender_test() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/msg-sender.sol/MsgSenderTest.json") -} - -fn get_complex_upgrader_abi() -> Contract { - 
load_sys_contract("ComplexUpgrader") +fn complex_upgrader() { + test_complex_upgrader::>(); } diff --git a/core/lib/multivm/src/versions/vm_latest/tests/utils.rs b/core/lib/multivm/src/versions/vm_latest/tests/utils.rs deleted file mode 100644 index 9c9d4817588..00000000000 --- a/core/lib/multivm/src/versions/vm_latest/tests/utils.rs +++ /dev/null @@ -1,150 +0,0 @@ -use ethabi::Contract; -use once_cell::sync::Lazy; -use zksync_contracts::{ - load_contract, read_bytecode, read_zbin_bytecode, BaseSystemContracts, SystemContractCode, -}; -use zksync_types::{ - utils::storage_key_for_standard_token_balance, AccountTreeId, Address, StorageKey, H256, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use crate::{ - interface::storage::{StoragePtr, WriteStorage}, - vm_latest::{tests::tester::InMemoryStorageView, types::internals::ZkSyncVmState, HistoryMode}, -}; - -pub(crate) static BASE_SYSTEM_CONTRACTS: Lazy = - Lazy::new(BaseSystemContracts::load_from_disk); - -// Probably make it a part of vm tester -pub(crate) fn verify_required_storage( - state: &ZkSyncVmState, - required_values: Vec<(H256, StorageKey)>, -) { - for (required_value, key) in required_values { - let current_value = state.storage.storage.read_from_storage(&key); - - assert_eq!( - u256_to_h256(current_value), - required_value, - "Invalid value at key {key:?}" - ); - } -} - -pub(crate) fn verify_required_memory( - state: &ZkSyncVmState, - required_values: Vec<(U256, u32, u32)>, -) { - for (required_value, memory_page, cell) in required_values { - let current_value = state - .memory - .read_slot(memory_page as usize, cell as usize) - .value; - assert_eq!(current_value, required_value); - } -} - -pub(crate) fn get_balance( - token_id: AccountTreeId, - account: &Address, - main_storage: StoragePtr, -) -> U256 { - let key = storage_key_for_standard_token_balance(token_id, account); - h256_to_u256(main_storage.borrow_mut().read_value(&key)) -} - -pub(crate) fn read_test_contract() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json") -} - -pub(crate) fn get_bootloader(test: &str) -> SystemContractCode { - let bootloader_code = read_zbin_bytecode(format!( - "contracts/system-contracts/bootloader/tests/artifacts/{}.yul.zbin", - test - )); - - let bootloader_hash = hash_bytecode(&bootloader_code); - SystemContractCode { - code: bytes_to_be_words(bootloader_code), - hash: bootloader_hash, - } -} - -pub(crate) fn read_nonce_holder_tester() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/custom-account/nonce-holder-test.sol/NonceHolderTest.json") -} - -// pub(crate) fn read_message_root() -> Vec { -// read_bytecode( -// "contracts/l1-contracts/artifacts-zk/contracts/bridgehub/MessageRoot.sol/MessageRoot.json", -// ) -// } - -pub(crate) fn read_error_contract() -> Vec { - read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ) -} - -pub(crate) fn read_simple_transfer_contract() -> Vec { - read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/simple-transfer/simple-transfer.sol/SimpleTransfer.json", - ) -} - -pub(crate) fn get_execute_error_calldata() -> Vec { - let test_contract = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ); - - let function = test_contract.function("require_short").unwrap(); - - function - .encode_input(&[]) - .expect("failed to encode parameters") -} - -pub(crate) fn 
read_many_owners_custom_account_contract() -> (Vec, Contract) { - let path = "etc/contracts-test-data/artifacts-zk/contracts/custom-account/many-owners-custom-account.sol/ManyOwnersCustomAccount.json"; - (read_bytecode(path), load_contract(path)) -} - -pub(crate) fn read_max_depth_contract() -> Vec { - read_zbin_bytecode( - "core/tests/ts-integration/contracts/zkasm/artifacts/deep_stak.zkasm/deep_stak.zkasm.zbin", - ) -} - -pub(crate) fn read_precompiles_contract() -> Vec { - read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/precompiles/precompiles.sol/Precompiles.json", - ) -} - -pub(crate) fn load_precompiles_contract() -> Contract { - load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/precompiles/precompiles.sol/Precompiles.json", - ) -} - -pub(crate) fn read_complex_upgrade() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json") -} - -pub(crate) fn get_complex_upgrade_abi() -> Contract { - load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json" - ) -} - -pub(crate) fn read_expensive_contract() -> (Vec, Contract) { - const PATH: &str = - "etc/contracts-test-data/artifacts-zk/contracts/expensive/expensive.sol/Expensive.json"; - (read_bytecode(PATH), load_contract(PATH)) -} - -pub(crate) fn read_proxy_counter_contract() -> (Vec, Contract) { - const PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/counter/proxy_counter.sol/ProxyCounter.json"; - (read_bytecode(PATH), load_contract(PATH)) -} diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/default_tracers.rs b/core/lib/multivm/src/versions/vm_latest/tracers/default_tracers.rs index 6a908c2a73e..2ae5e81a328 100755 --- a/core/lib/multivm/src/versions/vm_latest/tracers/default_tracers.rs +++ b/core/lib/multivm/src/versions/vm_latest/tracers/default_tracers.rs @@ -13,7 +13,7 @@ use zk_evm_1_5_0::{ zkevm_opcode_defs::{decoding::EncodingModeProduction, Opcode, RetOpcode}, }; -use super::PubdataTracer; +use super::{EvmDeployTracer, PubdataTracer}; use crate::{ glue::GlueInto, interface::{ @@ -38,7 +38,7 @@ use crate::{ }; /// Default tracer for the VM. It manages the other tracers execution and stop the vm when needed. -pub(crate) struct DefaultExecutionTracer { +pub struct DefaultExecutionTracer { tx_has_been_processed: bool, execution_mode: VmExecutionMode, @@ -63,14 +63,18 @@ pub(crate) struct DefaultExecutionTracer { // It only takes into account circuits that are generated for actual execution. It doesn't // take into account e.g circuits produced by the initial bootloader memory commitment. pub(crate) circuits_tracer: CircuitsTracer, + // This tracer is responsible for handling EVM deployments and providing the data to the code decommitter. + pub(crate) evm_deploy_tracer: Option>, subversion: MultiVMSubversion, storage: StoragePtr, _phantom: PhantomData, } impl DefaultExecutionTracer { + #[allow(clippy::too_many_arguments)] pub(crate) fn new( computational_gas_limit: u32, + use_evm_emulator: bool, execution_mode: VmExecutionMode, dispatcher: TracerDispatcher, storage: StoragePtr, @@ -92,6 +96,7 @@ impl DefaultExecutionTracer { pubdata_tracer, ret_from_the_bootloader: None, circuits_tracer: CircuitsTracer::new(), + evm_deploy_tracer: use_evm_emulator.then(EvmDeployTracer::new), storage, _phantom: PhantomData, } @@ -172,6 +177,9 @@ macro_rules! 
dispatch_tracers { tracer.$function($( $params ),*); } $self.circuits_tracer.$function($( $params ),*); + if let Some(tracer) = &mut $self.evm_deploy_tracer { + tracer.$function($( $params ),*); + } }; } @@ -289,6 +297,12 @@ impl DefaultExecutionTracer { .finish_cycle(state, bootloader_state) .stricter(&result); + if let Some(evm_deploy_tracer) = &mut self.evm_deploy_tracer { + result = evm_deploy_tracer + .finish_cycle(state, bootloader_state) + .stricter(&result); + } + result.stricter(&self.should_stop_execution()) } diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/evm_deploy_tracer.rs b/core/lib/multivm/src/versions/vm_latest/tracers/evm_deploy_tracer.rs new file mode 100644 index 00000000000..becc4f22527 --- /dev/null +++ b/core/lib/multivm/src/versions/vm_latest/tracers/evm_deploy_tracer.rs @@ -0,0 +1,103 @@ +use std::{marker::PhantomData, mem}; + +use zk_evm_1_5_0::{ + aux_structures::Timestamp, + tracing::{AfterExecutionData, VmLocalStateData}, + zkevm_opcode_defs::{ + FarCallOpcode, FatPointer, Opcode, CALL_IMPLICIT_CALLDATA_FAT_PTR_REGISTER, + }, +}; +use zksync_types::{CONTRACT_DEPLOYER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS}; +use zksync_utils::{bytecode::hash_evm_bytecode, bytes_to_be_words, h256_to_u256}; +use zksync_vm_interface::storage::StoragePtr; + +use super::{traits::VmTracer, utils::read_pointer}; +use crate::{ + interface::{storage::WriteStorage, tracer::TracerExecutionStatus}, + tracers::dynamic::vm_1_5_0::DynTracer, + vm_latest::{BootloaderState, HistoryMode, SimpleMemory, ZkSyncVmState}, +}; + +/// Tracer responsible for collecting information about EVM deploys and providing those +/// to the code decommitter. +#[derive(Debug)] +pub(crate) struct EvmDeployTracer { + tracked_signature: [u8; 4], + pending_bytecodes: Vec>, + _phantom: PhantomData, +} + +impl EvmDeployTracer { + pub(crate) fn new() -> Self { + let tracked_signature = + ethabi::short_signature("publishEVMBytecode", &[ethabi::ParamType::Bytes]); + + Self { + tracked_signature, + pending_bytecodes: vec![], + _phantom: PhantomData, + } + } +} + +impl DynTracer> for EvmDeployTracer { + fn after_execution( + &mut self, + state: VmLocalStateData<'_>, + data: AfterExecutionData, + memory: &SimpleMemory, + _storage: StoragePtr, + ) { + if !matches!( + data.opcode.variant.opcode, + Opcode::FarCall(FarCallOpcode::Normal) + ) { + return; + }; + + let current = state.vm_local_state.callstack.current; + let from = current.msg_sender; + let to = current.this_address; + if from != CONTRACT_DEPLOYER_ADDRESS || to != KNOWN_CODES_STORAGE_ADDRESS { + return; + } + + let calldata_ptr = + state.vm_local_state.registers[usize::from(CALL_IMPLICIT_CALLDATA_FAT_PTR_REGISTER)]; + let data = read_pointer(memory, FatPointer::from_u256(calldata_ptr.value)); + if data.len() < 4 { + return; + } + let (signature, data) = data.split_at(4); + if signature != self.tracked_signature { + return; + } + + match ethabi::decode(&[ethabi::ParamType::Bytes], data) { + Ok(decoded) => { + let published_bytecode = decoded.into_iter().next().unwrap().into_bytes().unwrap(); + self.pending_bytecodes.push(published_bytecode); + } + Err(err) => tracing::error!("Unable to decode `publishEVMBytecode` call: {err}"), + } + } +} + +impl VmTracer for EvmDeployTracer { + fn finish_cycle( + &mut self, + state: &mut ZkSyncVmState, + _bootloader_state: &mut BootloaderState, + ) -> TracerExecutionStatus { + for published_bytecode in mem::take(&mut self.pending_bytecodes) { + let hash = hash_evm_bytecode(&published_bytecode); + let as_words = 
bytes_to_be_words(published_bytecode); + + state.decommittment_processor.populate( + vec![(h256_to_u256(hash), as_words)], + Timestamp(state.local_state.timestamp), + ); + } + TracerExecutionStatus::Continue + } +} diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/mod.rs b/core/lib/multivm/src/versions/vm_latest/tracers/mod.rs index fe916e19e8c..82721a32264 100755 --- a/core/lib/multivm/src/versions/vm_latest/tracers/mod.rs +++ b/core/lib/multivm/src/versions/vm_latest/tracers/mod.rs @@ -1,11 +1,13 @@ pub(crate) use circuits_tracer::CircuitsTracer; pub(crate) use default_tracers::DefaultExecutionTracer; +pub(crate) use evm_deploy_tracer::EvmDeployTracer; pub(crate) use pubdata_tracer::PubdataTracer; pub(crate) use refunds::RefundsTracer; pub(crate) use result_tracer::ResultTracer; pub(crate) mod circuits_tracer; pub(crate) mod default_tracers; +pub(crate) mod evm_deploy_tracer; pub(crate) mod pubdata_tracer; pub(crate) mod refunds; pub(crate) mod result_tracer; diff --git a/core/lib/multivm/src/versions/vm_latest/tracers/pubdata_tracer.rs b/core/lib/multivm/src/versions/vm_latest/tracers/pubdata_tracer.rs index 9e620f96af2..998e8a13ad2 100644 --- a/core/lib/multivm/src/versions/vm_latest/tracers/pubdata_tracer.rs +++ b/core/lib/multivm/src/versions/vm_latest/tracers/pubdata_tracer.rs @@ -1,18 +1,17 @@ -use std::marker::PhantomData; +use std::{marker::PhantomData, rc::Rc}; use circuit_sequencer_api_1_5_0::sort_storage_access::sort_storage_access_queries; use zk_evm_1_5_0::{ aux_structures::Timestamp, tracing::{BeforeExecutionData, VmLocalStateData}, }; -use zksync_types::{ - l2_to_l1_log::l2_to_l1_logs_tree_size, writes::StateDiffRecord, AccountTreeId, - ProtocolVersionId, StorageKey, L1_MESSENGER_ADDRESS, -}; +use zksync_types::{writes::StateDiffRecord, AccountTreeId, StorageKey, L1_MESSENGER_ADDRESS}; use zksync_utils::{h256_to_u256, u256_to_bytes_be, u256_to_h256}; +use zksync_vm_interface::pubdata::PubdataBuilder; use crate::{ interface::{ + pubdata::{L1MessengerL2ToL1Log, PubdataInput}, storage::{StoragePtr, WriteStorage}, tracer::{TracerExecutionStatus, TracerExecutionStopReason}, L1BatchEnv, VmEvent, VmExecutionMode, @@ -20,14 +19,14 @@ use crate::{ tracers::dynamic::vm_1_5_0::DynTracer, utils::events::{ extract_bytecode_publication_requests_from_l1_messenger, - extract_l2tol1logs_from_l1_messenger, L1MessengerL2ToL1Log, + extract_l2tol1logs_from_l1_messenger, }, vm_latest::{ bootloader_state::{utils::apply_pubdata_to_memory, BootloaderState}, constants::BOOTLOADER_HEAP_PAGE, old_vm::{history_recorder::HistoryMode, memory::SimpleMemory}, tracers::{traits::VmTracer, utils::VmHook}, - types::internals::{PubdataInput, ZkSyncVmState}, + types::internals::ZkSyncVmState, utils::logs::collect_events_and_l1_system_logs_after_timestamp, vm::MultiVMSubversion, StorageOracle, @@ -44,7 +43,7 @@ pub(crate) struct PubdataTracer { // to the L1Messenger. 
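+    // `pubdata_builder` (added below) takes over from the removed `protocol_version` field:
+    // instead of looking up the L2-to-L1 logs tree size per protocol version, the tracer now
+    // delegates pubdata encoding to the builder, which is only required when finishing the batch.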
enforced_state_diffs: Option>, subversion: MultiVMSubversion, - protocol_version: ProtocolVersionId, + pubdata_builder: Option>, _phantom_data: PhantomData, } @@ -53,7 +52,7 @@ impl PubdataTracer { l1_batch_env: L1BatchEnv, execution_mode: VmExecutionMode, subversion: MultiVMSubversion, - protocol_version: ProtocolVersionId, + pubdata_builder: Option>, ) -> Self { Self { l1_batch_env, @@ -61,7 +60,7 @@ impl PubdataTracer { execution_mode, enforced_state_diffs: None, subversion, - protocol_version, + pubdata_builder, _phantom_data: Default::default(), } } @@ -74,7 +73,7 @@ impl PubdataTracer { execution_mode: VmExecutionMode, forced_state_diffs: Vec, subversion: MultiVMSubversion, - protocol_version: ProtocolVersionId, + pubdata_builder: Option>, ) -> Self { Self { l1_batch_env, @@ -82,7 +81,7 @@ impl PubdataTracer { execution_mode, enforced_state_diffs: Some(forced_state_diffs), subversion, - protocol_version, + pubdata_builder, _phantom_data: Default::default(), } } @@ -192,7 +191,6 @@ impl PubdataTracer { l2_to_l1_messages: self.get_total_l1_messenger_messages(state), published_bytecodes: self.get_total_published_bytecodes(state), state_diffs: self.get_state_diffs(&state.storage), - l2_to_l1_logs_tree_size: l2_to_l1_logs_tree_size(self.protocol_version), } } } @@ -230,18 +228,22 @@ impl VmTracer for PubdataTracer { if self.pubdata_info_requested { let pubdata_input = self.build_pubdata_input(state); - // Save the pubdata for the future initial bootloader memory building - bootloader_state.set_pubdata_input(pubdata_input.clone()); - // Apply the pubdata to the current memory let mut memory_to_apply = vec![]; apply_pubdata_to_memory( &mut memory_to_apply, - pubdata_input, - bootloader_state.get_pubdata_params(), - bootloader_state.get_vm_subversion(), + self.pubdata_builder + .as_ref() + .expect("`pubdata_builder` is required to finish batch") + .as_ref(), + &pubdata_input, + bootloader_state.protocol_version(), ); + + // Save the pubdata for the future initial bootloader memory building + bootloader_state.set_pubdata_input(pubdata_input); + state.memory.populate_page( BOOTLOADER_HEAP_PAGE as usize, memory_to_apply, diff --git a/core/lib/multivm/src/versions/vm_latest/types/internals/mod.rs b/core/lib/multivm/src/versions/vm_latest/types/internals/mod.rs index 0b9f704e8db..601b7b8bd01 100644 --- a/core/lib/multivm/src/versions/vm_latest/types/internals/mod.rs +++ b/core/lib/multivm/src/versions/vm_latest/types/internals/mod.rs @@ -1,9 +1,7 @@ -pub(crate) use pubdata::PubdataInput; pub(crate) use snapshot::VmSnapshot; pub(crate) use transaction_data::TransactionData; pub(crate) use vm_state::new_vm_state; pub use vm_state::ZkSyncVmState; -pub(crate) mod pubdata; mod snapshot; mod transaction_data; mod vm_state; diff --git a/core/lib/multivm/src/versions/vm_latest/types/internals/pubdata.rs b/core/lib/multivm/src/versions/vm_latest/types/internals/pubdata.rs deleted file mode 100644 index c0684624bd8..00000000000 --- a/core/lib/multivm/src/versions/vm_latest/types/internals/pubdata.rs +++ /dev/null @@ -1,339 +0,0 @@ -use ethabi::Token; -use zksync_contracts::load_sys_contract_interface; -use zksync_mini_merkle_tree::MiniMerkleTree; -use zksync_types::{ - ethabi, - web3::keccak256, - writes::{compress_state_diffs, StateDiffRecord}, -}; -use zksync_utils::bytecode::hash_bytecode; - -use crate::utils::events::L1MessengerL2ToL1Log; -/// Struct based on which the pubdata blob is formed -#[derive(Debug, Clone, Default)] -pub(crate) struct PubdataInput { - pub(crate) user_logs: Vec, - pub(crate) 
l2_to_l1_messages: Vec>, - pub(crate) published_bytecodes: Vec>, - pub(crate) state_diffs: Vec, - pub(crate) l2_to_l1_logs_tree_size: usize, -} - -impl PubdataInput { - pub(crate) fn build_pubdata_legacy(self, with_uncompressed_state_diffs: bool) -> Vec { - let mut l1_messenger_pubdata = vec![]; - - let PubdataInput { - user_logs, - l2_to_l1_messages, - published_bytecodes, - state_diffs, - .. - } = self; - - // Encoding user L2->L1 logs. - // Format: `[(numberOfL2ToL1Logs as u32) || l2tol1logs[1] || ... || l2tol1logs[n]]` - l1_messenger_pubdata.extend((user_logs.len() as u32).to_be_bytes()); - for l2tol1log in user_logs { - l1_messenger_pubdata.extend(l2tol1log.packed_encoding()); - } - - // Encoding L2->L1 messages - // Format: `[(numberOfMessages as u32) || (messages[1].len() as u32) || messages[1] || ... || (messages[n].len() as u32) || messages[n]]` - l1_messenger_pubdata.extend((l2_to_l1_messages.len() as u32).to_be_bytes()); - for message in l2_to_l1_messages { - l1_messenger_pubdata.extend((message.len() as u32).to_be_bytes()); - l1_messenger_pubdata.extend(message); - } - // Encoding bytecodes - // Format: `[(numberOfBytecodes as u32) || (bytecodes[1].len() as u32) || bytecodes[1] || ... || (bytecodes[n].len() as u32) || bytecodes[n]]` - l1_messenger_pubdata.extend((published_bytecodes.len() as u32).to_be_bytes()); - for bytecode in published_bytecodes { - l1_messenger_pubdata.extend((bytecode.len() as u32).to_be_bytes()); - l1_messenger_pubdata.extend(bytecode); - } - // Encoding state diffs - // Format: `[size of compressed state diffs u32 || compressed state diffs || (# state diffs: intial + repeated) as u32 || sorted state diffs by ]` - let state_diffs_compressed = compress_state_diffs(state_diffs.clone()); - l1_messenger_pubdata.extend(state_diffs_compressed); - - if with_uncompressed_state_diffs { - l1_messenger_pubdata.extend((state_diffs.len() as u32).to_be_bytes()); - for state_diff in state_diffs { - l1_messenger_pubdata.extend(state_diff.encode_padded()); - } - } - - l1_messenger_pubdata - } -} - -pub trait PubdataBuilder { - // when `l2_version` is true it will return the data to be sent to the L1_MESSENGER - // otherwise it returns the array of bytes to be sent to L1 inside the operator input. - fn build_pubdata(&self, input: PubdataInput, l2_version: bool) -> Vec; -} - -pub struct RollupPubdataBuilder { - // l2_handler_address: Address, -} - -impl RollupPubdataBuilder { - pub fn new() -> Self { - Self { - // l2_handler_address: l2_handler_address, - } - } -} - -fn encode_user_logs(user_logs: Vec) -> Vec { - // Encoding user L2->L1 logs. - // Format: `[(numberOfL2ToL1Logs as u32) || l2tol1logs[1] || ... 
|| l2tol1logs[n]]` - let mut result = vec![]; - result.extend((user_logs.len() as u32).to_be_bytes()); - for l2tol1log in user_logs { - result.extend(l2tol1log.packed_encoding()); - } - result -} - -impl PubdataBuilder for RollupPubdataBuilder { - fn build_pubdata(&self, input: PubdataInput, l2_version: bool) -> Vec { - let mut l1_messenger_pubdata = vec![]; - let mut l2_da_header = vec![]; - - let PubdataInput { - user_logs, - l2_to_l1_messages, - published_bytecodes, - state_diffs, - l2_to_l1_logs_tree_size, - } = input; - - if l2_version { - let chained_log_hash = build_chained_log_hash(user_logs.clone()); - let log_root_hash = build_logs_root(user_logs.clone(), l2_to_l1_logs_tree_size); - let chained_msg_hash = build_chained_message_hash(l2_to_l1_messages.clone()); - let chained_bytecodes_hash = build_chained_bytecode_hash(published_bytecodes.clone()); - - l2_da_header.push(Token::FixedBytes(chained_log_hash)); - l2_da_header.push(Token::FixedBytes(log_root_hash)); - l2_da_header.push(Token::FixedBytes(chained_msg_hash)); - l2_da_header.push(Token::FixedBytes(chained_bytecodes_hash)); - } - - l1_messenger_pubdata.extend(encode_user_logs(user_logs)); - - // Encoding L2->L1 messages - // Format: `[(numberOfMessages as u32) || (messages[1].len() as u32) || messages[1] || ... || (messages[n].len() as u32) || messages[n]]` - l1_messenger_pubdata.extend((l2_to_l1_messages.len() as u32).to_be_bytes()); - for message in l2_to_l1_messages { - l1_messenger_pubdata.extend((message.len() as u32).to_be_bytes()); - l1_messenger_pubdata.extend(message); - } - - // Encoding bytecodes - // Format: `[(numberOfBytecodes as u32) || (bytecodes[1].len() as u32) || bytecodes[1] || ... || (bytecodes[n].len() as u32) || bytecodes[n]]` - l1_messenger_pubdata.extend((published_bytecodes.len() as u32).to_be_bytes()); - for bytecode in published_bytecodes { - l1_messenger_pubdata.extend((bytecode.len() as u32).to_be_bytes()); - l1_messenger_pubdata.extend(bytecode); - } - - // Encoding state diffs - // Format: `[size of compressed state diffs u32 || compressed state diffs || (# state diffs: intial + repeated) as u32 || sorted state diffs by ]` - let state_diffs_compressed = compress_state_diffs(state_diffs.clone()); - l1_messenger_pubdata.extend(state_diffs_compressed); - - if l2_version { - l1_messenger_pubdata.extend((state_diffs.len() as u32).to_be_bytes()); - for state_diff in state_diffs { - l1_messenger_pubdata.extend(state_diff.encode_padded()); - } - - let func_selector = load_sys_contract_interface("IL2DAValidator") - .function("validatePubdata") - .expect("validatePubdata Function does not exist on IL2DAValidator") - .short_signature() - .to_vec(); - - l2_da_header.push(ethabi::Token::Bytes(l1_messenger_pubdata)); - - l1_messenger_pubdata = [func_selector, ethabi::encode(&l2_da_header)] - .concat() - .to_vec(); - } - - l1_messenger_pubdata - } -} - -pub struct ValidiumPubdataBuilder {} - -impl ValidiumPubdataBuilder { - pub fn new() -> Self { - Self {} - } -} - -impl PubdataBuilder for ValidiumPubdataBuilder { - fn build_pubdata(&self, input: PubdataInput, l2_version: bool) -> Vec { - let mut l1_messenger_pubdata = vec![]; - let mut l2_da_header = vec![]; - - let PubdataInput { - user_logs, - l2_to_l1_messages, - published_bytecodes, - state_diffs, - l2_to_l1_logs_tree_size, - } = input; - - if l2_version { - let chained_log_hash = build_chained_log_hash(user_logs.clone()); - let log_root_hash = build_logs_root(user_logs.clone(), l2_to_l1_logs_tree_size); - let chained_msg_hash = 
build_chained_message_hash(l2_to_l1_messages.clone()); - let chained_bytecodes_hash = build_chained_bytecode_hash(published_bytecodes.clone()); - - l2_da_header.push(Token::FixedBytes(chained_log_hash)); - l2_da_header.push(Token::FixedBytes(log_root_hash)); - l2_da_header.push(Token::FixedBytes(chained_msg_hash)); - l2_da_header.push(Token::FixedBytes(chained_bytecodes_hash)); - } - - l1_messenger_pubdata.extend(encode_user_logs(user_logs)); - - if l2_version { - let func_selector = load_sys_contract_interface("IL2DAValidator") - .function("validatePubdata") - .expect("validatePubdata Function does not exist on IL2DAValidator") - .short_signature() - .to_vec(); - - l2_da_header.push(ethabi::Token::Bytes(l1_messenger_pubdata)); - - [func_selector, ethabi::encode(&l2_da_header)] - .concat() - .to_vec() - } else { - let state_diffs_packed = state_diffs - .into_iter() - .flat_map(|diff| diff.encode_padded()) - .collect::>(); - - keccak256(&state_diffs_packed).to_vec() - } - } -} - -fn build_chained_log_hash(user_logs: Vec) -> Vec { - let mut chained_log_hash = vec![0u8; 32]; - - for log in user_logs { - let log_bytes = log.packed_encoding(); - let hash = keccak256(&log_bytes); - - chained_log_hash = keccak256(&[chained_log_hash, hash.to_vec()].concat()).to_vec(); - } - - chained_log_hash -} - -fn build_logs_root( - user_logs: Vec, - l2_to_l1_logs_tree_size: usize, -) -> Vec { - let logs = user_logs.iter().map(|log| { - let encoded = log.packed_encoding(); - let mut slice = [0u8; 88]; - slice.copy_from_slice(&encoded); - slice - }); - MiniMerkleTree::new(logs, Some(l2_to_l1_logs_tree_size)) - .merkle_root() - .as_bytes() - .to_vec() -} - -fn build_chained_message_hash(l2_to_l1_messages: Vec>) -> Vec { - let mut chained_msg_hash = vec![0u8; 32]; - - for msg in l2_to_l1_messages { - let hash = keccak256(&msg); - - chained_msg_hash = keccak256(&[chained_msg_hash, hash.to_vec()].concat()).to_vec(); - } - - chained_msg_hash -} - -fn build_chained_bytecode_hash(published_bytecodes: Vec>) -> Vec { - let mut chained_bytecode_hash = vec![0u8; 32]; - - for bytecode in published_bytecodes { - let hash = hash_bytecode(&bytecode).to_fixed_bytes(); - - chained_bytecode_hash = - keccak256(&[chained_bytecode_hash, hash.to_vec()].concat()).to_vec(); - } - - chained_bytecode_hash -} - -#[cfg(test)] -mod tests { - - // FIXME: restore this test - // #[test] - // fn test_basic_pubdata_building() { - // // Just using some constant addresses for tests - // let addr1 = BOOTLOADER_ADDRESS; - // let addr2 = ACCOUNT_CODE_STORAGE_ADDRESS; - - // let user_logs = vec![L1MessengerL2ToL1Log { - // l2_shard_id: 0, - // is_service: false, - // tx_number_in_block: 0, - // sender: addr1, - // key: 1.into(), - // value: 128.into(), - // }]; - - // let l2_to_l1_messages = vec![hex::decode("deadbeef").unwrap()]; - - // let published_bytecodes = vec![hex::decode("aaaabbbb").unwrap()]; - - // // For covering more cases, we have two state diffs: - // // One with enumeration index present (and so it is a repeated write) and the one without it. 
- // let state_diffs = vec![ - // StateDiffRecord { - // address: addr2, - // key: 155.into(), - // derived_key: u256_to_h256(125.into()).0, - // enumeration_index: 12, - // initial_value: 11.into(), - // final_value: 12.into(), - // }, - // StateDiffRecord { - // address: addr2, - // key: 156.into(), - // derived_key: u256_to_h256(126.into()).0, - // enumeration_index: 0, - // initial_value: 0.into(), - // final_value: 14.into(), - // }, - // ]; - - // let input = PubdataInput { - // user_logs, - // l2_to_l1_messages, - // published_bytecodes, - // state_diffs, - // }; - - // let pubdata = - // ethabi::encode(&[ethabi::Token::Bytes(input.build_pubdata(true))])[32..].to_vec(); - - // assert_eq!(hex::encode(pubdata), "00000000000000000000000000000000000000000000000000000000000002c700000001000000000000000000000000000000000000000000008001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000100000004deadbeef0000000100000004aaaabbbb0100002a040001000000000000000000000000000000000000000000000000000000000000007e090e0000000c0901000000020000000000000000000000000000000000008002000000000000000000000000000000000000000000000000000000000000009b000000000000000000000000000000000000000000000000000000000000007d000000000000000c000000000000000000000000000000000000000000000000000000000000000b000000000000000000000000000000000000000000000000000000000000000c00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008002000000000000000000000000000000000000000000000000000000000000009c000000000000000000000000000000000000000000000000000000000000007e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000e000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"); - // } -} diff --git a/core/lib/multivm/src/versions/vm_latest/types/internals/transaction_data.rs b/core/lib/multivm/src/versions/vm_latest/types/internals/transaction_data.rs index 2ec86eb3cea..90948f2f89f 100644 --- a/core/lib/multivm/src/versions/vm_latest/types/internals/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_latest/types/internals/transaction_data.rs @@ -46,8 +46,8 @@ pub(crate) struct TransactionData { pub(crate) raw_bytes: Option>, } -impl From for TransactionData { - fn from(execute_tx: Transaction) -> Self { +impl TransactionData { + pub(crate) fn new(execute_tx: Transaction, use_evm_emulator: bool) -> Self { match execute_tx.common_data { ExecuteTransactionCommon::L2(common_data) => { let nonce = U256::from_big_endian(&common_data.nonce.to_be_bytes()); @@ -62,6 +62,19 @@ impl From for TransactionData { U256::zero() }; + let should_deploy_contract = if execute_tx.execute.contract_address.is_none() { + // Transactions with no `contract_address` should be filtered out by the API server, + // so this is more of a sanity check. 
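+                    // (A tx without `contract_address` is, presumably, an EVM deployment:
+                    // the flag computed here lands in `reserved[1]` of the encoded tx below.)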
+ assert!( + use_evm_emulator, + "`execute.contract_address` not set for transaction {:?} with EVM emulation disabled", + common_data.hash() + ); + U256([1, 0, 0, 0]) + } else { + U256::zero() + }; + // Ethereum transactions do not sign gas per pubdata limit, and so for them we need to use // some default value. We use the maximum possible value that is allowed by the bootloader // (i.e. we can not use u64::MAX, because the bootloader requires gas per pubdata for such @@ -85,7 +98,7 @@ impl From for TransactionData { value: execute_tx.execute.value, reserved: [ should_check_chain_id, - U256::zero(), + should_deploy_contract, U256::zero(), U256::zero(), ], diff --git a/core/lib/multivm/src/versions/vm_latest/types/internals/vm_state.rs b/core/lib/multivm/src/versions/vm_latest/types/internals/vm_state.rs index b6e5e127c85..d25f66361f1 100644 --- a/core/lib/multivm/src/versions/vm_latest/types/internals/vm_state.rs +++ b/core/lib/multivm/src/versions/vm_latest/types/internals/vm_state.rs @@ -33,7 +33,6 @@ use crate::{ oracles::storage::StorageOracle, types::l1_batch::bootloader_initial_memory, utils::l2_blocks::{assert_next_block, load_last_l2_block}, - MultiVMSubversion, }, }; @@ -65,7 +64,6 @@ pub(crate) fn new_vm_state( storage: StoragePtr, system_env: &SystemEnv, l1_batch_env: &L1BatchEnv, - subversion: MultiVMSubversion, ) -> (ZkSyncVmState, BootloaderState) { let last_l2_block = if let Some(last_l2_block) = load_last_l2_block(&storage) { last_l2_block @@ -100,6 +98,13 @@ pub(crate) fn new_vm_state( Timestamp(0), ); + if let Some(evm_emulator) = &system_env.base_system_smart_contracts.evm_emulator { + decommittment_processor.populate( + vec![(h256_to_u256(evm_emulator.hash), evm_emulator.code.clone())], + Timestamp(0), + ); + } + memory.populate( vec![( BOOTLOADER_CODE_PAGE, @@ -119,6 +124,13 @@ pub(crate) fn new_vm_state( Timestamp(0), ); + // By convention, default AA is used as a fallback if the EVM emulator is not available. + let evm_emulator_code_hash = system_env + .base_system_smart_contracts + .evm_emulator + .as_ref() + .unwrap_or(&system_env.base_system_smart_contracts.default_aa) + .hash; let mut vm = VmState::empty_state( storage_oracle, memory, @@ -130,11 +142,7 @@ pub(crate) fn new_vm_state( default_aa_code_hash: h256_to_u256( system_env.base_system_smart_contracts.default_aa.hash, ), - // For now, the default account hash is used as the code hash for the EVM simulator. - // In the 1.5.0 version, it is not possible to instantiate EVM bytecode. - evm_simulator_code_hash: h256_to_u256( - system_env.base_system_smart_contracts.default_aa.hash, - ), + evm_simulator_code_hash: h256_to_u256(evm_emulator_code_hash), zkporter_is_available: system_env.zk_porter_available, }, ); @@ -183,8 +191,7 @@ pub(crate) fn new_vm_state( system_env.execution_mode, bootloader_initial_memory, first_l2_block, - system_env.pubdata_params, - subversion, + system_env.version, ); (vm, bootloader_state) diff --git a/core/lib/multivm/src/versions/vm_latest/utils/mod.rs b/core/lib/multivm/src/versions/vm_latest/utils/mod.rs index 0fb803de5d4..aeb66755f51 100644 --- a/core/lib/multivm/src/versions/vm_latest/utils/mod.rs +++ b/core/lib/multivm/src/versions/vm_latest/utils/mod.rs @@ -1,6 +1,37 @@ -/// Utility functions for the VM. +//! Utility functions for the VM. 
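+//!
+//! Besides the existing fee/overhead helpers, this module now exposes `heap_page_from_base`
+//! and `extract_bytecodes_marked_as_known`, which scans `MarkedAsKnown` events emitted by
+//! the known codes storage system contract. A hypothetical usage sketch (assuming `events`
+//! holds the `VmEvent`s of an execution result):
+//!
+//! ```ignore
+//! let known_hashes = extract_bytecodes_marked_as_known(&events);
+//! ```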
+ +use once_cell::sync::Lazy; +use zk_evm_1_5_0::aux_structures::MemoryPage; +use zksync_types::{H256, KNOWN_CODES_STORAGE_ADDRESS}; +use zksync_vm_interface::VmEvent; + pub mod fee; pub mod l2_blocks; pub(crate) mod logs; pub mod overhead; pub mod transaction_encoding; + +pub const fn heap_page_from_base(base: MemoryPage) -> MemoryPage { + MemoryPage(base.0 + 2) +} + +/// Extracts all bytecodes marked as known on the system contracts. +pub fn extract_bytecodes_marked_as_known(all_generated_events: &[VmEvent]) -> Vec { + static PUBLISHED_BYTECODE_SIGNATURE: Lazy = Lazy::new(|| { + ethabi::long_signature( + "MarkedAsKnown", + &[ethabi::ParamType::FixedBytes(32), ethabi::ParamType::Bool], + ) + }); + + all_generated_events + .iter() + .filter(|event| { + // Filter events from the deployer contract that match the expected signature. + event.address == KNOWN_CODES_STORAGE_ADDRESS + && event.indexed_topics.len() == 3 + && event.indexed_topics[0] == *PUBLISHED_BYTECODE_SIGNATURE + }) + .map(|event| event.indexed_topics[1]) + .collect() +} diff --git a/core/lib/multivm/src/versions/vm_latest/utils/transaction_encoding.rs b/core/lib/multivm/src/versions/vm_latest/utils/transaction_encoding.rs index 86c49a3eb15..ed532f89dbc 100644 --- a/core/lib/multivm/src/versions/vm_latest/utils/transaction_encoding.rs +++ b/core/lib/multivm/src/versions/vm_latest/utils/transaction_encoding.rs @@ -10,7 +10,9 @@ pub trait TransactionVmExt { impl TransactionVmExt for Transaction { fn bootloader_encoding_size(&self) -> usize { - let transaction_data: TransactionData = self.clone().into(); + // Since we want to just measure the encoding size, `use_evm_emulator` arg doesn't matter here, + // so we use a more lenient option. + let transaction_data = TransactionData::new(self.clone(), true); transaction_data.into_tokens().len() } } diff --git a/core/lib/multivm/src/versions/vm_latest/vm.rs b/core/lib/multivm/src/versions/vm_latest/vm.rs index 63ec5f7451c..c6573d64200 100644 --- a/core/lib/multivm/src/versions/vm_latest/vm.rs +++ b/core/lib/multivm/src/versions/vm_latest/vm.rs @@ -1,25 +1,28 @@ +use std::{collections::HashMap, rc::Rc}; + use circuit_sequencer_api_1_5_0::sort_storage_access::sort_storage_access_queries; use zksync_types::{ l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, vm::VmVersion, Transaction, H256, }; -use zksync_utils::u256_to_h256; +use zksync_utils::{be_words_to_bytes, h256_to_u256, u256_to_h256}; +use zksync_vm_interface::{pubdata::PubdataBuilder, InspectExecutionMode}; use crate::{ glue::GlueInto, interface::{ storage::{StoragePtr, WriteStorage}, BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, - FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, + FinishedL1Batch, L1BatchEnv, L2BlockEnv, PushTransactionResult, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, - VmMemoryMetrics, VmTrackingContracts, + VmTrackingContracts, }, utils::events::extract_l2tol1logs_from_l1_messenger, vm_latest::{ bootloader_state::BootloaderState, old_vm::{events::merge_events, history_recorder::HistoryEnabled}, - tracers::dispatcher::TracerDispatcher, + tracers::{dispatcher::TracerDispatcher, PubdataTracer}, types::internals::{new_vm_state, VmSnapshot, ZkSyncVmState}, }, HistoryMode, @@ -36,7 +39,7 @@ pub(crate) enum MultiVMSubversion { SmallBootloaderMemory, /// The final correct version of v1.5.0 IncreasedBootloaderMemory, - /// Version for protocol v25 + /// VM for post-gateway versions. 
Gateway, } @@ -82,6 +85,20 @@ impl Vm { self.state.local_state.callstack.current.ergs_remaining } + pub(crate) fn decommit_bytecodes(&self, hashes: &[H256]) -> HashMap> { + let bytecodes = hashes.iter().map(|&hash| { + let bytecode_words = self + .state + .decommittment_processor + .known_bytecodes + .inner() + .get(&h256_to_u256(hash)) + .unwrap_or_else(|| panic!("Bytecode with hash {hash:?} not found")); + (hash, be_words_to_bytes(bytecode_words)) + }); + bytecodes.collect() + } + // visible for testing pub(super) fn get_current_execution_state(&self) -> CurrentExecutionState { let (raw_events, l1_messages) = self.state.event_sink.flatten(); @@ -121,18 +138,23 @@ impl Vm { impl VmInterface for Vm { type TracerDispatcher = TracerDispatcher; - /// Push tx into memory for the future execution - fn push_transaction(&mut self, tx: Transaction) { + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult<'_> { self.push_transaction_with_compression(tx, true); + PushTransactionResult { + compressed_bytecodes: self + .bootloader_state + .get_last_tx_compressed_bytecodes() + .into(), + } } /// Execute VM with custom tracers. fn inspect( &mut self, tracer: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { - self.inspect_inner(tracer, execution_mode, None) + self.inspect_inner(tracer, execution_mode.into(), None) } fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { @@ -164,19 +186,31 @@ impl VmInterface for Vm { } } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - self.record_vm_memory_metrics_inner() - } + fn finish_batch(&mut self, pubdata_builder: Rc) -> FinishedL1Batch { + let pubdata_tracer = Some(PubdataTracer::new( + self.batch_env.clone(), + VmExecutionMode::Batch, + self.subversion, + Some(pubdata_builder.clone()), + )); - fn finish_batch(&mut self) -> FinishedL1Batch { - let result = self.inspect(&mut TracerDispatcher::default(), VmExecutionMode::Batch); + let result = self.inspect_inner( + &mut TracerDispatcher::default(), + VmExecutionMode::Batch, + pubdata_tracer, + ); let execution_state = self.get_current_execution_state(); - let bootloader_memory = self.bootloader_state.bootloader_memory(); + let bootloader_memory = self + .bootloader_state + .bootloader_memory(pubdata_builder.as_ref()); FinishedL1Batch { block_tip_execution_result: result, final_execution_state: execution_state, final_bootloader_memory: Some(bootloader_memory), - pubdata_input: Some(self.bootloader_state.get_encoded_pubdata()), + pubdata_input: Some( + self.bootloader_state + .settlement_layer_pubdata(pubdata_builder.as_ref()), + ), state_diffs: Some( self.bootloader_state .get_pubdata_information() @@ -206,8 +240,7 @@ impl Vm { storage: StoragePtr, subversion: MultiVMSubversion, ) -> Self { - let (state, bootloader_state) = - new_vm_state(storage.clone(), &system_env, &batch_env, subversion); + let (state, bootloader_state) = new_vm_state(storage.clone(), &system_env, &batch_env); Self { bootloader_state, state, diff --git a/core/lib/multivm/src/versions/vm_m5/utils.rs b/core/lib/multivm/src/versions/vm_m5/utils.rs index 8c5bca674c6..a38618395b1 100644 --- a/core/lib/multivm/src/versions/vm_m5/utils.rs +++ b/core/lib/multivm/src/versions/vm_m5/utils.rs @@ -5,7 +5,7 @@ use zk_evm_1_3_1::{ vm_state::PrimitiveValue, zkevm_opcode_defs::FatPointer, }; -use zksync_contracts::{read_zbin_bytecode, BaseSystemContracts}; +use zksync_contracts::BaseSystemContracts; use 
zksync_system_constants::ZKPORTER_IS_AVAILABLE; use zksync_types::{Address, StorageLogKind, H160, MAX_L2_TX_GAS_LIMIT, U256}; use zksync_utils::h256_to_u256; @@ -253,13 +253,6 @@ pub fn create_test_block_params() -> (BlockContext, BlockProperties) { ) } -pub fn read_bootloader_test_code(test: &str) -> Vec { - read_zbin_bytecode(format!( - "contracts/system-contracts/bootloader/tests/artifacts/{}.yul.zbin", - test - )) -} - /// Log query, which handle initial and repeated writes to the storage #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct StorageLogQuery { diff --git a/core/lib/multivm/src/versions/vm_m5/vm.rs b/core/lib/multivm/src/versions/vm_m5/vm.rs index 40f66659f29..55afeed17cd 100644 --- a/core/lib/multivm/src/versions/vm_m5/vm.rs +++ b/core/lib/multivm/src/versions/vm_m5/vm.rs @@ -1,11 +1,14 @@ +use std::rc::Rc; + use zksync_types::{vm::VmVersion, Transaction}; use zksync_utils::h256_to_u256; +use zksync_vm_interface::{pubdata::PubdataBuilder, InspectExecutionMode}; use crate::{ glue::{history_mode::HistoryMode, GlueInto}, interface::{ storage::StoragePtr, BytecodeCompressionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, - SystemEnv, TxExecutionMode, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, + PushTransactionResult, SystemEnv, TxExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, }, vm_m5::{ @@ -50,27 +53,34 @@ impl Vm { _phantom: Default::default(), } } + + pub(crate) fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { + VmMemoryMetrics::default() + } } impl VmInterface for Vm { /// Tracers are not supported for here we use `()` as a placeholder type TracerDispatcher = (); - fn push_transaction(&mut self, tx: Transaction) { + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult<'_> { crate::vm_m5::vm_with_bootloader::push_transaction_to_bootloader_memory( &mut self.vm, &tx, self.system_env.execution_mode.glue_into(), - ) + ); + PushTransactionResult { + compressed_bytecodes: (&[]).into(), // bytecode compression isn't supported + } } fn inspect( &mut self, _tracer: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { match execution_mode { - VmExecutionMode::OneTx => match self.system_env.execution_mode { + InspectExecutionMode::OneTx => match self.system_env.execution_mode { TxExecutionMode::VerifyExecute => self.vm.execute_next_tx().glue_into(), TxExecutionMode::EstimateFee | TxExecutionMode::EthCall => self .vm @@ -79,8 +89,7 @@ impl VmInterface for Vm { ) .glue_into(), }, - VmExecutionMode::Batch => self.finish_batch().block_tip_execution_result, - VmExecutionMode::Bootloader => self.vm.execute_block_tip().glue_into(), + InspectExecutionMode::Bootloader => self.vm.execute_block_tip().glue_into(), } } @@ -102,24 +111,11 @@ impl VmInterface for Vm { // Bytecode compression isn't supported ( Ok(vec![].into()), - self.inspect(&mut (), VmExecutionMode::OneTx), + self.inspect(&mut (), InspectExecutionMode::OneTx), ) } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - VmMemoryMetrics { - event_sink_inner: 0, - event_sink_history: 0, - memory_inner: 0, - memory_history: 0, - decommittment_processor_inner: 0, - decommittment_processor_history: 0, - storage_inner: 0, - storage_history: 0, - } - } - - fn finish_batch(&mut self) -> FinishedL1Batch { + fn finish_batch(&mut self, _pubdata_builder: Rc) -> FinishedL1Batch { self.vm .execute_till_block_end( 
crate::vm_m5::vm_with_bootloader::BootloaderJobType::BlockPostprocessing, diff --git a/core/lib/multivm/src/versions/vm_m6/utils.rs b/core/lib/multivm/src/versions/vm_m6/utils.rs index d9709022fe3..912a30a4eaf 100644 --- a/core/lib/multivm/src/versions/vm_m6/utils.rs +++ b/core/lib/multivm/src/versions/vm_m6/utils.rs @@ -5,7 +5,7 @@ use zk_evm_1_3_1::{ vm_state::PrimitiveValue, zkevm_opcode_defs::FatPointer, }; -use zksync_contracts::{read_zbin_bytecode, BaseSystemContracts}; +use zksync_contracts::BaseSystemContracts; use zksync_system_constants::ZKPORTER_IS_AVAILABLE; use zksync_types::{Address, StorageLogKind, H160, MAX_L2_TX_GAS_LIMIT, U256}; use zksync_utils::h256_to_u256; @@ -256,13 +256,6 @@ pub fn create_test_block_params() -> (BlockContext, BlockProperties) { ) } -pub fn read_bootloader_test_code(test: &str) -> Vec { - read_zbin_bytecode(format!( - "contracts/system-contracts/bootloader/tests/artifacts/{}.yul.zbin", - test - )) -} - pub(crate) fn calculate_computational_gas_used< S: Storage, T: PubdataSpentTracer, diff --git a/core/lib/multivm/src/versions/vm_m6/vm.rs b/core/lib/multivm/src/versions/vm_m6/vm.rs index 627687a5524..4c67a218418 100644 --- a/core/lib/multivm/src/versions/vm_m6/vm.rs +++ b/core/lib/multivm/src/versions/vm_m6/vm.rs @@ -1,13 +1,14 @@ -use std::collections::HashSet; +use std::{collections::HashSet, rc::Rc}; use zksync_types::{vm::VmVersion, Transaction}; use zksync_utils::{bytecode::hash_bytecode, h256_to_u256}; +use zksync_vm_interface::{pubdata::PubdataBuilder, InspectExecutionMode}; use crate::{ glue::{history_mode::HistoryMode, GlueInto}, interface::{ storage::StoragePtr, BytecodeCompressionError, BytecodeCompressionResult, FinishedL1Batch, - L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, + L1BatchEnv, L2BlockEnv, PushTransactionResult, SystemEnv, TxExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, VmMemoryMetrics, }, @@ -50,24 +51,45 @@ impl Vm { system_env, } } + + pub(crate) fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { + VmMemoryMetrics { + event_sink_inner: self.vm.state.event_sink.get_size(), + event_sink_history: self.vm.state.event_sink.get_history_size(), + memory_inner: self.vm.state.memory.get_size(), + memory_history: self.vm.state.memory.get_history_size(), + decommittment_processor_inner: self.vm.state.decommittment_processor.get_size(), + decommittment_processor_history: self + .vm + .state + .decommittment_processor + .get_history_size(), + storage_inner: self.vm.state.storage.get_size(), + storage_history: self.vm.state.storage.get_history_size(), + } + } } impl VmInterface for Vm { type TracerDispatcher = TracerDispatcher; - fn push_transaction(&mut self, tx: Transaction) { - crate::vm_m6::vm_with_bootloader::push_transaction_to_bootloader_memory( - &mut self.vm, - &tx, - self.system_env.execution_mode.glue_into(), - None, - ) + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult { + let compressed_bytecodes = + crate::vm_m6::vm_with_bootloader::push_transaction_to_bootloader_memory( + &mut self.vm, + &tx, + self.system_env.execution_mode.glue_into(), + None, + ); + PushTransactionResult { + compressed_bytecodes: compressed_bytecodes.into(), + } } fn inspect( &mut self, tracer: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { if let Some(storage_invocations) = tracer.storage_invocations { self.vm @@ -76,7 +98,7 @@ impl VmInterface for Vm { } match 
execution_mode { - VmExecutionMode::OneTx => match self.system_env.execution_mode { + InspectExecutionMode::OneTx => match self.system_env.execution_mode { TxExecutionMode::VerifyExecute => { let enable_call_tracer = tracer.call_tracer.is_some(); let result = self.vm.execute_next_tx( @@ -95,8 +117,7 @@ impl VmInterface for Vm { ) .glue_into(), }, - VmExecutionMode::Batch => self.finish_batch().block_tip_execution_result, - VmExecutionMode::Bootloader => self.vm.execute_block_tip().glue_into(), + InspectExecutionMode::Bootloader => self.vm.execute_block_tip().glue_into(), } } @@ -186,24 +207,7 @@ impl VmInterface for Vm { } } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - VmMemoryMetrics { - event_sink_inner: self.vm.state.event_sink.get_size(), - event_sink_history: self.vm.state.event_sink.get_history_size(), - memory_inner: self.vm.state.memory.get_size(), - memory_history: self.vm.state.memory.get_history_size(), - decommittment_processor_inner: self.vm.state.decommittment_processor.get_size(), - decommittment_processor_history: self - .vm - .state - .decommittment_processor - .get_history_size(), - storage_inner: self.vm.state.storage.get_size(), - storage_history: self.vm.state.storage.get_history_size(), - } - } - - fn finish_batch(&mut self) -> FinishedL1Batch { + fn finish_batch(&mut self, _pubdata_builder: Rc) -> FinishedL1Batch { self.vm .execute_till_block_end( crate::vm_m6::vm_with_bootloader::BootloaderJobType::BlockPostprocessing, diff --git a/core/lib/multivm/src/versions/vm_m6/vm_with_bootloader.rs b/core/lib/multivm/src/versions/vm_m6/vm_with_bootloader.rs index 7a9fbb73fe4..ae44e721b0d 100644 --- a/core/lib/multivm/src/versions/vm_m6/vm_with_bootloader.rs +++ b/core/lib/multivm/src/versions/vm_m6/vm_with_bootloader.rs @@ -491,7 +491,7 @@ fn get_bootloader_memory_v1( predefined_refunds[tx_index_in_block], block_gas_price_per_pubdata as u32, previous_compressed, - compressed_bytecodes, + &compressed_bytecodes, ); previous_compressed += total_compressed_len; @@ -536,7 +536,7 @@ fn get_bootloader_memory_v2( predefined_refunds[tx_index_in_block], block_gas_price_per_pubdata as u32, previous_compressed, - compressed_bytecodes, + &compressed_bytecodes, ); previous_compressed += total_compressed_len_words; @@ -554,7 +554,7 @@ pub fn push_transaction_to_bootloader_memory( tx: &Transaction, execution_mode: TxExecutionMode, explicit_compressed_bytecodes: Option>, -) { +) -> Vec { let tx: TransactionData = tx.clone().into(); let block_gas_per_pubdata_byte = vm.block_context.context.block_gas_price_per_pubdata(); let overhead = tx.overhead_gas(block_gas_per_pubdata_byte as u32); @@ -564,7 +564,7 @@ pub fn push_transaction_to_bootloader_memory( execution_mode, overhead, explicit_compressed_bytecodes, - ); + ) } pub fn push_raw_transaction_to_bootloader_memory( @@ -573,7 +573,7 @@ pub fn push_raw_transaction_to_bootloader_memory( execution_mode: TxExecutionMode, predefined_overhead: u32, explicit_compressed_bytecodes: Option>, -) { +) -> Vec { match vm.vm_subversion { MultiVMSubversion::V1 => push_raw_transaction_to_bootloader_memory_v1( vm, @@ -599,7 +599,7 @@ fn push_raw_transaction_to_bootloader_memory_v1( execution_mode: TxExecutionMode, predefined_overhead: u32, explicit_compressed_bytecodes: Option>, -) { +) -> Vec { let tx_index_in_block = vm.bootloader_state.free_tx_index(); let already_included_txs_size = vm.bootloader_state.free_tx_offset(); @@ -651,7 +651,7 @@ fn push_raw_transaction_to_bootloader_memory_v1( predefined_overhead, trusted_ergs_limit, 
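+        // `compressed_bytecodes` below is now borrowed: the push helpers return the
+        // compressed bytecodes to the caller instead of consuming them.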
previous_bytecodes, - compressed_bytecodes, + &compressed_bytecodes, ); vm.state.memory.populate_page( @@ -661,6 +661,7 @@ fn push_raw_transaction_to_bootloader_memory_v1( ); vm.bootloader_state.add_tx_data(encoded_tx_size); vm.bootloader_state.add_compressed_bytecode(compressed_len); + compressed_bytecodes } // Bytecode compression bug fixed @@ -670,7 +671,7 @@ fn push_raw_transaction_to_bootloader_memory_v2( execution_mode: TxExecutionMode, predefined_overhead: u32, explicit_compressed_bytecodes: Option>, -) { +) -> Vec { let tx_index_in_block = vm.bootloader_state.free_tx_index(); let already_included_txs_size = vm.bootloader_state.free_tx_offset(); @@ -730,7 +731,7 @@ fn push_raw_transaction_to_bootloader_memory_v2( predefined_overhead, trusted_ergs_limit, previous_bytecodes, - compressed_bytecodes, + &compressed_bytecodes, ); vm.state.memory.populate_page( @@ -741,6 +742,7 @@ fn push_raw_transaction_to_bootloader_memory_v2( vm.bootloader_state.add_tx_data(encoded_tx_size); vm.bootloader_state .add_compressed_bytecode(compressed_bytecodes_encoding_len_words); + compressed_bytecodes } #[allow(clippy::too_many_arguments)] @@ -752,7 +754,7 @@ fn get_bootloader_memory_for_tx( predefined_refund: u32, block_gas_per_pubdata: u32, previous_compressed_bytecode_size: usize, - compressed_bytecodes: Vec, + compressed_bytecodes: &[CompressedBytecodeInfo], ) -> Vec<(usize, U256)> { let overhead_gas = tx.overhead_gas(block_gas_per_pubdata); let trusted_gas_limit = tx.trusted_gas_limit(block_gas_per_pubdata); @@ -779,7 +781,7 @@ pub(crate) fn get_bootloader_memory_for_encoded_tx( predefined_overhead: u32, trusted_gas_limit: u32, previous_compressed_bytecode_size: usize, - compressed_bytecodes: Vec, + compressed_bytecodes: &[CompressedBytecodeInfo], ) -> Vec<(usize, U256)> { let mut memory: Vec<(usize, U256)> = Vec::default(); let bootloader_description_offset = @@ -815,8 +817,8 @@ pub(crate) fn get_bootloader_memory_for_encoded_tx( COMPRESSED_BYTECODES_OFFSET + 1 + previous_compressed_bytecode_size; let memory_addition: Vec<_> = compressed_bytecodes - .into_iter() - .flat_map(|x| bytecode::encode_call(&x)) + .iter() + .flat_map(bytecode::encode_call) .collect(); let memory_addition = bytes_to_be_words(memory_addition); diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/utils.rs index 7bd488f90a9..14c895d7a0b 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/utils.rs @@ -133,8 +133,8 @@ pub(super) fn assemble_tx_meta(execution_mode: TxExecutionMode, execute_tx: bool // Set 0 byte (execution mode) output[0] = match execution_mode { TxExecutionMode::VerifyExecute => 0x00, - TxExecutionMode::EstimateFee { .. } => 0x00, - TxExecutionMode::EthCall { .. 
} => 0x02, + TxExecutionMode::EstimateFee => 0x00, + TxExecutionMode::EthCall => 0x02, }; // Set 31 byte (marker for tx execution) diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/execution.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/execution.rs index 8196760a621..9462a89be2a 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/execution.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/execution.rs @@ -90,6 +90,7 @@ impl Vm { logs, statistics, refunds, + new_known_factory_deps: None, }; (stop_reason, result) diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/statistics.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/statistics.rs index dcda1457b76..a73c212db29 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/statistics.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/statistics.rs @@ -56,7 +56,7 @@ impl Vm { } /// Returns the info about all oracles' sizes. - pub(crate) fn record_vm_memory_metrics_inner(&self) -> VmMemoryMetrics { + pub(crate) fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { VmMemoryMetrics { event_sink_inner: self.state.event_sink.get_size(), event_sink_history: self.state.event_sink.get_history_size(), diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/bootloader.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/bootloader.rs deleted file mode 100644 index 23b250d485b..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/bootloader.rs +++ /dev/null @@ -1,54 +0,0 @@ -use zksync_types::U256; - -use crate::interface::{Halt, TxExecutionMode, VmExecutionMode}; -use crate::vm_refunds_enhancement::constants::BOOTLOADER_HEAP_PAGE; -use crate::vm_refunds_enhancement::tests::tester::VmTesterBuilder; -use crate::vm_refunds_enhancement::tests::utils::{ - get_bootloader, verify_required_memory, BASE_SYSTEM_CONTRACTS, -}; - -use crate::interface::ExecutionResult; -use crate::vm_refunds_enhancement::HistoryEnabled; - -#[test] -fn test_dummy_bootloader() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!result.result.is_failed()); - - let correct_first_cell = U256::from_str_radix("123123123", 16).unwrap(); - verify_required_memory( - &vm.vm.state, - vec![(correct_first_cell, BOOTLOADER_HEAP_PAGE, 0)], - ); -} - -#[test] -fn test_bootloader_out_of_gas() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_gas_limit(10) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let res = vm.vm.execute(VmExecutionMode::Batch); - - assert_matches!( - res.result, - ExecutionResult::Halt { - reason: Halt::BootloaderOutOfGas - } - ); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/bytecode_publishing.rs 
b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/bytecode_publishing.rs deleted file mode 100644 index b2c126dea00..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/bytecode_publishing.rs +++ /dev/null @@ -1,37 +0,0 @@ -use zksync_types::event::extract_long_l2_to_l1_messages; -use zksync_utils::bytecode::compress_bytecode; - -use crate::interface::{TxExecutionMode, VmExecutionMode}; -use crate::vm_refunds_enhancement::tests::tester::{DeployContractsTx, TxType, VmTesterBuilder}; -use crate::vm_refunds_enhancement::tests::utils::read_test_contract; -use crate::vm_refunds_enhancement::HistoryEnabled; - -#[test] -fn test_bytecode_publishing() { - // In this test, we aim to ensure that the contents of the compressed bytecodes - // are included as part of the L2->L1 long messages - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let compressed_bytecode = compress_bytecode(&counter).unwrap(); - - let DeployContractsTx { tx, .. } = account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - - let state = vm.vm.get_current_execution_state(); - let long_messages = extract_long_l2_to_l1_messages(&state.events); - assert!( - long_messages.contains(&compressed_bytecode), - "Bytecode not published" - ); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/call_tracer.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/call_tracer.rs deleted file mode 100644 index fb2d3389407..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/call_tracer.rs +++ /dev/null @@ -1,87 +0,0 @@ -use crate::interface::{TxExecutionMode, VmExecutionMode}; -use crate::vm_refunds_enhancement::constants::BLOCK_GAS_LIMIT; -use crate::vm_refunds_enhancement::tests::tester::VmTesterBuilder; -use crate::vm_refunds_enhancement::tests::utils::{read_max_depth_contract, read_test_contract}; -use crate::vm_refunds_enhancement::{CallTracer, HistoryEnabled}; -use once_cell::sync::OnceCell; -use std::sync::Arc; -use zksync_types::{Address, Execute}; - -// This test is ultra slow, so it's ignored by default. 
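// Note on the test removed above: `test_bytecode_publishing` pinned down one
// property - after a batch is sealed, the compressed form of every deployed
// bytecode must appear verbatim among the long L2->L1 messages. A minimal,
// dependency-free sketch of that assertion (stand-in types, not the real
// multivm API):
fn assert_bytecode_published(compressed_bytecode: &[u8], long_l2_to_l1_messages: &[Vec<u8>]) {
    assert!(
        long_l2_to_l1_messages
            .iter()
            .any(|msg| msg.as_slice() == compressed_bytecode),
        "Bytecode not published"
    );
}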
-#[test] -#[ignore] -fn test_max_depth() { - let contract = read_max_depth_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contract, address, true)]) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: vec![], - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result = Arc::new(OnceCell::new()); - let call_tracer = CallTracer::new(result.clone(), HistoryEnabled); - vm.vm.push_transaction(tx); - let res = vm - .vm - .inspect(vec![Box::new(call_tracer)], VmExecutionMode::OneTx); - assert!(result.get().is_some()); - assert!(res.result.is_failed()); -} - -#[test] -fn test_basic_behavior() { - let contract = read_test_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contract, address, true)]) - .build(); - - let increment_by_6_calldata = - "7cf5dab00000000000000000000000000000000000000000000000000000000000000006"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: hex::decode(increment_by_6_calldata).unwrap(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result = Arc::new(OnceCell::new()); - let call_tracer = CallTracer::new(result.clone(), HistoryEnabled); - vm.vm.push_transaction(tx); - let res = vm - .vm - .inspect(vec![Box::new(call_tracer)], VmExecutionMode::OneTx); - - let call_tracer_result = result.get().unwrap(); - - assert_eq!(call_tracer_result.len(), 1); - // Expect that there are plenty of subcalls underneath. - let subcall = &call_tracer_result[0].calls; - assert!(subcall.len() > 10); - assert!(!res.result.is_failed()); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/default_aa.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/default_aa.rs deleted file mode 100644 index 92e043ae96f..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/default_aa.rs +++ /dev/null @@ -1,70 +0,0 @@ -use zksync_system_constants::L2_ETH_TOKEN_ADDRESS; -use zksync_types::system_contracts::{DEPLOYMENT_NONCE_INCREMENT, TX_NONCE_INCREMENT}; - -use zksync_types::{get_code_key, get_known_code_key, get_nonce_key, AccountTreeId, U256}; -use zksync_utils::u256_to_h256; - -use crate::interface::{TxExecutionMode, VmExecutionMode}; -use crate::vm_refunds_enhancement::tests::tester::{DeployContractsTx, TxType, VmTesterBuilder}; -use crate::vm_refunds_enhancement::tests::utils::{ - get_balance, read_test_contract, verify_required_storage, -}; -use crate::vm_refunds_enhancement::HistoryEnabled; - -#[test] -fn test_default_aa_interaction() { - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests.
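// The call-tracer tests above share results between the tracer and the test
// through an `Arc<OnceCell<_>>`: the tracer publishes the collected calls
// exactly once when execution finishes, and the test reads them afterwards.
// A dependency-free sketch of that hand-off using the std equivalent,
// `OnceLock` (illustrative only; the real tests use the `once_cell` crate):
fn oncecell_handoff_demo() {
    use std::sync::{Arc, OnceLock};

    let result: Arc<OnceLock<Vec<String>>> = Arc::new(OnceLock::new());
    let tracer_side = Arc::clone(&result);
    // Inside the tracer, the trace is set exactly once...
    tracer_side.set(vec!["call".to_string()]).unwrap();
    // ...and the test observes it after the VM run.
    assert!(result.get().is_some());
}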
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let DeployContractsTx { - tx, - bytecode_hash, - address, - } = account.get_deploy_tx(&counter, None, TxType::L2); - let maximal_fee = tx.gas_limit() * vm.vm.batch_env.base_fee(); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - vm.vm.get_current_execution_state(); - - // Both deployment and ordinary nonce should be incremented by one. - let account_nonce_key = get_nonce_key(&account.address); - let expected_nonce = TX_NONCE_INCREMENT + DEPLOYMENT_NONCE_INCREMENT; - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&bytecode_hash); - - // The contract should be deployed successfully. - let account_code_key = get_code_key(&address); - - let expected_slots = vec![ - (u256_to_h256(expected_nonce), account_nonce_key), - (u256_to_h256(U256::from(1u32)), known_codes_key), - (bytecode_hash, account_code_key), - ]; - - verify_required_storage(&vm.vm.state, expected_slots); - - let expected_fee = maximal_fee - - U256::from(result.refunds.gas_refunded) * U256::from(vm.vm.batch_env.base_fee()); - let operator_balance = get_balance( - AccountTreeId::new(L2_ETH_TOKEN_ADDRESS), - &vm.fee_account, - vm.vm.state.storage.storage.get_ptr(), - ); - - assert_eq!( - operator_balance, expected_fee, - "Operator did not receive his fee" - ); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/gas_limit.rs deleted file mode 100644 index 1ff6ce12557..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/gas_limit.rs +++ /dev/null @@ -1,45 +0,0 @@ -use zksync_types::fee::Fee; -use zksync_types::Execute; - -use crate::vm_refunds_enhancement::constants::{ - BOOTLOADER_HEAP_PAGE, TX_DESCRIPTION_OFFSET, TX_GAS_LIMIT_OFFSET, -}; -use crate::vm_refunds_enhancement::tests::tester::VmTesterBuilder; - -use crate::interface::TxExecutionMode; -use crate::vm_refunds_enhancement::HistoryDisabled; - -/// Checks that `TX_GAS_LIMIT_OFFSET` constant is correct. 
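// The default-AA test above checks fee accounting with a simple identity: the
// operator's balance gain equals the maximal fee (`gas_limit * base_fee`)
// minus the refunded gas valued at the same base fee. A sketch of that
// bookkeeping with plain integers (illustrative; the real test uses U256):
fn expected_operator_fee(gas_limit: u128, base_fee: u128, gas_refunded: u128) -> u128 {
    let maximal_fee = gas_limit * base_fee;
    maximal_fee - gas_refunded * base_fee
}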
-#[test] -fn test_tx_gas_limit_offset() { - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let gas_limit = 9999.into(); - let tx = vm.rich_accounts[0].get_l2_tx_for_execute( - Execute { - contract_address: Some(Default::default()), - ..Default::default() - }, - Some(Fee { - gas_limit, - ..Default::default() - }), - ); - - vm.vm.push_transaction(tx); - - let gas_limit_from_memory = vm - .vm - .state - .memory - .read_slot( - BOOTLOADER_HEAP_PAGE as usize, - TX_DESCRIPTION_OFFSET + TX_GAS_LIMIT_OFFSET, - ) - .value; - assert_eq!(gas_limit_from_memory, gas_limit); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/get_used_contracts.rs deleted file mode 100644 index 8c121db3e43..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/get_used_contracts.rs +++ /dev/null @@ -1,104 +0,0 @@ -use std::collections::{HashMap, HashSet}; - -use itertools::Itertools; - -use crate::interface::storage::WriteStorage; -use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; -use zksync_test_account::Account; -use zksync_types::{Execute, U256}; -use zksync_utils::bytecode::hash_bytecode; -use zksync_utils::h256_to_u256; - -use crate::interface::{TxExecutionMode, VmExecutionMode}; -use crate::vm_refunds_enhancement::tests::tester::{TxType, VmTesterBuilder}; -use crate::vm_refunds_enhancement::tests::utils::{read_test_contract, BASE_SYSTEM_CONTRACTS}; -use crate::vm_refunds_enhancement::{HistoryDisabled, HistoryMode, Vm}; - -#[test] -fn test_get_used_contracts() { - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - assert!(known_bytecodes_without_aa_code(&vm.vm).is_empty()); - - // Create, push, and execute a transaction with non-empty factory deps and a success status - // to check that get_used_contracts() updates. - let contract_code = read_test_contract(); - let mut account = Account::random(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 0 }); - vm.vm.push_transaction(tx.tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - assert!(vm - .vm - .get_used_contracts() - .contains(&h256_to_u256(tx.bytecode_hash))); - - // Note: Default_AA will be in the list of used contracts if an L2 tx is used. - assert_eq!( - vm.vm - .get_used_contracts() - .into_iter() - .collect::>(), - known_bytecodes_without_aa_code(&vm.vm) - .keys() - .cloned() - .collect::>() - ); - - // Create, push, and execute a transaction with non-empty factory deps that fails - // (known_bytecodes will be updated, but we expect get_used_contracts() to not be updated). - - let calldata = [1, 2, 3]; - let big_calldata: Vec = calldata - .iter() - .cycle() - .take(calldata.len() * 1024) - .cloned() - .collect(); - let account2 = Account::random(); - let tx2 = account2.get_l1_tx( - Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, - calldata: big_calldata, - value: Default::default(), - factory_deps: Some(vec![vec![1; 32]]), - }, - 1, - ); - - vm.vm.push_transaction(tx2.clone()); - - let res2 = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(res2.result.is_failed()); - - for factory_dep in tx2.execute.factory_deps.unwrap() { - let hash = hash_bytecode(&factory_dep); - let hash_to_u256 = 
h256_to_u256(hash); - assert!(known_bytecodes_without_aa_code(&vm.vm) - .keys() - .contains(&hash_to_u256)); - assert!(!vm.vm.get_used_contracts().contains(&hash_to_u256)); - } -} - -fn known_bytecodes_without_aa_code( - vm: &Vm, -) -> HashMap> { - let mut known_bytecodes_without_aa_code = vm - .state - .decommittment_processor - .known_bytecodes - .inner() - .clone(); - - known_bytecodes_without_aa_code - .remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash)) - .unwrap(); - - known_bytecodes_without_aa_code -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/invalid_bytecode.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/invalid_bytecode.rs deleted file mode 100644 index 88ed141630a..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/invalid_bytecode.rs +++ /dev/null @@ -1,120 +0,0 @@ -use zksync_types::H256; -use zksync_utils::h256_to_u256; - -use crate::vm_refunds_enhancement::tests::tester::VmTesterBuilder; -use crate::vm_refunds_enhancement::types::inputs::system_env::TxExecutionMode; -use crate::vm_refunds_enhancement::{HistoryEnabled, TxRevertReason}; - -// TODO this test requires a lot of hacks for bypassing the bytecode checks in the VM. -// Port it later, it's not significant. for now - -#[test] -fn test_invalid_bytecode() { - let mut vm_builder = VmTesterBuilder::new(HistoryEnabled) - .with_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1); - let mut storage = vm_builder.take_storage(); - let mut vm = vm_builder.build(&mut storage); - - let block_gas_per_pubdata = vm_test_env - .block_context - .context - .block_gas_price_per_pubdata(); - - let mut test_vm_with_custom_bytecode_hash = - |bytecode_hash: H256, expected_revert_reason: Option| { - let mut oracle_tools = - OracleTools::new(vm_test_env.storage_ptr.as_mut(), HistoryEnabled); - - let (encoded_tx, predefined_overhead) = get_l1_tx_with_custom_bytecode_hash( - h256_to_u256(bytecode_hash), - block_gas_per_pubdata as u32, - ); - - run_vm_with_custom_factory_deps( - &mut oracle_tools, - vm_test_env.block_context.context, - &vm_test_env.block_properties, - encoded_tx, - predefined_overhead, - expected_revert_reason, - ); - }; - - let failed_to_mark_factory_deps = |msg: &str, data: Vec| { - TxRevertReason::FailedToMarkFactoryDependencies(VmRevertReason::General { - msg: msg.to_string(), - data, - }) - }; - - // Here we provide the correctly-formatted bytecode hash of - // odd length, so it should work. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - None, - ); - - // Here we provide correctly formatted bytecode of even length, so - // it should fail. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Code length in words must be odd", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 67, 111, 100, 101, 32, 108, 101, 110, - 103, 116, 104, 32, 105, 110, 32, 119, 111, 114, 100, 115, 32, 109, 117, 115, 116, - 32, 98, 101, 32, 111, 100, 100, - ], - )), - ); - - // Here we provide incorrectly formatted bytecode of odd length, so - // it should fail. 
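// The `get_used_contracts` test above fixes an invariant worth spelling out:
// factory deps of a *failed* transaction become known to the decommittment
// processor, but must not be reported by `get_used_contracts()`. A
// self-contained sketch of the relationship (hashes are stand-in u64 values,
// not real U256 bytecode hashes):
fn check_known_vs_used(
    known: &std::collections::HashSet<u64>,
    used: &std::collections::HashSet<u64>,
    failed_tx_dep: u64,
) {
    assert!(used.is_subset(known));
    assert!(known.contains(&failed_tx_dep));
    assert!(!used.contains(&failed_tx_dep));
}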
- test_vm_with_custom_bytecode_hash( - H256([ - 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Incorrectly formatted bytecodeHash", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, - 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, - 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - )), - ); - - // Here we provide incorrectly formatted bytecode of odd length, so - // it should fail. - test_vm_with_custom_bytecode_hash( - H256([ - 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Incorrectly formatted bytecodeHash", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, - 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, - 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - )), - ); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/is_write_initial.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/is_write_initial.rs deleted file mode 100644 index d7b96133000..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/is_write_initial.rs +++ /dev/null @@ -1,42 +0,0 @@ -use crate::interface::storage::ReadStorage; -use zksync_types::get_nonce_key; - -use crate::interface::{TxExecutionMode, VmExecutionMode}; -use crate::vm_refunds_enhancement::tests::tester::{Account, TxType, VmTesterBuilder}; -use crate::vm_refunds_enhancement::tests::utils::read_test_contract; -use crate::vm_refunds_enhancement::HistoryDisabled; - -#[test] -fn test_is_write_initial_behaviour() { - // In this test, we check the result of `is_write_initial` at different stages. - // The main idea is to check that `is_write_initial` storage uses the correct cache for initial_writes and doesn't - // mix it up with the repeated writes during one batch execution. - - let mut account = Account::random(); - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let nonce_key = get_nonce_key(&account.address); - // Check that the next write to the nonce key will be initial. - assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); - - let contract_code = read_test_contract(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L2).tx; - - vm.vm.push_transaction(tx); - vm.vm.execute(VmExecutionMode::OneTx); - - // Check that `is_write_initial` still returns true for the nonce key.
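// The test vectors above encode the hash-format rules the VM enforces: byte 0
// is a version marker (0x01), byte 1 must be zero, and bytes 2..4 hold the
// bytecode length in 32-byte words, which must be odd. A sketch of that check
// as inferred from the vectors (assumed layout, for illustration only):
fn is_well_formed_bytecode_hash(hash: &[u8; 32]) -> bool {
    let len_in_words = u16::from_be_bytes([hash[2], hash[3]]);
    hash[0] == 0x01 && hash[1] == 0x00 && len_in_words % 2 == 1
}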
- assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/l1_tx_execution.rs deleted file mode 100644 index 138879cd7ed..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/l1_tx_execution.rs +++ /dev/null @@ -1,125 +0,0 @@ -use zksync_system_constants::BOOTLOADER_ADDRESS; -use zksync_types::l2_to_l1_log::L2ToL1Log; -use zksync_types::storage_writes_deduplicator::StorageWritesDeduplicator; -use zksync_types::{get_code_key, get_known_code_key, U256}; -use zksync_utils::u256_to_h256; - -use crate::interface::{TxExecutionMode, VmExecutionMode}; -use crate::vm_refunds_enhancement::tests::tester::{TxType, VmTesterBuilder}; -use crate::vm_refunds_enhancement::tests::utils::{ - read_test_contract, verify_required_storage, BASE_SYSTEM_CONTRACTS, -}; -use crate::vm_refunds_enhancement::types::internals::TransactionData; -use crate::vm_refunds_enhancement::HistoryEnabled; - -#[test] -fn test_l1_tx_execution() { - // In this test, we try to execute a contract deployment from L1 - // Here instead of marking code hash via the bootloader means, we will be - // using L1->L2 communication, the same it would likely be done during the priority mode. - - // There are always at least 3 initial writes here, because we pay fees from l1: - // - totalSupply of ETH token - // - balance of the refund recipient - // - balance of the bootloader - // - tx_rollout hash - - let basic_initial_writes = 1; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let contract_code = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let deploy_tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 1 }); - let tx_data: TransactionData = deploy_tx.tx.clone().into(); - - let required_l2_to_l1_logs = vec![L2ToL1Log { - shard_id: 0, - is_service: true, - tx_number_in_block: 0, - sender: BOOTLOADER_ADDRESS, - key: tx_data.tx_hash(0.into()), - value: u256_to_h256(U256::from(1u32)), - }]; - - vm.vm.push_transaction(deploy_tx.tx.clone()); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&deploy_tx.bytecode_hash); - - // The contract should be deployed successfully. 
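// For every L1->L2 transaction the bootloader emits one service L2->L1 log
// whose key is the canonical tx hash and whose value is 1 on success, which
// is what `required_l2_to_l1_logs` above encodes. A stand-in sketch of that
// shape (field types simplified; the real log uses H256/Address):
struct ServiceLogSketch {
    is_service: bool,
    tx_number_in_block: u16,
    key: [u8; 32],   // canonical tx hash
    value: [u8; 32], // big-endian 1 == success
}

fn success_log(tx_hash: [u8; 32]) -> ServiceLogSketch {
    let mut value = [0u8; 32];
    value[31] = 1;
    ServiceLogSketch { is_service: true, tx_number_in_block: 0, key: tx_hash, value }
}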
- let account_code_key = get_code_key(&deploy_tx.address); - - let expected_slots = vec![ - (u256_to_h256(U256::from(1u32)), known_codes_key), - (deploy_tx.bytecode_hash, account_code_key), - ]; - assert!(!res.result.is_failed()); - - verify_required_storage(&vm.vm.state, expected_slots); - - assert_eq!(res.logs.user_l2_to_l1_logs, required_l2_to_l1_logs); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - true, - None, - false, - TxType::L1 { serial_id: 0 }, - ); - vm.vm.push_transaction(tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - let storage_logs = res.logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - - // Tx panicked - assert_eq!(res.initial_storage_writes - basic_initial_writes, 0); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - false, - None, - false, - TxType::L1 { serial_id: 0 }, - ); - vm.vm.push_transaction(tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); - let storage_logs = res.logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We changed one slot inside contract - assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); - - // No repeated writes - let repeated_writes = res.repeated_storage_writes; - assert_eq!(res.repeated_storage_writes, 0); - - vm.vm.push_transaction(tx); - let storage_logs = vm.vm.execute(VmExecutionMode::OneTx).logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We do the same storage write, it will be deduplicated, so still 4 initial write and 0 repeated - assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); - assert_eq!(res.repeated_storage_writes, repeated_writes); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - false, - Some(10.into()), - false, - TxType::L1 { serial_id: 1 }, - ); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - // Method is not payable tx should fail - assert!(result.result.is_failed(), "The transaction should fail"); - - let res = StorageWritesDeduplicator::apply_on_empty_state(&result.logs.storage_logs); - // There are only basic initial writes - assert_eq!(res.initial_storage_writes - basic_initial_writes, 2); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/l2_blocks.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/l2_blocks.rs deleted file mode 100644 index 269b6cf396c..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/l2_blocks.rs +++ /dev/null @@ -1,498 +0,0 @@ -//! -//! Tests for the bootloader -//! The description for each of the tests can be found in the corresponding `.yul` file. -//! 
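// The initial/repeated write counts asserted above come from write
// deduplication: duplicate writes to one slot within a batch collapse into a
// single write, which is then "initial" if the key was never written before
// and "repeated" otherwise. A minimal sketch of that classification
// (stand-in u64 keys; the real deduplicator also folds no-op writes):
fn classify_writes(
    pre_existing: &std::collections::HashSet<u64>,
    batch_writes: &[u64],
) -> (usize, usize) {
    let deduped: std::collections::HashSet<u64> = batch_writes.iter().copied().collect();
    let repeated = deduped.iter().filter(|key| pre_existing.contains(key)).count();
    (deduped.len() - repeated, repeated)
}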
- -use crate::interface::{ExecutionResult, Halt, L2BlockEnv, TxExecutionMode, VmExecutionMode}; -use crate::vm_refunds_enhancement::constants::{ - BOOTLOADER_HEAP_PAGE, TX_OPERATOR_L2_BLOCK_INFO_OFFSET, TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO, -}; -use crate::vm_refunds_enhancement::tests::tester::default_l1_batch; -use crate::vm_refunds_enhancement::tests::tester::VmTesterBuilder; -use crate::vm_refunds_enhancement::utils::l2_blocks::get_l2_block_hash_key; -use crate::vm_refunds_enhancement::{HistoryEnabled, HistoryMode, Vm}; -use zk_evm_1_3_3::aux_structures::Timestamp; -use crate::interface::storage::{ReadStorage, WriteStorage}; -use zksync_system_constants::{ - CURRENT_VIRTUAL_BLOCK_INFO_POSITION, REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, -}; -use zksync_types::block::{pack_block_info, unpack_block_info}; -use zksync_types::{ - block::{legacy_miniblock_hash, miniblock_hash}, - get_code_key, AccountTreeId, Execute, ExecuteTransactionCommon, L1BatchNumber, L1TxCommonData, - MiniblockNumber, StorageKey, Transaction, H160, H256, SYSTEM_CONTEXT_ADDRESS, - SYSTEM_CONTEXT_BLOCK_INFO_POSITION, SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, U256, -}; -use zksync_utils::{h256_to_u256, u256_to_h256}; - -fn get_l1_noop() -> Transaction { - Transaction { - common_data: ExecuteTransactionCommon::L1(L1TxCommonData { - sender: H160::random(), - gas_limit: U256::from(2000000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute: Execute { - contract_address: H160::zero(), - calldata: vec![], - value: U256::zero(), - factory_deps: None, - }, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -#[test] -fn test_l2_block_initialization_timestamp() { - // This test checks that the L2 block initialization works correctly. - // Here we check that the first block must have a timestamp that is greater than or equal to the timestamp - // of the current batch. - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - // Override the timestamp of the current miniblock to be 0. - vm.vm.bootloader_state.push_l2_block(L2BlockEnv { - number: 1, - timestamp: 0, - prev_block_hash: legacy_miniblock_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }); - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt {reason: Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())} - ); -} - -#[test] -fn test_l2_block_initialization_number_non_zero() { - // This test checks that the L2 block initialization works correctly. - // Here we check that the first miniblock number cannot be zero.
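// The two initialization tests here pin down the bootloader's rules for the
// first L2 block of a batch: the block number must be non-zero, and the block
// timestamp must be at least the batch timestamp. A compact sketch of those
// checks (illustrative; the real checks run inside the bootloader and halt
// with the messages quoted in the tests):
fn validate_first_l2_block(number: u32, block_ts: u64, batch_ts: u64) -> Result<(), &'static str> {
    if number == 0 {
        return Err("L2 block number is never expected to be zero");
    }
    if block_ts < batch_ts {
        return Err("the L2 block timestamp must be >= the batch timestamp");
    }
    Ok(())
}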
- - let l1_batch = default_l1_batch(L1BatchNumber(1)); - let first_l2_block = L2BlockEnv { - number: 0, - timestamp: l1_batch.timestamp, - prev_block_hash: legacy_miniblock_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 0, first_l2_block, timestamp); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt { - reason: Halt::FailedToSetL2Block( - "L2 block number is never expected to be zero".to_string() - ) - } - ); -} - -fn test_same_l2_block( - expected_error: Option, - override_timestamp: Option, - override_prev_block_hash: Option, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - vm.vm.push_transaction(l1_tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!res.result.is_failed()); - - let mut current_l2_block = vm.vm.batch_env.first_l2_block; - - if let Some(timestamp) = override_timestamp { - current_l2_block.timestamp = timestamp; - } - if let Some(prev_block_hash) = override_prev_block_hash { - current_l2_block.prev_block_hash = prev_block_hash; - } - - if (None, None) == (override_timestamp, override_prev_block_hash) { - current_l2_block.max_virtual_blocks_to_create = 0; - } - - vm.vm.push_transaction(l1_tx); - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 1, current_l2_block, timestamp); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_same_l2_block() { - // This test aims to test the case when there are multiple transactions inside the same L2 block. 
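// The cases below drive the rule for *continuing* the same L2 block: the
// repeated block info must match exactly (same timestamp, same previous block
// hash) and must not ask for new virtual blocks. A sketch of that validity
// condition (plain tuples instead of `L2BlockEnv`):
fn is_valid_same_block_info(
    repeated: (u64, [u8; 32], u32), // (timestamp, prev_block_hash, max_virtual_blocks_to_create)
    current: (u64, [u8; 32]),       // (timestamp, prev_block_hash)
) -> bool {
    repeated.0 == current.0 && repeated.1 == current.1 && repeated.2 == 0
}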
- - // Case 1: Incorrect timestamp - test_same_l2_block( - Some(Halt::FailedToSetL2Block( - "The timestamp of the same L2 block must be same".to_string(), - )), - Some(0), - None, - ); - - // Case 2: Incorrect previous block hash - test_same_l2_block( - Some(Halt::FailedToSetL2Block( - "The previous hash of the same L2 block must be same".to_string(), - )), - None, - Some(H256::zero()), - ); - - // Case 3: Correct continuation of the same L2 block - test_same_l2_block(None, None, None); -} - -fn test_new_l2_block( - first_l2_block: L2BlockEnv, - overridden_second_block_number: Option, - overridden_second_block_timestamp: Option, - overridden_second_block_prev_block_hash: Option, - expected_error: Option, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - l1_batch.first_l2_block = first_l2_block; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - // Firstly we execute the first transaction - vm.vm.push_transaction(l1_tx.clone()); - vm.vm.execute(VmExecutionMode::OneTx); - - let mut second_l2_block = vm.vm.batch_env.first_l2_block; - second_l2_block.number += 1; - second_l2_block.timestamp += 1; - second_l2_block.prev_block_hash = vm.vm.bootloader_state.last_l2_block().get_hash(); - - if let Some(block_number) = overridden_second_block_number { - second_l2_block.number = block_number; - } - if let Some(timestamp) = overridden_second_block_timestamp { - second_l2_block.timestamp = timestamp; - } - if let Some(prev_block_hash) = overridden_second_block_prev_block_hash { - second_l2_block.prev_block_hash = prev_block_hash; - } - - vm.vm.bootloader_state.push_l2_block(second_l2_block); - - vm.vm.push_transaction(l1_tx); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_new_l2_block() { - // This test aims to cover a potential issue - - let correct_first_block = L2BlockEnv { - number: 1, - timestamp: 1, - prev_block_hash: legacy_miniblock_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }; - - // Case 1: Block number increasing by more than 1 - test_new_l2_block( - correct_first_block, - Some(3), - None, - None, - Some(Halt::FailedToSetL2Block( - "Invalid new L2 block number".to_string(), - )), - ); - - // Case 2: Timestamp not increasing - test_new_l2_block( - correct_first_block, - None, - Some(1), - None, - Some(Halt::FailedToSetL2Block("The timestamp of the new L2 block must be greater than the timestamp of the previous L2 block".to_string())), - ); - - // Case 3: Incorrect previous block hash - test_new_l2_block( - correct_first_block, - None, - None, - Some(H256::zero()), - Some(Halt::FailedToSetL2Block( - "The current L2 block hash is incorrect".to_string(), - )), - ); - - // Case 4: Correct new block - test_new_l2_block(correct_first_block, None, None, None, None); -} - -#[allow(clippy::too_many_arguments)] -fn test_first_in_batch( - miniblock_timestamp: u64, - miniblock_number: u32, - pending_txs_hash: H256, - batch_timestamp: u64, - new_batch_timestamp: u64, - batch_number: u32, - proposed_block: L2BlockEnv, - expected_error: Option, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.number
+= 1; - l1_batch.timestamp = new_batch_timestamp; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_tx = get_l1_noop(); - - // Setting the values provided. - let storage_ptr = vm.vm.state.storage.storage.get_ptr(); - let miniblock_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - ); - let pending_txs_hash_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, - ); - let batch_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_BLOCK_INFO_POSITION, - ); - let prev_block_hash_position = get_l2_block_hash_key(miniblock_number - 1); - - storage_ptr.borrow_mut().set_value( - miniblock_info_slot, - u256_to_h256(pack_block_info( - miniblock_number as u64, - miniblock_timestamp, - )), - ); - storage_ptr - .borrow_mut() - .set_value(pending_txs_hash_slot, pending_txs_hash); - storage_ptr.borrow_mut().set_value( - batch_info_slot, - u256_to_h256(pack_block_info(batch_number as u64, batch_timestamp)), - ); - storage_ptr.borrow_mut().set_value( - prev_block_hash_position, - legacy_miniblock_hash(MiniblockNumber(miniblock_number - 1)), - ); - - // In order to skip checks from the Rust side of the VM, we firstly use some definitely correct L2 block info. - // And then override it with the user-provided value - - let last_l2_block = vm.vm.bootloader_state.last_l2_block(); - let new_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: last_l2_block.timestamp + 1, - prev_block_hash: last_l2_block.get_hash(), - max_virtual_blocks_to_create: last_l2_block.max_virtual_blocks_to_create, - }; - - vm.vm.bootloader_state.push_l2_block(new_l2_block); - vm.vm.push_transaction(l1_tx); - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 0, proposed_block, timestamp); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_first_in_batch() { - test_first_in_batch( - 1, - 1, - H256::zero(), - 1, - 2, - 1, - L2BlockEnv { - number: 2, - timestamp: 2, - prev_block_hash: miniblock_hash( - MiniblockNumber(1), - 1, - legacy_miniblock_hash(MiniblockNumber(0)), - H256::zero(), - ), - max_virtual_blocks_to_create: 1, - }, - None, - ); - - test_first_in_batch( - 8, - 1, - H256::zero(), - 5, - 12, - 1, - L2BlockEnv { - number: 2, - timestamp: 9, - prev_block_hash: miniblock_hash(MiniblockNumber(1), 8, legacy_miniblock_hash(MiniblockNumber(0)), H256::zero()), - max_virtual_blocks_to_create: 1 - }, - Some(Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())), - ); -} - -#[test] -fn test_l2_block_upgrade() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - vm.vm - .state - .storage - .storage - .get_ptr() - .borrow_mut() - .set_value(get_code_key(&SYSTEM_CONTEXT_ADDRESS), H256::default()); - - let l1_tx = get_l1_noop(); - // Firstly we execute the first transaction - 
vm.vm.push_transaction(l1_tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "No revert reason expected"); - let result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!result.result.is_failed(), "No revert reason expected"); -} - -#[test] -fn test_l2_block_upgrade_ending() { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch.clone()) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - let storage = vm.storage.clone(); - - storage - .borrow_mut() - .set_value(get_code_key(&SYSTEM_CONTEXT_ADDRESS), H256::default()); - - vm.vm.push_transaction(l1_tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(!result.result.is_failed(), "No revert reason expected"); - - let virtual_block_info = storage.borrow_mut().read_value(&StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - CURRENT_VIRTUAL_BLOCK_INFO_POSITION, - )); - - let (virtual_block_number, virtual_block_timestamp) = - unpack_block_info(h256_to_u256(virtual_block_info)); - - assert_eq!(virtual_block_number as u32, l1_batch.first_l2_block.number); - assert_eq!(virtual_block_timestamp, l1_batch.first_l2_block.timestamp); - vm.vm.push_transaction(l1_tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "No revert reason expected"); - let result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!result.result.is_failed(), "No revert reason expected"); -} - -fn set_manual_l2_block_info( - vm: &mut Vm, - tx_number: usize, - block_info: L2BlockEnv, - timestamp: Timestamp, -) { - let fictive_miniblock_position = - TX_OPERATOR_L2_BLOCK_INFO_OFFSET + TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO * tx_number; - - vm.state.memory.populate_page( - BOOTLOADER_HEAP_PAGE as usize, - vec![ - (fictive_miniblock_position, block_info.number.into()), - (fictive_miniblock_position + 1, block_info.timestamp.into()), - ( - fictive_miniblock_position + 2, - h256_to_u256(block_info.prev_block_hash), - ), - ( - fictive_miniblock_position + 3, - block_info.max_virtual_blocks_to_create.into(), - ), - ], - timestamp, - ) -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/mod.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/mod.rs deleted file mode 100644 index ffb38dd3725..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/mod.rs +++ /dev/null @@ -1,20 +0,0 @@ -mod bootloader; -mod default_aa; -// TODO - fix this test -// mod invalid_bytecode; -mod bytecode_publishing; -mod call_tracer; -mod gas_limit; -mod get_used_contracts; -mod is_write_initial; -mod l1_tx_execution; -mod l2_blocks; -mod nonce_holder; -mod refunds; -mod require_eip712; -mod rollbacks; -mod simple_execution; -mod tester; -mod tracing_execution_error; -mod upgrade; -mod utils; diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/nonce_holder.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/nonce_holder.rs deleted file mode 100644 index 21959461906..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/nonce_holder.rs +++ /dev/null @@ -1,181 +0,0 @@ -use zksync_types::{Execute, Nonce}; - -use crate::interface::TxExecutionMode; -use crate::interface::VmRevertReason; -use crate::interface::{ExecutionResult, Halt, TxRevertReason, 
VmExecutionMode}; -use crate::vm_refunds_enhancement::tests::tester::{Account, VmTesterBuilder}; -use crate::vm_refunds_enhancement::tests::utils::read_nonce_holder_tester; -use crate::vm_refunds_enhancement::types::internals::TransactionData; -use crate::vm_refunds_enhancement::HistoryEnabled; - -pub enum NonceHolderTestMode { - SetValueUnderNonce, - IncreaseMinNonceBy5, - IncreaseMinNonceTooMuch, - LeaveNonceUnused, - IncreaseMinNonceBy1, - SwitchToArbitraryOrdering, -} - -impl From for u8 { - fn from(mode: NonceHolderTestMode) -> u8 { - match mode { - NonceHolderTestMode::SetValueUnderNonce => 0, - NonceHolderTestMode::IncreaseMinNonceBy5 => 1, - NonceHolderTestMode::IncreaseMinNonceTooMuch => 2, - NonceHolderTestMode::LeaveNonceUnused => 3, - NonceHolderTestMode::IncreaseMinNonceBy1 => 4, - NonceHolderTestMode::SwitchToArbitraryOrdering => 5, - } - } -} - -#[test] -fn test_nonce_holder() { - let mut account = Account::random(); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_custom_contracts(vec![( - read_nonce_holder_tester().to_vec(), - account.address, - true, - )]) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let mut run_nonce_test = |nonce: u32, - test_mode: NonceHolderTestMode, - error_message: Option, - comment: &'static str| { - // In this test we have to reset the VM state after each test case, because once the bootloader fails during validation of the transaction, - // it will fail again and again. At the same time, we have to keep the same storage, because we want to keep the nonce holder contract state. - // The easiest way in terms of lifetimes is to reuse vm_builder to achieve it. - vm.reset_state(true); - let mut transaction_data: TransactionData = account - .get_l2_tx_for_execute_with_nonce( - Execute { - contract_address: account.address, - calldata: vec![12], - value: Default::default(), - factory_deps: None, - }, - None, - Nonce(nonce), - ) - .into(); - - transaction_data.signature = vec![test_mode.into()]; - vm.vm.push_raw_transaction(transaction_data, 0, 0, true); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(msg) = error_message { - let expected_error = - TxRevertReason::Halt(Halt::ValidationFailed(VmRevertReason::General { - msg, - data: vec![], - })); - let ExecutionResult::Halt { reason } = result.result else { - panic!("Expected revert, got {:?}", result.result); - }; - assert_eq!( - reason.to_string(), - expected_error.to_string(), - "{}", - comment - ); - } else { - assert!(!result.result.is_failed(), "{}", comment); - } - }; - // Test 1: trying to set a value under a non-sequential nonce value.
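// Each scenario below is selected by a single byte smuggled through the
// transaction signature (`signature = vec![test_mode.into()]`), mirroring the
// `From<NonceHolderTestMode> for u8` mapping above. A hypothetical decoder,
// for illustration only:
fn decode_test_mode(signature: &[u8]) -> Option<&'static str> {
    match *signature.first()? {
        0 => Some("SetValueUnderNonce"),
        1 => Some("IncreaseMinNonceBy5"),
        2 => Some("IncreaseMinNonceTooMuch"),
        3 => Some("LeaveNonceUnused"),
        4 => Some("IncreaseMinNonceBy1"),
        5 => Some("SwitchToArbitraryOrdering"),
        _ => None,
    }
}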
- run_nonce_test( - 1u32, - NonceHolderTestMode::SetValueUnderNonce, - Some("Previous nonce has not been used".to_string()), - "Allowed to set value under non sequential value", - ); - - // Test 2: increase min nonce by 1 with sequential nonce ordering: - run_nonce_test( - 0u32, - NonceHolderTestMode::IncreaseMinNonceBy1, - None, - "Failed to increment nonce by 1 for sequential account", - ); - - // Test 3: correctly set value under nonce with sequential nonce ordering: - run_nonce_test( - 1u32, - NonceHolderTestMode::SetValueUnderNonce, - None, - "Failed to set value under nonce sequential value", - ); - - // Test 5: migrate to the arbitrary nonce ordering: - run_nonce_test( - 2u32, - NonceHolderTestMode::SwitchToArbitraryOrdering, - None, - "Failed to switch to arbitrary ordering", - ); - - // Test 6: increase min nonce by 5 - run_nonce_test( - 6u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - None, - "Failed to increase min nonce by 5", - ); - - // Test 7: since the nonces in range [6,10] are no longer allowed, the - // tx with nonce 10 should not be allowed - run_nonce_test( - 10u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - Some("Reusing the same nonce twice".to_string()), - "Allowed to reuse nonce below the minimal one", - ); - - // Test 8: we should be able to use nonce 13 - run_nonce_test( - 13u32, - NonceHolderTestMode::SetValueUnderNonce, - None, - "Did not allow to use unused nonce 10", - ); - - // Test 9: we should not be able to reuse nonce 13 - run_nonce_test( - 13u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - Some("Reusing the same nonce twice".to_string()), - "Allowed to reuse the same nonce twice", - ); - - // Test 10: we should be able to simply use nonce 14, while bumping the minimal nonce by 5 - run_nonce_test( - 14u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - None, - "Did not allow to use a bumped nonce", - ); - - // Test 11: Do not allow bumping nonce by too much - run_nonce_test( - 16u32, - NonceHolderTestMode::IncreaseMinNonceTooMuch, - Some("The value for incrementing the nonce is too high".to_string()), - "Allowed for incrementing min nonce too much", - ); - - // Test 12: Do not allow not setting a nonce as used - run_nonce_test( - 16u32, - NonceHolderTestMode::LeaveNonceUnused, - Some("The nonce was not set as used".to_string()), - "Allowed to leave nonce as unused", - ); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/require_eip712.rs deleted file mode 100644 index 03a704841b0..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/require_eip712.rs +++ /dev/null @@ -1,163 +0,0 @@ -use std::convert::TryInto; - -use ethabi::Token; - -use zksync_eth_signer::raw_ethereum_tx::TransactionParameters; -use zksync_eth_signer::EthereumSigner; -use zksync_system_constants::L2_ETH_TOKEN_ADDRESS; -use zksync_types::fee::Fee; -use zksync_types::l2::L2Tx; -use zksync_types::transaction_request::TransactionRequest; -use zksync_types::utils::storage_key_for_standard_token_balance; -use zksync_types::{ - AccountTreeId, Address, Eip712Domain, Execute, L2ChainId, Nonce, Transaction, U256, -}; - -use crate::interface::{TxExecutionMode, VmExecutionMode}; -use crate::vm_refunds_enhancement::tests::tester::{Account, VmTester, VmTesterBuilder}; -use crate::vm_refunds_enhancement::tests::utils::read_many_owners_custom_account_contract; -use crate::vm_refunds_enhancement::HistoryDisabled; - -impl VmTester { - pub(crate) fn get_eth_balance(&mut 
self, address: Address) -> U256 { - let key = storage_key_for_standard_token_balance( - AccountTreeId::new(L2_ETH_TOKEN_ADDRESS), - &address, - ); - self.vm.state.storage.storage.read_from_storage(&key) - } -} - -// TODO: refactor this test; it uses too many internal details of the VM -#[tokio::test] -/// This test deploys 'buggy' account abstraction code, and then tries accessing it both with legacy -/// and EIP712 transactions. -/// Currently we support both, but in the future, we should allow only EIP712 transactions to access the AA accounts. -async fn test_require_eip712() { - // Use 3 accounts: - // - private_address - EOA account, where we have the key - // - account_address - AA account, where the contract is deployed - // - beneficiary - an EOA account, where we'll try to transfer the tokens. - let account_abstraction = Account::random(); - let mut private_account = Account::random(); - let beneficiary = Account::random(); - - let (bytecode, contract) = read_many_owners_custom_account_contract(); - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_custom_contracts(vec![(bytecode, account_abstraction.address, true)]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account_abstraction.clone(), private_account.clone()]) - .build(); - - assert_eq!(vm.get_eth_balance(beneficiary.address), U256::from(0)); - - let chain_id: u32 = 270; - - // First, let's set the owners of the AA account to the private_address - // (so that messages signed by private_address are authorized to act on behalf of the AA account). - let set_owners_function = contract.function("setOwners").unwrap(); - let encoded_input = set_owners_function - .encode_input(&[Token::Array(vec![Token::Address(private_account.address)])]) - .unwrap(); - - let tx = private_account.get_l2_tx_for_execute( - Execute { - contract_address: account_abstraction.address, - calldata: encoded_input, - value: Default::default(), - factory_deps: None, - }, - None, - ); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - let private_account_balance = vm.get_eth_balance(private_account.address); - - // And now let's do the transfer from the 'account abstraction' to 'beneficiary' (using a 'legacy' transaction). - // Normally this would not work - unless the operator is malicious. - let aa_raw_tx = TransactionParameters { - nonce: U256::from(0), - to: Some(beneficiary.address), - gas: U256::from(100000000), - gas_price: Some(U256::from(10000000)), - value: U256::from(888000088), - data: vec![], - chain_id: 270, - transaction_type: None, - access_list: None, - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - }; - - let aa_tx = private_account.sign_legacy_tx(aa_raw_tx).await; - let (tx_request, hash) = TransactionRequest::from_bytes(&aa_tx, L2ChainId::from(270)).unwrap(); - - let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000).unwrap(); - l2_tx.set_input(aa_tx, hash); - // Pretend that the operator is malicious and sets the initiator to the AA account.
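// The balance assertions that follow sum the two transfers made in this test:
// 888000088 units via the forged legacy transaction plus 28374938 units via
// the EIP-712 transaction, so the beneficiary must end up with 916375026. A
// one-line sanity check of that arithmetic:
fn beneficiary_total() -> u64 {
    888_000_088 + 28_374_938 // == 916_375_026
}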
- l2_tx.common_data.initiator_address = account_abstraction.address; - let transaction: Transaction = l2_tx.try_into().unwrap(); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(888000088) - ); - // Make sure that the tokens were transferred from the AA account. - assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); - - // // Now send the 'classic' EIP712 transaction - let tx_712 = L2Tx::new( - beneficiary.address, - vec![], - Nonce(1), - Fee { - gas_limit: U256::from(1000000000), - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - gas_per_pubdata_limit: U256::from(1000000000), - }, - account_abstraction.address, - U256::from(28374938), - None, - Default::default(), - ); - - let transaction_request: TransactionRequest = tx_712.into(); - - let domain = Eip712Domain::new(L2ChainId::from(chain_id)); - let signature = private_account - .get_pk_signer() - .sign_typed_data(&domain, &transaction_request) - .await - .unwrap(); - let encoded_tx = transaction_request.get_signed_bytes(&signature, L2ChainId::from(chain_id)); - - let (aa_txn_request, aa_hash) = - TransactionRequest::from_bytes(&encoded_tx, L2ChainId::from(chain_id)).unwrap(); - - let mut l2_tx = L2Tx::from_request(aa_txn_request, 100000).unwrap(); - l2_tx.set_input(encoded_tx, aa_hash); - - let transaction: Transaction = l2_tx.try_into().unwrap(); - vm.vm.push_transaction(transaction); - vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(916375026) - ); - assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/rollbacks.rs deleted file mode 100644 index 8107ddcdabf..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/rollbacks.rs +++ /dev/null @@ -1,259 +0,0 @@ -use ethabi::Token; - -use zksync_contracts::get_loadnext_contract; -use zksync_contracts::test_contracts::LoadnextContractExecutionParams; - -use crate::interface::storage::WriteStorage; -use zksync_types::{get_nonce_key, Execute, U256}; - -use crate::interface::{TxExecutionMode, VmExecutionMode}; -use crate::vm_refunds_enhancement::tests::tester::{ - DeployContractsTx, TransactionTestInfo, TxModifier, TxType, VmTesterBuilder, -}; -use crate::vm_refunds_enhancement::tests::utils::read_test_contract; -use crate::vm_refunds_enhancement::types::internals::ZkSyncVmState; -use crate::vm_refunds_enhancement::{ - BootloaderState, DynTracer, HistoryEnabled, HistoryMode, TracerExecutionStatus, - TracerExecutionStopReason, VmTracer, -}; - -#[test] -fn test_vm_rollbacks() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let mut account = vm.rich_accounts[0].clone(); - let counter = read_test_contract(); - let tx_0 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_1 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_2 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(tx_0.clone(), false), - 
TransactionTestInfo::new_processed(tx_1.clone(), false), - TransactionTestInfo::new_processed(tx_2.clone(), false), - ]); - - // reset vm - vm.reset_with_empty_storage(); - - let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignatureLength.into()), - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongMagicValue.into()), - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignature.into()), - // The correct nonce is 0, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_0.clone(), false), - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_1, false), - // The correct nonce is 2, this tx will fail - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_2.clone(), false), - // This tx will fail - TransactionTestInfo::new_rejected(tx_2, TxModifier::NonceReused.into()), - TransactionTestInfo::new_rejected(tx_0, TxModifier::NonceReused.into()), - ]); - - assert_eq!(result_without_rollbacks, result_with_rollbacks); -} - -#[test] -fn test_vm_loadnext_rollbacks() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let mut account = vm.rich_accounts[0].clone(); - - let loadnext_contract = get_loadnext_contract(); - let loadnext_constructor_data = &[Token::Uint(U256::from(100))]; - let DeployContractsTx { - tx: loadnext_deploy_tx, - address, - .. 
- } = account.get_deploy_tx_with_factory_deps( - &loadnext_contract.bytecode, - Some(loadnext_constructor_data), - loadnext_contract.factory_deps.clone(), - TxType::L2, - ); - - let loadnext_tx_1 = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let loadnext_tx_2 = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_1.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), false), - ]); - - // reset vm - vm.reset_with_empty_storage(); - - let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_1.clone(), true), - TransactionTestInfo::new_rejected( - loadnext_deploy_tx.clone(), - TxModifier::NonceReused.into(), - ), - TransactionTestInfo::new_processed(loadnext_tx_1, false), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_rejected(loadnext_deploy_tx, TxModifier::NonceReused.into()), - TransactionTestInfo::new_processed(loadnext_tx_2, false), - ]); - - assert_eq!(result_without_rollbacks, result_with_rollbacks); -} - -// Testing tracer that does not allow the recursion to go deeper than a certain limit -struct MaxRecursionTracer { - max_recursion_depth: usize, -} - -/// Tracer that stops VM execution once the call stack grows deeper than `max_recursion_depth`. -impl<S: WriteStorage, H: HistoryMode> DynTracer<S, H> for MaxRecursionTracer {} - -impl<S: WriteStorage, H: HistoryMode> VmTracer<S, H> for MaxRecursionTracer { - fn finish_cycle( - &mut self, - state: &mut ZkSyncVmState<S, H>, - _bootloader_state: &mut BootloaderState, - ) -> TracerExecutionStatus { - let current_depth = state.local_state.callstack.depth(); - - if current_depth > self.max_recursion_depth { - TracerExecutionStatus::Stop(TracerExecutionStopReason::Finish) - } else { - TracerExecutionStatus::Continue - } - } -} - -#[test] -fn test_layered_rollback() { - // This test checks that the layered rollbacks work correctly, i.e. - // the rollback by the operator will always revert all the changes. - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - let loadnext_contract = get_loadnext_contract().bytecode; - - let DeployContractsTx { - tx: deploy_tx, - address, - ..
- } = account.get_deploy_tx( - &loadnext_contract, - Some(&[Token::Uint(0.into())]), - TxType::L2, - ); - vm.vm.push_transaction(deploy_tx); - let deployment_res = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!deployment_res.result.is_failed(), "transaction failed"); - - let loadnext_transaction = account.get_loadnext_transaction( - address, - LoadnextContractExecutionParams { - writes: 1, - recursive_calls: 20, - ..LoadnextContractExecutionParams::empty() - }, - TxType::L2, - ); - - let nonce_val = vm - .vm - .state - .storage - .storage - .read_from_storage(&get_nonce_key(&account.address)); - - vm.vm.make_snapshot(); - - vm.vm.push_transaction(loadnext_transaction.clone()); - vm.vm.inspect( - vec![Box::new(MaxRecursionTracer { - max_recursion_depth: 15, - })], - VmExecutionMode::OneTx, - ); - - let nonce_val2 = vm - .vm - .state - .storage - .storage - .read_from_storage(&get_nonce_key(&account.address)); - - // The tracer stopped after the validation has passed, so nonce has already been increased - assert_eq!(nonce_val + U256::one(), nonce_val2, "nonce did not change"); - - vm.vm.rollback_to_the_latest_snapshot(); - - let nonce_val_after_rollback = vm - .vm - .state - .storage - .storage - .read_from_storage(&get_nonce_key(&account.address)); - - assert_eq!( - nonce_val, nonce_val_after_rollback, - "nonce changed after rollback" - ); - - vm.vm.push_transaction(loadnext_transaction); - let result = vm.vm.inspect(vec![], VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "transaction must not fail"); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/simple_execution.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/simple_execution.rs deleted file mode 100644 index eb5e3879837..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/simple_execution.rs +++ /dev/null @@ -1,77 +0,0 @@ -use crate::interface::{ExecutionResult, VmExecutionMode}; -use crate::vm_refunds_enhancement::tests::tester::{TxType, VmTesterBuilder}; -use crate::vm_refunds_enhancement::HistoryDisabled; - -#[test] -fn estimate_fee() { - let mut vm_tester = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - vm_tester.deploy_test_contract(); - let account = &mut vm_tester.rich_accounts[0]; - - let tx = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L2, - ); - - vm_tester.vm.push_transaction(tx); - - let result = vm_tester.vm.execute(VmExecutionMode::OneTx); - assert_matches!(result.result, ExecutionResult::Success { .. 
}); -} - -#[test] -fn simple_execute() { - let mut vm_tester = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - vm_tester.deploy_test_contract(); - - let account = &mut vm_tester.rich_accounts[0]; - - let tx1 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - - let tx2 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - true, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - - let tx3 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - let vm = &mut vm_tester.vm; - vm.push_transaction(tx1); - vm.push_transaction(tx2); - vm.push_transaction(tx3); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Success { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Revert { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Success { .. }); - let block_tip = vm.execute(VmExecutionMode::Batch); - assert_matches!(block_tip.result, ExecutionResult::Success { .. }); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/inner_state.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/inner_state.rs deleted file mode 100644 index 3158fc49444..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/inner_state.rs +++ /dev/null @@ -1,127 +0,0 @@ -use std::collections::HashMap; - -use zk_evm_1_3_3::aux_structures::Timestamp; -use zk_evm_1_3_3::vm_state::VmLocalState; -use crate::interface::storage::WriteStorage; - -use zksync_types::{StorageKey, StorageLogQuery, StorageValue, U256}; - -use crate::vm_refunds_enhancement::old_vm::event_sink::InMemoryEventSink; -use crate::vm_refunds_enhancement::old_vm::history_recorder::{ - AppDataFrameManagerWithHistory, HistoryRecorder, -}; -use crate::vm_refunds_enhancement::{HistoryEnabled, HistoryMode, SimpleMemory, Vm}; - -#[derive(Clone, Debug)] -pub(crate) struct ModifiedKeysMap(HashMap<StorageKey, StorageValue>); - -// We consider hashmaps to be equal even if there is a key -// that is not present in one but has zero value in another. -impl PartialEq for ModifiedKeysMap { - fn eq(&self, other: &Self) -> bool { - for (key, value) in self.0.iter() { - if *value != other.0.get(key).cloned().unwrap_or_default() { - return false; - } - } - for (key, value) in other.0.iter() { - if *value != self.0.get(key).cloned().unwrap_or_default() { - return false; - } - } - true - } -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct DecommitterTestInnerState<H: HistoryMode> { - /// There is no way to "truly" compare the storage pointer, - /// so we just compare the modified keys. This is reasonable enough. - pub(crate) modified_storage_keys: ModifiedKeysMap, - pub(crate) known_bytecodes: HistoryRecorder<HashMap<U256, Vec<U256>>, H>, - pub(crate) decommitted_code_hashes: HistoryRecorder<HashMap<U256, u32>, HistoryEnabled>, -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct StorageOracleInnerState<H: HistoryMode> { - /// There is no way to "truly" compare the storage pointer, - /// so we just compare the modified keys. This is reasonable enough.
- pub(crate) modified_storage_keys: ModifiedKeysMap, - - pub(crate) frames_stack: AppDataFrameManagerWithHistory<Box<StorageLogQuery>, H>, - - pub(crate) pre_paid_changes: HistoryRecorder<HashMap<StorageKey, u32>, H>, - pub(crate) paid_changes: HistoryRecorder<HashMap<StorageKey, u32>, H>, - pub(crate) initial_values: HistoryRecorder<HashMap<StorageKey, U256>, H>, - pub(crate) returned_refunds: HistoryRecorder<Vec<u32>, H>, -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct PrecompileProcessorTestInnerState<H: HistoryMode> { - pub(crate) timestamp_history: HistoryRecorder<Vec<Timestamp>, H>, -} - -/// A struct that encapsulates the state of the VM's oracles -/// The state is to be used in tests. -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct VmInstanceInnerState<H: HistoryMode> { - event_sink: InMemoryEventSink<H>, - precompile_processor_state: PrecompileProcessorTestInnerState<H>, - memory: SimpleMemory<H>, - decommitter_state: DecommitterTestInnerState<H>, - storage_oracle_state: StorageOracleInnerState<H>, - local_state: VmLocalState, -} - -impl<S: WriteStorage, H: HistoryMode> Vm<S, H> { - // Dump inner state of the VM. - pub(crate) fn dump_inner_state(&self) -> VmInstanceInnerState<H> { - let event_sink = self.state.event_sink.clone(); - let precompile_processor_state = PrecompileProcessorTestInnerState { - timestamp_history: self.state.precompiles_processor.timestamp_history.clone(), - }; - let memory = self.state.memory.clone(); - let decommitter_state = DecommitterTestInnerState { - modified_storage_keys: ModifiedKeysMap( - self.state - .decommittment_processor - .get_storage() - .borrow() - .modified_storage_keys() - .clone(), - ), - known_bytecodes: self.state.decommittment_processor.known_bytecodes.clone(), - decommitted_code_hashes: self - .state - .decommittment_processor - .get_decommitted_code_hashes_with_history() - .clone(), - }; - let storage_oracle_state = StorageOracleInnerState { - modified_storage_keys: ModifiedKeysMap( - self.state - .storage - .storage - .get_ptr() - .borrow() - .modified_storage_keys() - .clone(), - ), - frames_stack: self.state.storage.frames_stack.clone(), - pre_paid_changes: self.state.storage.pre_paid_changes.clone(), - paid_changes: self.state.storage.paid_changes.clone(), - initial_values: self.state.storage.initial_values.clone(), - returned_refunds: self.state.storage.returned_refunds.clone(), - }; - let local_state = self.state.local_state.clone(); - - VmInstanceInnerState { - event_sink, - precompile_processor_state, - memory, - decommitter_state, - storage_oracle_state, - local_state, - } - } -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/mod.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/mod.rs deleted file mode 100644 index dfe8905a7e0..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/mod.rs +++ /dev/null @@ -1,7 +0,0 @@ -pub(crate) use transaction_test_info::{ExpectedError, TransactionTestInfo, TxModifier}; -pub(crate) use vm_tester::{default_l1_batch, InMemoryStorageView, VmTester, VmTesterBuilder}; -pub(crate) use zksync_test_account::{Account, DeployContractsTx, TxType}; - -mod inner_state; -mod transaction_test_info; -mod vm_tester; diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/transaction_test_info.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/transaction_test_info.rs deleted file mode 100644 index 8f7ecc0a733..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/transaction_test_info.rs +++ /dev/null @@ -1,217 +0,0 @@ -use zksync_types::{ExecuteTransactionCommon, Transaction}; - -use crate::interface::VmRevertReason; -use 
crate::interface::{ - CurrentExecutionState, ExecutionResult, Halt, TxRevertReason, VmExecutionMode, - VmExecutionResultAndLogs, -}; -use crate::vm_refunds_enhancement::tests::tester::vm_tester::VmTester; -use crate::vm_refunds_enhancement::HistoryEnabled; - -#[derive(Debug, Clone)] -pub(crate) enum TxModifier { - WrongSignatureLength, - WrongSignature, - WrongMagicValue, - WrongNonce, - NonceReused, -} - -#[derive(Debug, Clone)] -pub(crate) enum TxExpectedResult { - Rejected { error: ExpectedError }, - Processed { rollback: bool }, -} - -#[derive(Debug, Clone)] -pub(crate) struct TransactionTestInfo { - tx: Transaction, - result: TxExpectedResult, -} - -#[derive(Debug, Clone)] -pub(crate) struct ExpectedError { - pub(crate) revert_reason: TxRevertReason, - pub(crate) modifier: Option<TxModifier>, -} - -impl From<TxModifier> for ExpectedError { - fn from(value: TxModifier) -> Self { - let revert_reason = match value { - TxModifier::WrongSignatureLength => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Signature length is incorrect".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 29, 83, 105, 103, 110, 97, 116, 117, 114, 101, 32, - 108, 101, 110, 103, 116, 104, 32, 105, 115, 32, 105, 110, 99, 111, 114, 114, 101, 99, - 116, 0, 0, 0, - ], - }) - } - TxModifier::WrongSignature => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Account validation returned invalid magic value. Most often this means that the signature is incorrect".to_string(), - data: vec![], - }) - } - TxModifier::WrongMagicValue => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "v is neither 27 nor 28".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 22, 118, 32, 105, 115, 32, 110, 101, 105, 116, 104, - 101, 114, 32, 50, 55, 32, 110, 111, 114, 32, 50, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - }) - - } - TxModifier::WrongNonce => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Incorrect nonce".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 73, 110, 99, 111, 114, 114, 101, 99, 116, 32, 110, - 111, 110, 99, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - }) - } - TxModifier::NonceReused => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Reusing the same nonce twice".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 82, 101, 117, 115, 105, 110, 103, 32, 116, 104, - 101, 32, 115, 97, 109, 101, 32, 110, 111, 110, 99, 101, 32, 116, 119, 105, 99, 101, 0, - 0, 0, 0, - ], - }) - } - }; - - ExpectedError { - revert_reason: TxRevertReason::Halt(revert_reason), - modifier: Some(value), - } - } -} - -impl TransactionTestInfo { - pub(crate) fn new_rejected( - mut transaction: Transaction, - expected_error: ExpectedError, - ) -> Self { - transaction.common_data = match transaction.common_data {
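// Summary of how each modifier corrupts the signed L2 tx (see the arms below):
//   WrongSignatureLength     -> drops the last 20 bytes of the signature;
//   WrongSignature           -> 65 bytes of 27: `v` is valid but the recovered signer is wrong;
//   WrongMagicValue          -> 65 bytes of 0x01: ecrecover rejects `v` (neither 27 nor 28);
//   WrongNonce / NonceReused -> the signature is left intact; the nonce itself is at fault.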
ExecuteTransactionCommon::L2(mut data) => { - if let Some(modifier) = &expected_error.modifier { - match modifier { - TxModifier::WrongSignatureLength => { - data.signature = data.signature[..data.signature.len() - 20].to_vec() - } - TxModifier::WrongSignature => data.signature = vec![27u8; 65], - TxModifier::WrongMagicValue => data.signature = vec![1u8; 65], - TxModifier::WrongNonce => { - // Do not need to modify signature for nonce error - } - TxModifier::NonceReused => { - // Do not need to modify signature for nonce error - } - } - } - ExecuteTransactionCommon::L2(data) - } - _ => panic!("L1 transactions are not supported"), - }; - - Self { - tx: transaction, - result: TxExpectedResult::Rejected { - error: expected_error, - }, - } - } - - pub(crate) fn new_processed(transaction: Transaction, should_be_rollbacked: bool) -> Self { - Self { - tx: transaction, - result: TxExpectedResult::Processed { - rollback: should_be_rollbacked, - }, - } - } - - fn verify_result(&self, result: &VmExecutionResultAndLogs) { - match &self.result { - TxExpectedResult::Rejected { error } => match &result.result { - ExecutionResult::Success { .. } => { - panic!("Transaction should be reverted {:?}", self.tx.nonce()) - } - ExecutionResult::Revert { output } => match &error.revert_reason { - TxRevertReason::TxReverted(expected) => { - assert_eq!(output, expected) - } - _ => { - panic!("Error types mismatch"); - } - }, - ExecutionResult::Halt { reason } => match &error.revert_reason { - TxRevertReason::Halt(expected) => { - assert_eq!(reason, expected) - } - _ => { - panic!("Error types mismatch"); - } - }, - }, - TxExpectedResult::Processed { .. } => { - assert!(!result.result.is_failed()); - } - } - } - - fn should_rollback(&self) -> bool { - match &self.result { - TxExpectedResult::Rejected { .. 
} => true, - TxExpectedResult::Processed { rollback } => *rollback, - } - } -} - -impl VmTester<HistoryEnabled> { - pub(crate) fn execute_and_verify_txs( - &mut self, - txs: &[TransactionTestInfo], - ) -> CurrentExecutionState { - for tx_test_info in txs { - self.execute_tx_and_verify(tx_test_info.clone()); - } - self.vm.execute(VmExecutionMode::Batch); - let mut state = self.vm.get_current_execution_state(); - state.used_contract_hashes.sort(); - state - } - - pub(crate) fn execute_tx_and_verify( - &mut self, - tx_test_info: TransactionTestInfo, - ) -> VmExecutionResultAndLogs { - let inner_state_before = self.vm.dump_inner_state(); - self.vm.make_snapshot(); - self.vm.push_transaction(tx_test_info.tx.clone()); - let result = self.vm.execute(VmExecutionMode::OneTx); - tx_test_info.verify_result(&result); - if tx_test_info.should_rollback() { - self.vm.rollback_to_the_latest_snapshot(); - let inner_state_after = self.vm.dump_inner_state(); - assert_eq!( - inner_state_before, inner_state_after, - "Inner state before and after rollback should be equal" - ); - } - result - } -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/vm_tester.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/vm_tester.rs deleted file mode 100644 index 800af517ed3..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/vm_tester.rs +++ /dev/null @@ -1,300 +0,0 @@ -use zksync_contracts::BaseSystemContracts; -use crate::interface::storage::{InMemoryStorage, StoragePtr, StorageView, WriteStorage}; - -use zksync_types::block::legacy_miniblock_hash; -use zksync_types::helpers::unix_timestamp_ms; -use zksync_types::utils::{deployed_address_create, storage_key_for_eth_balance}; -use zksync_types::{ - get_code_key, get_is_account_key, Address, L1BatchNumber, L2ChainId, MiniblockNumber, Nonce, - ProtocolVersionId, U256, -}; -use zksync_utils::bytecode::hash_bytecode; -use zksync_utils::u256_to_h256; - -use crate::vm_refunds_enhancement::constants::BLOCK_GAS_LIMIT; - -use crate::interface::{ - L1BatchEnv, L2Block, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, -}; -use crate::vm_refunds_enhancement::tests::tester::Account; -use crate::vm_refunds_enhancement::tests::tester::TxType; -use crate::vm_refunds_enhancement::tests::utils::read_test_contract; -use crate::vm_refunds_enhancement::utils::l2_blocks::load_last_l2_block; -use crate::vm_refunds_enhancement::{HistoryMode, Vm}; - -pub(crate) type InMemoryStorageView = StorageView<InMemoryStorage>; - -pub(crate) struct VmTester<H: HistoryMode> { - pub(crate) vm: Vm<InMemoryStorageView, H>, - pub(crate) storage: StoragePtr<InMemoryStorageView>, - pub(crate) fee_account: Address, - pub(crate) deployer: Option<Account>, - pub(crate) test_contract: Option<Address>
, - pub(crate) rich_accounts: Vec<Account>, - pub(crate) custom_contracts: Vec<ContractsToDeploy>, - history_mode: H, -} - -impl<H: HistoryMode> VmTester<H> { - pub(crate) fn deploy_test_contract(&mut self) { - let contract = read_test_contract(); - let tx = self - .deployer - .as_mut() - .expect("You have to initialize builder with deployer") - .get_deploy_tx(&contract, None, TxType::L2) - .tx; - let nonce = tx.nonce().unwrap().0.into(); - self.vm.push_transaction(tx); - self.vm.execute(VmExecutionMode::OneTx); - let deployed_address = - deployed_address_create(self.deployer.as_ref().unwrap().address, nonce); - self.test_contract = Some(deployed_address); - } - - pub(crate) fn reset_with_empty_storage(&mut self) { - self.storage = StorageView::new(get_empty_storage()).to_rc_ptr(); - self.reset_state(false); - } - - /// Reset the state of the VM to the initial state. - /// If `use_latest_l2_block` is true, then the VM will use the latest L2 block from storage, - /// otherwise it will use the first L2 block of the L1 batch env - pub(crate) fn reset_state(&mut self, use_latest_l2_block: bool) { - for account in self.rich_accounts.iter_mut() { - account.nonce = Nonce(0); - make_account_rich(self.storage.clone(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(self.storage.clone(), deployer); - } - - if !self.custom_contracts.is_empty() { - println!("Inserting custom contracts is not yet supported") - // insert_contracts(&mut self.storage, &self.custom_contracts); - } - - let mut l1_batch = self.vm.batch_env.clone(); - if use_latest_l2_block { - let last_l2_block = load_last_l2_block(self.storage.clone()).unwrap_or(L2Block { - number: 0, - timestamp: 0, - hash: legacy_miniblock_hash(MiniblockNumber(0)), - }); - l1_batch.first_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: std::cmp::max(last_l2_block.timestamp + 1, l1_batch.timestamp), - prev_block_hash: last_l2_block.hash, - max_virtual_blocks_to_create: 1, - }; - } - - let vm = Vm::new( - l1_batch, - self.vm.system_env.clone(), - self.storage.clone(), - self.history_mode.clone(), - ); - - if self.test_contract.is_some() { - self.deploy_test_contract(); - } - - self.vm = vm; - } -} - -pub(crate) type ContractsToDeploy = (Vec<u8>, Address, bool); - -pub(crate) struct VmTesterBuilder<H: HistoryMode> { - history_mode: H, - storage: Option<InMemoryStorage>, - l1_batch_env: Option<L1BatchEnv>, - system_env: SystemEnv, - deployer: Option<Account>, - rich_accounts: Vec<Account>, - custom_contracts: Vec<ContractsToDeploy>, -} - -impl<H: HistoryMode> Clone for VmTesterBuilder<H> { - fn clone(&self) -> Self { - Self { - history_mode: self.history_mode.clone(), - storage: None, - l1_batch_env: self.l1_batch_env.clone(), - system_env: self.system_env.clone(), - deployer: self.deployer.clone(), - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - } - } -} - -#[allow(dead_code)] -impl<H: HistoryMode> VmTesterBuilder<H> { - pub(crate) fn new(history_mode: H) -> Self { - Self { - history_mode, - storage: None, - l1_batch_env: None, - system_env: SystemEnv { - zk_porter_available: false, - version: ProtocolVersionId::latest(), - base_system_smart_contracts: BaseSystemContracts::playground(), - gas_limit: BLOCK_GAS_LIMIT, - execution_mode: TxExecutionMode::VerifyExecute, - default_validation_computational_gas_limit: BLOCK_GAS_LIMIT, - chain_id: L2ChainId::from(270), - }, - deployer: None, - rich_accounts: vec![], - custom_contracts: vec![], - } - } - - pub(crate) fn with_l1_batch_env(mut self, l1_batch_env: L1BatchEnv) -> Self { - self.l1_batch_env = Some(l1_batch_env); - self - } - - pub(crate) fn with_system_env(mut self, 
system_env: SystemEnv) -> Self { - self.system_env = system_env; - self - } - - pub(crate) fn with_storage(mut self, storage: InMemoryStorage) -> Self { - self.storage = Some(storage); - self - } - - pub(crate) fn with_base_system_smart_contracts( - mut self, - base_system_smart_contracts: BaseSystemContracts, - ) -> Self { - self.system_env.base_system_smart_contracts = base_system_smart_contracts; - self - } - - pub(crate) fn with_gas_limit(mut self, gas_limit: u32) -> Self { - self.system_env.gas_limit = gas_limit; - self - } - - pub(crate) fn with_execution_mode(mut self, execution_mode: TxExecutionMode) -> Self { - self.system_env.execution_mode = execution_mode; - self - } - - pub(crate) fn with_empty_in_memory_storage(mut self) -> Self { - self.storage = Some(get_empty_storage()); - self - } - - pub(crate) fn with_random_rich_accounts(mut self, number: u32) -> Self { - for _ in 0..number { - let account = Account::random(); - self.rich_accounts.push(account); - } - self - } - - pub(crate) fn with_rich_accounts(mut self, accounts: Vec<Account>) -> Self { - self.rich_accounts.extend(accounts); - self - } - - pub(crate) fn with_deployer(mut self) -> Self { - let deployer = Account::random(); - self.deployer = Some(deployer); - self - } - - pub(crate) fn with_custom_contracts(mut self, contracts: Vec<ContractsToDeploy>) -> Self { - self.custom_contracts = contracts; - self - } - - pub(crate) fn build(self) -> VmTester<H> { - let l1_batch_env = self - .l1_batch_env - .unwrap_or_else(|| default_l1_batch(L1BatchNumber(1))); - - let mut raw_storage = self.storage.unwrap_or_else(get_empty_storage); - insert_contracts(&mut raw_storage, &self.custom_contracts); - let storage_ptr = StorageView::new(raw_storage).to_rc_ptr(); - for account in self.rich_accounts.iter() { - make_account_rich(storage_ptr.clone(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(storage_ptr.clone(), deployer); - } - let fee_account = l1_batch_env.fee_account; - - let vm = Vm::new( - l1_batch_env, - self.system_env, - storage_ptr.clone(), - self.history_mode.clone(), - ); - - VmTester { - vm, - storage: storage_ptr, - fee_account, - deployer: self.deployer, - test_contract: None, - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - history_mode: self.history_mode, - } - } -} - -pub(crate) fn default_l1_batch(number: L1BatchNumber) -> L1BatchEnv { - let timestamp = unix_timestamp_ms(); - L1BatchEnv { - previous_batch_hash: None, - number, - timestamp, - l1_gas_price: 50_000_000_000, // 50 gwei - fair_l2_gas_price: 250_000_000, // 0.25 gwei - fee_account: Address::random(), - enforced_base_fee: None, - first_l2_block: L2BlockEnv { - number: 1, - timestamp, - prev_block_hash: legacy_miniblock_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 100, - }, - } -} - -pub(crate) fn make_account_rich(storage: StoragePtr<InMemoryStorageView>, account: &Account) { - let key = storage_key_for_eth_balance(&account.address); - storage - .as_ref() - .borrow_mut() - .set_value(key, u256_to_h256(U256::from(10u64.pow(19)))); -} - -pub(crate) fn get_empty_storage() -> InMemoryStorage { - InMemoryStorage::with_system_contracts(hash_bytecode) -} - -// Inserts the contracts into the test environment, bypassing the -// deployer system contract. Besides the reference to storage, -// it accepts `contracts`: a list of (bytecode, address, is_account) -// tuples describing each contract to deploy.
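// For instance, the custom-contract tuples accepted by `VmTesterBuilder` above flow
// into this helper from `build()`; a minimal usage sketch (assuming `bytecode` is any
// compiled contract and `addr` any free address):
//
//     let mut storage = get_empty_storage();
//     insert_contracts(&mut storage, &[(bytecode, addr, /* is_account */ true)]);
//
// which is what `.with_custom_contracts(vec![(bytecode, addr, true)])` followed by
// `.build()` does internally.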
-fn insert_contracts(raw_storage: &mut InMemoryStorage, contracts: &[ContractsToDeploy]) { - for (contract, address, is_account) in contracts { - let deployer_code_key = get_code_key(address); - raw_storage.set_value(deployer_code_key, hash_bytecode(contract)); - - if *is_account { - let is_account_key = get_is_account_key(address); - raw_storage.set_value(is_account_key, u256_to_h256(1_u32.into())); - } - - raw_storage.store_factory_dep(hash_bytecode(contract), contract.clone()); - } -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tracing_execution_error.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tracing_execution_error.rs deleted file mode 100644 index a839f4708ad..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tracing_execution_error.rs +++ /dev/null @@ -1,53 +0,0 @@ -use zksync_types::{Execute, H160}; - -use crate::interface::TxExecutionMode; -use crate::interface::{TxRevertReason, VmRevertReason}; -use crate::vm_refunds_enhancement::tests::tester::{ - ExpectedError, TransactionTestInfo, VmTesterBuilder, -}; -use crate::vm_refunds_enhancement::tests::utils::{ - get_execute_error_calldata, read_error_contract, BASE_SYSTEM_CONTRACTS, -}; -use crate::vm_refunds_enhancement::HistoryEnabled; - -#[test] -fn test_tracing_of_execution_errors() { - let contract_address = H160::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_custom_contracts(vec![(read_error_contract(), contract_address, false)]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address, - calldata: get_execute_error_calldata(), - value: Default::default(), - factory_deps: Some(vec![]), - }, - None, - ); - - vm.execute_tx_and_verify(TransactionTestInfo::new_rejected( - tx, - ExpectedError { - revert_reason: TxRevertReason::TxReverted(VmRevertReason::General { - msg: "short".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 115, 104, 111, 114, 116, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, - ], - }), - modifier: None, - }, - )); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/upgrade.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/upgrade.rs deleted file mode 100644 index cbbec9a83d5..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/upgrade.rs +++ /dev/null @@ -1,342 +0,0 @@ -use zk_evm_1_3_3::aux_structures::Timestamp; - -use zksync_types::{ - ethabi::Contract, - Execute, COMPLEX_UPGRADER_ADDRESS, CONTRACT_DEPLOYER_ADDRESS, CONTRACT_FORCE_DEPLOYER_ADDRESS, - REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, - {ethabi::Token, Address, ExecuteTransactionCommon, Transaction, H256, U256}, - {get_code_key, get_known_code_key, H160}, -}; - -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use zksync_contracts::{deployer_contract, load_contract, load_sys_contract, read_bytecode}; -use crate::interface::storage::WriteStorage; -use zksync_test_account::TxType; - -use crate::interface::{ExecutionResult, Halt, TxExecutionMode, 
VmExecutionMode}; -use crate::vm_refunds_enhancement::tests::tester::VmTesterBuilder; -use crate::vm_refunds_enhancement::tests::utils::verify_required_storage; -use crate::vm_refunds_enhancement::HistoryEnabled; -use zksync_types::protocol_version::ProtocolUpgradeTxCommonData; - -use super::utils::read_test_contract; - -/// In this test we ensure that the requirements for protocol upgrade transactions are enforced by the bootloader: -/// - This transaction must be the only one in block -/// - If present, this transaction must be the first one in block -#[test] -fn test_protocol_upgrade_is_first() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let bytecode_hash = hash_bytecode(&read_test_contract()); - - // Here we just use some random transaction of protocol upgrade type: - let protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecodehash to - address: H160::random(), - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - let normal_l1_transaction = vm.rich_accounts[0] - .get_deploy_tx(&read_test_contract(), None, TxType::L1 { serial_id: 0 }) - .tx; - - let expected_error = - Halt::UnexpectedVMBehavior("Assertion error: Protocol upgrade tx not first".to_string()); - - vm.vm.make_snapshot(); - // Test 1: there must be only one system transaction in block - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - - vm.vm.execute(VmExecutionMode::OneTx); - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error.clone() - } - ); - - // Test 2: the protocol upgrade tx must be the first one in block - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error - } - ); - - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(protocol_upgrade_transaction); - vm.vm.push_transaction(normal_l1_transaction); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); -} - -/// In this test we try to test how force deployments could be done via protocol upgrade transactions. -#[test] -fn test_force_deploy_upgrade() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - let bytecode_hash = hash_bytecode(&read_test_contract()); - - let known_code_key = get_known_code_key(&bytecode_hash); - // It is generally expected that all the keys will be set as known prior to the protocol upgrade. 
- storage_view - .borrow_mut() - .set_value(known_code_key, u256_to_h256(1.into())); - drop(storage_view); - - let address_to_deploy = H160::random(); - // Here we just use some random transaction of protocol upgrade type: - let transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecodehash to - address: address_to_deploy, - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - vm.vm.push_transaction(transaction); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = vec![(bytecode_hash, get_code_key(&address_to_deploy))]; - - // Verify that the bytecode has been set correctly - verify_required_storage(&vm.vm.state, expected_slots); -} - -/// Here we show how the complex upgrader can be used -#[test] -fn test_complex_upgrader() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - - let bytecode_hash = hash_bytecode(&read_complex_upgrade()); - let msg_sender_test_hash = hash_bytecode(&read_msg_sender_test()); - - // Let's assume that the bytecode for the implementation of the complex upgrade - // is already deployed at some address in userspace - let upgrade_impl = H160::random(); - let account_code_key = get_code_key(&upgrade_impl); - - storage_view - .borrow_mut() - .set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); - storage_view.borrow_mut().set_value( - get_known_code_key(&msg_sender_test_hash), - u256_to_h256(1.into()), - ); - storage_view - .borrow_mut() - .set_value(account_code_key, bytecode_hash); - drop(storage_view); - - vm.vm.state.decommittment_processor.populate( - vec![ - ( - h256_to_u256(bytecode_hash), - bytes_to_be_words(read_complex_upgrade()), - ), - ( - h256_to_u256(msg_sender_test_hash), - bytes_to_be_words(read_msg_sender_test()), - ), - ], - Timestamp(0), - ); - - let address_to_deploy1 = H160::random(); - let address_to_deploy2 = H160::random(); - - let transaction = get_complex_upgrade_tx( - upgrade_impl, - address_to_deploy1, - address_to_deploy2, - bytecode_hash, - ); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = vec![ - (bytecode_hash, get_code_key(&address_to_deploy1)), - (bytecode_hash, get_code_key(&address_to_deploy2)), - ]; - - // Verify that the bytecode has been set correctly - verify_required_storage(&vm.vm.state, expected_slots); -} - -#[derive(Debug, Clone)] -struct ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash: H256, - // The address on which to deploy the bytecodehash to - address: Address, - // Whether to run the constructor on the force deployment - call_constructor: bool, - // The value with which to initialize a contract - value: U256, - // The constructor calldata - input: Vec<u8>, -} - -fn get_forced_deploy_tx(deployment: &[ForceDeployment]) -> Transaction { - let deployer = deployer_contract(); - let contract_function = 
deployer.function("forceDeployOnAddresses").unwrap(); - - let encoded_deployments: Vec<_> = deployment - .iter() - .map(|deployment| { - Token::Tuple(vec![ - Token::FixedBytes(deployment.bytecode_hash.as_bytes().to_vec()), - Token::Address(deployment.address), - Token::Bool(deployment.call_constructor), - Token::Uint(deployment.value), - Token::Bytes(deployment.input.clone()), - ]) - }) - .collect(); - - let params = [Token::Array(encoded_deployments)]; - - let calldata = contract_function - .encode_input(&params) - .expect("failed to encode parameters"); - - let execute = Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, - calldata, - factory_deps: None, - value: U256::zero(), - }; - - Transaction { - common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, - gas_limit: U256::from(200_000_000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -// Returns the transaction that performs a complex protocol upgrade. -// The first param is the address of the implementation of the complex upgrade -// in user-space, while the next 3 params are params of the implementation itself. -// For the explanation of the parameters, please refer to: -// etc/contracts-test-data/complex-upgrade/complex-upgrade.sol -fn get_complex_upgrade_tx( - implementation_address: Address, - address1: Address, - address2: Address, - bytecode_hash: H256, -) -> Transaction { - let impl_contract = get_complex_upgrade_abi(); - let impl_function = impl_contract.function("someComplexUpgrade").unwrap(); - let impl_calldata = impl_function - .encode_input(&[ - Token::Address(address1), - Token::Address(address2), - Token::FixedBytes(bytecode_hash.as_bytes().to_vec()), - ]) - .unwrap(); - - let complex_upgrader = get_complex_upgrader_abi(); - let upgrade_function = complex_upgrader.function("upgrade").unwrap(); - let complex_upgrader_calldata = upgrade_function - .encode_input(&[ - Token::Address(implementation_address), - Token::Bytes(impl_calldata), - ]) - .unwrap(); - - let execute = Execute { - contract_address: COMPLEX_UPGRADER_ADDRESS, - calldata: complex_upgrader_calldata, - factory_deps: None, - value: U256::zero(), - }; - - Transaction { - common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, - gas_limit: U256::from(200_000_000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -fn read_complex_upgrade() -> Vec<u8> { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json") -} - -fn read_msg_sender_test() -> Vec<u8> { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/msg-sender.sol/MsgSenderTest.json") -} - -fn get_complex_upgrade_abi() -> Contract { - load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json" - ) -} - -fn get_complex_upgrader_abi() -> Contract { - load_sys_contract("ComplexUpgrader") -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/utils.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/utils.rs deleted file mode 100644 index ffbb9d89260..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/utils.rs +++ 
/dev/null @@ -1,106 +0,0 @@ -use ethabi::Contract; -use once_cell::sync::Lazy; - -use crate::vm_refunds_enhancement::tests::tester::InMemoryStorageView; -use zksync_contracts::{ - load_contract, read_bytecode, read_zbin_bytecode, BaseSystemContracts, SystemContractCode, -}; -use crate::interface::storage::{StoragePtr, WriteStorage}; -use zksync_types::utils::storage_key_for_standard_token_balance; -use zksync_types::{AccountTreeId, Address, StorageKey, H256, U256}; -use zksync_utils::bytecode::hash_bytecode; -use zksync_utils::{bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use crate::vm_refunds_enhancement::types::internals::ZkSyncVmState; -use crate::vm_refunds_enhancement::HistoryMode; - -pub(crate) static BASE_SYSTEM_CONTRACTS: Lazy<BaseSystemContracts> = - Lazy::new(BaseSystemContracts::load_from_disk); - -// Probably make it a part of vm tester -pub(crate) fn verify_required_storage<H: HistoryMode>( - state: &ZkSyncVmState<InMemoryStorageView, H>, - required_values: Vec<(H256, StorageKey)>, -) { - for (required_value, key) in required_values { - let current_value = state.storage.storage.read_from_storage(&key); - - assert_eq!( - u256_to_h256(current_value), - required_value, - "Invalid value at key {key:?}" - ); - } -} - -pub(crate) fn verify_required_memory<H: HistoryMode>( - state: &ZkSyncVmState<InMemoryStorageView, H>, - required_values: Vec<(U256, u32, u32)>, -) { - for (required_value, memory_page, cell) in required_values { - let current_value = state - .memory - .read_slot(memory_page as usize, cell as usize) - .value; - assert_eq!(current_value, required_value); - } -} - -pub(crate) fn get_balance( - token_id: AccountTreeId, - account: &Address, - main_storage: StoragePtr<InMemoryStorageView>, -) -> U256 { - let key = storage_key_for_standard_token_balance(token_id, account); - h256_to_u256(main_storage.borrow_mut().read_value(&key)) -} - -pub(crate) fn read_test_contract() -> Vec<u8> { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json") -} - -pub(crate) fn get_bootloader(test: &str) -> SystemContractCode { - let bootloader_code = read_zbin_bytecode(format!( - "contracts/system-contracts/bootloader/tests/artifacts/{}.yul.zbin", - test - )); - - let bootloader_hash = hash_bytecode(&bootloader_code); - SystemContractCode { - code: bytes_to_be_words(bootloader_code), - hash: bootloader_hash, - } -} - -pub(crate) fn read_nonce_holder_tester() -> Vec<u8> { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/custom-account/nonce-holder-test.sol/NonceHolderTest.json") -} - -pub(crate) fn read_error_contract() -> Vec<u8> { - read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ) -} - -pub(crate) fn get_execute_error_calldata() -> Vec<u8> { - let test_contract = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ); - - let function = test_contract.function("require_short").unwrap(); - - function - .encode_input(&[]) - .expect("failed to encode parameters") -} - -pub(crate) fn read_many_owners_custom_account_contract() -> (Vec<u8>, Contract) { - let path = "etc/contracts-test-data/artifacts-zk/contracts/custom-account/many-owners-custom-account.sol/ManyOwnersCustomAccount.json"; - (read_bytecode(path), load_contract(path)) -} - -pub(crate) fn read_max_depth_contract() -> Vec<u8> { - read_zbin_bytecode( - "core/tests/ts-integration/contracts/zkasm/artifacts/deep_stak.zkasm/deep_stak.zkasm.zbin", - ) -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs index 735bd29c3b0..81b0c52cce5 
100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs @@ -1,14 +1,16 @@ +use std::rc::Rc; + use circuit_sequencer_api_1_3_3::sort_storage_access::sort_storage_access_queries; use zksync_types::{l2_to_l1_log::UserL2ToL1Log, Transaction}; +use zksync_vm_interface::{pubdata::PubdataBuilder, InspectExecutionMode}; use crate::{ glue::GlueInto, interface::{ storage::{StoragePtr, WriteStorage}, BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, - FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, + FinishedL1Batch, L1BatchEnv, L2BlockEnv, PushTransactionResult, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, - VmMemoryMetrics, }, vm_latest::HistoryEnabled, vm_refunds_enhancement::{ @@ -75,18 +77,23 @@ impl Vm { impl VmInterface for Vm { type TracerDispatcher = TracerDispatcher; - /// Push tx into memory for the future execution - fn push_transaction(&mut self, tx: Transaction) { - self.push_transaction_with_compression(tx, true) + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult<'_> { + self.push_transaction_with_compression(tx, true); + PushTransactionResult { + compressed_bytecodes: self + .bootloader_state + .get_last_tx_compressed_bytecodes() + .into(), + } } /// Execute VM with custom tracers. fn inspect( &mut self, dispatcher: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { - self.inspect_inner(dispatcher, execution_mode) + self.inspect_inner(dispatcher, execution_mode.into()) } fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { @@ -101,7 +108,7 @@ impl VmInterface for Vm { with_compression: bool, ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { self.push_transaction_with_compression(tx, with_compression); - let result = self.inspect(dispatcher, VmExecutionMode::OneTx); + let result = self.inspect(dispatcher, InspectExecutionMode::OneTx); if self.has_unpublished_bytecodes() { ( Err(BytecodeCompressionError::BytecodeCompressionFailed), @@ -118,12 +125,8 @@ impl VmInterface for Vm { } } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - self.record_vm_memory_metrics_inner() - } - - fn finish_batch(&mut self) -> FinishedL1Batch { - let result = self.inspect(&mut TracerDispatcher::default(), VmExecutionMode::Batch); + fn finish_batch(&mut self, _pubdata_builder: Rc) -> FinishedL1Batch { + let result = self.inspect_inner(&mut TracerDispatcher::default(), VmExecutionMode::Batch); let execution_state = self.get_current_execution_state(); let bootloader_memory = self.bootloader_state.bootloader_memory(); FinishedL1Batch { diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/utils.rs index 2ccedcc6aa9..3e2474835fa 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/utils.rs @@ -133,8 +133,8 @@ pub(super) fn assemble_tx_meta(execution_mode: TxExecutionMode, execute_tx: bool // Set 0 byte (execution mode) output[0] = match execution_mode { TxExecutionMode::VerifyExecute => 0x00, - TxExecutionMode::EstimateFee { .. } => 0x00, - TxExecutionMode::EthCall { .. 
} => 0x02, + TxExecutionMode::EstimateFee => 0x00, + TxExecutionMode::EthCall => 0x02, }; // Set 31 byte (marker for tx execution) diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/execution.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/execution.rs index c48d48edd3b..b1ad4d257b7 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/execution.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/execution.rs @@ -88,6 +88,7 @@ impl Vm { .refund_tracer .map(|r| r.get_refunds()) .unwrap_or_default(), + new_known_factory_deps: None, }; tx_tracer.dispatcher.save_results(&mut result); diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/statistics.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/statistics.rs index d082085a155..dbd8813035e 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/statistics.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/statistics.rs @@ -56,7 +56,7 @@ impl Vm { } /// Returns the info about all oracles' sizes. - pub(crate) fn record_vm_memory_metrics_inner(&self) -> VmMemoryMetrics { + pub(crate) fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { VmMemoryMetrics { event_sink_inner: self.state.event_sink.get_size(), event_sink_history: self.state.event_sink.get_history_size(), diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/bootloader.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/bootloader.rs deleted file mode 100644 index a30b5a58f63..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/bootloader.rs +++ /dev/null @@ -1,53 +0,0 @@ -use zksync_types::U256; - -use crate::interface::{ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterface}; -use crate::vm_virtual_blocks::constants::BOOTLOADER_HEAP_PAGE; -use crate::vm_virtual_blocks::tests::tester::VmTesterBuilder; -use crate::vm_virtual_blocks::tests::utils::{ - get_bootloader, verify_required_memory, BASE_SYSTEM_CONTRACTS, -}; - -use crate::vm_latest::HistoryEnabled; - -#[test] -fn test_dummy_bootloader() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!result.result.is_failed()); - - let correct_first_cell = U256::from_str_radix("123123123", 16).unwrap(); - verify_required_memory( - &vm.vm.state, - vec![(correct_first_cell, BOOTLOADER_HEAP_PAGE, 0)], - ); -} - -#[test] -fn test_bootloader_out_of_gas() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_gas_limit(10) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let res = vm.vm.execute(VmExecutionMode::Batch); - - assert_matches!( - res.result, - ExecutionResult::Halt { - reason: Halt::BootloaderOutOfGas - } - ); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/bytecode_publishing.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/bytecode_publishing.rs deleted file mode 100644 index 
773aa77e150..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/bytecode_publishing.rs +++ /dev/null @@ -1,37 +0,0 @@ -use zksync_types::event::extract_long_l2_to_l1_messages; -use zksync_utils::bytecode::compress_bytecode; - -use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterface}; -use crate::vm_latest::HistoryEnabled; -use crate::vm_virtual_blocks::tests::tester::{DeployContractsTx, TxType, VmTesterBuilder}; -use crate::vm_virtual_blocks::tests::utils::read_test_contract; - -#[test] -fn test_bytecode_publishing() { - // In this test, we aim to ensure that the contents of the compressed bytecodes - // are included as part of the L2->L1 long messages - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let compressed_bytecode = compress_bytecode(&counter).unwrap(); - - let DeployContractsTx { tx, .. } = account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - - let state = vm.vm.get_current_execution_state(); - let long_messages = extract_long_l2_to_l1_messages(&state.events); - assert!( - long_messages.contains(&compressed_bytecode), - "Bytecode not published" - ); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/call_tracer.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/call_tracer.rs deleted file mode 100644 index 7ee647ee1f7..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/call_tracer.rs +++ /dev/null @@ -1,88 +0,0 @@ -use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterface}; -use crate::tracers::CallTracer; -use crate::vm_latest::HistoryEnabled; -use crate::vm_virtual_blocks::constants::BLOCK_GAS_LIMIT; -use crate::vm_virtual_blocks::tests::tester::VmTesterBuilder; -use crate::vm_virtual_blocks::tests::utils::{read_max_depth_contract, read_test_contract}; -use crate::vm_virtual_blocks::tracers::traits::ToTracerPointer; -use once_cell::sync::OnceCell; -use std::sync::Arc; -use zksync_types::{Address, Execute}; - -// This test is ultra slow, so it's ignored by default. 
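// (To run it anyway: `cargo test test_max_depth -- --ignored`.)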
-#[test] -#[ignore] -fn test_max_depth() { - let contract = read_max_depth_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contract, address, true)]) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: vec![], - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result = Arc::new(OnceCell::new()); - let call_tracer = CallTracer::new(result.clone()).into_tracer_pointer(); - vm.vm.push_transaction(tx); - let res = vm.vm.inspect(call_tracer.into(), VmExecutionMode::OneTx); - assert!(result.get().is_some()); - assert!(res.result.is_failed()); -} - -#[test] -fn test_basic_behavior() { - let contract = read_test_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contract, address, true)]) - .build(); - - let increment_by_6_calldata = - "7cf5dab00000000000000000000000000000000000000000000000000000000000000006"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: hex::decode(increment_by_6_calldata).unwrap(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result = Arc::new(OnceCell::new()); - let call_tracer = CallTracer::new(result.clone()); - vm.vm.push_transaction(tx); - let res = vm.vm.inspect( - call_tracer.into_tracer_pointer().into(), - VmExecutionMode::OneTx, - ); - - let call_tracer_result = result.get().unwrap(); - - assert_eq!(call_tracer_result.len(), 1); - // Expect that there are plenty of subcalls underneath. - let subcall = &call_tracer_result[0].calls; - assert!(subcall.len() > 10); - assert!(!res.result.is_failed()); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/default_aa.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/default_aa.rs deleted file mode 100644 index 02a69a6a5d2..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/default_aa.rs +++ /dev/null @@ -1,70 +0,0 @@ -use zksync_system_constants::L2_ETH_TOKEN_ADDRESS; -use zksync_types::system_contracts::{DEPLOYMENT_NONCE_INCREMENT, TX_NONCE_INCREMENT}; - -use zksync_types::{get_code_key, get_known_code_key, get_nonce_key, AccountTreeId, U256}; -use zksync_utils::u256_to_h256; - -use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterface}; -use crate::vm_latest::HistoryEnabled; -use crate::vm_virtual_blocks::tests::tester::{DeployContractsTx, TxType, VmTesterBuilder}; -use crate::vm_virtual_blocks::tests::utils::{ - get_balance, read_test_contract, verify_required_storage, -}; - -#[test] -fn test_default_aa_interaction() { - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests.
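// The fee accounting verified at the end of this test reduces to a single formula,
// with all quantities taken from the batch env and the execution result below:
//
//     operator_balance == gas_limit * base_fee - gas_refunded * base_fee
//
// i.e. the operator keeps the fully prepaid fee minus the refunded gas, both priced
// at `base_fee`.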
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let DeployContractsTx { - tx, - bytecode_hash, - address, - } = account.get_deploy_tx(&counter, None, TxType::L2); - let maximal_fee = tx.gas_limit() * vm.vm.batch_env.base_fee(); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - vm.vm.get_current_execution_state(); - - // Both deployment and ordinary nonce should be incremented by one. - let account_nonce_key = get_nonce_key(&account.address); - let expected_nonce = TX_NONCE_INCREMENT + DEPLOYMENT_NONCE_INCREMENT; - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&bytecode_hash); - - // The contract should be deployed successfully. - let account_code_key = get_code_key(&address); - - let expected_slots = vec![ - (u256_to_h256(expected_nonce), account_nonce_key), - (u256_to_h256(U256::from(1u32)), known_codes_key), - (bytecode_hash, account_code_key), - ]; - - verify_required_storage(&vm.vm.state, expected_slots); - - let expected_fee = maximal_fee - - U256::from(result.refunds.gas_refunded) * U256::from(vm.vm.batch_env.base_fee()); - let operator_balance = get_balance( - AccountTreeId::new(L2_ETH_TOKEN_ADDRESS), - &vm.fee_account, - vm.vm.state.storage.storage.get_ptr(), - ); - - assert_eq!( - operator_balance, expected_fee, - "Operator did not receive his fee" - ); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/gas_limit.rs deleted file mode 100644 index e51b8cab570..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/gas_limit.rs +++ /dev/null @@ -1,45 +0,0 @@ -use zksync_types::fee::Fee; -use zksync_types::Execute; - -use crate::vm_virtual_blocks::constants::{ - BOOTLOADER_HEAP_PAGE, TX_DESCRIPTION_OFFSET, TX_GAS_LIMIT_OFFSET, -}; -use crate::vm_virtual_blocks::tests::tester::VmTesterBuilder; - -use crate::interface::{TxExecutionMode, VmInterface}; -use crate::vm_latest::HistoryDisabled; - -/// Checks that `TX_GAS_LIMIT_OFFSET` constant is correct. 
-#[test] -fn test_tx_gas_limit_offset() { - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let gas_limit = 9999.into(); - let tx = vm.rich_accounts[0].get_l2_tx_for_execute( - Execute { - contract_address: Some(Default::default()), - ..Default::default() - }, - Some(Fee { - gas_limit, - ..Default::default() - }), - ); - - vm.vm.push_transaction(tx); - - let gas_limit_from_memory = vm - .vm - .state - .memory - .read_slot( - BOOTLOADER_HEAP_PAGE as usize, - TX_DESCRIPTION_OFFSET + TX_GAS_LIMIT_OFFSET, - ) - .value; - assert_eq!(gas_limit_from_memory, gas_limit); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/get_used_contracts.rs deleted file mode 100644 index 06d8191310b..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/get_used_contracts.rs +++ /dev/null @@ -1,106 +0,0 @@ -use std::collections::{HashMap, HashSet}; - -use itertools::Itertools; - -use crate::HistoryMode; -use crate::interface::storage::WriteStorage; -use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; -use zksync_test_account::Account; -use zksync_types::{Execute, U256}; -use zksync_utils::bytecode::hash_bytecode; -use zksync_utils::h256_to_u256; - -use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterface}; -use crate::vm_latest::HistoryDisabled; -use crate::vm_virtual_blocks::tests::tester::{TxType, VmTesterBuilder}; -use crate::vm_virtual_blocks::tests::utils::{read_test_contract, BASE_SYSTEM_CONTRACTS}; -use crate::vm_virtual_blocks::Vm; - -#[test] -fn test_get_used_contracts() { - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - assert!(known_bytecodes_without_aa_code(&vm.vm).is_empty()); - - // Create, push, and execute a non-empty factory deps transaction with success status - // to check that get_used_contracts() updates - let contract_code = read_test_contract(); - let mut account = Account::random(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 0 }); - vm.vm.push_transaction(tx.tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - assert!(vm - .vm - .get_used_contracts() - .contains(&h256_to_u256(tx.bytecode_hash))); - - // Note: Default_AA will be in the list of used contracts if l2 tx is used - assert_eq!( - vm.vm - .get_used_contracts() - .into_iter() - .collect::<HashSet<U256>>(), - known_bytecodes_without_aa_code(&vm.vm) - .keys() - .cloned() - .collect::<HashSet<U256>>() - ); - - // Create, push, and execute a non-empty factory deps transaction that fails - // (known_bytecodes will be updated but we expect get_used_contracts() to not be updated) - - let calldata = [1, 2, 3]; - let big_calldata: Vec<u8> = calldata - .iter() - .cycle() - .take(calldata.len() * 1024) - .cloned() - .collect(); - let account2 = Account::random(); - let tx2 = account2.get_l1_tx( - Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, - calldata: big_calldata, - value: Default::default(), - factory_deps: Some(vec![vec![1; 32]]), - }, - 1, - ); - - vm.vm.push_transaction(tx2.clone()); - - let res2 = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(res2.result.is_failed()); - - for factory_dep in tx2.execute.factory_deps.unwrap() { - let hash = hash_bytecode(&factory_dep); - let 
hash_to_u256 = h256_to_u256(hash); - assert!(known_bytecodes_without_aa_code(&vm.vm) - .keys() - .contains(&hash_to_u256)); - assert!(!vm.vm.get_used_contracts().contains(&hash_to_u256)); - } -} - -fn known_bytecodes_without_aa_code<S: WriteStorage, H: HistoryMode>( - vm: &Vm<S, H>, -) -> HashMap<U256, Vec<U256>> { - let mut known_bytecodes_without_aa_code = vm - .state - .decommittment_processor - .known_bytecodes - .inner() - .clone(); - - known_bytecodes_without_aa_code - .remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash)) - .unwrap(); - - known_bytecodes_without_aa_code -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/invalid_bytecode.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/invalid_bytecode.rs deleted file mode 100644 index f8074c1db10..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/invalid_bytecode.rs +++ /dev/null @@ -1,120 +0,0 @@ -use zksync_types::H256; -use zksync_utils::h256_to_u256; - -use crate::interface::TxExecutionMode; -use crate::vm_virtual_blocks::tests::tester::VmTesterBuilder; -use crate::vm_virtual_blocks::{HistoryEnabled, TxRevertReason}; - -// TODO: this test requires a lot of hacks for bypassing the bytecode checks in the VM. -// Port it later; it's not significant for now. - -#[test] -fn test_invalid_bytecode() { - let mut vm_builder = VmTesterBuilder::new(HistoryEnabled) - .with_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1); - let mut storage = vm_builder.take_storage(); - let mut vm = vm_builder.build(&mut storage); - - let block_gas_per_pubdata = vm_test_env - .block_context - .context - .block_gas_price_per_pubdata(); - - let mut test_vm_with_custom_bytecode_hash = - |bytecode_hash: H256, expected_revert_reason: Option<TxRevertReason>| { - let mut oracle_tools = - OracleTools::new(vm_test_env.storage_ptr.as_mut(), HistoryEnabled); - - let (encoded_tx, predefined_overhead) = get_l1_tx_with_custom_bytecode_hash( - h256_to_u256(bytecode_hash), - block_gas_per_pubdata as u32, - ); - - run_vm_with_custom_factory_deps( - &mut oracle_tools, - vm_test_env.block_context.context, - &vm_test_env.block_properties, - encoded_tx, - predefined_overhead, - expected_revert_reason, - ); - }; - - let failed_to_mark_factory_deps = |msg: &str, data: Vec<u8>| { - TxRevertReason::FailedToMarkFactoryDependencies(VmRevertReason::General { - msg: msg.to_string(), - data, - }) - }; - - // Here we provide the correctly-formatted bytecode hash of - // odd length, so it should work. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - None, - ); - - // Here we provide correctly formatted bytecode of even length, so - // it should fail. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Code length in words must be odd", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 67, 111, 100, 101, 32, 108, 101, 110, - 103, 116, 104, 32, 105, 110, 32, 119, 111, 114, 100, 115, 32, 109, 117, 115, 116, - 32, 98, 101, 32, 111, 100, 100, - ], - )), - ); - - // Here we provide incorrectly formatted bytecode of odd length, so - // it should fail.
- test_vm_with_custom_bytecode_hash( - H256([ - 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Incorrectly formatted bytecodeHash", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, - 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, - 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - )), - ); - - // Here we provide incorrectly formatted bytecode of odd length, so - // it should fail. - test_vm_with_custom_bytecode_hash( - H256([ - 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Incorrectly formatted bytecodeHash", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, - 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, - 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - )), - ); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/is_write_initial.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/is_write_initial.rs deleted file mode 100644 index 2c7ef4a8d11..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/is_write_initial.rs +++ /dev/null @@ -1,42 +0,0 @@ -use crate::interface::storage::ReadStorage; -use zksync_types::get_nonce_key; - -use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterface}; -use crate::vm_latest::HistoryDisabled; -use crate::vm_virtual_blocks::tests::tester::{Account, TxType, VmTesterBuilder}; -use crate::vm_virtual_blocks::tests::utils::read_test_contract; - -#[test] -fn test_is_write_initial_behaviour() { - // In this test, we check the result of `is_write_initial` at different stages. - // The main idea is to check that `is_write_initial` storage uses the correct cache for initial_writes and doesn't - // mix it up with the repeated writes during one batch execution. - - let mut account = Account::random(); - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let nonce_key = get_nonce_key(&account.address); - // Check that the next write to the nonce key will be initial. - assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); - - let contract_code = read_test_contract(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L2).tx; - - vm.vm.push_transaction(tx); - vm.vm.execute(VmExecutionMode::OneTx); - - // Check that `is_write_initial` still returns true for the nonce key.
- assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/l1_tx_execution.rs deleted file mode 100644 index 64d9f98ddb3..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/l1_tx_execution.rs +++ /dev/null @@ -1,125 +0,0 @@ -use zksync_system_constants::BOOTLOADER_ADDRESS; -use zksync_types::l2_to_l1_log::L2ToL1Log; -use zksync_types::storage_writes_deduplicator::StorageWritesDeduplicator; -use zksync_types::{get_code_key, get_known_code_key, U256}; -use zksync_utils::u256_to_h256; - -use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterface}; -use crate::vm_latest::HistoryEnabled; -use crate::vm_virtual_blocks::tests::tester::{TxType, VmTesterBuilder}; -use crate::vm_virtual_blocks::tests::utils::{ - read_test_contract, verify_required_storage, BASE_SYSTEM_CONTRACTS, -}; -use crate::vm_virtual_blocks::types::internals::TransactionData; - -#[test] -fn test_l1_tx_execution() { - // In this test, we try to execute a contract deployment from L1. - // Here, instead of marking the code hash via the bootloader, we will be - // using L1->L2 communication, the same way it would likely be done during the priority mode. - - // There are always at least 3 initial writes here, because we pay fees from L1: - // - totalSupply of ETH token - // - balance of the refund recipient - // - balance of the bootloader - // - tx_rollout hash - - let basic_initial_writes = 1; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let contract_code = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let deploy_tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 1 }); - let tx_data: TransactionData = deploy_tx.tx.clone().into(); - - let required_l2_to_l1_logs = vec![L2ToL1Log { - shard_id: 0, - is_service: true, - tx_number_in_block: 0, - sender: BOOTLOADER_ADDRESS, - key: tx_data.tx_hash(0.into()), - value: u256_to_h256(U256::from(1u32)), - }]; - - vm.vm.push_transaction(deploy_tx.tx.clone()); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&deploy_tx.bytecode_hash); - - // The contract should be deployed successfully.
- let account_code_key = get_code_key(&deploy_tx.address); - - let expected_slots = vec![ - (u256_to_h256(U256::from(1u32)), known_codes_key), - (deploy_tx.bytecode_hash, account_code_key), - ]; - assert!(!res.result.is_failed()); - - verify_required_storage(&vm.vm.state, expected_slots); - - assert_eq!(res.logs.l2_to_l1_logs, required_l2_to_l1_logs); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - true, - None, - false, - TxType::L1 { serial_id: 0 }, - ); - vm.vm.push_transaction(tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - let storage_logs = res.logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - - // Tx panicked - assert_eq!(res.initial_storage_writes - basic_initial_writes, 0); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - false, - None, - false, - TxType::L1 { serial_id: 0 }, - ); - vm.vm.push_transaction(tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); - let storage_logs = res.logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We changed one slot inside contract - assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); - - // No repeated writes - let repeated_writes = res.repeated_storage_writes; - assert_eq!(res.repeated_storage_writes, 0); - - vm.vm.push_transaction(tx); - let storage_logs = vm.vm.execute(VmExecutionMode::OneTx).logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We do the same storage write, it will be deduplicated, so still 4 initial write and 0 repeated - assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); - assert_eq!(res.repeated_storage_writes, repeated_writes); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - false, - Some(10.into()), - false, - TxType::L1 { serial_id: 1 }, - ); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - // Method is not payable tx should fail - assert!(result.result.is_failed(), "The transaction should fail"); - - let res = StorageWritesDeduplicator::apply_on_empty_state(&result.logs.storage_logs); - // There are only basic initial writes - assert_eq!(res.initial_storage_writes - basic_initial_writes, 2); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/l2_blocks.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/l2_blocks.rs deleted file mode 100644 index cba534deeaf..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/l2_blocks.rs +++ /dev/null @@ -1,502 +0,0 @@ -//! -//! Tests for the bootloader -//! The description for each of the tests can be found in the corresponding `.yul` file. -//! 
- -use crate::interface::{ - ExecutionResult, Halt, L2BlockEnv, TxExecutionMode, VmExecutionMode, VmInterface, -}; -use crate::vm_latest::HistoryEnabled; -use crate::vm_virtual_blocks::constants::{ - BOOTLOADER_HEAP_PAGE, TX_OPERATOR_L2_BLOCK_INFO_OFFSET, TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO, -}; -use crate::vm_virtual_blocks::tests::tester::default_l1_batch; -use crate::vm_virtual_blocks::tests::tester::VmTesterBuilder; -use crate::vm_virtual_blocks::utils::l2_blocks::get_l2_block_hash_key; -use crate::vm_virtual_blocks::Vm; -use crate::HistoryMode; -use zk_evm_1_3_3::aux_structures::Timestamp; -use crate::interface::storage::{ReadStorage, WriteStorage}; -use zksync_system_constants::{ - CURRENT_VIRTUAL_BLOCK_INFO_POSITION, REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, -}; -use zksync_types::block::{pack_block_info, unpack_block_info}; -use zksync_types::{ - block::{legacy_miniblock_hash, miniblock_hash}, - get_code_key, AccountTreeId, Execute, ExecuteTransactionCommon, L1BatchNumber, L1TxCommonData, - MiniblockNumber, StorageKey, Transaction, H160, H256, SYSTEM_CONTEXT_ADDRESS, - SYSTEM_CONTEXT_BLOCK_INFO_POSITION, SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, U256, -}; -use zksync_utils::{h256_to_u256, u256_to_h256}; - -fn get_l1_noop() -> Transaction { - Transaction { - common_data: ExecuteTransactionCommon::L1(L1TxCommonData { - sender: H160::random(), - gas_limit: U256::from(2000000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute: Execute { - contract_address: H160::zero(), - calldata: vec![], - value: U256::zero(), - factory_deps: None, - }, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -#[test] -fn test_l2_block_initialization_timestamp() { - // This test checks that the L2 block initialization works correctly. - // Here we check that the first block must have a timestamp that is greater than or equal to the timestamp - // of the current batch. - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - // Override the timestamp of the current miniblock to be 0. - vm.vm.bootloader_state.push_l2_block(L2BlockEnv { - number: 1, - timestamp: 0, - prev_block_hash: legacy_miniblock_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }); - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt {reason: Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())} - ); -} - -#[test] -fn test_l2_block_initialization_number_non_zero() { - // This test checks that the L2 block initialization works correctly. - // Here we check that the first miniblock number cannot be zero.
- - let l1_batch = default_l1_batch(L1BatchNumber(1)); - let first_l2_block = L2BlockEnv { - number: 0, - timestamp: l1_batch.timestamp, - prev_block_hash: legacy_miniblock_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 0, first_l2_block, timestamp); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt { - reason: Halt::FailedToSetL2Block( - "L2 block number is never expected to be zero".to_string() - ) - } - ); -} - -fn test_same_l2_block( - expected_error: Option<Halt>, - override_timestamp: Option<u64>, - override_prev_block_hash: Option<H256>, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - vm.vm.push_transaction(l1_tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!res.result.is_failed()); - - let mut current_l2_block = vm.vm.batch_env.first_l2_block; - - if let Some(timestamp) = override_timestamp { - current_l2_block.timestamp = timestamp; - } - if let Some(prev_block_hash) = override_prev_block_hash { - current_l2_block.prev_block_hash = prev_block_hash; - } - - if (None, None) == (override_timestamp, override_prev_block_hash) { - current_l2_block.max_virtual_blocks_to_create = 0; - } - - vm.vm.push_transaction(l1_tx); - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 1, current_l2_block, timestamp); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_same_l2_block() { - // This test aims to cover the case when there are multiple transactions inside the same L2 block.
- - // Case 1: Incorrect timestamp - test_same_l2_block( - Some(Halt::FailedToSetL2Block( - "The timestamp of the same L2 block must be same".to_string(), - )), - Some(0), - None, - ); - - // Case 2: Incorrect previous block hash - test_same_l2_block( - Some(Halt::FailedToSetL2Block( - "The previous hash of the same L2 block must be same".to_string(), - )), - None, - Some(H256::zero()), - ); - - // Case 3: Correct continuation of the same L2 block - test_same_l2_block(None, None, None); -} - -fn test_new_l2_block( - first_l2_block: L2BlockEnv, - overridden_second_block_number: Option<u32>, - overridden_second_block_timestamp: Option<u64>, - overridden_second_block_prev_block_hash: Option<H256>, - expected_error: Option<Halt>, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - l1_batch.first_l2_block = first_l2_block; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - // First, we execute the first transaction - vm.vm.push_transaction(l1_tx.clone()); - vm.vm.execute(VmExecutionMode::OneTx); - - let mut second_l2_block = vm.vm.batch_env.first_l2_block; - second_l2_block.number += 1; - second_l2_block.timestamp += 1; - second_l2_block.prev_block_hash = vm.vm.bootloader_state.last_l2_block().get_hash(); - - if let Some(block_number) = overridden_second_block_number { - second_l2_block.number = block_number; - } - if let Some(timestamp) = overridden_second_block_timestamp { - second_l2_block.timestamp = timestamp; - } - if let Some(prev_block_hash) = overridden_second_block_prev_block_hash { - second_l2_block.prev_block_hash = prev_block_hash; - } - - vm.vm.bootloader_state.push_l2_block(second_l2_block); - - vm.vm.push_transaction(l1_tx); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_new_l2_block() { - // This test aims to cover potential issues with initializing a new L2 block. - - let correct_first_block = L2BlockEnv { - number: 1, - timestamp: 1, - prev_block_hash: legacy_miniblock_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }; - - // Case 1: Block number increasing by more than 1 - test_new_l2_block( - correct_first_block, - Some(3), - None, - None, - Some(Halt::FailedToSetL2Block( - "Invalid new L2 block number".to_string(), - )), - ); - - // Case 2: Timestamp not increasing - test_new_l2_block( - correct_first_block, - None, - Some(1), - None, - Some(Halt::FailedToSetL2Block("The timestamp of the new L2 block must be greater than the timestamp of the previous L2 block".to_string())), - ); - - // Case 3: Incorrect previous block hash - test_new_l2_block( - correct_first_block, - None, - None, - Some(H256::zero()), - Some(Halt::FailedToSetL2Block( - "The current L2 block hash is incorrect".to_string(), - )), - ); - - // Case 4: Correct new block - test_new_l2_block(correct_first_block, None, None, None, None); -} - -#[allow(clippy::too_many_arguments)] -fn test_first_in_batch( - miniblock_timestamp: u64, - miniblock_number: u32, - pending_txs_hash: H256, - batch_timestamp: u64, - new_batch_timestamp: u64, - batch_number: u32, - proposed_block: L2BlockEnv, - expected_error: Option<Halt>, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.number
+= 1; - l1_batch.timestamp = new_batch_timestamp; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_tx = get_l1_noop(); - - // Setting the values provided. - let storage_ptr = vm.vm.state.storage.storage.get_ptr(); - let miniblock_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - ); - let pending_txs_hash_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, - ); - let batch_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_BLOCK_INFO_POSITION, - ); - let prev_block_hash_position = get_l2_block_hash_key(miniblock_number - 1); - - storage_ptr.borrow_mut().set_value( - miniblock_info_slot, - u256_to_h256(pack_block_info( - miniblock_number as u64, - miniblock_timestamp, - )), - ); - storage_ptr - .borrow_mut() - .set_value(pending_txs_hash_slot, pending_txs_hash); - storage_ptr.borrow_mut().set_value( - batch_info_slot, - u256_to_h256(pack_block_info(batch_number as u64, batch_timestamp)), - ); - storage_ptr.borrow_mut().set_value( - prev_block_hash_position, - legacy_miniblock_hash(MiniblockNumber(miniblock_number - 1)), - ); - - // In order to skip checks from the Rust side of the VM, we firstly use some definitely correct L2 block info. - // And then override it with the user-provided value - - let last_l2_block = vm.vm.bootloader_state.last_l2_block(); - let new_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: last_l2_block.timestamp + 1, - prev_block_hash: last_l2_block.get_hash(), - max_virtual_blocks_to_create: last_l2_block.max_virtual_blocks_to_create, - }; - - vm.vm.bootloader_state.push_l2_block(new_l2_block); - vm.vm.push_transaction(l1_tx); - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 0, proposed_block, timestamp); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_first_in_batch() { - test_first_in_batch( - 1, - 1, - H256::zero(), - 1, - 2, - 1, - L2BlockEnv { - number: 2, - timestamp: 2, - prev_block_hash: miniblock_hash( - MiniblockNumber(1), - 1, - legacy_miniblock_hash(MiniblockNumber(0)), - H256::zero(), - ), - max_virtual_blocks_to_create: 1, - }, - None, - ); - - test_first_in_batch( - 8, - 1, - H256::zero(), - 5, - 12, - 1, - L2BlockEnv { - number: 2, - timestamp: 9, - prev_block_hash: miniblock_hash(MiniblockNumber(1), 8, legacy_miniblock_hash(MiniblockNumber(0)), H256::zero()), - max_virtual_blocks_to_create: 1 - }, - Some(Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())), - ); -} - -#[test] -fn test_l2_block_upgrade() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - vm.vm - .state - .storage - .storage - .get_ptr() - .borrow_mut() - .set_value(get_code_key(&SYSTEM_CONTEXT_ADDRESS), H256::default()); - - let l1_tx = get_l1_noop(); - // Firstly we execute the first transaction - 
vm.vm.push_transaction(l1_tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "No revert reason expected"); - let result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!result.result.is_failed(), "No revert reason expected"); -} - -#[test] -fn test_l2_block_upgrade_ending() { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch.clone()) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - let storage = vm.storage.clone(); - - storage - .borrow_mut() - .set_value(get_code_key(&SYSTEM_CONTEXT_ADDRESS), H256::default()); - - vm.vm.push_transaction(l1_tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(!result.result.is_failed(), "No revert reason expected"); - - let virtual_block_info = storage.borrow_mut().read_value(&StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - CURRENT_VIRTUAL_BLOCK_INFO_POSITION, - )); - - let (virtual_block_number, virtual_block_timestamp) = - unpack_block_info(h256_to_u256(virtual_block_info)); - - assert_eq!(virtual_block_number as u32, l1_batch.first_l2_block.number); - assert_eq!(virtual_block_timestamp, l1_batch.first_l2_block.timestamp); - vm.vm.push_transaction(l1_tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "No revert reason expected"); - let result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!result.result.is_failed(), "No revert reason expected"); -} - -fn set_manual_l2_block_info<S: WriteStorage, H: HistoryMode>( - vm: &mut Vm<S, H>, - tx_number: usize, - block_info: L2BlockEnv, - timestamp: Timestamp, -) { - let fictive_miniblock_position = - TX_OPERATOR_L2_BLOCK_INFO_OFFSET + TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO * tx_number; - - vm.state.memory.populate_page( - BOOTLOADER_HEAP_PAGE as usize, - vec![ - (fictive_miniblock_position, block_info.number.into()), - (fictive_miniblock_position + 1, block_info.timestamp.into()), - ( - fictive_miniblock_position + 2, - h256_to_u256(block_info.prev_block_hash), - ), - ( - fictive_miniblock_position + 3, - block_info.max_virtual_blocks_to_create.into(), - ), - ], - timestamp, - ) -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/mod.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/mod.rs deleted file mode 100644 index ffb38dd3725..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/mod.rs +++ /dev/null @@ -1,20 +0,0 @@ -mod bootloader; -mod default_aa; -// TODO - fix this test -// mod invalid_bytecode; -mod bytecode_publishing; -mod call_tracer; -mod gas_limit; -mod get_used_contracts; -mod is_write_initial; -mod l1_tx_execution; -mod l2_blocks; -mod nonce_holder; -mod refunds; -mod require_eip712; -mod rollbacks; -mod simple_execution; -mod tester; -mod tracing_execution_error; -mod upgrade; -mod utils; diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/nonce_holder.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/nonce_holder.rs deleted file mode 100644 index 162a3f46cb1..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/nonce_holder.rs +++ /dev/null @@ -1,182 +0,0 @@ -use zksync_types::{Execute, Nonce}; - -use crate::interface::{ - ExecutionResult, Halt, TxExecutionMode, TxRevertReason, VmExecutionMode, VmInterface, - VmRevertReason, -}; -use crate::vm_latest::HistoryEnabled; -use 
crate::vm_virtual_blocks::tests::tester::{Account, VmTesterBuilder}; -use crate::vm_virtual_blocks::tests::utils::read_nonce_holder_tester; -use crate::vm_virtual_blocks::types::internals::TransactionData; - -pub enum NonceHolderTestMode { - SetValueUnderNonce, - IncreaseMinNonceBy5, - IncreaseMinNonceTooMuch, - LeaveNonceUnused, - IncreaseMinNonceBy1, - SwitchToArbitraryOrdering, -} - -impl From<NonceHolderTestMode> for u8 { - fn from(mode: NonceHolderTestMode) -> u8 { - match mode { - NonceHolderTestMode::SetValueUnderNonce => 0, - NonceHolderTestMode::IncreaseMinNonceBy5 => 1, - NonceHolderTestMode::IncreaseMinNonceTooMuch => 2, - NonceHolderTestMode::LeaveNonceUnused => 3, - NonceHolderTestMode::IncreaseMinNonceBy1 => 4, - NonceHolderTestMode::SwitchToArbitraryOrdering => 5, - } - } -} - -#[test] -fn test_nonce_holder() { - let mut account = Account::random(); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_custom_contracts(vec![( - read_nonce_holder_tester().to_vec(), - account.address, - true, - )]) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let mut run_nonce_test = |nonce: u32, - test_mode: NonceHolderTestMode, - error_message: Option<String>, - comment: &'static str| { - // In this test we have to reset the VM state after each test case, because once the bootloader fails during the validation of the transaction, - // it will fail again and again. At the same time we have to keep the same storage, because we want to keep the nonce holder contract state. - // The easiest way in terms of lifetimes is to reuse vm_builder to achieve it. - vm.reset_state(true); - let mut transaction_data: TransactionData = account - .get_l2_tx_for_execute_with_nonce( - Execute { - contract_address: account.address, - calldata: vec![12], - value: Default::default(), - factory_deps: None, - }, - None, - Nonce(nonce), - ) - .into(); - - transaction_data.signature = vec![test_mode.into()]; - vm.vm.push_raw_transaction(transaction_data, 0, 0, true); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(msg) = error_message { - let expected_error = - TxRevertReason::Halt(Halt::ValidationFailed(VmRevertReason::General { - msg, - data: vec![], - })); - let ExecutionResult::Halt { reason } = result.result else { - panic!("Expected revert, got {:?}", result.result); - }; - assert_eq!( - reason.to_string(), - expected_error.to_string(), - "{}", - comment - ); - } else { - assert!(!result.result.is_failed(), "{}", comment); - } - }; - // Test 1: trying to set a value under a non-sequential nonce value.
- run_nonce_test( - 1u32, - NonceHolderTestMode::SetValueUnderNonce, - Some("Previous nonce has not been used".to_string()), - "Allowed to set value under non-sequential value", - ); - - // Test 2: increase min nonce by 1 with sequential nonce ordering: - run_nonce_test( - 0u32, - NonceHolderTestMode::IncreaseMinNonceBy1, - None, - "Failed to increment nonce by 1 for sequential account", - ); - - // Test 3: correctly set value under nonce with sequential nonce ordering: - run_nonce_test( - 1u32, - NonceHolderTestMode::SetValueUnderNonce, - None, - "Failed to set value under nonce sequential value", - ); - - // Test 5: migrate to the arbitrary nonce ordering: - run_nonce_test( - 2u32, - NonceHolderTestMode::SwitchToArbitraryOrdering, - None, - "Failed to switch to arbitrary ordering", - ); - - // Test 6: increase min nonce by 5 - run_nonce_test( - 6u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - None, - "Failed to increase min nonce by 5", - ); - - // Test 7: since the nonces in range [6,10] are no longer allowed, the - // tx with nonce 10 should not be allowed - run_nonce_test( - 10u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - Some("Reusing the same nonce twice".to_string()), - "Allowed to reuse nonce below the minimal one", - ); - - // Test 8: we should be able to use nonce 13 - run_nonce_test( - 13u32, - NonceHolderTestMode::SetValueUnderNonce, - None, - "Did not allow to use unused nonce 13", - ); - - // Test 9: we should not be able to reuse nonce 13 - run_nonce_test( - 13u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - Some("Reusing the same nonce twice".to_string()), - "Allowed to reuse the same nonce twice", - ); - - // Test 10: we should be able to simply use nonce 14, while bumping the minimal nonce by 5 - run_nonce_test( - 14u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - None, - "Did not allow to use a bumped nonce", - ); - - // Test 11: Do not allow bumping nonce by too much - run_nonce_test( - 16u32, - NonceHolderTestMode::IncreaseMinNonceTooMuch, - Some("The value for incrementing the nonce is too high".to_string()), - "Allowed for incrementing min nonce too much", - ); - - // Test 12: Do not allow not setting a nonce as used - run_nonce_test( - 16u32, - NonceHolderTestMode::LeaveNonceUnused, - Some("The nonce was not set as used".to_string()), - "Allowed to leave nonce as unused", - ); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/refunds.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/refunds.rs deleted file mode 100644 index d0b3b7cbee3..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/refunds.rs +++ /dev/null @@ -1,152 +0,0 @@ -use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterface}; -use crate::vm_virtual_blocks::tests::tester::{DeployContractsTx, TxType, VmTesterBuilder}; -use crate::vm_virtual_blocks::tests::utils::read_test_contract; - -use crate::vm_latest::HistoryEnabled; -use crate::vm_virtual_blocks::types::internals::TransactionData; - -#[test] -fn test_predetermined_refunded_gas() { - // In this test, we compare the execution of the bootloader with the predefined - // refunded gas and without it - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_batch = vm.vm.batch_env.clone(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let DeployContractsTx { - tx, - bytecode_hash: _, - address: _, - } = 
account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(!result.result.is_failed()); - - // If the refund provided by the operator or the final refund is 0, - // there is no impact of the operator's refund at all, and so this test does not - // make much sense. - assert!( - result.refunds.operator_suggested_refund > 0, - "The operator's refund is 0" - ); - assert!(result.refunds.gas_refunded > 0, "The final refund is 0"); - - let result_without_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_without_predefined_refunds = vm.vm.get_current_execution_state(); - assert!(!result_without_predefined_refunds.result.is_failed(),); - - // Here we want to provide the same refund from the operator and check that it's the correct one. - // We execute the whole block without the refund tracer, because the refund tracer will eventually override the provided refund. - // But the overall result should be the same - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let tx: TransactionData = tx.into(); - let block_gas_per_pubdata_byte = vm.vm.batch_env.block_gas_price_per_pubdata(); - // Overhead - let overhead = tx.overhead_gas(block_gas_per_pubdata_byte as u32); - vm.vm - .push_raw_transaction(tx.clone(), overhead, result.refunds.gas_refunded, true); - - let result_with_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_with_predefined_refunds = vm.vm.get_current_execution_state(); - - assert!(!result_with_predefined_refunds.result.is_failed()); - - // We need to sort these lists as those are flattened from HashMaps - current_state_with_predefined_refunds - .used_contract_hashes - .sort(); - current_state_without_predefined_refunds - .used_contract_hashes - .sort(); - - assert_eq!( - current_state_with_predefined_refunds.events, - current_state_without_predefined_refunds.events - ); - - assert_eq!( - current_state_with_predefined_refunds.l2_to_l1_logs, - current_state_without_predefined_refunds.l2_to_l1_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.storage_log_queries, - current_state_without_predefined_refunds.storage_log_queries - ); - assert_eq!( - current_state_with_predefined_refunds.used_contract_hashes, - current_state_without_predefined_refunds.used_contract_hashes - ); - - // In this test we provide a different refund from the operator. - // We still can't use the refund tracer, because it will override the refund. - // But we can check that the logs and events have changed.
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let changed_operator_suggested_refund = result.refunds.gas_refunded + 1000; - vm.vm - .push_raw_transaction(tx, overhead, changed_operator_suggested_refund, true); - let result = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_with_changed_predefined_refunds = vm.vm.get_current_execution_state(); - - assert!(!result.result.is_failed()); - current_state_with_changed_predefined_refunds - .used_contract_hashes - .sort(); - current_state_without_predefined_refunds - .used_contract_hashes - .sort(); - - assert_eq!( - current_state_with_changed_predefined_refunds.events.len(), - current_state_without_predefined_refunds.events.len() - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.events, - current_state_without_predefined_refunds.events - ); - - assert_eq!( - current_state_with_changed_predefined_refunds.l2_to_l1_logs, - current_state_without_predefined_refunds.l2_to_l1_logs - ); - - assert_eq!( - current_state_with_changed_predefined_refunds - .storage_log_queries - .len(), - current_state_without_predefined_refunds - .storage_log_queries - .len() - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.storage_log_queries, - current_state_without_predefined_refunds.storage_log_queries - ); - assert_eq!( - current_state_with_changed_predefined_refunds.used_contract_hashes, - current_state_without_predefined_refunds.used_contract_hashes - ); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/require_eip712.rs deleted file mode 100644 index 988841e90ce..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/require_eip712.rs +++ /dev/null @@ -1,161 +0,0 @@ -use std::convert::TryInto; - -use ethabi::Token; - -use zksync_eth_signer::raw_ethereum_tx::TransactionParameters; -use zksync_eth_signer::EthereumSigner; -use zksync_system_constants::L2_ETH_TOKEN_ADDRESS; -use zksync_types::fee::Fee; -use zksync_types::l2::L2Tx; -use zksync_types::transaction_request::TransactionRequest; -use zksync_types::utils::storage_key_for_standard_token_balance; -use zksync_types::{AccountTreeId, Address, Eip712Domain, Execute, Nonce, Transaction, U256}; - -use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterface}; -use crate::vm_latest::HistoryDisabled; -use crate::vm_virtual_blocks::tests::tester::{Account, VmTester, VmTesterBuilder}; -use crate::vm_virtual_blocks::tests::utils::read_many_owners_custom_account_contract; - -impl VmTester<HistoryDisabled> { - pub(crate) fn get_eth_balance(&mut self, address: Address) -> U256 { - let key = storage_key_for_standard_token_balance( - AccountTreeId::new(L2_ETH_TOKEN_ADDRESS), - &address, - ); - self.vm.state.storage.storage.read_from_storage(&key) - } -} - -// TODO: refactor this test; it uses too many internal details of the VM -#[tokio::test] -/// This test deploys 'buggy' account abstraction code, and then tries accessing it both with legacy -/// and EIP712 transactions. -/// Currently we support both, but in the future, we should allow only EIP712 transactions to access the AA accounts.
-async fn test_require_eip712() { - // Use 3 accounts: - // - private_address - EOA account, where we have the key - // - account_address - AA account, where the contract is deployed - // - beneficiary - an EOA account, where we'll try to transfer the tokens. - let account_abstraction = Account::random(); - let mut private_account = Account::random(); - let beneficiary = Account::random(); - - let (bytecode, contract) = read_many_owners_custom_account_contract(); - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_custom_contracts(vec![(bytecode, account_abstraction.address, true)]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account_abstraction.clone(), private_account.clone()]) - .build(); - - assert_eq!(vm.get_eth_balance(beneficiary.address), U256::from(0)); - - let chain_id: u32 = 270; - - // First, let's set the owners of the AA account to the private_address. - // (so that messages signed by private_address, are authorized to act on behalf of the AA account). - let set_owners_function = contract.function("setOwners").unwrap(); - let encoded_input = set_owners_function - .encode_input(&[Token::Array(vec![Token::Address(private_account.address)])]) - .unwrap(); - - let tx = private_account.get_l2_tx_for_execute( - Execute { - contract_address: account_abstraction.address, - calldata: encoded_input, - value: Default::default(), - factory_deps: None, - }, - None, - ); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - let private_account_balance = vm.get_eth_balance(private_account.address); - - // And now let's do the transfer from the 'account abstraction' to 'beneficiary' (using 'legacy' transaction). - // Normally this would not work - unless the operator is malicious. - let aa_raw_tx = TransactionParameters { - nonce: U256::from(0), - to: Some(beneficiary.address), - gas: U256::from(100000000), - gas_price: Some(U256::from(10000000)), - value: U256::from(888000088), - data: vec![], - chain_id: 270, - transaction_type: None, - access_list: None, - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - }; - - let aa_tx = private_account.sign_legacy_tx(aa_raw_tx).await; - let (tx_request, hash) = TransactionRequest::from_bytes(&aa_tx, 270.into()).unwrap(); - - let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000).unwrap(); - l2_tx.set_input(aa_tx, hash); - // Pretend that operator is malicious and sets the initiator to the AA account. - l2_tx.common_data.initiator_address = account_abstraction.address; - let transaction: Transaction = l2_tx.try_into().unwrap(); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(888000088) - ); - // Make sure that the tokens were transferred from the AA account. 
- assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); - - // // Now send the 'classic' EIP712 transaction - let tx_712 = L2Tx::new( - beneficiary.address, - vec![], - Nonce(1), - Fee { - gas_limit: U256::from(1000000000), - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - gas_per_pubdata_limit: U256::from(1000000000), - }, - account_abstraction.address, - U256::from(28374938), - None, - Default::default(), - ); - - let transaction_request: TransactionRequest = tx_712.into(); - - let domain = Eip712Domain::new(chain_id.into()); - let signature = private_account - .get_pk_signer() - .sign_typed_data(&domain, &transaction_request) - .await - .unwrap(); - let encoded_tx = transaction_request.get_signed_bytes(&signature, chain_id.into()); - - let (aa_txn_request, aa_hash) = - TransactionRequest::from_bytes(&encoded_tx, chain_id.into()).unwrap(); - - let mut l2_tx = L2Tx::from_request(aa_txn_request, 100000).unwrap(); - l2_tx.set_input(encoded_tx, aa_hash); - - let transaction: Transaction = l2_tx.try_into().unwrap(); - vm.vm.push_transaction(transaction); - vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(916375026) - ); - assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/simple_execution.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/simple_execution.rs deleted file mode 100644 index c4eac73499f..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/simple_execution.rs +++ /dev/null @@ -1,77 +0,0 @@ -use crate::interface::{ExecutionResult, VmExecutionMode, VmInterface}; -use crate::vm_latest::HistoryDisabled; -use crate::vm_virtual_blocks::tests::tester::{TxType, VmTesterBuilder}; - -#[test] -fn estimate_fee() { - let mut vm_tester = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - vm_tester.deploy_test_contract(); - let account = &mut vm_tester.rich_accounts[0]; - - let tx = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L2, - ); - - vm_tester.vm.push_transaction(tx); - - let result = vm_tester.vm.execute(VmExecutionMode::OneTx); - assert_matches!(result.result, ExecutionResult::Success { .. }); -} - -#[test] -fn simple_execute() { - let mut vm_tester = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - vm_tester.deploy_test_contract(); - - let account = &mut vm_tester.rich_accounts[0]; - - let tx1 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - - let tx2 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - true, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - - let tx3 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - let vm = &mut vm_tester.vm; - vm.push_transaction(tx1); - vm.push_transaction(tx2); - vm.push_transaction(tx3); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Success { .. 
}); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Revert { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Success { .. }); - let block_tip = vm.execute(VmExecutionMode::Batch); - assert_matches!(block_tip.result, ExecutionResult::Success { .. }); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/inner_state.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/inner_state.rs deleted file mode 100644 index a5c0db9468b..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/inner_state.rs +++ /dev/null @@ -1,119 +0,0 @@ -use std::collections::HashMap; - -use zk_evm_1_3_3::aux_structures::Timestamp; -use zk_evm_1_3_3::vm_state::VmLocalState; -use crate::interface::storage::WriteStorage; - -use zksync_types::{StorageKey, StorageLogQuery, StorageValue, U256}; - -use crate::vm_virtual_blocks::old_vm::event_sink::InMemoryEventSink; -use crate::vm_virtual_blocks::old_vm::history_recorder::{ - AppDataFrameManagerWithHistory, HistoryRecorder, -}; -use crate::vm_virtual_blocks::{HistoryEnabled, HistoryMode, SimpleMemory, Vm}; -use crate::HistoryMode as CommonHistoryMode; - -#[derive(Clone, Debug)] -pub(crate) struct ModifiedKeysMap(HashMap<StorageKey, StorageValue>); - -// We consider hashmaps to be equal even if there is a key - // that is not present in one but has zero value in another. -impl PartialEq for ModifiedKeysMap { - fn eq(&self, other: &Self) -> bool { - for (key, value) in self.0.iter() { - if *value != other.0.get(key).cloned().unwrap_or_default() { - return false; - } - } - for (key, value) in other.0.iter() { - if *value != self.0.get(key).cloned().unwrap_or_default() { - return false; - } - } - true - } -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct DecommitterTestInnerState<H: HistoryMode> { - /// There is no way to "truly" compare the storage pointer, - /// so we just compare the modified keys. This is reasonable enough. - pub(crate) modified_storage_keys: ModifiedKeysMap, - pub(crate) known_bytecodes: HistoryRecorder<HashMap<U256, Vec<U256>>, H>, - pub(crate) decommitted_code_hashes: HistoryRecorder<HashMap<U256, u32>, HistoryEnabled>, -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct StorageOracleInnerState<H: HistoryMode> { - /// There is no way to "truly" compare the storage pointer, - /// so we just compare the modified keys. This is reasonable enough. - pub(crate) modified_storage_keys: ModifiedKeysMap, - - pub(crate) frames_stack: AppDataFrameManagerWithHistory<Box<StorageLogQuery>, H>, -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct PrecompileProcessorTestInnerState<H: HistoryMode> { - pub(crate) timestamp_history: HistoryRecorder<Vec<Timestamp>, H>, -} - -/// A struct that encapsulates the state of the VM's oracles -/// The state is to be used in tests. -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct VmInstanceInnerState<H: HistoryMode> { - event_sink: InMemoryEventSink<H>, - precompile_processor_state: PrecompileProcessorTestInnerState<H>, - memory: SimpleMemory<H>, - decommitter_state: DecommitterTestInnerState<H>, - storage_oracle_state: StorageOracleInnerState<H>, - local_state: VmLocalState, -} - -impl<S: WriteStorage, H: CommonHistoryMode> Vm<S, H> { - // Dump inner state of the VM.
- pub(crate) fn dump_inner_state(&self) -> VmInstanceInnerState<H::VmVirtualBlocksMode> { - let event_sink = self.state.event_sink.clone(); - let precompile_processor_state = PrecompileProcessorTestInnerState { - timestamp_history: self.state.precompiles_processor.timestamp_history.clone(), - }; - let memory = self.state.memory.clone(); - let decommitter_state = DecommitterTestInnerState { - modified_storage_keys: ModifiedKeysMap( - self.state - .decommittment_processor - .get_storage() - .borrow() - .modified_storage_keys() - .clone(), - ), - known_bytecodes: self.state.decommittment_processor.known_bytecodes.clone(), - decommitted_code_hashes: self - .state - .decommittment_processor - .get_decommitted_code_hashes_with_history() - .clone(), - }; - let storage_oracle_state = StorageOracleInnerState { - modified_storage_keys: ModifiedKeysMap( - self.state - .storage - .storage - .get_ptr() - .borrow() - .modified_storage_keys() - .clone(), - ), - frames_stack: self.state.storage.frames_stack.clone(), - }; - let local_state = self.state.local_state.clone(); - - VmInstanceInnerState { - event_sink, - precompile_processor_state, - memory, - decommitter_state, - storage_oracle_state, - local_state, - } - } -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/mod.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/mod.rs deleted file mode 100644 index dfe8905a7e0..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/mod.rs +++ /dev/null @@ -1,7 +0,0 @@ -pub(crate) use transaction_test_info::{ExpectedError, TransactionTestInfo, TxModifier}; -pub(crate) use vm_tester::{default_l1_batch, InMemoryStorageView, VmTester, VmTesterBuilder}; -pub(crate) use zksync_test_account::{Account, DeployContractsTx, TxType}; - -mod inner_state; -mod transaction_test_info; -mod vm_tester; diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/transaction_test_info.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/transaction_test_info.rs deleted file mode 100644 index 15d3d98ab1d..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/transaction_test_info.rs +++ /dev/null @@ -1,216 +0,0 @@ -use zksync_types::{ExecuteTransactionCommon, Transaction}; - -use crate::interface::{ - CurrentExecutionState, ExecutionResult, Halt, TxRevertReason, VmExecutionMode, - VmExecutionResultAndLogs, VmInterface, VmInterfaceHistoryEnabled, VmRevertReason, -}; -use crate::vm_latest::HistoryEnabled; -use crate::vm_virtual_blocks::tests::tester::vm_tester::VmTester; - -#[derive(Debug, Clone)] -pub(crate) enum TxModifier { - WrongSignatureLength, - WrongSignature, - WrongMagicValue, - WrongNonce, - NonceReused, -} - -#[derive(Debug, Clone)] -pub(crate) enum TxExpectedResult { - Rejected { error: ExpectedError }, - Processed { rollback: bool }, -} - -#[derive(Debug, Clone)] -pub(crate) struct TransactionTestInfo { - tx: Transaction, - result: TxExpectedResult, -} - -#[derive(Debug, Clone)] -pub(crate) struct ExpectedError { - pub(crate) revert_reason: TxRevertReason, - pub(crate) modifier: Option<TxModifier>, -} - -impl From<TxModifier> for ExpectedError { - fn from(value: TxModifier) -> Self { - let revert_reason = match value { - TxModifier::WrongSignatureLength => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Signature length is incorrect".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 29, 83, 105, 103, 110, 97, 116, 117, 114, 101, 32, - 108, 101, 110, 103, 116, 104, 32, 105, 115, 32, 105, 110, 99, 111, 114, 114, 101, 99, - 116, 0, 0, 0, - ], - }) - } - TxModifier::WrongSignature => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Account validation returned invalid magic value. Most often this means that the signature is incorrect".to_string(), - data: vec![], - }) - } - TxModifier::WrongMagicValue => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "v is neither 27 nor 28".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 22, 118, 32, 105, 115, 32, 110, 101, 105, 116, 104, - 101, 114, 32, 50, 55, 32, 110, 111, 114, 32, 50, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - }) - - } - TxModifier::WrongNonce => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Incorrect nonce".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 73, 110, 99, 111, 114, 114, 101, 99, 116, 32, 110, - 111, 110, 99, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - }) - } - TxModifier::NonceReused => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Reusing the same nonce twice".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 82, 101, 117, 115, 105, 110, 103, 32, 116, 104, - 101, 32, 115, 97, 109, 101, 32, 110, 111, 110, 99, 101, 32, 116, 119, 105, 99, 101, 0, - 0, 0, 0, - ], - }) - } - }; - - ExpectedError { - revert_reason: TxRevertReason::Halt(revert_reason), - modifier: Some(value), - } - } -} - -impl TransactionTestInfo { - pub(crate) fn new_rejected( - mut transaction: Transaction, - expected_error: ExpectedError, - ) -> Self { - transaction.common_data = match transaction.common_data { - ExecuteTransactionCommon::L2(mut data) => { - if let Some(modifier) = &expected_error.modifier { - match modifier { - TxModifier::WrongSignatureLength => { - data.signature = data.signature[..data.signature.len() - 20].to_vec() - } - TxModifier::WrongSignature => data.signature = vec![27u8; 65], - TxModifier::WrongMagicValue => data.signature = vec![1u8; 65], - TxModifier::WrongNonce => { - // Do not need to modify signature for nonce error - } - TxModifier::NonceReused => { - // Do not need to modify signature for nonce error - } - } - } - ExecuteTransactionCommon::L2(data) - } - _ => panic!("L1 transactions are not supported"), - }; - - Self { - tx: transaction, - result: TxExpectedResult::Rejected { - error: expected_error, - }, - } - } - - pub(crate) fn new_processed(transaction: Transaction, should_be_rollbacked: bool) -> Self { - Self { - tx: transaction, - result: TxExpectedResult::Processed { - rollback: should_be_rollbacked, - }, - } - } - - fn verify_result(&self, result: &VmExecutionResultAndLogs) { - match &self.result { - TxExpectedResult::Rejected { error } => match &result.result { - ExecutionResult::Success { .. 
} => { - panic!("Transaction should be reverted {:?}", self.tx.nonce()) - } - ExecutionResult::Revert { output } => match &error.revert_reason { - TxRevertReason::TxReverted(expected) => { - assert_eq!(output, expected) - } - _ => { - panic!("Error types mismatch"); - } - }, - ExecutionResult::Halt { reason } => match &error.revert_reason { - TxRevertReason::Halt(expected) => { - assert_eq!(reason, expected) - } - _ => { - panic!("Error types mismatch"); - } - }, - }, - TxExpectedResult::Processed { .. } => { - assert!(!result.result.is_failed()); - } - } - } - - fn should_rollback(&self) -> bool { - match &self.result { - TxExpectedResult::Rejected { .. } => true, - TxExpectedResult::Processed { rollback } => *rollback, - } - } -} - -impl VmTester { - pub(crate) fn execute_and_verify_txs( - &mut self, - txs: &[TransactionTestInfo], - ) -> CurrentExecutionState { - for tx_test_info in txs { - self.execute_tx_and_verify(tx_test_info.clone()); - } - self.vm.execute(VmExecutionMode::Batch); - let mut state = self.vm.get_current_execution_state(); - state.used_contract_hashes.sort(); - state - } - - pub(crate) fn execute_tx_and_verify( - &mut self, - tx_test_info: TransactionTestInfo, - ) -> VmExecutionResultAndLogs { - let inner_state_before = self.vm.dump_inner_state(); - self.vm.make_snapshot(); - self.vm.push_transaction(tx_test_info.tx.clone()); - let result = self.vm.execute(VmExecutionMode::OneTx); - tx_test_info.verify_result(&result); - if tx_test_info.should_rollback() { - self.vm.rollback_to_the_latest_snapshot(); - let inner_state_after = self.vm.dump_inner_state(); - assert_eq!( - inner_state_before, inner_state_after, - "Inner state before and after rollback should be equal" - ); - } - result - } -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/vm_tester.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/vm_tester.rs deleted file mode 100644 index 9fe0635eba3..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/vm_tester.rs +++ /dev/null @@ -1,291 +0,0 @@ -use std::marker::PhantomData; -use zksync_contracts::BaseSystemContracts; -use crate::interface::storage::{InMemoryStorage, StoragePtr, StorageView, WriteStorage}; - -use crate::HistoryMode; -use zksync_types::block::legacy_miniblock_hash; -use zksync_types::helpers::unix_timestamp_ms; -use zksync_types::utils::{deployed_address_create, storage_key_for_eth_balance}; -use zksync_types::{ - get_code_key, get_is_account_key, Address, L1BatchNumber, MiniblockNumber, Nonce, - ProtocolVersionId, U256, -}; -use zksync_utils::bytecode::hash_bytecode; -use zksync_utils::u256_to_h256; - -use crate::vm_virtual_blocks::constants::BLOCK_GAS_LIMIT; - -use crate::interface::{L1BatchEnv, L2Block, L2BlockEnv, SystemEnv, VmExecutionMode}; -use crate::interface::{TxExecutionMode, VmInterface}; -use crate::vm_virtual_blocks::tests::tester::Account; -use crate::vm_virtual_blocks::tests::tester::TxType; -use crate::vm_virtual_blocks::tests::utils::read_test_contract; -use crate::vm_virtual_blocks::utils::l2_blocks::load_last_l2_block; -use crate::vm_virtual_blocks::Vm; - -pub(crate) type InMemoryStorageView = StorageView; - -pub(crate) struct VmTester { - pub(crate) vm: Vm, - pub(crate) storage: StoragePtr, - pub(crate) fee_account: Address, - pub(crate) deployer: Option, - pub(crate) test_contract: Option
, - pub(crate) rich_accounts: Vec, - pub(crate) custom_contracts: Vec, - _phantom: PhantomData, -} - -impl VmTester { - pub(crate) fn deploy_test_contract(&mut self) { - let contract = read_test_contract(); - let tx = self - .deployer - .as_mut() - .expect("You have to initialize builder with deployer") - .get_deploy_tx(&contract, None, TxType::L2) - .tx; - let nonce = tx.nonce().unwrap().0.into(); - self.vm.push_transaction(tx); - self.vm.execute(VmExecutionMode::OneTx); - let deployed_address = - deployed_address_create(self.deployer.as_ref().unwrap().address, nonce); - self.test_contract = Some(deployed_address); - } - - pub(crate) fn reset_with_empty_storage(&mut self) { - self.storage = StorageView::new(get_empty_storage()).to_rc_ptr(); - self.reset_state(false); - } - - /// Reset the state of the VM to the initial state. - /// If `use_latest_l2_block` is true, then the VM will use the latest L2 block from storage, - /// otherwise it will use the first L2 block of l1 batch env - pub(crate) fn reset_state(&mut self, use_latest_l2_block: bool) { - for account in self.rich_accounts.iter_mut() { - account.nonce = Nonce(0); - make_account_rich(self.storage.clone(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(self.storage.clone(), deployer); - } - - if !self.custom_contracts.is_empty() { - println!("Inserting custom contracts is not yet supported") - // insert_contracts(&mut self.storage, &self.custom_contracts); - } - - let mut l1_batch = self.vm.batch_env.clone(); - if use_latest_l2_block { - let last_l2_block = load_last_l2_block(self.storage.clone()).unwrap_or(L2Block { - number: 0, - timestamp: 0, - hash: legacy_miniblock_hash(MiniblockNumber(0)), - }); - l1_batch.first_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: std::cmp::max(last_l2_block.timestamp + 1, l1_batch.timestamp), - prev_block_hash: last_l2_block.hash, - max_virtual_blocks_to_create: 1, - }; - } - - let vm = Vm::new(l1_batch, self.vm.system_env.clone(), self.storage.clone()); - - if self.test_contract.is_some() { - self.deploy_test_contract(); - } - - self.vm = vm; - } -} - -pub(crate) type ContractsToDeploy = (Vec, Address, bool); - -pub(crate) struct VmTesterBuilder { - _phantom: PhantomData, - storage: Option, - l1_batch_env: Option, - system_env: SystemEnv, - deployer: Option, - rich_accounts: Vec, - custom_contracts: Vec, -} - -impl Clone for VmTesterBuilder { - fn clone(&self) -> Self { - Self { - _phantom: PhantomData, - storage: None, - l1_batch_env: self.l1_batch_env.clone(), - system_env: self.system_env.clone(), - deployer: self.deployer.clone(), - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - } - } -} - -#[allow(dead_code)] -impl VmTesterBuilder { - pub(crate) fn new(_: H) -> Self { - Self { - _phantom: PhantomData, - storage: None, - l1_batch_env: None, - system_env: SystemEnv { - zk_porter_available: false, - version: ProtocolVersionId::latest(), - base_system_smart_contracts: BaseSystemContracts::playground(), - gas_limit: BLOCK_GAS_LIMIT, - execution_mode: TxExecutionMode::VerifyExecute, - default_validation_computational_gas_limit: BLOCK_GAS_LIMIT, - chain_id: 270.into(), - }, - deployer: None, - rich_accounts: vec![], - custom_contracts: vec![], - } - } - - pub(crate) fn with_l1_batch_env(mut self, l1_batch_env: L1BatchEnv) -> Self { - self.l1_batch_env = Some(l1_batch_env); - self - } - - pub(crate) fn with_system_env(mut self, system_env: SystemEnv) -> Self { - self.system_env = system_env; - 
self - } - - pub(crate) fn with_storage(mut self, storage: InMemoryStorage) -> Self { - self.storage = Some(storage); - self - } - - pub(crate) fn with_base_system_smart_contracts( - mut self, - base_system_smart_contracts: BaseSystemContracts, - ) -> Self { - self.system_env.base_system_smart_contracts = base_system_smart_contracts; - self - } - - pub(crate) fn with_gas_limit(mut self, gas_limit: u32) -> Self { - self.system_env.gas_limit = gas_limit; - self - } - - pub(crate) fn with_execution_mode(mut self, execution_mode: TxExecutionMode) -> Self { - self.system_env.execution_mode = execution_mode; - self - } - - pub(crate) fn with_empty_in_memory_storage(mut self) -> Self { - self.storage = Some(get_empty_storage()); - self - } - - pub(crate) fn with_random_rich_accounts(mut self, number: u32) -> Self { - for _ in 0..number { - let account = Account::random(); - self.rich_accounts.push(account); - } - self - } - - pub(crate) fn with_rich_accounts(mut self, accounts: Vec) -> Self { - self.rich_accounts.extend(accounts); - self - } - - pub(crate) fn with_deployer(mut self) -> Self { - let deployer = Account::random(); - self.deployer = Some(deployer); - self - } - - pub(crate) fn with_custom_contracts(mut self, contracts: Vec) -> Self { - self.custom_contracts = contracts; - self - } - - pub(crate) fn build(self) -> VmTester { - let l1_batch_env = self - .l1_batch_env - .unwrap_or_else(|| default_l1_batch(L1BatchNumber(1))); - - let mut raw_storage = self.storage.unwrap_or_else(get_empty_storage); - insert_contracts(&mut raw_storage, &self.custom_contracts); - let storage_ptr = StorageView::new(raw_storage).to_rc_ptr(); - for account in self.rich_accounts.iter() { - make_account_rich(storage_ptr.clone(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(storage_ptr.clone(), deployer); - } - let fee_account = l1_batch_env.fee_account; - - let vm = Vm::new(l1_batch_env, self.system_env, storage_ptr.clone()); - - VmTester { - vm, - storage: storage_ptr, - fee_account, - deployer: self.deployer, - test_contract: None, - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - _phantom: PhantomData, - } - } -} - -pub(crate) fn default_l1_batch(number: L1BatchNumber) -> L1BatchEnv { - let timestamp = unix_timestamp_ms(); - L1BatchEnv { - previous_batch_hash: None, - number, - timestamp, - l1_gas_price: 50_000_000_000, // 50 gwei - fair_l2_gas_price: 250_000_000, // 0.25 gwei - fee_account: Address::random(), - enforced_base_fee: None, - first_l2_block: L2BlockEnv { - number: 1, - timestamp, - prev_block_hash: legacy_miniblock_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 100, - }, - } -} - -pub(crate) fn make_account_rich(storage: StoragePtr, account: &Account) { - let key = storage_key_for_eth_balance(&account.address); - storage - .as_ref() - .borrow_mut() - .set_value(key, u256_to_h256(U256::from(10u64.pow(19)))); -} - -pub(crate) fn get_empty_storage() -> InMemoryStorage { - InMemoryStorage::with_system_contracts(hash_bytecode) -} - -// Inserts the contracts into the test environment, bypassing the -// deployer system contract. Besides the reference to storage -// it accepts a `contracts` tuple of information about the contract -// and whether or not it is an account. 
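The `VmTesterBuilder` above is the entry point for every test in this deleted module. Here is a hedged usage sketch, assuming the module's items are in scope (the bytecode and address are placeholders); it mirrors how `tracing_execution_error.rs` below builds its VM, and the `insert_contracts` implementation follows next.

// Hypothetical test setup. The `ContractsToDeploy` tuple layout is
// (bytecode, address, is_account); contracts are written straight into
// storage, bypassing the deployer system contract.
fn setup_vm_with_custom_contract() {
    let bytecode: Vec<u8> = read_test_contract();
    let address = Address::random();
    let mut vm = VmTesterBuilder::new(HistoryEnabled)
        .with_empty_in_memory_storage()
        .with_execution_mode(TxExecutionMode::VerifyExecute)
        .with_custom_contracts(vec![(bytecode, address, false)])
        .with_random_rich_accounts(1)
        .build();
    let _account = &mut vm.rich_accounts[0]; // funded by `make_account_rich`
}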
-fn insert_contracts(raw_storage: &mut InMemoryStorage, contracts: &[ContractsToDeploy]) { - for (contract, address, is_account) in contracts { - let deployer_code_key = get_code_key(address); - raw_storage.set_value(deployer_code_key, hash_bytecode(contract)); - - if *is_account { - let is_account_key = get_is_account_key(address); - raw_storage.set_value(is_account_key, u256_to_h256(1_u32.into())); - } - - raw_storage.store_factory_dep(hash_bytecode(contract), contract.clone()); - } -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tracing_execution_error.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tracing_execution_error.rs deleted file mode 100644 index 8258abe0685..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tracing_execution_error.rs +++ /dev/null @@ -1,52 +0,0 @@ -use crate::interface::{TxExecutionMode, TxRevertReason, VmRevertReason}; -use zksync_types::{Execute, H160}; - -use crate::vm_latest::HistoryEnabled; -use crate::vm_virtual_blocks::tests::tester::{ - ExpectedError, TransactionTestInfo, VmTesterBuilder, -}; -use crate::vm_virtual_blocks::tests::utils::{ - get_execute_error_calldata, read_error_contract, BASE_SYSTEM_CONTRACTS, -}; - -#[test] -fn test_tracing_of_execution_errors() { - let contract_address = H160::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_custom_contracts(vec![(read_error_contract(), contract_address, false)]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address, - calldata: get_execute_error_calldata(), - value: Default::default(), - factory_deps: Some(vec![]), - }, - None, - ); - - vm.execute_tx_and_verify(TransactionTestInfo::new_rejected( - tx, - ExpectedError { - revert_reason: TxRevertReason::TxReverted(VmRevertReason::General { - msg: "short".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 115, 104, 111, 114, 116, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, - ], - }), - modifier: None, - }, - )); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/upgrade.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/upgrade.rs deleted file mode 100644 index 8b3fa0ea291..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/upgrade.rs +++ /dev/null @@ -1,344 +0,0 @@ -use zk_evm_1_3_3::aux_structures::Timestamp; - -use zksync_types::{ - ethabi::Contract, - Execute, COMPLEX_UPGRADER_ADDRESS, CONTRACT_DEPLOYER_ADDRESS, CONTRACT_FORCE_DEPLOYER_ADDRESS, - REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, - {ethabi::Token, Address, ExecuteTransactionCommon, Transaction, H256, U256}, - {get_code_key, get_known_code_key, H160}, -}; - -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use zksync_contracts::{deployer_contract, load_contract, load_sys_contract, read_bytecode}; -use crate::interface::storage::WriteStorage; -use zksync_test_account::TxType; - -use crate::interface::{ - ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceHistoryEnabled, -}; -use 
crate::vm_latest::HistoryEnabled; -use crate::vm_virtual_blocks::tests::tester::VmTesterBuilder; -use crate::vm_virtual_blocks::tests::utils::verify_required_storage; -use zksync_types::protocol_version::ProtocolUpgradeTxCommonData; - -use super::utils::read_test_contract; - -/// In this test we ensure that the requirements for protocol upgrade transactions are enforced by the bootloader: -/// - This transaction must be the only one in block -/// - If present, this transaction must be the first one in block -#[test] -fn test_protocol_upgrade_is_first() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let bytecode_hash = hash_bytecode(&read_test_contract()); - - // Here we just use some random transaction of protocol upgrade type: - let protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecodehash to - address: H160::random(), - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - let normal_l1_transaction = vm.rich_accounts[0] - .get_deploy_tx(&read_test_contract(), None, TxType::L1 { serial_id: 0 }) - .tx; - - let expected_error = - Halt::UnexpectedVMBehavior("Assertion error: Protocol upgrade tx not first".to_string()); - - vm.vm.make_snapshot(); - // Test 1: there must be only one system transaction in block - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - - vm.vm.execute(VmExecutionMode::OneTx); - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error.clone() - } - ); - - // Test 2: the protocol upgrade tx must be the first one in block - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error - } - ); - - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(protocol_upgrade_transaction); - vm.vm.push_transaction(normal_l1_transaction); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); -} - -/// In this test we try to test how force deployments could be done via protocol upgrade transactions. -#[test] -fn test_force_deploy_upgrade() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - let bytecode_hash = hash_bytecode(&read_test_contract()); - - let known_code_key = get_known_code_key(&bytecode_hash); - // It is generally expected that all the keys will be set as known prior to the protocol upgrade. 
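An aside on the expected-error byte arrays in `transaction_test_info.rs` and `tracing_execution_error.rs` above: they are ordinary Solidity `Error(string)` ABI encodings, not magic constants. The `8, 195, 121, 160` prefix is the `Error(string)` selector `0x08c379a0`, followed by a 32-byte offset, a 32-byte length, and the zero-padded message. A self-contained sketch that reconstructs one of them follows; the force-deploy test resumes below.

fn abi_word(value: u64) -> [u8; 32] {
    // Big-endian value right-aligned in a 32-byte ABI word.
    let mut word = [0u8; 32];
    word[24..].copy_from_slice(&value.to_be_bytes());
    word
}

fn encode_error_string(msg: &str) -> Vec<u8> {
    let mut out = vec![0x08, 0xc3, 0x79, 0xa0]; // keccak256("Error(string)")[..4]
    out.extend_from_slice(&abi_word(0x20)); // offset of the dynamic string
    out.extend_from_slice(&abi_word(msg.len() as u64)); // string length
    out.extend_from_slice(msg.as_bytes());
    let payload_len = out.len() - 4;
    out.resize(4 + (payload_len + 31) / 32 * 32, 0); // pad to a word boundary
    out
}

fn main() {
    // Reproduces the `data` for the "short" revert expected above.
    let data = encode_error_string("short");
    assert_eq!(data[..4], [8, 195, 121, 160]);
    assert_eq!(data.len(), 4 + 3 * 32);
}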
- storage_view - .borrow_mut() - .set_value(known_code_key, u256_to_h256(1.into())); - drop(storage_view); - - let address_to_deploy = H160::random(); - // Here we just use some random transaction of protocol upgrade type: - let transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecodehash to - address: address_to_deploy, - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - vm.vm.push_transaction(transaction); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = vec![(bytecode_hash, get_code_key(&address_to_deploy))]; - - // Verify that the bytecode has been set correctly - verify_required_storage(&vm.vm.state, expected_slots); -} - -/// Here we show how the work with the complex upgrader could be done -#[test] -fn test_complex_upgrader() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - - let bytecode_hash = hash_bytecode(&read_complex_upgrade()); - let msg_sender_test_hash = hash_bytecode(&read_msg_sender_test()); - - // Let's assume that the bytecode for the implementation of the complex upgrade - // is already deployed in some address in userspace - let upgrade_impl = H160::random(); - let account_code_key = get_code_key(&upgrade_impl); - - storage_view - .borrow_mut() - .set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); - storage_view.borrow_mut().set_value( - get_known_code_key(&msg_sender_test_hash), - u256_to_h256(1.into()), - ); - storage_view - .borrow_mut() - .set_value(account_code_key, bytecode_hash); - drop(storage_view); - - vm.vm.state.decommittment_processor.populate( - vec![ - ( - h256_to_u256(bytecode_hash), - bytes_to_be_words(read_complex_upgrade()), - ), - ( - h256_to_u256(msg_sender_test_hash), - bytes_to_be_words(read_msg_sender_test()), - ), - ], - Timestamp(0), - ); - - let address_to_deploy1 = H160::random(); - let address_to_deploy2 = H160::random(); - - let transaction = get_complex_upgrade_tx( - upgrade_impl, - address_to_deploy1, - address_to_deploy2, - bytecode_hash, - ); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = vec![ - (bytecode_hash, get_code_key(&address_to_deploy1)), - (bytecode_hash, get_code_key(&address_to_deploy2)), - ]; - - // Verify that the bytecode has been set correctly - verify_required_storage(&vm.vm.state, expected_slots); -} - -#[derive(Debug, Clone)] -struct ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash: H256, - // The address on which to deploy the bytecodehash to - address: Address, - // Whether to run the constructor on the force deployment - call_constructor: bool, - // The value with which to initialize a contract - value: U256, - // The constructor calldata - input: Vec, -} - -fn get_forced_deploy_tx(deployment: &[ForceDeployment]) -> Transaction { - let deployer = deployer_contract(); - let contract_function = 
deployer.function("forceDeployOnAddresses").unwrap(); - - let encoded_deployments: Vec<_> = deployment - .iter() - .map(|deployment| { - Token::Tuple(vec![ - Token::FixedBytes(deployment.bytecode_hash.as_bytes().to_vec()), - Token::Address(deployment.address), - Token::Bool(deployment.call_constructor), - Token::Uint(deployment.value), - Token::Bytes(deployment.input.clone()), - ]) - }) - .collect(); - - let params = [Token::Array(encoded_deployments)]; - - let calldata = contract_function - .encode_input(&params) - .expect("failed to encode parameters"); - - let execute = Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, - calldata, - factory_deps: None, - value: U256::zero(), - }; - - Transaction { - common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, - gas_limit: U256::from(200_000_000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -// Returns the transaction that performs a complex protocol upgrade. -// The first param is the address of the implementation of the complex upgrade -// in user-space, while the next 3 params are params of the implementation itself. -// For an explanation of the parameters, please refer to: -// etc/contracts-test-data/complex-upgrade/complex-upgrade.sol -fn get_complex_upgrade_tx( - implementation_address: Address, - address1: Address, - address2: Address, - bytecode_hash: H256, -) -> Transaction { - let impl_contract = get_complex_upgrade_abi(); - let impl_function = impl_contract.function("someComplexUpgrade").unwrap(); - let impl_calldata = impl_function - .encode_input(&[ - Token::Address(address1), - Token::Address(address2), - Token::FixedBytes(bytecode_hash.as_bytes().to_vec()), - ]) - .unwrap(); - - let complex_upgrader = get_complex_upgrader_abi(); - let upgrade_function = complex_upgrader.function("upgrade").unwrap(); - let complex_upgrader_calldata = upgrade_function - .encode_input(&[ - Token::Address(implementation_address), - Token::Bytes(impl_calldata), - ]) - .unwrap(); - - let execute = Execute { - contract_address: COMPLEX_UPGRADER_ADDRESS, - calldata: complex_upgrader_calldata, - factory_deps: None, - value: U256::zero(), - }; - - Transaction { - common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, - gas_limit: U256::from(200_000_000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -fn read_complex_upgrade() -> Vec<u8> { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json") -} - -fn read_msg_sender_test() -> Vec<u8> { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/msg-sender.sol/MsgSenderTest.json") -} - -fn get_complex_upgrade_abi() -> Contract { - load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json" - ) -} - -fn get_complex_upgrader_abi() -> Contract { - load_sys_contract("ComplexUpgrader") -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/utils.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/utils.rs deleted file mode 100644 index e3db232ffce..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/utils.rs +++ /dev/null @@ -1,106
+0,0 @@ -use ethabi::Contract; -use once_cell::sync::Lazy; - -use crate::vm_virtual_blocks::tests::tester::InMemoryStorageView; -use zksync_contracts::{ - load_contract, read_bytecode, read_zbin_bytecode, BaseSystemContracts, SystemContractCode, -}; -use crate::interface::storage::{StoragePtr, WriteStorage}; -use zksync_types::utils::storage_key_for_standard_token_balance; -use zksync_types::{AccountTreeId, Address, StorageKey, H256, U256}; -use zksync_utils::bytecode::hash_bytecode; -use zksync_utils::{bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use crate::vm_virtual_blocks::types::internals::ZkSyncVmState; -use crate::vm_virtual_blocks::HistoryMode; - -pub(crate) static BASE_SYSTEM_CONTRACTS: Lazy = - Lazy::new(BaseSystemContracts::load_from_disk); - -// Probably make it a part of vm tester -pub(crate) fn verify_required_storage( - state: &ZkSyncVmState, - required_values: Vec<(H256, StorageKey)>, -) { - for (required_value, key) in required_values { - let current_value = state.storage.storage.read_from_storage(&key); - - assert_eq!( - u256_to_h256(current_value), - required_value, - "Invalid value at key {key:?}" - ); - } -} - -pub(crate) fn verify_required_memory( - state: &ZkSyncVmState, - required_values: Vec<(U256, u32, u32)>, -) { - for (required_value, memory_page, cell) in required_values { - let current_value = state - .memory - .read_slot(memory_page as usize, cell as usize) - .value; - assert_eq!(current_value, required_value); - } -} - -pub(crate) fn get_balance( - token_id: AccountTreeId, - account: &Address, - main_storage: StoragePtr, -) -> U256 { - let key = storage_key_for_standard_token_balance(token_id, account); - h256_to_u256(main_storage.borrow_mut().read_value(&key)) -} - -pub(crate) fn read_test_contract() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json") -} - -pub(crate) fn get_bootloader(test: &str) -> SystemContractCode { - let bootloader_code = read_zbin_bytecode(format!( - "contracts/system-contracts/bootloader/tests/artifacts/{}.yul.zbin", - test - )); - - let bootloader_hash = hash_bytecode(&bootloader_code); - SystemContractCode { - code: bytes_to_be_words(bootloader_code), - hash: bootloader_hash, - } -} - -pub(crate) fn read_nonce_holder_tester() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/custom-account/nonce-holder-test.sol/NonceHolderTest.json") -} - -pub(crate) fn read_error_contract() -> Vec { - read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ) -} - -pub(crate) fn get_execute_error_calldata() -> Vec { - let test_contract = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ); - - let function = test_contract.function("require_short").unwrap(); - - function - .encode_input(&[]) - .expect("failed to encode parameters") -} - -pub(crate) fn read_many_owners_custom_account_contract() -> (Vec, Contract) { - let path = "etc/contracts-test-data/artifacts-zk/contracts/custom-account/many-owners-custom-account.sol/ManyOwnersCustomAccount.json"; - (read_bytecode(path), load_contract(path)) -} - -pub(crate) fn read_max_depth_contract() -> Vec { - read_zbin_bytecode( - "core/tests/ts-integration/contracts/zkasm/artifacts/deep_stak.zkasm/deep_stak.zkasm.zbin", - ) -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs index 2a9d6eed6c7..a2d18e10de4 100644 --- 
a/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs @@ -1,14 +1,16 @@ +use std::rc::Rc; + use circuit_sequencer_api_1_3_3::sort_storage_access::sort_storage_access_queries; use zksync_types::{l2_to_l1_log::UserL2ToL1Log, Transaction}; +use zksync_vm_interface::{pubdata::PubdataBuilder, InspectExecutionMode}; use crate::{ glue::GlueInto, interface::{ storage::{StoragePtr, WriteStorage}, BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, - FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, + FinishedL1Batch, L1BatchEnv, L2BlockEnv, PushTransactionResult, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, - VmMemoryMetrics, }, vm_latest::HistoryEnabled, vm_virtual_blocks::{ @@ -75,18 +77,23 @@ impl Vm { impl VmInterface for Vm { type TracerDispatcher = TracerDispatcher; - /// Push tx into memory for the future execution - fn push_transaction(&mut self, tx: Transaction) { - self.push_transaction_with_compression(tx, true) + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult<'_> { + self.push_transaction_with_compression(tx, true); + PushTransactionResult { + compressed_bytecodes: self + .bootloader_state + .get_last_tx_compressed_bytecodes() + .into(), + } } /// Execute VM with custom tracers. fn inspect( &mut self, tracer: &mut TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { - self.inspect_inner(tracer, execution_mode) + self.inspect_inner(tracer, execution_mode.into()) } fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { @@ -118,12 +125,8 @@ impl VmInterface for Vm { } } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - self.record_vm_memory_metrics_inner() - } - - fn finish_batch(&mut self) -> FinishedL1Batch { - let result = self.inspect(&mut TracerDispatcher::default(), VmExecutionMode::Batch); + fn finish_batch(&mut self, _pubdata_builder: Rc) -> FinishedL1Batch { + let result = self.inspect_inner(&mut TracerDispatcher::default(), VmExecutionMode::Batch); let execution_state = self.get_current_execution_state(); let bootloader_memory = self.bootloader_state.bootloader_memory(); FinishedL1Batch { diff --git a/core/lib/multivm/src/vm_instance.rs b/core/lib/multivm/src/vm_instance.rs index bfb121a740e..5ff27046377 100644 --- a/core/lib/multivm/src/vm_instance.rs +++ b/core/lib/multivm/src/vm_instance.rs @@ -1,16 +1,17 @@ -use std::mem; +use std::{mem, rc::Rc}; -use zksync_types::{vm::VmVersion, Transaction}; +use zksync_types::{vm::VmVersion, ProtocolVersionId, Transaction}; use zksync_vm2::interface::Tracer; +use zksync_vm_interface::{pubdata::PubdataBuilder, InspectExecutionMode}; use crate::{ glue::history_mode::HistoryMode, interface::{ storage::{ImmutableStorageView, ReadStorage, StoragePtr, StorageView}, utils::ShadowVm, - BytecodeCompressionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, - VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, - VmInterfaceHistoryEnabled, VmMemoryMetrics, + BytecodeCompressionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, PushTransactionResult, + SystemEnv, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, + VmMemoryMetrics, }, tracers::TracerDispatcher, vm_latest::HistoryEnabled, @@ -55,8 +56,7 @@ macro_rules! 
dispatch_legacy_vm { impl VmInterface for LegacyVmInstance { type TracerDispatcher = TracerDispatcher, H>; - /// Push tx into memory for the future execution - fn push_transaction(&mut self, tx: Transaction) { + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult<'_> { dispatch_legacy_vm!(self.push_transaction(tx)) } @@ -64,7 +64,7 @@ impl VmInterface for LegacyVmInstance { fn inspect( &mut self, dispatcher: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { dispatch_legacy_vm!(self.inspect(&mut mem::take(dispatcher).into(), execution_mode)) } @@ -87,13 +87,9 @@ impl VmInterface for LegacyVmInstance { )) } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - dispatch_legacy_vm!(self.record_vm_memory_metrics()) - } - /// Return the results of execution of all batch - fn finish_batch(&mut self) -> FinishedL1Batch { - dispatch_legacy_vm!(self.finish_batch()) + fn finish_batch(&mut self, pubdata_builder: Rc) -> FinishedL1Batch { + dispatch_legacy_vm!(self.finish_batch(pubdata_builder)) } } @@ -222,6 +218,11 @@ impl LegacyVmInstance { } } } + + /// Returns memory-related oracle metrics. + pub fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { + dispatch_legacy_vm!(self.record_vm_memory_metrics()) + } } /// Fast VM shadowed by the latest legacy VM. @@ -255,14 +256,14 @@ impl VmInterface for FastVmInsta Tr, ); - fn push_transaction(&mut self, tx: Transaction) { - dispatch_fast_vm!(self.push_transaction(tx)); + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult<'_> { + dispatch_fast_vm!(self.push_transaction(tx)) } fn inspect( &mut self, tracer: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { match self { Self::Fast(vm) => vm.inspect(&mut tracer.1, execution_mode), @@ -292,12 +293,8 @@ impl VmInterface for FastVmInsta } } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - dispatch_fast_vm!(self.record_vm_memory_metrics()) - } - - fn finish_batch(&mut self) -> FinishedL1Batch { - dispatch_fast_vm!(self.finish_batch()) + fn finish_batch(&mut self, pubdata_builder: Rc) -> FinishedL1Batch { + dispatch_fast_vm!(self.finish_batch(pubdata_builder)) } } @@ -340,3 +337,11 @@ impl FastVmInstance { Self::Shadowed(ShadowedFastVm::new(l1_batch_env, system_env, storage_view)) } } + +/// Checks whether the protocol version is supported by the fast VM. 
+pub fn is_supported_by_fast_vm(protocol_version: ProtocolVersionId) -> bool { + matches!( + protocol_version.into(), + VmVersion::Vm1_5_0IncreasedBootloaderMemory + ) +} diff --git a/core/lib/object_store/src/file.rs b/core/lib/object_store/src/file.rs index 308cd65427f..3484f2dad34 100644 --- a/core/lib/object_store/src/file.rs +++ b/core/lib/object_store/src/file.rs @@ -42,7 +42,6 @@ impl FileBackedObjectStore { Bucket::SchedulerWitnessJobsFri, Bucket::ProofsFri, Bucket::StorageSnapshot, - Bucket::TeeVerifierInput, Bucket::VmDumps, ] { let bucket_path = format!("{base_dir}/{bucket}"); diff --git a/core/lib/object_store/src/raw.rs b/core/lib/object_store/src/raw.rs index 740e8d76e24..0859d58d04b 100644 --- a/core/lib/object_store/src/raw.rs +++ b/core/lib/object_store/src/raw.rs @@ -19,7 +19,6 @@ pub enum Bucket { ProofsTee, StorageSnapshot, DataAvailability, - TeeVerifierInput, VmDumps, } @@ -39,7 +38,6 @@ impl Bucket { Self::ProofsTee => "proofs_tee", Self::StorageSnapshot => "storage_logs_snapshots", Self::DataAvailability => "data_availability", - Self::TeeVerifierInput => "tee_verifier_inputs", Self::VmDumps => "vm_dumps", } } diff --git a/core/lib/protobuf_config/Cargo.toml b/core/lib/protobuf_config/Cargo.toml index 92d9bd53978..87a0a63567b 100644 --- a/core/lib/protobuf_config/Cargo.toml +++ b/core/lib/protobuf_config/Cargo.toml @@ -26,6 +26,7 @@ rand.workspace = true hex.workspace = true secrecy.workspace = true tracing.workspace = true +time.workspace = true [build-dependencies] zksync_protobuf_build.workspace = true diff --git a/core/lib/protobuf_config/src/api.rs b/core/lib/protobuf_config/src/api.rs index bb9189fd649..9cfa73c28ac 100644 --- a/core/lib/protobuf_config/src/api.rs +++ b/core/lib/protobuf_config/src/api.rs @@ -1,4 +1,4 @@ -use std::num::NonZeroUsize; +use std::num::{NonZeroU32, NonZeroUsize}; use anyhow::Context as _; use zksync_config::configs::{api, ApiConfig}; @@ -113,6 +113,11 @@ impl ProtoRepr for proto::Web3JsonRpc { .map(|x| x.try_into()) .transpose() .context("latest_values_cache_size_mb")?, + latest_values_max_block_lag: self + .latest_values_max_block_lag + .map(|x| x.try_into()) + .transpose() + .context("latest_values_max_block_lag")?, fee_history_limit: self.fee_history_limit, max_batch_request_size: self .max_batch_request_size @@ -184,6 +189,7 @@ impl ProtoRepr for proto::Web3JsonRpc { latest_values_cache_size_mb: this .latest_values_cache_size_mb .map(|x| x.try_into().unwrap()), + latest_values_max_block_lag: this.latest_values_max_block_lag.map(NonZeroU32::get), fee_history_limit: this.fee_history_limit, max_batch_request_size: this.max_batch_request_size.map(|x| x.try_into().unwrap()), max_response_body_size_mb: this diff --git a/core/lib/protobuf_config/src/chain.rs b/core/lib/protobuf_config/src/chain.rs index f91bf07e43f..2f8ac8df07e 100644 --- a/core/lib/protobuf_config/src/chain.rs +++ b/core/lib/protobuf_config/src/chain.rs @@ -86,6 +86,7 @@ impl ProtoRepr for proto::StateKeeper { // needed during the initialization from files bootloader_hash: None, default_aa_hash: None, + evm_emulator_hash: None, fee_account_addr: None, l1_batch_commit_data_generator_mode: Default::default(), }) diff --git a/core/lib/protobuf_config/src/consensus.rs b/core/lib/protobuf_config/src/consensus.rs index 81cad437fe4..2219b6a82ea 100644 --- a/core/lib/protobuf_config/src/consensus.rs +++ b/core/lib/protobuf_config/src/consensus.rs @@ -148,6 +148,7 @@ impl ProtoRepr for proto::Config { }; Ok(Self::Type { + port: self.port.and_then(|x| x.try_into().ok()), 
server_addr: required(&self.server_addr) .and_then(|x| Ok(x.parse()?)) .context("server_addr")?, @@ -182,6 +183,7 @@ impl ProtoRepr for proto::Config { fn build(this: &Self::Type) -> Self { Self { + port: this.port.map(|x| x.into()), server_addr: Some(this.server_addr.to_string()), public_addr: Some(this.public_addr.0.clone()), max_payload_size: Some(this.max_payload_size.try_into().unwrap()), diff --git a/core/lib/protobuf_config/src/contracts.rs b/core/lib/protobuf_config/src/contracts.rs index 3141c7149ec..dc5b1c567e8 100644 --- a/core/lib/protobuf_config/src/contracts.rs +++ b/core/lib/protobuf_config/src/contracts.rs @@ -119,12 +119,6 @@ impl ProtoRepr for proto::Contracts { .map(|x| parse_h160(x)) .transpose() .context("base_token_addr")?, - l2_da_validator_addr: l2 - .da_validator_addr - .as_ref() - .map(|x| parse_h160(x)) - .transpose() - .context("l2_da_validator_addr")?, chain_admin_addr: l1 .chain_admin_addr .as_ref() @@ -132,6 +126,12 @@ impl ProtoRepr for proto::Contracts { .transpose() .context("chain_admin_addr")?, settlement_layer: self.settlement_layer, + l2_da_validator_addr: l2 + .da_validator_addr + .as_ref() + .map(|x| parse_h160(x)) + .transpose() + .context("l2_da_validator_addr")?, }) } diff --git a/core/lib/protobuf_config/src/da_client.rs b/core/lib/protobuf_config/src/da_client.rs index 1499e88efb4..a17a8711a27 100644 --- a/core/lib/protobuf_config/src/da_client.rs +++ b/core/lib/protobuf_config/src/da_client.rs @@ -1,10 +1,10 @@ use anyhow::Context; -use zksync_config::{ - configs::{ - da_client::DAClientConfig::{Avail, ObjectStore}, - {self}, +use zksync_config::configs::{ + self, + da_client::{ + avail::{AvailClientConfig, AvailConfig, AvailDefaultConfig, AvailGasRelayConfig}, + DAClientConfig::{Avail, ObjectStore}, }, - AvailConfig, }; use zksync_protobuf::{required, ProtoRepr}; @@ -18,15 +18,31 @@ impl ProtoRepr for proto::DataAvailabilityClient { let client = match config { proto::data_availability_client::Config::Avail(conf) => Avail(AvailConfig { - api_node_url: required(&conf.api_node_url) - .context("api_node_url")? - .clone(), bridge_api_url: required(&conf.bridge_api_url) .context("bridge_api_url")? .clone(), - app_id: *required(&conf.app_id).context("app_id")?, timeout: *required(&conf.timeout).context("timeout")? as usize, - max_retries: *required(&conf.max_retries).context("max_retries")? as usize, + config: match conf.config.as_ref() { + Some(proto::avail_config::Config::FullClient(full_client_conf)) => { + AvailClientConfig::FullClient(AvailDefaultConfig { + api_node_url: required(&full_client_conf.api_node_url) + .context("api_node_url")? + .clone(), + app_id: *required(&full_client_conf.app_id).context("app_id")?, + }) + } + Some(proto::avail_config::Config::GasRelay(gas_relay_conf)) => { + AvailClientConfig::GasRelay(AvailGasRelayConfig { + gas_relay_api_url: required(&gas_relay_conf.gas_relay_api_url) + .context("gas_relay_api_url")? + .clone(), + max_retries: *required(&gas_relay_conf.max_retries) + .context("max_retries")? + as usize, + }) + } + None => return Err(anyhow::anyhow!("Invalid Avail DA configuration")), + }, }), proto::data_availability_client::Config::ObjectStore(conf) => { ObjectStore(object_store_proto::ObjectStore::read(conf)?) 
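To make the shape of the refactored Avail configuration concrete, here is a hedged sketch of building it in code, using the types imported above (the URLs and app id are placeholders); the mirror-image `build` direction follows in the next hunk.

use zksync_config::configs::da_client::{
    avail::{AvailClientConfig, AvailConfig, AvailDefaultConfig},
    DAClientConfig,
};

// Common fields (bridge URL, timeout) stay on `AvailConfig`; client-specific
// fields move into the `AvailClientConfig` oneof (full client vs. gas relay).
fn example_avail_config() -> DAClientConfig {
    DAClientConfig::Avail(AvailConfig {
        bridge_api_url: "https://bridge.avail.example".to_string(), // placeholder
        timeout: 30,
        config: AvailClientConfig::FullClient(AvailDefaultConfig {
            api_node_url: "wss://node.avail.example".to_string(), // placeholder
            app_id: 1,
        }),
    })
}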
@@ -41,11 +57,22 @@ impl ProtoRepr for proto::DataAvailabilityClient { Avail(config) => Self { config: Some(proto::data_availability_client::Config::Avail( proto::AvailConfig { - api_node_url: Some(config.api_node_url.clone()), bridge_api_url: Some(config.bridge_api_url.clone()), - app_id: Some(config.app_id), timeout: Some(config.timeout as u64), - max_retries: Some(config.max_retries as u64), + config: match &config.config { + AvailClientConfig::FullClient(conf) => Some( + proto::avail_config::Config::FullClient(proto::AvailClientConfig { + api_node_url: Some(conf.api_node_url.clone()), + app_id: Some(conf.app_id), + }), + ), + AvailClientConfig::GasRelay(conf) => Some( + proto::avail_config::Config::GasRelay(proto::AvailGasRelayConfig { + gas_relay_api_url: Some(conf.gas_relay_api_url.clone()), + max_retries: Some(conf.max_retries as u64), + }), + ), + }, }, )), }, diff --git a/core/lib/protobuf_config/src/en.rs b/core/lib/protobuf_config/src/en.rs index 9c07d1d3929..9d1a3931060 100644 --- a/core/lib/protobuf_config/src/en.rs +++ b/core/lib/protobuf_config/src/en.rs @@ -1,4 +1,7 @@ -use std::{num::NonZeroUsize, str::FromStr}; +use std::{ + num::{NonZeroU64, NonZeroUsize}, + str::FromStr, +}; use anyhow::Context; use zksync_basic_types::{url::SensitiveUrl, L1ChainId, L2ChainId}; @@ -36,6 +39,9 @@ impl ProtoRepr for proto::ExternalNode { .as_ref() .map(|a| a.parse().context("gateway_url")) .transpose()?, + bridge_addresses_refresh_interval_sec: self + .bridge_addresses_refresh_interval_sec + .and_then(NonZeroU64::new), }) } @@ -55,6 +61,9 @@ impl ProtoRepr for proto::ExternalNode { .gateway_url .as_ref() .map(|a| a.expose_str().to_string()), + bridge_addresses_refresh_interval_sec: this + .bridge_addresses_refresh_interval_sec + .map(|a| a.get()), } } } diff --git a/core/lib/protobuf_config/src/eth.rs b/core/lib/protobuf_config/src/eth.rs index 4d41ee5036d..2f5ac5c35cf 100644 --- a/core/lib/protobuf_config/src/eth.rs +++ b/core/lib/protobuf_config/src/eth.rs @@ -1,7 +1,7 @@ use anyhow::Context as _; use zksync_config::configs::{self}; use zksync_protobuf::{required, ProtoRepr}; -use zksync_types::settlement::SettlementMode; +use zksync_types::{pubdata_da::PubdataSendingMode, settlement::SettlementMode}; use crate::{proto::eth as proto, read_optional_repr}; @@ -26,23 +26,21 @@ impl proto::ProofSendingMode { } impl proto::PubdataSendingMode { - fn new(x: &configs::eth_sender::PubdataSendingMode) -> Self { - use configs::eth_sender::PubdataSendingMode as From; + fn new(x: &PubdataSendingMode) -> Self { match x { - From::Calldata => Self::Calldata, - From::Blobs => Self::Blobs, - From::Custom => Self::Custom, - From::RelayedL2Calldata => Self::RelayedL2Calldata, + PubdataSendingMode::Calldata => Self::Calldata, + PubdataSendingMode::Blobs => Self::Blobs, + PubdataSendingMode::Custom => Self::Custom, + PubdataSendingMode::RelayedL2Calldata => Self::RelayedL2Calldata, } } - fn parse(&self) -> configs::eth_sender::PubdataSendingMode { - use configs::eth_sender::PubdataSendingMode as To; + fn parse(&self) -> PubdataSendingMode { match self { - Self::Calldata => To::Calldata, - Self::Blobs => To::Blobs, - Self::Custom => To::Custom, - Self::RelayedL2Calldata => To::RelayedL2Calldata, + Self::Calldata => PubdataSendingMode::Calldata, + Self::Blobs => PubdataSendingMode::Blobs, + Self::Custom => PubdataSendingMode::Custom, + Self::RelayedL2Calldata => PubdataSendingMode::RelayedL2Calldata, } } } @@ -136,6 +134,9 @@ impl ProtoRepr for proto::Sender { tx_aggregation_paused: 
self.tx_aggregation_only_prove_and_execute.unwrap_or(false), ignore_db_nonce: None, priority_tree_start_index: self.priority_op_start_index.map(|x| x as usize), + time_in_mempool_in_l1_blocks_cap: self + .time_in_mempool_in_l1_blocks_cap + .unwrap_or(Self::Type::default_time_in_mempool_in_l1_blocks_cap()), }) } @@ -169,6 +170,7 @@ impl ProtoRepr for proto::Sender { tx_aggregation_only_prove_and_execute: Some(this.tx_aggregation_only_prove_and_execute), tx_aggregation_paused: Some(this.tx_aggregation_paused), priority_op_start_index: this.priority_tree_start_index.map(|x| x as u64), + time_in_mempool_in_l1_blocks_cap: Some(this.time_in_mempool_in_l1_blocks_cap), } } } @@ -183,9 +185,9 @@ impl ProtoRepr for proto::GasAdjuster { .and_then(|x| Ok((*x).try_into()?)) .context("max_base_fee_samples")?, pricing_formula_parameter_a: *required(&self.pricing_formula_parameter_a) - .context("pricing_formula_parameter_a")?, + .unwrap_or(&Self::Type::default_pricing_formula_parameter_a()), pricing_formula_parameter_b: *required(&self.pricing_formula_parameter_b) - .context("pricing_formula_parameter_b")?, + .unwrap_or(&Self::Type::default_pricing_formula_parameter_b()), internal_l1_pricing_multiplier: *required(&self.internal_l1_pricing_multiplier) .context("internal_l1_pricing_multiplier")?, internal_enforced_l1_gas_price: self.internal_enforced_l1_gas_price, diff --git a/core/lib/protobuf_config/src/genesis.rs b/core/lib/protobuf_config/src/genesis.rs index 58d0448d83e..d2695f54dbf 100644 --- a/core/lib/protobuf_config/src/genesis.rs +++ b/core/lib/protobuf_config/src/genesis.rs @@ -76,6 +76,12 @@ impl ProtoRepr for proto::Genesis { .and_then(|x| parse_h256(x)) .context("default_aa_hash")?, ), + evm_emulator_hash: self + .evm_emulator_hash + .as_deref() + .map(parse_h256) + .transpose() + .context("evm_emulator_hash")?, l1_chain_id: required(&self.l1_chain_id) .map(|x| L1ChainId(*x)) .context("l1_chain_id")?, @@ -106,6 +112,7 @@ impl ProtoRepr for proto::Genesis { genesis_protocol_semantic_version: this.protocol_version.map(|x| x.to_string()), default_aa_hash: this.default_aa_hash.map(|x| format!("{:?}", x)), bootloader_hash: this.bootloader_hash.map(|x| format!("{:?}", x)), + evm_emulator_hash: this.evm_emulator_hash.map(|x| format!("{:?}", x)), fee_account: Some(format!("{:?}", this.fee_account)), l1_chain_id: Some(this.l1_chain_id.0), l2_chain_id: Some(this.l2_chain_id.as_u64()), diff --git a/core/lib/protobuf_config/src/lib.rs b/core/lib/protobuf_config/src/lib.rs index 059df4ebc19..885dd16e770 100644 --- a/core/lib/protobuf_config/src/lib.rs +++ b/core/lib/protobuf_config/src/lib.rs @@ -12,11 +12,14 @@ mod commitment_generator; mod consensus; mod contract_verifier; mod contracts; +mod da_client; mod da_dispatcher; mod database; mod en; mod eth; mod experimental; +mod external_price_api_client; +mod external_proof_integration_api; mod gateway; mod general; mod genesis; @@ -26,15 +29,12 @@ mod observability; mod proof_data_handler; pub mod proto; mod prover; +mod prover_autoscaler; +mod prover_job_monitor; mod pruning; mod secrets; -mod snapshots_creator; - -mod da_client; -mod external_price_api_client; -mod external_proof_integration_api; -mod prover_job_monitor; mod snapshot_recovery; +mod snapshots_creator; #[cfg(test)] mod tests; mod utils; @@ -65,24 +65,23 @@ pub fn read_optional_repr(field: &Option

) -> Option { .transpose() // This error will printed, only if the config partially filled, allows to debug config issues easier .map_err(|err| { - tracing::error!("Failed to serialize config: {err}"); + tracing::error!("Failed to parse config: {err:#}"); err }) .ok() .flatten() } -pub fn decode_yaml_repr( +/// Reads a yaml file. +pub fn read_yaml_repr( path: &PathBuf, deny_unknown_fields: bool, ) -> anyhow::Result { let yaml = std::fs::read_to_string(path).with_context(|| path.display().to_string())?; - let d = serde_yaml::Deserializer::from_str(&yaml); - let this: T = zksync_protobuf::serde::Deserialize { + zksync_protobuf::serde::Deserialize { deny_unknown_fields, } - .proto(d)?; - this.read() + .proto_repr_from_yaml::(&yaml) } pub fn encode_yaml_repr(value: &T::Type) -> anyhow::Result> { diff --git a/core/lib/protobuf_config/src/proof_data_handler.rs b/core/lib/protobuf_config/src/proof_data_handler.rs index 4b7bd2fd7c3..a587c702633 100644 --- a/core/lib/protobuf_config/src/proof_data_handler.rs +++ b/core/lib/protobuf_config/src/proof_data_handler.rs @@ -1,6 +1,7 @@ use anyhow::Context as _; use zksync_config::configs; use zksync_protobuf::{repr::ProtoRepr, required}; +use zksync_types::L1BatchNumber; use crate::proto::prover as proto; @@ -14,9 +15,15 @@ impl ProtoRepr for proto::ProofDataHandler { proof_generation_timeout_in_secs: required(&self.proof_generation_timeout_in_secs) .and_then(|x| Ok((*x).try_into()?)) .context("proof_generation_timeout_in_secs")?, - tee_support: required(&self.tee_support) - .copied() - .context("tee_support")?, + tee_config: configs::TeeConfig { + tee_support: self + .tee_support + .unwrap_or_else(configs::TeeConfig::default_tee_support), + first_tee_processed_batch: self + .first_tee_processed_batch + .map(|x| L1BatchNumber(x as u32)) + .unwrap_or_else(configs::TeeConfig::default_first_tee_processed_batch), + }, }) } @@ -24,7 +31,8 @@ impl ProtoRepr for proto::ProofDataHandler { Self { http_port: Some(this.http_port.into()), proof_generation_timeout_in_secs: Some(this.proof_generation_timeout_in_secs.into()), - tee_support: Some(this.tee_support), + tee_support: Some(this.tee_config.tee_support), + first_tee_processed_batch: Some(this.tee_config.first_tee_processed_batch.0 as u64), } } } diff --git a/core/lib/protobuf_config/src/proto/config/api.proto b/core/lib/protobuf_config/src/proto/config/api.proto index c0d03ea7818..c97c4f3fbe2 100644 --- a/core/lib/protobuf_config/src/proto/config/api.proto +++ b/core/lib/protobuf_config/src/proto/config/api.proto @@ -41,7 +41,8 @@ message Web3JsonRpc { repeated string api_namespaces = 32; // Optional, if empty all namespaces are available optional bool extended_api_tracing = 33; // optional, default false optional bool estimate_gas_optimize_search = 34; // optional, default false - optional string settlement_layer_url = 35; // optional + optional uint32 latest_values_max_block_lag = 35; // optional + optional string settlement_layer_url = 36; // optional reserved 15; reserved "l1_to_l2_transactions_compatibility_mode"; reserved 11; reserved "request_timeout"; diff --git a/core/lib/protobuf_config/src/proto/config/da_client.proto b/core/lib/protobuf_config/src/proto/config/da_client.proto index d01bda2c847..73fa2435996 100644 --- a/core/lib/protobuf_config/src/proto/config/da_client.proto +++ b/core/lib/protobuf_config/src/proto/config/da_client.proto @@ -5,12 +5,26 @@ package zksync.config.da_client; import "zksync/config/object_store.proto"; message AvailConfig { - optional string api_node_url = 1; optional 
string bridge_api_url = 2; - optional uint32 app_id = 4; optional uint64 timeout = 5; - optional uint64 max_retries = 6; + oneof config { + AvailClientConfig full_client = 7; + AvailGasRelayConfig gas_relay = 8; + } + reserved 1; reserved "api_node_url"; reserved 3; reserved "seed"; + reserved 4; reserved "app_id"; + reserved 6; reserved "max_retries"; +} + +message AvailClientConfig { + optional string api_node_url = 1; + optional uint32 app_id = 2; +} + +message AvailGasRelayConfig { + optional string gas_relay_api_url = 1; + optional uint64 max_retries = 2; } message DataAvailabilityClient { diff --git a/core/lib/protobuf_config/src/proto/config/en.proto b/core/lib/protobuf_config/src/proto/config/en.proto index d8a13d31d4b..69412704ea0 100644 --- a/core/lib/protobuf_config/src/proto/config/en.proto +++ b/core/lib/protobuf_config/src/proto/config/en.proto @@ -10,4 +10,5 @@ message ExternalNode { optional uint64 main_node_rate_limit_rps = 6; // optional optional config.genesis.L1BatchCommitDataGeneratorMode l1_batch_commit_data_generator_mode = 7; // optional, default to rollup optional string gateway_url = 8; // optional + optional uint64 bridge_addresses_refresh_interval_sec = 9; // optional } diff --git a/core/lib/protobuf_config/src/proto/config/eth_sender.proto b/core/lib/protobuf_config/src/proto/config/eth_sender.proto index 33270efd1f2..24d30bc6187 100644 --- a/core/lib/protobuf_config/src/proto/config/eth_sender.proto +++ b/core/lib/protobuf_config/src/proto/config/eth_sender.proto @@ -53,7 +53,8 @@ message Sender { reserved 19; reserved "proof_loading_mode"; optional bool tx_aggregation_paused = 20; // required optional bool tx_aggregation_only_prove_and_execute = 21; // required - optional uint64 priority_op_start_index = 22; // optional + optional uint32 time_in_mempool_in_l1_blocks_cap = 22; // optional + optional uint64 priority_op_start_index = 23; // optional } message GasAdjuster { diff --git a/core/lib/protobuf_config/src/proto/config/genesis.proto b/core/lib/protobuf_config/src/proto/config/genesis.proto index 54af7a459f9..6559595ae61 100644 --- a/core/lib/protobuf_config/src/proto/config/genesis.proto +++ b/core/lib/protobuf_config/src/proto/config/genesis.proto @@ -28,6 +28,7 @@ message Genesis { optional Prover prover = 10; optional L1BatchCommitDataGeneratorMode l1_batch_commit_data_generator_mode = 29; // optional, default to rollup optional string genesis_protocol_semantic_version = 12; // optional; - optional uint64 sl_chain_id = 13; // required; + optional string evm_emulator_hash = 13; // optional; h256 + optional uint64 sl_chain_id = 14; // required; reserved 11; reserved "shared_bridge"; } diff --git a/core/lib/protobuf_config/src/proto/config/prover.proto b/core/lib/protobuf_config/src/proto/config/prover.proto index 4fe3861183b..92ba770a756 100644 --- a/core/lib/protobuf_config/src/proto/config/prover.proto +++ b/core/lib/protobuf_config/src/proto/config/prover.proto @@ -107,5 +107,6 @@ message WitnessVectorGenerator { message ProofDataHandler { optional uint32 http_port = 1; // required; u16 optional uint32 proof_generation_timeout_in_secs = 2; // required; s - optional bool tee_support = 3; // required + optional bool tee_support = 3; // optional + optional uint64 first_tee_processed_batch = 4; // optional } diff --git a/core/lib/protobuf_config/src/proto/config/prover_autoscaler.proto b/core/lib/protobuf_config/src/proto/config/prover_autoscaler.proto new file mode 100644 index 00000000000..9b7f201e9b7 --- /dev/null +++ 
b/core/lib/protobuf_config/src/proto/config/prover_autoscaler.proto @@ -0,0 +1,59 @@ +syntax = "proto3"; + +package zksync.config.prover_autoscaler; + +import "zksync/std.proto"; +import "zksync/config/observability.proto"; + +message ProverAutoscalerConfig { + optional std.Duration graceful_shutdown_timeout = 1; // optional + optional ProverAutoscalerAgentConfig agent_config = 2; // optional + optional ProverAutoscalerScalerConfig scaler_config = 3; // optional + optional observability.Observability observability = 4; // optional +} + +message ProverAutoscalerAgentConfig { + optional uint32 prometheus_port = 1; // required + optional uint32 http_port = 2; // required + repeated string namespaces = 3; // optional + optional string cluster_name = 4; // optional + optional bool dry_run = 5; // optional +} + +message ProtocolVersion { + optional string namespace = 1; // required + optional string protocol_version = 2; // required +} + +message ClusterPriority { + optional string cluster = 1; // required + optional uint32 priority = 2; // required +} + +message ProverSpeed { + optional string gpu = 1; // required + optional uint32 speed = 2; // required +} + +message MaxProver { + optional string cluster_and_gpu = 1; // required, format: <cluster>/<gpu> + optional uint32 max = 2; // required +} + +message MinProver { + optional string namespace = 1; // required + optional uint32 min = 2; // required +} + +message ProverAutoscalerScalerConfig { + optional uint32 prometheus_port = 1; // required + optional std.Duration scaler_run_interval = 2; // optional + optional string prover_job_monitor_url = 3; // required + repeated string agents = 4; // required, at least one + repeated ProtocolVersion protocol_versions = 5; // repeated, at least one + repeated ClusterPriority cluster_priorities = 6; // optional + repeated ProverSpeed prover_speed = 7; // optional + optional uint32 long_pending_duration_s = 8; // optional + repeated MaxProver max_provers = 9; // optional + repeated MinProver min_provers = 10; // optional +} diff --git a/core/lib/protobuf_config/src/proto/config/secrets.proto b/core/lib/protobuf_config/src/proto/config/secrets.proto index a4e8c1d60dd..74f468627f8 100644 --- a/core/lib/protobuf_config/src/proto/config/secrets.proto +++ b/core/lib/protobuf_config/src/proto/config/secrets.proto @@ -22,6 +22,7 @@ message ConsensusSecrets { message AvailSecret { optional string seed_phrase = 1; + optional string gas_relay_api_key = 2; } message DataAvailabilitySecrets { diff --git a/core/lib/protobuf_config/src/proto/core/consensus.proto b/core/lib/protobuf_config/src/proto/core/consensus.proto index 92527df739a..9b0d69e7270 100644 --- a/core/lib/protobuf_config/src/proto/core/consensus.proto +++ b/core/lib/protobuf_config/src/proto/core/consensus.proto @@ -70,6 +70,9 @@ message Config { reserved 3; reserved "validators"; + // Port to listen on, for incoming TCP connections. + optional uint32 port = 12; + // IP:port to listen on, for incoming TCP connections. // Use `0.0.0.0:<port>` to listen on all network interfaces (i.e. on all IPs exposed by this VM).
optional string server_addr = 1; // required; IpAddr diff --git a/core/lib/protobuf_config/src/prover_autoscaler.rs b/core/lib/protobuf_config/src/prover_autoscaler.rs new file mode 100644 index 00000000000..51f1b162d4c --- /dev/null +++ b/core/lib/protobuf_config/src/prover_autoscaler.rs @@ -0,0 +1,240 @@ +use std::collections::HashMap; + +use anyhow::Context; +use time::Duration; +use zksync_config::configs::{self, prover_autoscaler::Gpu}; +use zksync_protobuf::{read_optional, repr::ProtoRepr, required, ProtoFmt}; + +use crate::{proto::prover_autoscaler as proto, read_optional_repr}; + +impl ProtoRepr for proto::ProverAutoscalerConfig { + type Type = configs::prover_autoscaler::ProverAutoscalerConfig; + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + graceful_shutdown_timeout: read_optional(&self.graceful_shutdown_timeout) + .context("graceful_shutdown_timeout")? + .unwrap_or(Self::Type::default_graceful_shutdown_timeout()), + agent_config: read_optional_repr(&self.agent_config), + scaler_config: read_optional_repr(&self.scaler_config), + observability: read_optional_repr(&self.observability), + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + graceful_shutdown_timeout: Some(ProtoFmt::build(&this.graceful_shutdown_timeout)), + agent_config: this.agent_config.as_ref().map(ProtoRepr::build), + scaler_config: this.scaler_config.as_ref().map(ProtoRepr::build), + observability: this.observability.as_ref().map(ProtoRepr::build), + } + } +} + +impl ProtoRepr for proto::ProverAutoscalerAgentConfig { + type Type = configs::prover_autoscaler::ProverAutoscalerAgentConfig; + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + prometheus_port: required(&self.prometheus_port) + .and_then(|x| Ok((*x).try_into()?)) + .context("prometheus_port")?, + http_port: required(&self.http_port) + .and_then(|x| Ok((*x).try_into()?)) + .context("http_port")?, + namespaces: self.namespaces.to_vec(), + cluster_name: Some("".to_string()), + dry_run: self.dry_run.unwrap_or(Self::Type::default_dry_run()), + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + prometheus_port: Some(this.prometheus_port.into()), + http_port: Some(this.http_port.into()), + namespaces: this.namespaces.clone(), + cluster_name: this.cluster_name.clone(), + dry_run: Some(this.dry_run), + } + } +} + +impl ProtoRepr for proto::ProverAutoscalerScalerConfig { + type Type = configs::prover_autoscaler::ProverAutoscalerScalerConfig; + fn read(&self) -> anyhow::Result { + Ok(Self::Type { + prometheus_port: required(&self.prometheus_port) + .and_then(|x| Ok((*x).try_into()?)) + .context("prometheus_port")?, + scaler_run_interval: read_optional(&self.scaler_run_interval) + .context("scaler_run_interval")? + .unwrap_or(Self::Type::default_scaler_run_interval()), + prover_job_monitor_url: required(&self.prover_job_monitor_url) + .context("prover_job_monitor_url")? 
+ .clone(), + agents: self.agents.to_vec(), + protocol_versions: self + .protocol_versions + .iter() + .enumerate() + .map(|(i, e)| e.read().context(i)) + .collect::>() + .context("protocol_versions")?, + cluster_priorities: self + .cluster_priorities + .iter() + .enumerate() + .map(|(i, e)| e.read().context(i)) + .collect::>() + .context("cluster_priorities")?, + prover_speed: self + .prover_speed + .iter() + .enumerate() + .map(|(i, e)| e.read().context(i)) + .collect::>() + .context("prover_speed")?, + long_pending_duration: match self.long_pending_duration_s { + Some(s) => Duration::seconds(s.into()), + None => Self::Type::default_long_pending_duration(), + }, + max_provers: self.max_provers.iter().fold(HashMap::new(), |mut acc, e| { + let (cluster_and_gpu, max) = e.read().expect("max_provers"); + if let Some((cluster, gpu)) = cluster_and_gpu.split_once('/') { + acc.entry(cluster.to_string()) + .or_default() + .insert(gpu.parse().expect("max_provers/gpu"), max); + } + acc + }), + min_provers: self + .min_provers + .iter() + .enumerate() + .map(|(i, e)| e.read().context(i)) + .collect::>() + .context("min_provers")?, + }) + } + + fn build(this: &Self::Type) -> Self { + Self { + prometheus_port: Some(this.prometheus_port.into()), + scaler_run_interval: Some(ProtoFmt::build(&this.scaler_run_interval)), + prover_job_monitor_url: Some(this.prover_job_monitor_url.clone()), + agents: this.agents.clone(), + protocol_versions: this + .protocol_versions + .iter() + .map(|(k, v)| proto::ProtocolVersion::build(&(k.clone(), v.clone()))) + .collect(), + cluster_priorities: this + .cluster_priorities + .iter() + .map(|(k, v)| proto::ClusterPriority::build(&(k.clone(), *v))) + .collect(), + prover_speed: this + .prover_speed + .iter() + .map(|(k, v)| proto::ProverSpeed::build(&(*k, *v))) + .collect(), + long_pending_duration_s: Some(this.long_pending_duration.whole_seconds() as u32), + max_provers: this + .max_provers + .iter() + .flat_map(|(cluster, inner_map)| { + inner_map.iter().map(move |(gpu, max)| { + proto::MaxProver::build(&(format!("{}/{}", cluster, gpu), *max)) + }) + }) + .collect(), + min_provers: this + .min_provers + .iter() + .map(|(k, v)| proto::MinProver::build(&(k.clone(), *v))) + .collect(), + } + } +} + +impl ProtoRepr for proto::ProtocolVersion { + type Type = (String, String); + fn read(&self) -> anyhow::Result { + Ok(( + required(&self.namespace).context("namespace")?.clone(), + required(&self.protocol_version) + .context("protocol_version")? + .clone(), + )) + } + fn build(this: &Self::Type) -> Self { + Self { + namespace: Some(this.0.clone()), + protocol_version: Some(this.1.clone()), + } + } +} + +impl ProtoRepr for proto::ClusterPriority { + type Type = (String, u32); + fn read(&self) -> anyhow::Result { + Ok(( + required(&self.cluster).context("cluster")?.clone(), + *required(&self.priority).context("priority")?, + )) + } + fn build(this: &Self::Type) -> Self { + Self { + cluster: Some(this.0.clone()), + priority: Some(this.1), + } + } +} + +impl ProtoRepr for proto::ProverSpeed { + type Type = (Gpu, u32); + fn read(&self) -> anyhow::Result { + Ok(( + required(&self.gpu).context("gpu")?.parse()?, + *required(&self.speed).context("speed")?, + )) + } + fn build(this: &Self::Type) -> Self { + Self { + gpu: Some(this.0.to_string()), + speed: Some(this.1), + } + } +} + +impl ProtoRepr for proto::MaxProver { + type Type = (String, u32); + fn read(&self) -> anyhow::Result { + Ok(( + required(&self.cluster_and_gpu) + .context("cluster_and_gpu")? 
+ .parse()?, + *required(&self.max).context("max")?, + )) + } + fn build(this: &Self::Type) -> Self { + Self { + cluster_and_gpu: Some(this.0.to_string()), + max: Some(this.1), + } + } +} + +impl ProtoRepr for proto::MinProver { + type Type = (String, u32); + fn read(&self) -> anyhow::Result { + Ok(( + required(&self.namespace).context("namespace")?.clone(), + *required(&self.min).context("min")?, + )) + } + fn build(this: &Self::Type) -> Self { + Self { + namespace: Some(this.0.to_string()), + min: Some(this.1), + } + } +} diff --git a/core/lib/protobuf_config/src/secrets.rs b/core/lib/protobuf_config/src/secrets.rs index 5af7901a36a..b7e300ad910 100644 --- a/core/lib/protobuf_config/src/secrets.rs +++ b/core/lib/protobuf_config/src/secrets.rs @@ -2,7 +2,7 @@ use std::str::FromStr; use anyhow::Context; use secrecy::ExposeSecret; -use zksync_basic_types::{seed_phrase::SeedPhrase, url::SensitiveUrl}; +use zksync_basic_types::{api_key::APIKey, seed_phrase::SeedPhrase, url::SensitiveUrl}; use zksync_config::configs::{ consensus::{AttesterSecretKey, ConsensusSecrets, NodeSecretKey, ValidatorSecretKey}, da_client::avail::AvailSecrets, @@ -112,14 +112,31 @@ impl ProtoRepr for proto::DataAvailabilitySecrets { let secrets = required(&self.da_secrets).context("config")?; let client = match secrets { - DaSecrets::Avail(avail_secret) => DataAvailabilitySecrets::Avail(AvailSecrets { - seed_phrase: Some( - SeedPhrase::from_str( - required(&avail_secret.seed_phrase).context("seed_phrase")?, - ) - .unwrap(), - ), - }), + DaSecrets::Avail(avail_secret) => { + let seed_phrase = match avail_secret.seed_phrase.as_ref() { + Some(seed) => match SeedPhrase::from_str(seed) { + Ok(seed) => Some(seed), + Err(_) => None, + }, + None => None, + }; + let gas_relay_api_key = match avail_secret.gas_relay_api_key.as_ref() { + Some(api_key) => match APIKey::from_str(api_key) { + Ok(api_key) => Some(api_key), + Err(_) => None, + }, + None => None, + }; + if seed_phrase.is_none() && gas_relay_api_key.is_none() { + return Err(anyhow::anyhow!( + "At least one of seed_phrase or gas_relay_api_key must be provided" + )); + } + DataAvailabilitySecrets::Avail(AvailSecrets { + seed_phrase, + gas_relay_api_key, + }) + } }; Ok(client) @@ -142,7 +159,24 @@ impl ProtoRepr for proto::DataAvailabilitySecrets { None }; - Some(DaSecrets::Avail(AvailSecret { seed_phrase })) + let gas_relay_api_key = if config.gas_relay_api_key.is_some() { + Some( + config + .clone() + .gas_relay_api_key + .unwrap() + .0 + .expose_secret() + .to_string(), + ) + } else { + None + }; + + Some(DaSecrets::Avail(AvailSecret { + seed_phrase, + gas_relay_api_key, + })) } }; diff --git a/core/lib/protobuf_config/src/tests.rs b/core/lib/protobuf_config/src/tests.rs index d653b9b92bf..c72bce0bf9a 100644 --- a/core/lib/protobuf_config/src/tests.rs +++ b/core/lib/protobuf_config/src/tests.rs @@ -2,7 +2,7 @@ use std::{path::PathBuf, str::FromStr}; use zksync_protobuf::testonly::{test_encode_all_formats, ReprConv}; -use crate::{decode_yaml_repr, proto}; +use crate::{proto, read_yaml_repr}; /// Tests config <-> proto (boilerplate) conversions. #[test] @@ -60,14 +60,11 @@ fn test_encoding() { #[test] fn verify_file_parsing() { let base_path = PathBuf::from_str("../../../etc/env/file_based/").unwrap(); - decode_yaml_repr::(&base_path.join("general.yaml"), true) - .unwrap(); + read_yaml_repr::(&base_path.join("general.yaml"), true).unwrap(); // It's allowed to have unknown fields in wallets, e.g. 
we keep the private key for the fee account - decode_yaml_repr::<proto::wallets::Wallets>(&base_path.join("wallets.yaml"), false).unwrap(); - decode_yaml_repr::<proto::genesis::Genesis>(&base_path.join("genesis.yaml"), true).unwrap(); - decode_yaml_repr::<proto::contracts::Contracts>(&base_path.join("contracts.yaml"), true) - .unwrap(); - decode_yaml_repr::<proto::secrets::Secrets>(&base_path.join("secrets.yaml"), true).unwrap(); - decode_yaml_repr::<proto::en::ExternalNode>(&base_path.join("external_node.yaml"), true) - .unwrap(); + read_yaml_repr::<proto::wallets::Wallets>(&base_path.join("wallets.yaml"), false).unwrap(); + read_yaml_repr::<proto::genesis::Genesis>(&base_path.join("genesis.yaml"), true).unwrap(); + read_yaml_repr::<proto::contracts::Contracts>(&base_path.join("contracts.yaml"), true).unwrap(); + read_yaml_repr::<proto::secrets::Secrets>(&base_path.join("secrets.yaml"), true).unwrap(); + read_yaml_repr::<proto::en::ExternalNode>(&base_path.join("external_node.yaml"), true).unwrap(); } diff --git a/core/lib/prover_interface/src/api.rs b/core/lib/prover_interface/src/api.rs index 776cd3141cb..acf104cc4c6 100644 --- a/core/lib/prover_interface/src/api.rs +++ b/core/lib/prover_interface/src/api.rs @@ -31,7 +31,7 @@ pub enum ProofGenerationDataResponse { } #[derive(Debug, Serialize, Deserialize)] -pub struct TeeProofGenerationDataResponse(pub Option<Box<TeeVerifierInput>>); +pub struct TeeProofGenerationDataResponse(pub Box<TeeVerifierInput>); #[derive(Debug, Serialize, Deserialize)] pub enum SubmitProofResponse { diff --git a/core/lib/prover_interface/src/inputs.rs b/core/lib/prover_interface/src/inputs.rs index 22a20223c8b..cfc1d4a0d55 100644 --- a/core/lib/prover_interface/src/inputs.rs +++ b/core/lib/prover_interface/src/inputs.rs @@ -3,9 +3,9 @@ use std::{collections::HashMap, convert::TryInto, fmt::Debug}; use serde::{Deserialize, Serialize}; use serde_with::{serde_as, Bytes}; use zksync_multivm::interface::{L1BatchEnv, SystemEnv}; -use zksync_object_store::{serialize_using_bincode, Bucket, StoredObject}; +use zksync_object_store::{_reexports::BoxedError, serialize_using_bincode, Bucket, StoredObject}; use zksync_types::{ - basic_fri_types::Eip4844Blobs, block::L2BlockExecutionData, + basic_fri_types::Eip4844Blobs, block::L2BlockExecutionData, commitment::PubdataParams, witness_block_state::WitnessStorageState, L1BatchNumber, ProtocolVersionId, H256, U256, }; @@ -136,8 +136,25 @@ impl WitnessInputMerklePaths { } } -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub struct VMRunWitnessInputData { + pub l1_batch_number: L1BatchNumber, + pub used_bytecodes: HashMap<U256, Vec<u8>>, + pub initial_heap_content: Vec<(usize, U256)>, + pub protocol_version: ProtocolVersionId, + pub bootloader_code: Vec<[u8; 32]>, + pub default_account_code_hash: U256, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub evm_emulator_code_hash: Option<H256>, + pub storage_refunds: Vec<u32>, + pub pubdata_costs: Vec<i32>, + pub witness_block_state: WitnessStorageState, +} + +// `skip_serializing_if` for the `evm_emulator_code_hash` field doesn't play well with bincode, +// so we implement custom deserialization for it +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct VMRunWitnessInputDataLegacy { pub l1_batch_number: L1BatchNumber, pub used_bytecodes: HashMap<U256, Vec<u8>>, pub initial_heap_content: Vec<(usize, U256)>, @@ -149,6 +166,23 @@ pub struct VMRunWitnessInputData { pub witness_block_state: WitnessStorageState, } +impl From<VMRunWitnessInputDataLegacy> for VMRunWitnessInputData { + fn from(value: VMRunWitnessInputDataLegacy) -> Self { + Self { + l1_batch_number: value.l1_batch_number, + used_bytecodes: value.used_bytecodes, + initial_heap_content: value.initial_heap_content, + protocol_version: value.protocol_version, + bootloader_code: value.bootloader_code, +
default_account_code_hash: value.default_account_code_hash, + evm_emulator_code_hash: None, + storage_refunds: value.storage_refunds, + pubdata_costs: value.pubdata_costs, + witness_block_state: value.witness_block_state, + } + } +} + impl StoredObject for VMRunWitnessInputData { const BUCKET: Bucket = Bucket::WitnessInput; @@ -158,10 +192,20 @@ impl StoredObject for VMRunWitnessInputData { format!("vm_run_data_{key}.bin") } - serialize_using_bincode!(); + fn serialize(&self) -> Result<Vec<u8>, BoxedError> { + zksync_object_store::bincode::serialize(self).map_err(Into::into) + } + + fn deserialize(bytes: Vec<u8>) -> Result<Self, BoxedError> { + zksync_object_store::bincode::deserialize::<Self>(&bytes).or_else(|_| { + zksync_object_store::bincode::deserialize::<VMRunWitnessInputDataLegacy>(&bytes) + .map(Into::into) + .map_err(Into::into) + }) + } } -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub struct WitnessInputData { pub vm_run_data: VMRunWitnessInputData, pub merkle_paths: WitnessInputMerklePaths, @@ -169,6 +213,25 @@ pub struct WitnessInputData { pub eip_4844_blobs: Eip4844Blobs, } +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct WitnessInputDataLegacy { + pub vm_run_data: VMRunWitnessInputDataLegacy, + pub merkle_paths: WitnessInputMerklePaths, + pub previous_batch_metadata: L1BatchMetadataHashes, + pub eip_4844_blobs: Eip4844Blobs, +} + +impl From<WitnessInputDataLegacy> for WitnessInputData { + fn from(value: WitnessInputDataLegacy) -> Self { + Self { + vm_run_data: value.vm_run_data.into(), + merkle_paths: value.merkle_paths, + previous_batch_metadata: value.previous_batch_metadata, + eip_4844_blobs: value.eip_4844_blobs, + } + } +} + impl StoredObject for WitnessInputData { const BUCKET: Bucket = Bucket::WitnessInput; @@ -178,10 +241,20 @@ impl StoredObject for WitnessInputData { format!("witness_inputs_{key}.bin") } - serialize_using_bincode!(); + fn serialize(&self) -> Result<Vec<u8>, BoxedError> { + zksync_object_store::bincode::serialize(self).map_err(Into::into) + } + + fn deserialize(bytes: Vec<u8>) -> Result<Self, BoxedError> { + zksync_object_store::bincode::deserialize::<Self>(&bytes).or_else(|_| { + zksync_object_store::bincode::deserialize::<WitnessInputDataLegacy>(&bytes) + .map(Into::into) + .map_err(Into::into) + }) + } } -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub struct L1BatchMetadataHashes { pub root_hash: H256, pub meta_hash: H256, @@ -191,27 +264,30 @@ pub struct L1BatchMetadataHashes { /// Version 1 of the data used as input for the TEE verifier.
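// The two `StoredObject` impls above share a backward-compatibility pattern worth
// spelling out: decode the current bincode layout first, and only if that fails fall
// back to the legacy layout and upgrade it via `From`. A minimal self-contained sketch
// of the same idea, assuming bincode 1.x; `PayloadV1`, `PayloadV2`, and `decode_payload`
// are illustrative stand-ins, not types from this PR:
use serde::{Deserialize, Serialize};

#[derive(Debug, Serialize, Deserialize)]
struct PayloadV1 {
    value: u64,
}

#[derive(Debug, Serialize, Deserialize)]
struct PayloadV2 {
    value: u64,
    extra: Option<u64>, // field added in the new format
}

impl From<PayloadV1> for PayloadV2 {
    fn from(v1: PayloadV1) -> Self {
        Self { value: v1.value, extra: None }
    }
}

fn decode_payload(bytes: &[u8]) -> Result<PayloadV2, bincode::Error> {
    // Try the current layout; on failure, retry with the legacy layout and upgrade it.
    bincode::deserialize::<PayloadV2>(bytes)
        .or_else(|_| bincode::deserialize::<PayloadV1>(bytes).map(PayloadV2::from))
}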
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub struct V1TeeVerifierInput { - pub witness_input_merkle_paths: WitnessInputMerklePaths, + pub vm_run_data: VMRunWitnessInputData, + pub merkle_paths: WitnessInputMerklePaths, pub l2_blocks_execution_data: Vec, pub l1_batch_env: L1BatchEnv, pub system_env: SystemEnv, - pub used_contracts: Vec<(H256, Vec)>, + pub pubdata_params: PubdataParams, } impl V1TeeVerifierInput { pub fn new( - witness_input_merkle_paths: WitnessInputMerklePaths, + vm_run_data: VMRunWitnessInputData, + merkle_paths: WitnessInputMerklePaths, l2_blocks_execution_data: Vec, l1_batch_env: L1BatchEnv, system_env: SystemEnv, - used_contracts: Vec<(H256, Vec)>, + pubdata_params: PubdataParams, ) -> Self { V1TeeVerifierInput { - witness_input_merkle_paths, + vm_run_data, + merkle_paths, l2_blocks_execution_data, l1_batch_env, system_env, - used_contracts, + pubdata_params, } } } @@ -232,17 +308,6 @@ impl TeeVerifierInput { } } -impl StoredObject for TeeVerifierInput { - const BUCKET: Bucket = Bucket::TeeVerifierInput; - type Key<'a> = L1BatchNumber; - - fn encode_key(key: Self::Key<'_>) -> String { - format!("tee_verifier_input_for_l1_batch_{key}.bin") - } - - serialize_using_bincode!(); -} - #[cfg(test)] mod tests { use super::*; diff --git a/core/lib/snapshots_applier/src/tests/utils.rs b/core/lib/snapshots_applier/src/tests/utils.rs index 7c569f73a5a..cf68d2e181a 100644 --- a/core/lib/snapshots_applier/src/tests/utils.rs +++ b/core/lib/snapshots_applier/src/tests/utils.rs @@ -174,7 +174,6 @@ pub(super) fn mock_l2_block_header(l2_block_number: L2BlockNumber) -> L2BlockHea l1_tx_count: 0, l2_tx_count: 0, fee_account_address: Address::repeat_byte(1), - pubdata_params: Default::default(), base_fee_per_gas: 0, gas_per_pubdata_limit: 0, batch_fee_input: Default::default(), @@ -183,6 +182,7 @@ pub(super) fn mock_l2_block_header(l2_block_number: L2BlockNumber) -> L2BlockHea virtual_blocks: 0, gas_limit: 0, logs_bloom: Default::default(), + pubdata_params: Default::default(), } } diff --git a/core/lib/state/src/cache/lru_cache.rs b/core/lib/state/src/cache/lru_cache.rs index fa37bdb3e22..55b037bbb8c 100644 --- a/core/lib/state/src/cache/lru_cache.rs +++ b/core/lib/state/src/cache/lru_cache.rs @@ -46,6 +46,13 @@ where Self { name, cache } } + /// Returns the capacity of this cache in bytes. + pub fn capacity(&self) -> u64 { + self.cache + .as_ref() + .map_or(0, |cache| cache.policy().max_capacity().unwrap_or(u64::MAX)) + } + /// Gets an entry and pulls it to the front if it exists. 
pub fn get(&self, key: &K) -> Option { let latency = METRICS.latency[&(self.name, Method::Get)].start(); diff --git a/core/lib/state/src/postgres/mod.rs b/core/lib/state/src/postgres/mod.rs index 67866634ee4..f689f1487f3 100644 --- a/core/lib/state/src/postgres/mod.rs +++ b/core/lib/state/src/postgres/mod.rs @@ -72,8 +72,7 @@ impl CacheValue for TimestampedStorageValue { #[allow(clippy::cast_possible_truncation)] // doesn't happen in practice fn cache_weight(&self) -> u32 { const WEIGHT: usize = mem::size_of::() + mem::size_of::(); - // ^ Since values are small in size, we want to account for key sizes as well - + // ^ Since values are small, we want to account for key sizes as well WEIGHT as u32 } } @@ -114,6 +113,14 @@ impl ValuesCache { Self(Arc::new(RwLock::new(inner))) } + fn capacity(&self) -> u64 { + self.0 + .read() + .expect("values cache is poisoned") + .values + .capacity() + } + /// *NB.* The returned value should be considered immediately stale; at best, it can be /// the lower boundary on the current `valid_for` value. fn valid_for(&self) -> L2BlockNumber { @@ -154,80 +161,86 @@ impl ValuesCache { } } + fn reset( + &self, + from_l2_block: L2BlockNumber, + to_l2_block: L2BlockNumber, + ) -> anyhow::Result<()> { + // We can spend too much time loading data from Postgres, so we opt for an easier "update" route: + // evict *everything* from cache and call it a day. This should not happen too often in practice. + tracing::info!( + "Storage values cache is too far behind (current L2 block is {from_l2_block}; \ + requested update to {to_l2_block}); resetting the cache" + ); + let mut lock = self + .0 + .write() + .map_err(|_| anyhow::anyhow!("values cache is poisoned"))?; + anyhow::ensure!( + lock.valid_for == from_l2_block, + "sanity check failed: values cache was expected to be valid for L2 block #{from_l2_block}, but it's actually \ + valid for L2 block #{}", + lock.valid_for + ); + lock.valid_for = to_l2_block; + lock.values.clear(); + + CACHE_METRICS.values_emptied.inc(); + CACHE_METRICS + .values_valid_for_miniblock + .set(u64::from(to_l2_block.0)); + Ok(()) + } + async fn update( &self, from_l2_block: L2BlockNumber, to_l2_block: L2BlockNumber, connection: &mut Connection<'_, Core>, ) -> anyhow::Result<()> { - const MAX_L2_BLOCKS_LAG: u32 = 5; - tracing::debug!( "Updating storage values cache from L2 block {from_l2_block} to {to_l2_block}" ); - if to_l2_block.0 - from_l2_block.0 > MAX_L2_BLOCKS_LAG { - // We can spend too much time loading data from Postgres, so we opt for an easier "update" route: - // evict *everything* from cache and call it a day. This should not happen too often in practice. 
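// The `reset`/`update` split around here boils down to a small policy: if the cache
// lags the chain by more than a threshold, wipe it wholesale; otherwise invalidate only
// the keys modified in the missed blocks. A standalone sketch of that policy, with
// `SlotCache` and `advance` as illustrative stand-ins and `max_lag` playing the role of
// `max_l2_blocks_lag`:
use std::collections::HashMap;

struct SlotCache {
    valid_for: u32,            // last L2 block this cache is consistent with
    values: HashMap<u64, u64>, // hashed storage key -> value (simplified)
}

impl SlotCache {
    fn advance(&mut self, to_block: u32, modified_keys: &[u64], max_lag: u32) {
        if to_block - self.valid_for > max_lag {
            // Too far behind: evicting everything is cheaper than replaying per-block diffs.
            self.values.clear();
        } else {
            // Close enough: drop only the keys touched since `valid_for`.
            for key in modified_keys {
                self.values.remove(key);
            }
        }
        self.valid_for = to_block;
    }
}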
- tracing::info!( - "Storage values cache is too far behind (current L2 block is {from_l2_block}; \ - requested update to {to_l2_block}); resetting the cache" - ); - let mut lock = self - .0 - .write() - .map_err(|_| anyhow::anyhow!("values cache is poisoned"))?; - anyhow::ensure!( - lock.valid_for == from_l2_block, - "sanity check failed: values cache was expected to be valid for L2 block #{from_l2_block}, but it's actually \ - valid for L2 block #{}", - lock.valid_for - ); - lock.valid_for = to_l2_block; - lock.values.clear(); + let update_latency = CACHE_METRICS.values_update[&ValuesUpdateStage::LoadKeys].start(); + let l2_blocks = (from_l2_block + 1)..=to_l2_block; + let modified_keys = connection + .storage_logs_dal() + .modified_keys_in_l2_blocks(l2_blocks.clone()) + .await?; - CACHE_METRICS.values_emptied.inc(); - } else { - let update_latency = CACHE_METRICS.values_update[&ValuesUpdateStage::LoadKeys].start(); - let l2_blocks = (from_l2_block + 1)..=to_l2_block; - let modified_keys = connection - .storage_logs_dal() - .modified_keys_in_l2_blocks(l2_blocks.clone()) - .await?; - - let elapsed = update_latency.observe(); - CACHE_METRICS - .values_update_modified_keys - .observe(modified_keys.len()); - tracing::debug!( - "Loaded {modified_keys_len} modified storage keys from L2 blocks {l2_blocks:?}; \ - took {elapsed:?}", - modified_keys_len = modified_keys.len() - ); + let elapsed = update_latency.observe(); + CACHE_METRICS + .values_update_modified_keys + .observe(modified_keys.len()); + tracing::debug!( + "Loaded {modified_keys_len} modified storage keys from L2 blocks {l2_blocks:?}; \ + took {elapsed:?}", + modified_keys_len = modified_keys.len() + ); - let update_latency = - CACHE_METRICS.values_update[&ValuesUpdateStage::RemoveStaleKeys].start(); - let mut lock = self - .0 - .write() - .map_err(|_| anyhow::anyhow!("values cache is poisoned"))?; - // The code below holding onto the write `lock` is the only code that can theoretically poison the `RwLock` - // (other than emptying the cache above). Thus, it's kept as simple and tight as possible. - // E.g., we load data from Postgres beforehand. - anyhow::ensure!( - lock.valid_for == from_l2_block, - "sanity check failed: values cache was expected to be valid for L2 block #{from_l2_block}, but it's actually \ - valid for L2 block #{}", - lock.valid_for - ); - lock.valid_for = to_l2_block; - for modified_key in &modified_keys { - lock.values.remove(modified_key); - } - lock.values.report_size(); - drop(lock); - update_latency.observe(); + let update_latency = + CACHE_METRICS.values_update[&ValuesUpdateStage::RemoveStaleKeys].start(); + let mut lock = self + .0 + .write() + .map_err(|_| anyhow::anyhow!("values cache is poisoned"))?; + // The code below holding onto the write `lock` is the only code that can theoretically poison the `RwLock` + // (other than emptying the cache above). Thus, it's kept as simple and tight as possible. + // E.g., we load data from Postgres beforehand. 
+ anyhow::ensure!( + lock.valid_for == from_l2_block, + "sanity check failed: values cache was expected to be valid for L2 block #{from_l2_block}, but it's actually \ + valid for L2 block #{}", + lock.valid_for + ); + lock.valid_for = to_l2_block; + for modified_key in &modified_keys { + lock.values.remove(modified_key); } + lock.values.report_size(); + drop(lock); + update_latency.observe(); CACHE_METRICS .values_valid_for_miniblock @@ -298,6 +311,7 @@ impl PostgresStorageCaches { pub fn configure_storage_values_cache( &mut self, capacity: u64, + max_l2_blocks_lag: u32, connection_pool: ConnectionPool, ) -> PostgresStorageCachesTask { assert!( @@ -320,6 +334,7 @@ impl PostgresStorageCaches { PostgresStorageCachesTask { connection_pool, values_cache, + max_l2_blocks_lag, command_receiver, } } @@ -349,6 +364,7 @@ impl PostgresStorageCaches { pub struct PostgresStorageCachesTask { connection_pool: ConnectionPool, values_cache: ValuesCache, + max_l2_blocks_lag: u32, command_receiver: UnboundedReceiver, } @@ -359,32 +375,41 @@ impl PostgresStorageCachesTask { /// /// - Propagates Postgres errors. /// - Propagates errors from the cache update task. + #[tracing::instrument(name = "PostgresStorageCachesTask::run", skip_all)] pub async fn run(mut self, mut stop_receiver: watch::Receiver) -> anyhow::Result<()> { + tracing::info!( + max_l2_blocks_lag = self.max_l2_blocks_lag, + values_cache.capacity = self.values_cache.capacity(), + "Starting task" + ); + let mut current_l2_block = self.values_cache.valid_for(); loop { - tokio::select! { - _ = stop_receiver.changed() => { - break; - } - Some(to_l2_block) = self.command_receiver.recv() => { - if to_l2_block <= current_l2_block { - continue; - } - let mut connection = self - .connection_pool - .connection_tagged("values_cache_updater") - .await?; - self.values_cache - .update(current_l2_block, to_l2_block, &mut connection) - .await?; - current_l2_block = to_l2_block; - } + let to_l2_block = tokio::select! { + _ = stop_receiver.changed() => break, + Some(to_l2_block) = self.command_receiver.recv() => to_l2_block, else => { // The command sender has been dropped, which means that we must receive the stop signal soon. 
stop_receiver.changed().await?; break; } + }; + if to_l2_block <= current_l2_block { + continue; + } + + if to_l2_block.0 - current_l2_block.0 > self.max_l2_blocks_lag { + self.values_cache.reset(current_l2_block, to_l2_block)?; + } else { + let mut connection = self + .connection_pool + .connection_tagged("values_cache_updater") + .await?; + self.values_cache + .update(current_l2_block, to_l2_block, &mut connection) + .await?; } + current_l2_block = to_l2_block; } Ok(()) } diff --git a/core/lib/state/src/postgres/tests.rs b/core/lib/state/src/postgres/tests.rs index f88055fa047..029df60cb46 100644 --- a/core/lib/state/src/postgres/tests.rs +++ b/core/lib/state/src/postgres/tests.rs @@ -462,7 +462,7 @@ async fn wait_for_cache_update(values_cache: &ValuesCache, target_l2_block: L2Bl fn test_values_cache(pool: &ConnectionPool, rt_handle: Handle) { let mut caches = PostgresStorageCaches::new(1_024, 1_024); - let task = caches.configure_storage_values_cache(1_024 * 1_024, pool.clone()); + let task = caches.configure_storage_values_cache(1_024 * 1_024, 5, pool.clone()); let (stop_sender, stop_receiver) = watch::channel(false); let update_task_handle = tokio::task::spawn(task.run(stop_receiver)); @@ -595,7 +595,7 @@ fn mini_fuzz_values_cache_inner( mut rt_handle: Handle, ) { let mut caches = PostgresStorageCaches::new(1_024, 1_024); - let _ = caches.configure_storage_values_cache(1_024 * 1_024, pool.clone()); + let _ = caches.configure_storage_values_cache(1_024 * 1_024, 5, pool.clone()); let values_cache = caches.values.as_ref().unwrap().cache.clone(); let mut connection = rt_handle.block_on(pool.connection()).unwrap(); diff --git a/core/lib/state/src/test_utils.rs b/core/lib/state/src/test_utils.rs index 5406cbb7ddf..a12508f615f 100644 --- a/core/lib/state/src/test_utils.rs +++ b/core/lib/state/src/test_utils.rs @@ -80,7 +80,6 @@ pub(crate) async fn create_l2_block( l1_tx_count: 0, l2_tx_count: 0, fee_account_address: Address::default(), - pubdata_params: Default::default(), base_fee_per_gas: 0, batch_fee_input: Default::default(), gas_per_pubdata_limit: 0, @@ -89,6 +88,7 @@ pub(crate) async fn create_l2_block( virtual_blocks: 0, gas_limit: 0, logs_bloom: Default::default(), + pubdata_params: Default::default(), }; conn.blocks_dal() diff --git a/core/lib/tee_verifier/Cargo.toml b/core/lib/tee_verifier/Cargo.toml index 6828eeef8b1..331c47e365e 100644 --- a/core/lib/tee_verifier/Cargo.toml +++ b/core/lib/tee_verifier/Cargo.toml @@ -11,18 +11,21 @@ keywords.workspace = true categories.workspace = true [dependencies] -zksync_multivm.workspace = true zksync_config.workspace = true zksync_crypto_primitives.workspace = true zksync_merkle_tree.workspace = true -zksync_object_store.workspace = true +zksync_multivm.workspace = true zksync_prover_interface.workspace = true zksync_types.workspace = true zksync_utils.workspace = true anyhow.workspace = true +once_cell.workspace = true serde.workspace = true tracing.workspace = true [dev-dependencies] zksync_contracts.workspace = true +zksync_prover_interface.workspace = true + +bincode.workspace = true diff --git a/core/lib/tee_verifier/src/lib.rs b/core/lib/tee_verifier/src/lib.rs index 595480687e9..140085dbb9f 100644 --- a/core/lib/tee_verifier/src/lib.rs +++ b/core/lib/tee_verifier/src/lib.rs @@ -4,27 +4,29 @@ //! executing the VM and verifying all the accessed memory slots by their //! merkle path. 
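// In outline, the reworked verifier below proceeds in four steps. This is a schematic
// sketch of the control flow only; all four helpers are hypothetical stand-ins for the
// real `verify` internals, with storage and logs reduced to plain integers:
fn rebuild_storage_snapshot() -> Vec<(u64, u64)> {
    Vec::new() // 1. hashed key -> initial value, recovered from the witness block state
}
fn execute_batch(_snapshot: Vec<(u64, u64)>) -> Result<Vec<u64>, String> {
    Ok(Vec::new()) // 2. storage logs produced by re-running every L2 block in the VM
}
fn decode_merkle_paths() -> Result<Vec<u64>, String> {
    Ok(Vec::new()) // 3. expected tree log entries carried by the shipped merkle paths
}
fn compare(_vm_logs: Vec<u64>, _expected: Vec<u64>) -> Result<(), String> {
    Ok(()) // 4. replay the logs against the proofs and compare root hashes
}

fn verify_outline() -> Result<(), String> {
    let snapshot = rebuild_storage_snapshot();
    let vm_logs = execute_batch(snapshot)?;
    let expected = decode_merkle_paths()?;
    compare(vm_logs, expected)
}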
-use std::{cell::RefCell, rc::Rc}; - -use anyhow::Context; +use anyhow::{bail, Context, Result}; use zksync_crypto_primitives::hasher::blake2::Blake2Hasher; use zksync_merkle_tree::{ BlockOutputWithProofs, TreeInstruction, TreeLogEntry, TreeLogEntryWithProof, ValueHash, }; use zksync_multivm::{ interface::{ - storage::{InMemoryStorage, ReadStorage, StorageView}, + storage::{ReadStorage, StorageSnapshot, StorageView}, FinishedL1Batch, L2BlockEnv, VmFactory, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, }, + pubdata_builders::pubdata_params_to_builder, vm_latest::HistoryEnabled, LegacyVmInstance, }; use zksync_prover_interface::inputs::{ StorageLogMetadata, V1TeeVerifierInput, WitnessInputMerklePaths, }; -use zksync_types::{block::L2BlockExecutionData, L1BatchNumber, StorageLog, Transaction, H256}; -use zksync_utils::bytecode::hash_bytecode; +use zksync_types::{ + block::L2BlockExecutionData, commitment::PubdataParams, L1BatchNumber, StorageLog, + StorageValue, Transaction, H256, +}; +use zksync_utils::u256_to_h256; /// A structure to hold the result of verification. pub struct VerificationResult { @@ -50,29 +52,47 @@ impl Verify for V1TeeVerifierInput { /// not actionable. fn verify(self) -> anyhow::Result { let old_root_hash = self.l1_batch_env.previous_batch_hash.unwrap(); - let l2_chain_id = self.system_env.chain_id; - let enumeration_index = self.witness_input_merkle_paths.next_enumeration_index(); + let enumeration_index = self.merkle_paths.next_enumeration_index(); + let batch_number = self.l1_batch_env.number; - let mut raw_storage = InMemoryStorage::with_custom_system_contracts_and_chain_id( - l2_chain_id, - hash_bytecode, - Vec::with_capacity(0), - ); + let read_storage_ops = self + .vm_run_data + .witness_block_state + .read_storage_key + .into_iter(); - for (hash, bytes) in self.used_contracts.into_iter() { - tracing::trace!("raw_storage.store_factory_dep({hash}, bytes)"); - raw_storage.store_factory_dep(hash, bytes) - } + let initial_writes_ops = self + .vm_run_data + .witness_block_state + .is_write_initial + .into_iter(); - let block_output_with_proofs = - get_bowp_and_set_initial_values(self.witness_input_merkle_paths, &mut raw_storage); + // We need to define storage slots read during batch execution, and their initial state; + // hence, the use of both read_storage_ops and initial_writes_ops. + // StorageSnapshot also requires providing enumeration indices, + // but they only matter at the end of execution when creating pubdata for the batch, + // which is irrelevant in this case. Thus, enumeration indices are set to dummy values. 
+ let storage = read_storage_ops + .enumerate() + .map(|(i, (hash, bytes))| (hash.hashed_key(), Some((bytes, i as u64 + 1u64)))) + .chain(initial_writes_ops.filter_map(|(key, initial_write)| { + initial_write.then_some((key.hashed_key(), None)) + })) + .collect(); - let storage_view = Rc::new(RefCell::new(StorageView::new(&raw_storage))); + let factory_deps = self + .vm_run_data + .used_bytecodes + .into_iter() + .map(|(hash, bytes)| (u256_to_h256(hash), bytes.into_flattened())) + .collect(); - let batch_number = self.l1_batch_env.number; + let storage_snapshot = StorageSnapshot::new(storage, factory_deps); + let storage_view = StorageView::new(storage_snapshot).to_rc_ptr(); let vm = LegacyVmInstance::new(self.l1_batch_env, self.system_env, storage_view); + let vm_out = execute_vm(self.l2_blocks_execution_data, vm, self.pubdata_params)?; - let vm_out = execute_vm(self.l2_blocks_execution_data, vm)?; + let block_output_with_proofs = get_bowp(self.merkle_paths)?; let instructions: Vec = generate_tree_instructions(enumeration_index, &block_output_with_proofs, vm_out)?; @@ -89,11 +109,8 @@ impl Verify for V1TeeVerifierInput { } /// Sets the initial storage values and returns `BlockOutputWithProofs` -fn get_bowp_and_set_initial_values( - witness_input_merkle_paths: WitnessInputMerklePaths, - raw_storage: &mut InMemoryStorage, -) -> BlockOutputWithProofs { - let logs = witness_input_merkle_paths +fn get_bowp(witness_input_merkle_paths: WitnessInputMerklePaths) -> Result { + let logs_result: Result<_, _> = witness_input_merkle_paths .into_merkle_paths() .map( |StorageLogMetadata { @@ -110,29 +127,31 @@ fn get_bowp_and_set_initial_values( let merkle_path = merkle_paths.into_iter().map(|x| x.into()).collect(); let base: TreeLogEntry = match (is_write, first_write, leaf_enumeration_index) { (false, _, 0) => TreeLogEntry::ReadMissingKey, - (false, _, _) => { + (false, false, _) => { // This is a special U256 here, which needs `to_little_endian` let mut hashed_key = [0_u8; 32]; leaf_storage_key.to_little_endian(&mut hashed_key); - raw_storage.set_value_hashed_enum( - hashed_key.into(), - leaf_enumeration_index, - value_read.into(), + tracing::trace!( + "TreeLogEntry::Read {leaf_storage_key:x} = {:x}", + StorageValue::from(value_read) ); TreeLogEntry::Read { leaf_index: leaf_enumeration_index, value: value_read.into(), } } + (false, true, _) => { + tracing::error!("get_bowp is_write = false, first_write = true"); + bail!("get_bowp is_write = false, first_write = true"); + } (true, true, _) => TreeLogEntry::Inserted, (true, false, _) => { // This is a special U256 here, which needs `to_little_endian` let mut hashed_key = [0_u8; 32]; leaf_storage_key.to_little_endian(&mut hashed_key); - raw_storage.set_value_hashed_enum( - hashed_key.into(), - leaf_enumeration_index, - value_read.into(), + tracing::trace!( + "TreeLogEntry::Updated {leaf_storage_key:x} = {:x}", + StorageValue::from(value_read) ); TreeLogEntry::Updated { leaf_index: leaf_enumeration_index, @@ -140,25 +159,28 @@ fn get_bowp_and_set_initial_values( } } }; - TreeLogEntryWithProof { + Ok(TreeLogEntryWithProof { base, merkle_path, root_hash, - } + }) }, ) .collect(); - BlockOutputWithProofs { + let logs: Vec = logs_result?; + + Ok(BlockOutputWithProofs { logs, leaf_count: 0, - } + }) } /// Executes the VM and returns `FinishedL1Batch` on success. 
fn execute_vm( l2_blocks_execution_data: Vec, mut vm: LegacyVmInstance, + pubdata_params: PubdataParams, ) -> anyhow::Result { let next_l2_blocks_data = l2_blocks_execution_data.iter().skip(1); @@ -176,12 +198,18 @@ fn execute_vm( .context("failed to execute transaction in TeeVerifierInputProducer")?; tracing::trace!("Finished execution of tx: {tx:?}"); } + + tracing::trace!("finished l2_block {l2_block_data:?}"); + tracing::trace!("about to vm.start_new_l2_block {next_l2_block_data:?}"); + vm.start_new_l2_block(L2BlockEnv::from_l2_block_data(next_l2_block_data)); tracing::trace!("Finished execution of l2_block: {:?}", l2_block_data.number); } - Ok(vm.finish_batch()) + tracing::trace!("about to vm.finish_batch()"); + + Ok(vm.finish_batch(pubdata_params_to_builder(pubdata_params))) } /// Map `LogQuery` and `TreeLogEntry` to a `TreeInstruction` @@ -191,7 +219,7 @@ fn map_log_tree( idx: &mut u64, ) -> anyhow::Result { let key = storage_log.key.hashed_key_u256(); - Ok(match (storage_log.is_write(), *tree_log_entry) { + let tree_instruction = match (storage_log.is_write(), *tree_log_entry) { (true, TreeLogEntry::Updated { leaf_index, .. }) => { TreeInstruction::write(key, leaf_index, H256(storage_log.value.into())) } @@ -203,24 +231,31 @@ fn map_log_tree( (false, TreeLogEntry::Read { value, .. }) => { if storage_log.value != value { tracing::error!( - "Failed to map LogQuery to TreeInstruction: {:#?} != {:#?}", - storage_log.value, - value - ); - anyhow::bail!( - "Failed to map LogQuery to TreeInstruction: {:#?} != {:#?}", + ?storage_log, + ?tree_log_entry, + "Failed to map LogQuery to TreeInstruction: read value {:#?} != {:#?}", storage_log.value, value ); + anyhow::bail!("Failed to map LogQuery to TreeInstruction"); } TreeInstruction::Read(key) } (false, TreeLogEntry::ReadMissingKey { .. }) => TreeInstruction::Read(key), - _ => { - tracing::error!("Failed to map LogQuery to TreeInstruction"); + (true, TreeLogEntry::Read { .. }) + | (true, TreeLogEntry::ReadMissingKey) + | (false, TreeLogEntry::Inserted) + | (false, TreeLogEntry::Updated { .. }) => { + tracing::error!( + ?storage_log, + ?tree_log_entry, + "Failed to map LogQuery to TreeInstruction" + ); anyhow::bail!("Failed to map LogQuery to TreeInstruction"); } - }) + }; + + Ok(tree_instruction) } /// Generates the `TreeInstruction`s from the VM executions. 
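// A condensed model of the `(is_write, TreeLogEntry)` mapping implemented by
// `map_log_tree` above. Types are simplified stand-ins: the real code uses
// `zksync_merkle_tree` types and assigns fresh enumeration indices to inserts.
#[derive(Debug)]
enum LogEntry {
    Inserted,
    Updated { leaf_index: u64 },
    Read { value: u64 },
    ReadMissingKey,
}

#[derive(Debug, PartialEq)]
enum Instruction {
    Write { key: u64, leaf_index: u64 },
    Read { key: u64 },
}

fn map_entry(key: u64, is_write: bool, entry: &LogEntry, vm_value: u64) -> Result<Instruction, String> {
    match (is_write, entry) {
        (true, LogEntry::Inserted) => Ok(Instruction::Write { key, leaf_index: 0 }), // real code uses the next enumeration index
        (true, LogEntry::Updated { leaf_index }) => Ok(Instruction::Write { key, leaf_index: *leaf_index }),
        (false, LogEntry::Read { value }) if *value == vm_value => Ok(Instruction::Read { key }),
        (false, LogEntry::Read { .. }) => Err("read value mismatch".into()),
        (false, LogEntry::ReadMissingKey) => Ok(Instruction::Read { key }),
        // Any other combination means the VM run and the merkle paths disagree.
        _ => Err("log/entry kind mismatch".into()),
    }
}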
@@ -269,8 +304,7 @@ fn execute_tx( mod tests { use zksync_contracts::{BaseSystemContracts, SystemContractCode}; use zksync_multivm::interface::{L1BatchEnv, SystemEnv, TxExecutionMode}; - use zksync_object_store::StoredObject; - use zksync_prover_interface::inputs::TeeVerifierInput; + use zksync_prover_interface::inputs::{TeeVerifierInput, VMRunWitnessInputData}; use zksync_types::U256; use super::*; @@ -278,6 +312,18 @@ mod tests { #[test] fn test_v1_serialization() { let tvi = V1TeeVerifierInput::new( + VMRunWitnessInputData { + l1_batch_number: Default::default(), + used_bytecodes: Default::default(), + initial_heap_content: vec![], + protocol_version: Default::default(), + bootloader_code: vec![], + default_account_code_hash: Default::default(), + evm_emulator_code_hash: Some(Default::default()), + storage_refunds: vec![], + pubdata_costs: vec![], + witness_block_state: Default::default(), + }, WitnessInputMerklePaths::new(0), vec![], L1BatchEnv { @@ -306,21 +352,19 @@ mod tests { code: vec![U256([1; 4])], hash: H256([1; 32]), }, + evm_emulator: None, }, bootloader_gas_limit: 0, execution_mode: TxExecutionMode::VerifyExecute, default_validation_computational_gas_limit: 0, chain_id: Default::default(), - pubdata_params: Default::default(), }, - vec![(H256([1; 32]), vec![0, 1, 2, 3, 4])], + Default::default(), ); let tvi = TeeVerifierInput::new(tvi); - let serialized = ::serialize(&tvi) - .expect("Failed to serialize TeeVerifierInput."); + let serialized = bincode::serialize(&tvi).expect("Failed to serialize TeeVerifierInput."); let deserialized: TeeVerifierInput = - ::deserialize(serialized) - .expect("Failed to deserialize TeeVerifierInput."); + bincode::deserialize(&serialized).expect("Failed to deserialize TeeVerifierInput."); assert_eq!(tvi, deserialized); } diff --git a/core/lib/types/Cargo.toml b/core/lib/types/Cargo.toml index 84ad10b5bbb..5176d90cfd4 100644 --- a/core/lib/types/Cargo.toml +++ b/core/lib/types/Cargo.toml @@ -11,12 +11,12 @@ keywords.workspace = true categories.workspace = true [dependencies] +# **IMPORTANT.** Please do not add dependency on `zksync_config` etc. This crate has a heavy dependency graph as is. zksync_system_constants.workspace = true zksync_utils.workspace = true zksync_basic_types.workspace = true zksync_contracts.workspace = true zksync_mini_merkle_tree.workspace = true -zksync_config.workspace = true zksync_protobuf.workspace = true zksync_crypto_primitives.workspace = true @@ -40,7 +40,6 @@ ethabi.workspace = true tracing.workspace = true # Crypto stuff -secp256k1.workspace = true blake2.workspace = true [dev-dependencies] diff --git a/core/lib/types/src/api/en.rs b/core/lib/types/src/api/en.rs index 52d66a29458..daaa5651a03 100644 --- a/core/lib/types/src/api/en.rs +++ b/core/lib/types/src/api/en.rs @@ -66,3 +66,9 @@ pub struct ConsensusGenesis(pub serde_json::Value); /// The wrapped JSON value corresponds to `zksync_dal::consensus::AttestationStatus`. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct AttestationStatus(pub serde_json::Value); + +/// Block metadata that should have been committed to on L1, but it is not. +/// +/// The wrapped JSON value corresponds to `zksync_dal::consensus::BlockMetadata`. 
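// `BlockMetadata` below follows the same convention as `ConsensusGenesis` and
// `AttestationStatus`: the EN API wraps the payload in an opaque `serde_json::Value`
// newtype, so `zksync_types` does not have to depend on the DAL types it mirrors.
// A tiny self-contained illustration of why such a newtype round-trips transparently;
// `Opaque` and `round_trip` are illustrative, not part of this PR:
use serde_json::json;

#[derive(Debug, serde::Serialize, serde::Deserialize)]
struct Opaque(pub serde_json::Value);

fn round_trip() -> serde_json::Result<()> {
    let wrapped = Opaque(json!({ "number": 42 }));
    // A newtype serializes exactly as its inner value, with no extra nesting, so the
    // producer can evolve the payload without touching this DTO.
    assert_eq!(serde_json::to_string(&wrapped)?, r#"{"number":42}"#);
    let _back: Opaque = serde_json::from_str(r#"{"number":42}"#)?;
    Ok(())
}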
+#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct BlockMetadata(pub serde_json::Value); diff --git a/core/lib/types/src/api/mod.rs b/core/lib/types/src/api/mod.rs index e80bed9a9d0..ff24667aa2e 100644 --- a/core/lib/types/src/api/mod.rs +++ b/core/lib/types/src/api/mod.rs @@ -15,8 +15,9 @@ pub use crate::transaction_request::{ Eip712Meta, SerializationTransactionError, TransactionRequest, }; use crate::{ - debug_flat_call::DebugCallFlat, protocol_version::L1VerifierConfig, Address, L2BlockNumber, - ProtocolVersionId, + debug_flat_call::{DebugCallFlat, ResultDebugCallFlat}, + protocol_version::L1VerifierConfig, + Address, L2BlockNumber, ProtocolVersionId, }; pub mod en; @@ -706,7 +707,7 @@ pub struct ProtocolVersion { /// Verifier configuration #[deprecated] pub verification_keys_hashes: Option, - /// Hashes of base system contracts (bootloader and default account) + /// Hashes of base system contracts (bootloader, default account and evm emulator) #[deprecated] pub base_system_contracts: Option, /// Bootloader code hash @@ -715,6 +716,9 @@ pub struct ProtocolVersion { /// Default account code hash #[serde(rename = "defaultAccountCodeHash")] pub default_account_code_hash: Option, + /// EVM emulator code hash + #[serde(rename = "evmSimulatorCodeHash")] + pub evm_emulator_code_hash: Option, /// L2 Upgrade transaction hash #[deprecated] pub l2_system_upgrade_tx_hash: Option, @@ -730,6 +734,7 @@ impl ProtocolVersion { timestamp: u64, bootloader_code_hash: H256, default_account_code_hash: H256, + evm_emulator_code_hash: Option, l2_system_upgrade_tx_hash: Option, ) -> Self { Self { @@ -740,9 +745,11 @@ impl ProtocolVersion { base_system_contracts: Some(BaseSystemContractsHashes { bootloader: bootloader_code_hash, default_aa: default_account_code_hash, + evm_emulator: evm_emulator_code_hash, }), bootloader_code_hash: Some(bootloader_code_hash), default_account_code_hash: Some(default_account_code_hash), + evm_emulator_code_hash, l2_system_upgrade_tx_hash, l2_system_upgrade_tx_hash_new: l2_system_upgrade_tx_hash, } @@ -758,6 +765,13 @@ impl ProtocolVersion { .or_else(|| self.base_system_contracts.map(|hashes| hashes.default_aa)) } + pub fn evm_emulator_code_hash(&self) -> Option { + self.evm_emulator_code_hash.or_else(|| { + self.base_system_contracts + .and_then(|hashes| hashes.evm_emulator) + }) + } + pub fn minor_version(&self) -> Option { self.minor_version.or(self.version_id) } @@ -813,11 +827,11 @@ pub enum BlockStatus { #[serde(untagged)] pub enum CallTracerBlockResult { CallTrace(Vec), - FlatCallTrace(Vec), + FlatCallTrace(Vec), } impl CallTracerBlockResult { - pub fn unwrap_flat(self) -> Vec { + pub fn unwrap_flat(self) -> Vec { match self { Self::CallTrace(_) => panic!("Result is a FlatCallTrace"), Self::FlatCallTrace(trace) => trace, @@ -991,6 +1005,7 @@ mod tests { base_system_contracts: Some(Default::default()), bootloader_code_hash: Some(Default::default()), default_account_code_hash: Some(Default::default()), + evm_emulator_code_hash: Some(Default::default()), l2_system_upgrade_tx_hash: Default::default(), l2_system_upgrade_tx_hash_new: Default::default(), }; diff --git a/core/lib/types/src/api/state_override.rs b/core/lib/types/src/api/state_override.rs index a2497a65c53..f2986610840 100644 --- a/core/lib/types/src/api/state_override.rs +++ b/core/lib/types/src/api/state_override.rs @@ -21,6 +21,11 @@ impl StateOverride { self.0.get(address) } + /// Gets mutable overrides for the specified account. 
+ pub fn get_mut(&mut self, address: &Address) -> Option<&mut OverrideAccount> { + self.0.get_mut(address) + } + /// Iterates over all account overrides. pub fn iter(&self) -> impl Iterator<Item = (&Address, &OverrideAccount)> + '_ { self.0.iter() @@ -48,6 +53,12 @@ impl Bytecode { } } +impl AsRef<[u8]> for Bytecode { + fn as_ref(&self) -> &[u8] { + &self.0 .0 + } +} + impl Serialize for Bytecode { fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> { self.0.serialize(serializer) diff --git a/core/lib/types/src/block.rs b/core/lib/types/src/block.rs index 1c38e9b1abd..310e3a73b8e 100644 --- a/core/lib/types/src/block.rs +++ b/core/lib/types/src/block.rs @@ -65,6 +65,28 @@ pub struct L1BatchHeader { /// Version of protocol used for the L1 batch. pub protocol_version: Option<ProtocolVersionId>, pub pubdata_input: Option<Vec<u8>>, + pub fee_address: Address, +} + +impl L1BatchHeader { + pub fn to_unsealed_header(&self, fee_input: BatchFeeInput) -> UnsealedL1BatchHeader { + UnsealedL1BatchHeader { + number: self.number, + timestamp: self.timestamp, + protocol_version: self.protocol_version, + fee_address: self.fee_address, + fee_input, + } + } +} + +#[derive(Debug, Clone, PartialEq)] +pub struct UnsealedL1BatchHeader { + pub number: L1BatchNumber, + pub timestamp: u64, + pub protocol_version: Option<ProtocolVersionId>, + pub fee_address: Address, + pub fee_input: BatchFeeInput, } /// Holder for the L2 block metadata that is not available from transactions themselves. @@ -81,7 +103,6 @@ pub struct L2BlockHeader { pub batch_fee_input: BatchFeeInput, pub gas_per_pubdata_limit: u64, pub base_system_contracts_hashes: BaseSystemContractsHashes, - pub pubdata_params: PubdataParams, pub protocol_version: Option<ProtocolVersionId>, /// The maximal number of virtual blocks to be created in the L2 block. pub virtual_blocks: u32, @@ -92,6 +113,7 @@ pub struct L2BlockHeader { /// amount of gas can be spent on pubdata. pub gas_limit: u64, pub logs_bloom: Bloom, + pub pubdata_params: PubdataParams, } /// Structure that represents the data returned by the storage oracle during batch execution.
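// `to_unsealed_header` above projects a sealed header onto the subset of fields known
// before the batch is closed, plus the fee input chosen when the batch is opened. A
// simplified self-contained model of that projection; `SealedHeader` and
// `UnsealedHeader` are illustrative stand-ins, not the real `L1BatchHeader` types:
#[derive(Debug, Clone)]
struct SealedHeader {
    number: u32,
    timestamp: u64,
    fee_address: [u8; 20],
}

#[derive(Debug, Clone)]
struct UnsealedHeader {
    number: u32,
    timestamp: u64,
    fee_address: [u8; 20],
    fee_input: u64, // stand-in for `BatchFeeInput`
}

impl SealedHeader {
    // Mirrors the shape of `L1BatchHeader::to_unsealed_header`: copy the identity
    // fields and attach the caller-supplied fee input.
    fn to_unsealed(&self, fee_input: u64) -> UnsealedHeader {
        UnsealedHeader {
            number: self.number,
            timestamp: self.timestamp,
            fee_address: self.fee_address,
            fee_input,
        }
    }
}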
@@ -133,6 +155,7 @@ impl L1BatchHeader { system_logs: vec![], protocol_version: Some(protocol_version), pubdata_input: Some(vec![]), + fee_address: Default::default(), } } diff --git a/core/lib/types/src/commitment/mod.rs b/core/lib/types/src/commitment/mod.rs index a238430bf50..99f6c04d131 100644 --- a/core/lib/types/src/commitment/mod.rs +++ b/core/lib/types/src/commitment/mod.rs @@ -10,7 +10,7 @@ use std::{collections::HashMap, convert::TryFrom}; use ethabi::Token; use serde::{Deserialize, Serialize}; -pub use zksync_basic_types::{commitment::*, web3::contract::Tokenize}; +pub use zksync_basic_types::commitment::*; use zksync_contracts::BaseSystemContractsHashes; use zksync_crypto_primitives::hasher::{keccak::KeccakHasher, Hasher}; use zksync_mini_merkle_tree::MiniMerkleTree; @@ -24,8 +24,8 @@ use crate::{ blob::num_blobs_required, block::{L1BatchHeader, L1BatchTreeData}, l2_to_l1_log::{ - l2_to_l1_logs_tree_size, parse_system_logs_for_blob_hashes, L2ToL1Log, SystemL2ToL1Log, - UserL2ToL1Log, + l2_to_l1_logs_tree_size, parse_system_logs_for_blob_hashes_pre_gateway, L2ToL1Log, + SystemL2ToL1Log, UserL2ToL1Log, }, web3::keccak256, writes::{ @@ -303,6 +303,13 @@ pub struct L1BatchAuxiliaryCommonOutput { protocol_version: ProtocolVersionId, } +#[derive(Debug, Default, Clone, Copy, Eq, PartialEq)] +#[cfg_attr(test, derive(Serialize, Deserialize))] +pub struct BlobHash { + pub commitment: H256, + pub linear_hash: H256, +} + /// Block Output produced by Virtual Machine #[derive(Debug, Clone, Eq, PartialEq)] #[cfg_attr(test, derive(Serialize, Deserialize))] @@ -321,9 +328,8 @@ pub enum L1BatchAuxiliaryOutput { state_diffs_compressed: Vec, state_diffs_hash: H256, aux_commitments: AuxCommitments, - blob_linear_hashes: Vec, - blob_commitments: Vec, - aggregated_root: H256, + blob_hashes: Vec, + aggregation_root: H256, local_root: H256, }, } @@ -373,9 +379,8 @@ impl L1BatchAuxiliaryOutput { system_logs, state_diffs, aux_commitments, - blob_commitments, - blob_linear_hashes, - aggregated_root, + blob_hashes, + aggregation_root, } => { let l2_l1_logs_compressed = serialize_commitments(&common_input.l2_to_l1_logs); let merkle_tree_leaves = l2_l1_logs_compressed @@ -386,11 +391,10 @@ impl L1BatchAuxiliaryOutput { Some(l2_to_l1_logs_tree_size(common_input.protocol_version)), ) .merkle_root(); - let l2_l1_logs_merkle_root = if common_input.protocol_version.is_pre_gateway() { local_root } else { - KeccakHasher.compress(&local_root, &aggregated_root) + KeccakHasher.compress(&local_root, &aggregation_root) }; let common_output = L1BatchAuxiliaryCommonOutput { @@ -420,10 +424,13 @@ impl L1BatchAuxiliaryOutput { "State diff hash mismatch" ); - let blob_linear_hashes_from_logs = parse_system_logs_for_blob_hashes( - &common_input.protocol_version, - &system_logs, - ); + let blob_linear_hashes_from_logs = + parse_system_logs_for_blob_hashes_pre_gateway( + &common_input.protocol_version, + &system_logs, + ); + let blob_linear_hashes: Vec<_> = + blob_hashes.iter().map(|b| b.linear_hash).collect(); assert_eq!( blob_linear_hashes, blob_linear_hashes_from_logs, "Blob linear hashes mismatch" @@ -443,40 +450,42 @@ impl L1BatchAuxiliaryOutput { ); } - assert_eq!( - blob_linear_hashes.len(), - blob_commitments.len(), - "Blob linear hashes and commitments have different lengths" - ); - Self::PostBoojum { common: common_output, system_logs_linear_hash, state_diffs_compressed, state_diffs_hash, aux_commitments, - blob_linear_hashes, - blob_commitments, + blob_hashes, local_root, - aggregated_root, + aggregation_root, } } } 
} - pub fn get_local_root(&self) -> H256 { + pub fn local_root(&self) -> H256 { match self { Self::PreBoojum { common, .. } => common.l2_l1_logs_merkle_root, Self::PostBoojum { local_root, .. } => *local_root, } } - pub fn get_aggregated_root(&self) -> H256 { + pub fn aggregation_root(&self) -> H256 { match self { Self::PreBoojum { .. } => H256::zero(), Self::PostBoojum { - aggregated_root, .. - } => *aggregated_root, + aggregation_root, .. + } => *aggregation_root, + } + } + + pub fn state_diff_hash(&self) -> H256 { + match self { + Self::PreBoojum { .. } => H256::zero(), + Self::PostBoojum { + state_diffs_hash, .. + } => *state_diffs_hash, } } @@ -500,8 +509,7 @@ impl L1BatchAuxiliaryOutput { system_logs_linear_hash, state_diffs_hash, aux_commitments, - blob_linear_hashes, - blob_commitments, + blob_hashes, .. } => { result.extend(system_logs_linear_hash.as_bytes()); @@ -513,9 +521,9 @@ impl L1BatchAuxiliaryOutput { ); result.extend(aux_commitments.events_queue_commitment.as_bytes()); - for i in 0..blob_commitments.len() { - result.extend(blob_linear_hashes[i].as_bytes()); - result.extend(blob_commitments[i].as_bytes()); + for b in blob_hashes { + result.extend(b.linear_hash.as_bytes()); + result.extend(b.commitment.as_bytes()); } } } @@ -541,6 +549,7 @@ pub struct L1BatchMetaParameters { pub zkporter_is_available: bool, pub bootloader_code_hash: H256, pub default_aa_code_hash: H256, + pub evm_emulator_code_hash: Option, pub protocol_version: Option, } @@ -556,10 +565,11 @@ impl L1BatchMetaParameters { .protocol_version .map_or(false, |ver| ver.is_post_1_5_0()) { - // EVM simulator hash for now is the same as the default AA hash. - result.extend(self.default_aa_code_hash.as_bytes()); + let evm_emulator_code_hash = self + .evm_emulator_code_hash + .unwrap_or(self.default_aa_code_hash); + result.extend(evm_emulator_code_hash.as_bytes()); } - result } @@ -625,6 +635,7 @@ impl L1BatchCommitment { zkporter_is_available: ZKPORTER_IS_AVAILABLE, bootloader_code_hash: input.common().bootloader_code_hash, default_aa_code_hash: input.common().default_aa_code_hash, + evm_emulator_code_hash: input.common().evm_emulator_code_hash, protocol_version: Some(input.common().protocol_version), }; @@ -700,24 +711,17 @@ impl L1BatchCommitment { ), }; - let state_diff_hash = match &self.auxiliary_output { - L1BatchAuxiliaryOutput::PostBoojum { - state_diffs_hash, .. - } => *state_diffs_hash, - L1BatchAuxiliaryOutput::PreBoojum { .. 
} => H256::zero(), - }; - L1BatchCommitmentArtifacts { commitment_hash: self.hash(), l2_l1_merkle_root: self.l2_l1_logs_merkle_root(), compressed_state_diffs, zkporter_is_available: self.meta_parameters.zkporter_is_available, aux_commitments: self.aux_commitments(), - state_diff_hash, compressed_initial_writes, compressed_repeated_writes, - local_root: self.auxiliary_output.get_local_root(), - aggregation_root: self.auxiliary_output.get_aggregated_root(), + local_root: self.auxiliary_output.local_root(), + aggregation_root: self.auxiliary_output.aggregation_root(), + state_diff_hash: self.auxiliary_output.state_diff_hash(), } } } @@ -737,6 +741,7 @@ pub struct CommitmentCommonInput { pub rollup_root_hash: H256, pub bootloader_code_hash: H256, pub default_aa_code_hash: H256, + pub evm_emulator_code_hash: Option, pub protocol_version: ProtocolVersionId, } @@ -753,9 +758,8 @@ pub enum CommitmentInput { system_logs: Vec, state_diffs: Vec, aux_commitments: AuxCommitments, - blob_commitments: Vec, - blob_linear_hashes: Vec, - aggregated_root: H256, + blob_hashes: Vec, + aggregation_root: H256, }, } @@ -779,6 +783,7 @@ impl CommitmentInput { rollup_root_hash, bootloader_code_hash: base_system_contracts_hashes.bootloader, default_aa_code_hash: base_system_contracts_hashes.default_aa, + evm_emulator_code_hash: base_system_contracts_hashes.evm_emulator, protocol_version, }; if protocol_version.is_pre_boojum() { @@ -796,17 +801,11 @@ impl CommitmentInput { events_queue_commitment: H256::zero(), bootloader_initial_content_commitment: H256::zero(), }, - blob_commitments: { - let num_blobs = num_blobs_required(&protocol_version); - - vec![H256::zero(); num_blobs] - }, - blob_linear_hashes: { + blob_hashes: { let num_blobs = num_blobs_required(&protocol_version); - - vec![H256::zero(); num_blobs] + vec![Default::default(); num_blobs] }, - aggregated_root: H256::zero(), + aggregation_root: H256::zero(), } } } @@ -816,12 +815,12 @@ impl CommitmentInput { pub struct L1BatchCommitmentArtifacts { pub commitment_hash: L1BatchCommitmentHash, pub l2_l1_merkle_root: H256, - pub aggregation_root: H256, - pub local_root: H256, pub compressed_state_diffs: Option>, pub compressed_initial_writes: Option>, pub compressed_repeated_writes: Option>, - pub state_diff_hash: H256, pub zkporter_is_available: bool, pub aux_commitments: Option, + pub aggregation_root: H256, + pub local_root: H256, + pub state_diff_hash: H256, } diff --git a/core/lib/types/src/commitment/tests/mod.rs b/core/lib/types/src/commitment/tests/mod.rs index b416e21c4ab..a95318309a2 100644 --- a/core/lib/types/src/commitment/tests/mod.rs +++ b/core/lib/types/src/commitment/tests/mod.rs @@ -51,6 +51,11 @@ fn post_boojum_1_5_0() { run_test("post_boojum_1_5_0_test"); } +#[test] +fn post_boojum_1_5_0_with_evm() { + run_test("post_boojum_1_5_0_test_with_evm"); +} + #[test] fn post_gateway() { run_test("post_gateway_test"); diff --git a/core/lib/types/src/commitment/tests/post_boojum_1_4_1_test.json b/core/lib/types/src/commitment/tests/post_boojum_1_4_1_test.json index 87f7c9e51ea..c854a6e77d8 100644 --- a/core/lib/types/src/commitment/tests/post_boojum_1_4_1_test.json +++ b/core/lib/types/src/commitment/tests/post_boojum_1_4_1_test.json @@ -190,15 +190,17 @@ "events_queue_commitment": "0x6193a5098eb140796387bdf40700a3855eeb010474b5478f30bf917172c67883", "bootloader_initial_content_commitment": "0xf031b4491c37f20516c4ebf428f4765156409f67089e64772f4106fd2d9f3351" }, - "blob_commitments": [ - 
"0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000" - ], - "blob_linear_hashes": [ - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000" + "blob_hashes": [ + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } ], - "aggregated_root": "0x0000000000000000000000000000000000000000000000000000000000000000" + "aggregation_root": "0x0000000000000000000000000000000000000000000000000000000000000000" } }, "pass_through_data": { @@ -253,15 +255,17 @@ "events_queue_commitment": "0x6193a5098eb140796387bdf40700a3855eeb010474b5478f30bf917172c67883", "bootloader_initial_content_commitment": "0xf031b4491c37f20516c4ebf428f4765156409f67089e64772f4106fd2d9f3351" }, - "blob_linear_hashes": [ - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000" - ], - "blob_commitments": [ - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000" + "blob_hashes": [ + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } ], - "aggregated_root": "0x0000000000000000000000000000000000000000000000000000000000000000", + "aggregation_root": "0x0000000000000000000000000000000000000000000000000000000000000000", "local_root": "0xe52d57bd64cabf6c588b30365512da2bf10912c106e7a06483b236d05ac4037e" } }, diff --git a/core/lib/types/src/commitment/tests/post_boojum_1_4_2_test.json b/core/lib/types/src/commitment/tests/post_boojum_1_4_2_test.json index 4ae5b361b7c..96aa8ab842c 100644 --- a/core/lib/types/src/commitment/tests/post_boojum_1_4_2_test.json +++ b/core/lib/types/src/commitment/tests/post_boojum_1_4_2_test.json @@ -206,15 +206,17 @@ "events_queue_commitment": "0x6193a5098eb140796387bdf40700a3855eeb010474b5478f30bf917172c67883", "bootloader_initial_content_commitment": "0xf031b4491c37f20516c4ebf428f4765156409f67089e64772f4106fd2d9f3351" }, - "blob_commitments": [ - "0x0000000000000000000000000000000000000000000000000000000000000001", - "0x0000000000000000000000000000000000000000000000000000000000000002" - ], - "blob_linear_hashes": [ - "0x0000000000000000000000000000000000000000000000000000000000000003", - "0x0000000000000000000000000000000000000000000000000000000000000004" + "blob_hashes": [ + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000001", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000003" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000002", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000004" + } ], - "aggregated_root": "0x0000000000000000000000000000000000000000000000000000000000000000" + "aggregation_root": 
"0x0000000000000000000000000000000000000000000000000000000000000000" } }, "pass_through_data": { @@ -269,15 +271,17 @@ "events_queue_commitment": "0x6193a5098eb140796387bdf40700a3855eeb010474b5478f30bf917172c67883", "bootloader_initial_content_commitment": "0xf031b4491c37f20516c4ebf428f4765156409f67089e64772f4106fd2d9f3351" }, - "blob_linear_hashes": [ - "0x0000000000000000000000000000000000000000000000000000000000000003", - "0x0000000000000000000000000000000000000000000000000000000000000004" - ], - "blob_commitments": [ - "0x0000000000000000000000000000000000000000000000000000000000000001", - "0x0000000000000000000000000000000000000000000000000000000000000002" + "blob_hashes": [ + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000001", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000003" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000002", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000004" + } ], - "aggregated_root": "0x0000000000000000000000000000000000000000000000000000000000000000", + "aggregation_root": "0x0000000000000000000000000000000000000000000000000000000000000000", "local_root": "0x0b6e1ad4643cc2bee06b5e173184ec822d80826e5720f5715172898350433299" } }, diff --git a/core/lib/types/src/commitment/tests/post_boojum_1_5_0_test.json b/core/lib/types/src/commitment/tests/post_boojum_1_5_0_test.json index 78f7afb372d..ed61ea67cef 100644 --- a/core/lib/types/src/commitment/tests/post_boojum_1_5_0_test.json +++ b/core/lib/types/src/commitment/tests/post_boojum_1_5_0_test.json @@ -238,43 +238,73 @@ "events_queue_commitment": "0x6193a5098eb140796387bdf40700a3855eeb010474b5478f30bf917172c67883", "bootloader_initial_content_commitment": "0xf031b4491c37f20516c4ebf428f4765156409f67089e64772f4106fd2d9f3351" }, - "blob_commitments": [ - "0x0000000000000000000000000000000000000000000000000000000000000001", - "0x0000000000000000000000000000000000000000000000000000000000000002", - "0x0000000000000000000000000000000000000000000000000000000000000003", - "0x0000000000000000000000000000000000000000000000000000000000000004", - "0x0000000000000000000000000000000000000000000000000000000000000005", - "0x0000000000000000000000000000000000000000000000000000000000000006", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000" - ], - "blob_linear_hashes": [ - "0x0000000000000000000000000000000000000000000000000000000000000003", - "0x0000000000000000000000000000000000000000000000000000000000000004", - "0x0000000000000000000000000000000000000000000000000000000000000005", - "0x0000000000000000000000000000000000000000000000000000000000000006", - "0x0000000000000000000000000000000000000000000000000000000000000007", - "0x0000000000000000000000000000000000000000000000000000000000000008", - 
"0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000" + "blob_hashes": [ + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000001", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000003" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000002", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000004" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000003", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000005" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000004", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000006" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000005", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000007" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000006", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000008" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + 
"linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } ], - "aggregated_root": "0x0000000000000000000000000000000000000000000000000000000000000000" + "aggregation_root": "0x0000000000000000000000000000000000000000000000000000000000000000" } }, "pass_through_data": { @@ -329,43 +359,73 @@ "events_queue_commitment": "0x6193a5098eb140796387bdf40700a3855eeb010474b5478f30bf917172c67883", "bootloader_initial_content_commitment": "0xf031b4491c37f20516c4ebf428f4765156409f67089e64772f4106fd2d9f3351" }, - "blob_linear_hashes": [ - "0x0000000000000000000000000000000000000000000000000000000000000003", - "0x0000000000000000000000000000000000000000000000000000000000000004", - "0x0000000000000000000000000000000000000000000000000000000000000005", - "0x0000000000000000000000000000000000000000000000000000000000000006", - "0x0000000000000000000000000000000000000000000000000000000000000007", - "0x0000000000000000000000000000000000000000000000000000000000000008", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000" - ], - "blob_commitments": [ - "0x0000000000000000000000000000000000000000000000000000000000000001", - "0x0000000000000000000000000000000000000000000000000000000000000002", - "0x0000000000000000000000000000000000000000000000000000000000000003", - "0x0000000000000000000000000000000000000000000000000000000000000004", - "0x0000000000000000000000000000000000000000000000000000000000000005", - "0x0000000000000000000000000000000000000000000000000000000000000006", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000" + "blob_hashes": [ + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000001", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000003" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000002", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000004" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000003", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000005" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000004", + 
"linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000006" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000005", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000007" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000006", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000008" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } ], - "aggregated_root": "0x0000000000000000000000000000000000000000000000000000000000000000", + "aggregation_root": "0x0000000000000000000000000000000000000000000000000000000000000000", "local_root": "0x30ba728b1aac22b122de4f32589dd2711da264412cb90e35bf7b1f735dd357ff" } }, diff --git a/core/lib/types/src/commitment/tests/post_boojum_1_5_0_test_with_evm.json b/core/lib/types/src/commitment/tests/post_boojum_1_5_0_test_with_evm.json new file mode 100644 index 00000000000..a41aa33c04a --- /dev/null +++ b/core/lib/types/src/commitment/tests/post_boojum_1_5_0_test_with_evm.json @@ -0,0 +1,440 @@ +{ + "input": { + "PostBoojum": { + "common": { + "l2_to_l1_logs": [ + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 0, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x7814f203b8e02f6a676b8f7faefcf732d8b4368bab25239ea4525010aa85d5ee", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + } + ], + "rollup_last_leaf_index": 89, + "rollup_root_hash": "0xe47f013d1ecd4ce53b6872f6b762670b393815e7ddacdf2b0886af9c7f3a555b", + "bootloader_code_hash": "0x010007ed0e328b940e241f7666a6303b7ffd4e3fd7e8c154d6e7556befe6cd6d", + "default_aa_code_hash": "0x0100055b7a8be90522251be8be1a186464d056462973502ac8a0437c85e4d2a9", + 
"evm_emulator_code_hash": "0x01000e53aa35d9d19fa99341c2e2901cf93b3668f01569dd5c6ca409c7696b91", + "protocol_version": "Version23" + }, + "system_logs": [ + { + "shard_id": 0, + "is_service": false, + "tx_number_in_block": 0, + "sender": "0x000000000000000000000000000000000000800b", + "key": "0x0000000000000000000000000000000000000000000000000000000000000004", + "value": "0x55618db5ff24aee4d236921b6f4272101161137115a3b4c4a65f8677b124c01c" + }, + { + "shard_id": 0, + "is_service": false, + "tx_number_in_block": 1, + "sender": "0x000000000000000000000000000000000000800b", + "key": "0x0000000000000000000000000000000000000000000000000000000000000003", + "value": "0x00000000000000000000000065c22f8000000000000000000000000065c22f81" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x0000000000000000000000000000000000000000000000000000000000000005", + "value": "0x155c82febe94e07df0065c153e8ed403b5351fd64d657c8dffbfbee8ec3d2ba3" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008001", + "key": "0x0000000000000000000000000000000000000000000000000000000000000006", + "value": "0x0000000000000000000000000000000000000000000000000000000000000001" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008008", + "key": "0x0000000000000000000000000000000000000000000000000000000000000000", + "value": "0x30ba728b1aac22b122de4f32589dd2711da264412cb90e35bf7b1f735dd357ff" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008008", + "key": "0x0000000000000000000000000000000000000000000000000000000000000001", + "value": "0x85a7fb853512ba6575c99ee121dd560559523a4587a2cd7e83cd359cd9ea2aed" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008008", + "key": "0x0000000000000000000000000000000000000000000000000000000000000002", + "value": "0xb18f72a4a5b4b8ce1b7e41095fb1332a211a140376bcc2607910875d236708e0" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008011", + "key": "0x0000000000000000000000000000000000000000000000000000000000000007", + "value": "0x0000000000000000000000000000000000000000000000000000000000000003" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008011", + "key": "0x0000000000000000000000000000000000000000000000000000000000000008", + "value": "0x0000000000000000000000000000000000000000000000000000000000000004" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008011", + "key": "0x0000000000000000000000000000000000000000000000000000000000000008", + "value": "0x0000000000000000000000000000000000000000000000000000000000000005" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008011", + "key": "0x0000000000000000000000000000000000000000000000000000000000000008", + "value": "0x0000000000000000000000000000000000000000000000000000000000000006" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008011", + "key": 
"0x0000000000000000000000000000000000000000000000000000000000000008", + "value": "0x0000000000000000000000000000000000000000000000000000000000000007" + }, + { + "shard_id": 0, + "is_service": true, + "tx_number_in_block": 1, + "sender": "0x0000000000000000000000000000000000008011", + "key": "0x0000000000000000000000000000000000000000000000000000000000000008", + "value": "0x0000000000000000000000000000000000000000000000000000000000000008" + } + ], + "state_diffs": [ + { + "address": "0x000000000000000000000000000000000000800a", + "key": "0x1", + "derived_key": [ + 113, 233, 23, 33, 249, 145, 133, 118, 215, 96, 240, 47, 3, 202, 196, + 124, 111, 64, 3, 49, 96, 49, 132, 142, 60, 29, 153, 230, 232, 58, + 71, 67 + ], + "enumeration_index": 49, + "initial_value": "0x18776f28c303800", + "final_value": "0x708da482cab20760" + }, + { + "address": "0x000000000000000000000000000000000000800a", + "key": "0x294a00337abeee2b3cd948ffeed92231e2a3acc2eb11210400e0aa9557f23e26", + "derived_key": [ + 45, 90, 105, 98, 204, 206, 229, 212, 173, 180, 138, 54, 187, 191, + 68, 58, 83, 23, 33, 72, 67, 129, 18, 89, 55, 243, 0, 26, 197, 255, + 135, 91 + ], + "enumeration_index": 50, + "initial_value": "0xf5559e28fd66c0", + "final_value": "0xf5a19b324caf80" + }, + { + "address": "0x000000000000000000000000000000000000800a", + "key": "0xeaa2b2fbf0b42c559059e5e9510edc15755f1c1883f0e41d5ba5f9aea4ac201a", + "derived_key": [ + 141, 97, 126, 192, 90, 203, 191, 95, 226, 69, 41, 166, 75, 35, 133, + 169, 106, 173, 67, 240, 155, 225, 173, 169, 44, 112, 64, 49, 220, + 193, 72, 27 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x6f05e193353286a0" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0x7", + "derived_key": [ + 18, 59, 175, 197, 134, 247, 119, 100, 72, 140, 210, 76, 106, 119, + 84, 110, 90, 15, 232, 189, 251, 79, 162, 3, 207, 175, 252, 54, 204, + 228, 221, 91 + ], + "enumeration_index": 53, + "initial_value": "0x100000000000000000000000065c22e3e", + "final_value": "0x200000000000000000000000065c22f80" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0x9", + "derived_key": [ + 142, 125, 208, 106, 197, 183, 59, 71, 59, 230, 188, 90, 81, 3, 15, + 76, 116, 55, 101, 124, 183, 178, 155, 243, 118, 197, 100, 184, 209, + 103, 90, 94 + ], + "enumeration_index": 54, + "initial_value": "0x200000000000000000000000065c22e3f", + "final_value": "0x400000000000000000000000065c22f81" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0xd", + "derived_key": [ + 235, 221, 239, 221, 164, 142, 178, 170, 127, 102, 236, 247, 148, 10, + 40, 14, 158, 243, 251, 46, 149, 219, 9, 149, 83, 132, 64, 166, 42, + 247, 152, 97 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0xebbe609cd3ccd11f273eb94374d6d3a2f7856c5f1039dc4877c6a334188ac7c1" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0xe", + "derived_key": [ + 70, 64, 215, 56, 69, 54, 78, 198, 145, 246, 222, 251, 96, 106, 58, + 114, 253, 165, 215, 173, 51, 209, 125, 4, 153, 90, 142, 37, 44, 74, + 6, 216 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x708e7fcf68ebab6c87322686cac4bcdb5f2bd4c71f337b18d147fd9a6c44ad13" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0x10c", + "derived_key": [ + 121, 9, 53, 136, 208, 232, 71, 239, 167, 58, 16, 206, 32, 228, 121, + 159, 177, 228, 102, 66, 214, 86, 23, 199, 229, 33, 63, 160, 73, 137, + 217, 45 + ], + "enumeration_index": 57, + 
"initial_value": "0x200000000000000000000000065c22e3f", + "final_value": "0x400000000000000000000000065c22f81" + }, + { + "address": "0x000000000000000000000000000000000000800b", + "key": "0xad67d757c34507f157cacfa2e3153e9f260a2244f30428821be7be64587ac55f", + "derived_key": [ + 12, 194, 74, 180, 47, 190, 197, 49, 125, 155, 26, 44, 164, 124, 169, + 185, 59, 158, 195, 109, 121, 142, 253, 124, 218, 167, 57, 36, 22, + 48, 203, 70 + ], + "enumeration_index": 0, + "initial_value": "0x0", + "final_value": "0x55618db5ff24aee4d236921b6f4272101161137115a3b4c4a65f8677b124c01c" + } + ], + "aux_commitments": { + "events_queue_commitment": "0x6193a5098eb140796387bdf40700a3855eeb010474b5478f30bf917172c67883", + "bootloader_initial_content_commitment": "0xf031b4491c37f20516c4ebf428f4765156409f67089e64772f4106fd2d9f3351" + }, + "blob_hashes": [ + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000001", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000003" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000002", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000004" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000003", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000005" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000004", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000006" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000005", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000007" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000006", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000008" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, 
+ { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + ], + "aggregation_root": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + }, + "pass_through_data": { + "shared_states": [ + { + "last_leaf_index": 89, + "root_hash": "0xe47f013d1ecd4ce53b6872f6b762670b393815e7ddacdf2b0886af9c7f3a555b" + }, + { + "last_leaf_index": 0, + "root_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + ] + }, + "meta_parameters": { + "zkporter_is_available": false, + "bootloader_code_hash": "0x010007ed0e328b940e241f7666a6303b7ffd4e3fd7e8c154d6e7556befe6cd6d", + "default_aa_code_hash": "0x0100055b7a8be90522251be8be1a186464d056462973502ac8a0437c85e4d2a9", + "evm_emulator_code_hash": "0x01000e53aa35d9d19fa99341c2e2901cf93b3668f01569dd5c6ca409c7696b91", + "protocol_version": "Version23" + }, + "auxiliary_output": { + "PostBoojum": { + "common": { + "l2_l1_logs_merkle_root": "0x30ba728b1aac22b122de4f32589dd2711da264412cb90e35bf7b1f735dd357ff", + "protocol_version": "Version23" + }, + "system_logs_linear_hash": "0x602dacc0a26e3347f0679924c4ae151ff5200e7dd80902fe0fc11c806c4d3ffb", + "state_diffs_compressed": [ + 1, 0, 1, 72, 4, 0, 4, 141, 97, 126, 192, 90, 203, 191, 95, 226, 69, 41, + 166, 75, 35, 133, 169, 106, 173, 67, 240, 155, 225, 173, 169, 44, 112, + 64, 49, 220, 193, 72, 27, 65, 111, 5, 225, 147, 53, 50, 134, 160, 235, + 221, 239, 221, 164, 142, 178, 170, 127, 102, 236, 247, 148, 10, 40, 14, + 158, 243, 251, 46, 149, 219, 9, 149, 83, 132, 64, 166, 42, 247, 152, 97, + 0, 235, 190, 96, 156, 211, 204, 209, 31, 39, 62, 185, 67, 116, 214, 211, + 162, 247, 133, 108, 95, 16, 57, 220, 72, 119, 198, 163, 52, 24, 138, + 199, 193, 70, 64, 215, 56, 69, 54, 78, 198, 145, 246, 222, 251, 96, 106, + 58, 114, 253, 165, 215, 173, 51, 209, 125, 4, 153, 90, 142, 37, 44, 74, + 6, 216, 0, 112, 142, 127, 207, 104, 235, 171, 108, 135, 50, 38, 134, + 202, 196, 188, 219, 95, 43, 212, 199, 31, 51, 123, 24, 209, 71, 253, + 154, 108, 68, 173, 19, 12, 194, 74, 180, 47, 190, 197, 49, 125, 155, 26, + 44, 164, 124, 169, 185, 59, 158, 195, 109, 121, 142, 253, 124, 218, 167, + 57, 36, 22, 48, 203, 70, 0, 85, 97, 141, 181, 255, 36, 174, 228, 210, + 54, 146, 27, 111, 66, 114, 16, 17, 97, 19, 113, 21, 163, 180, 196, 166, + 95, 134, 119, 177, 36, 192, 28, 0, 0, 0, 49, 65, 111, 6, 45, 144, 62, + 129, 207, 96, 0, 0, 0, 50, 49, 75, 253, 9, 79, 72, 192, 0, 0, 0, 53, + 137, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 66, 0, 0, 0, 54, + 137, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 66, 0, 0, 0, 57, + 137, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 66 + ], + "state_diffs_hash": "0xb18f72a4a5b4b8ce1b7e41095fb1332a211a140376bcc2607910875d236708e0", + "aux_commitments": { + "events_queue_commitment": "0x6193a5098eb140796387bdf40700a3855eeb010474b5478f30bf917172c67883", + "bootloader_initial_content_commitment": "0xf031b4491c37f20516c4ebf428f4765156409f67089e64772f4106fd2d9f3351" + }, + "blob_hashes": [ + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000001", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000003" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000002", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000004" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000003", + "linear_hash": 
"0x0000000000000000000000000000000000000000000000000000000000000005" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000004", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000006" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000005", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000007" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000006", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000008" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } + ], + "aggregation_root": "0x0000000000000000000000000000000000000000000000000000000000000000", + "local_root": "0x30ba728b1aac22b122de4f32589dd2711da264412cb90e35bf7b1f735dd357ff" + } + }, + "hashes": { + "pass_through_data": "0x6a3ffc0f55d4abce9498b8bcb01a3018bc2b83d96acb27e23772fe9347954725", + "aux_output": "0xadc63d9c45f85598f3e3c232970315d1f6ac96222e379e16ced7a204524a4061", + "meta_parameters": "0x02531e5cc22688523a4ac9317e5097743771f6914015cf1152491cf22084bd58", + "commitment": "0x4fdd8c5b231dfc9fc81aba744a90fbec78627f529ac29f9fc758a7b9e62fa321" + } +} diff --git a/core/lib/types/src/commitment/tests/post_gateway_test.json b/core/lib/types/src/commitment/tests/post_gateway_test.json index a421bd41f95..4b598ff59f4 100644 --- a/core/lib/types/src/commitment/tests/post_gateway_test.json +++ b/core/lib/types/src/commitment/tests/post_gateway_test.json @@ -9,7 +9,7 @@ "PostBoojum": { "common": { "l2_l1_logs_merkle_root": "0x38eaeef3afe69b6f6b2fa22c92da8137f1e405a1e1861b7de7cfa30c7d7462dd", - "protocol_version": "Version25" + "protocol_version": "Version27" }, "system_logs_linear_hash": 
"0xe8460ce1ed47b77cfee3cadf803aa089c144c506ea2bdd358a6a38ff2c7bc8e3", "state_diffs_compressed": [ @@ -20,43 +20,73 @@ "events_queue_commitment": "0xec82208c87a937d88768a0067b2a80f0525eca8288dad2cf96cf8bbe6a1aa565", "bootloader_initial_content_commitment": "0x97df88dcecbcd29b49773c042cdee7a44c57a741e64913fff5aa1b3484232f28" }, - "blob_linear_hashes": [ - "0xff4feb4bef9401731ab9db3626c2e015baa6880d7b1c4382d03b30da3a0fd75e", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000" - ], - "blob_commitments": [ - "0xf840cf3f6b7dc92729b2b9ef3b399e7b896d553b746362fe81c4eb911013570d", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000" + "blob_hashes": [ + { + "commitment": "0xf840cf3f6b7dc92729b2b9ef3b399e7b896d553b746362fe81c4eb911013570d", + "linear_hash": "0xff4feb4bef9401731ab9db3626c2e015baa6880d7b1c4382d03b30da3a0fd75e" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": 
"0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } ], - "aggregated_root": "0x0924928c1377a6cf24c39c2d46f8eb9df23e811b26dc3527e548396fd4e173b1", + "aggregation_root": "0x0924928c1377a6cf24c39c2d46f8eb9df23e811b26dc3527e548396fd4e173b1", "local_root": "0xd4790efa9052ea67dcb473de870e3522e2fc340374e6293ad4646fde312c8c76" } }, @@ -64,7 +94,7 @@ "zkporter_is_available": false, "bootloader_code_hash": "0x010008c753336bc8d1ddca235602b9f31d346412b2d463cd342899f7bfb73baf", "default_aa_code_hash": "0x0100055d760f11a3d737e7fd1816e600a4cd874a9f17f7a225d1f1c537c51a1e", - "protocol_version": "Version25" + "protocol_version": "Version27" }, "pass_through_data": { "shared_states": [ @@ -311,7 +341,7 @@ "rollup_root_hash": "0x0332d2acc43785a44b2b84fc010372c8f3e4ff4d0ca5f312de142ffe74189500", "bootloader_code_hash": "0x010008c753336bc8d1ddca235602b9f31d346412b2d463cd342899f7bfb73baf", "default_aa_code_hash": "0x0100055d760f11a3d737e7fd1816e600a4cd874a9f17f7a225d1f1c537c51a1e", - "protocol_version": "Version25" + "protocol_version": "Version27" }, "system_logs": [ { @@ -1875,43 +1905,73 @@ "events_queue_commitment": "0xec82208c87a937d88768a0067b2a80f0525eca8288dad2cf96cf8bbe6a1aa565", "bootloader_initial_content_commitment": "0x97df88dcecbcd29b49773c042cdee7a44c57a741e64913fff5aa1b3484232f28" }, - "blob_commitments": [ - "0xf840cf3f6b7dc92729b2b9ef3b399e7b896d553b746362fe81c4eb911013570d", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - 
"0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000" - ], - "blob_linear_hashes": [ - "0xff4feb4bef9401731ab9db3626c2e015baa6880d7b1c4382d03b30da3a0fd75e", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000", - "0x0000000000000000000000000000000000000000000000000000000000000000" + "blob_hashes": [ + { + "commitment": "0xf840cf3f6b7dc92729b2b9ef3b399e7b896d553b746362fe81c4eb911013570d", + "linear_hash": "0xff4feb4bef9401731ab9db3626c2e015baa6880d7b1c4382d03b30da3a0fd75e" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": 
"0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + { + "commitment": "0x0000000000000000000000000000000000000000000000000000000000000000", + "linear_hash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } ], - "aggregated_root": "0x0924928c1377a6cf24c39c2d46f8eb9df23e811b26dc3527e548396fd4e173b1" + "aggregation_root": "0x0924928c1377a6cf24c39c2d46f8eb9df23e811b26dc3527e548396fd4e173b1" } } } diff --git a/core/lib/types/src/debug_flat_call.rs b/core/lib/types/src/debug_flat_call.rs index 89a008b5fb5..5809026e521 100644 --- a/core/lib/types/src/debug_flat_call.rs +++ b/core/lib/types/src/debug_flat_call.rs @@ -3,6 +3,13 @@ use zksync_basic_types::{web3::Bytes, U256}; use crate::{api::DebugCallType, Address, H256}; +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct ResultDebugCallFlat { + pub tx_hash: H256, + pub result: Vec, +} + #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct DebugCallFlat { @@ -12,6 +19,8 @@ pub struct DebugCallFlat { pub trace_address: Vec, pub transaction_position: usize, pub transaction_hash: H256, + pub block_number: u32, + pub block_hash: H256, pub r#type: DebugCallType, } @@ -32,3 +41,11 @@ pub struct CallResult { pub output: Bytes, pub gas_used: U256, } + +#[derive(Debug, Clone, PartialEq, Default)] +pub struct CallTraceMeta { + pub index_in_block: usize, + pub tx_hash: H256, + pub block_number: u32, + pub block_hash: H256, +} diff --git a/core/lib/types/src/fee_model.rs b/core/lib/types/src/fee_model.rs index b59aa65b04e..ae346656ea6 100644 --- a/core/lib/types/src/fee_model.rs +++ b/core/lib/types/src/fee_model.rs @@ -1,11 +1,13 @@ +// FIXME: separate crate together with node_fee_model interfaces? + use std::num::NonZeroU64; use bigdecimal::{BigDecimal, ToPrimitive}; use serde::{Deserialize, Serialize}; -use zksync_config::configs::chain::{FeeModelVersion, StateKeeperConfig}; use zksync_system_constants::L1_GAS_PER_PUBDATA_BYTE; +use zksync_utils::ceil_div_u256; -use crate::ProtocolVersionId; +use crate::{ProtocolVersionId, U256}; /// Fee input to be provided into the VM. 
It contains two options: /// - `L1Pegged`: L1 gas price is provided to the VM, and the pubdata price is derived from it. Using this option is required for the @@ -203,6 +205,7 @@ pub struct FeeModelConfigV2 { /// The maximum amount of pubdata that can be used by the batch. Note that if the calldata is used as pubdata, this variable should not exceed 128kb. pub max_pubdata_per_batch: u64, } + impl Default for FeeModelConfig { /// Config with all zeroes is not a valid config (since for instance having 0 max gas per batch may incur division by zero), /// so we implement a sensible default config here. @@ -213,24 +216,6 @@ impl Default for FeeModelConfig { } } -impl FeeModelConfig { - pub fn from_state_keeper_config(state_keeper_config: &StateKeeperConfig) -> Self { - match state_keeper_config.fee_model_version { - FeeModelVersion::V1 => Self::V1(FeeModelConfigV1 { - minimal_l2_gas_price: state_keeper_config.minimal_l2_gas_price, - }), - FeeModelVersion::V2 => Self::V2(FeeModelConfigV2 { - minimal_l2_gas_price: state_keeper_config.minimal_l2_gas_price, - compute_overhead_part: state_keeper_config.compute_overhead_part, - pubdata_overhead_part: state_keeper_config.pubdata_overhead_part, - batch_overhead_l1_gas: state_keeper_config.batch_overhead_l1_gas, - max_gas_per_batch: state_keeper_config.max_gas_per_batch, - max_pubdata_per_batch: state_keeper_config.max_pubdata_per_batch, - }), - } - } -} - #[derive(Debug, Clone, Copy, Serialize, Deserialize)] pub struct FeeParamsV1 { pub config: FeeModelConfigV1, @@ -337,4 +322,442 @@ impl FeeParams { l1_gas_price: 1_000_000_000, }) } + + /// Provides scaled [`BatchFeeInput`] based on these parameters. + pub fn scale( + self, + l1_gas_price_scale_factor: f64, + l1_pubdata_price_scale_factor: f64, + ) -> BatchFeeInput { + match self { + Self::V1(params) => BatchFeeInput::L1Pegged(compute_batch_fee_model_input_v1( + params, + l1_gas_price_scale_factor, + )), + Self::V2(params) => BatchFeeInput::PubdataIndependent(clip_batch_fee_model_input_v2( + compute_batch_fee_model_input_v2( + params, + l1_gas_price_scale_factor, + l1_pubdata_price_scale_factor, + ), + )), + } + } +} + +/// Calculates the batch fee input based on the main node parameters. +/// This function uses the `V1` fee model, i.e. where the pubdata price does not include the proving costs. +fn compute_batch_fee_model_input_v1( + params: FeeParamsV1, + l1_gas_price_scale_factor: f64, +) -> L1PeggedBatchFeeModelInput { + let l1_gas_price = (params.l1_gas_price as f64 * l1_gas_price_scale_factor) as u64; + + L1PeggedBatchFeeModelInput { + l1_gas_price, + fair_l2_gas_price: params.config.minimal_l2_gas_price, + } +} + +/// Calculates the batch fee input based on the main node parameters. +/// This function uses the `V2` fee model, i.e. one where the pubdata price may be independent from the L1 gas price. +fn compute_batch_fee_model_input_v2( + params: FeeParamsV2, + l1_gas_price_scale_factor: f64, + l1_pubdata_price_scale_factor: f64, +) -> PubdataIndependentBatchFeeModelInput { + let config = params.config(); + let l1_gas_price = params.l1_gas_price(); + let l1_pubdata_price = params.l1_pubdata_price(); + + let FeeModelConfigV2 { + minimal_l2_gas_price, + compute_overhead_part, + pubdata_overhead_part, + batch_overhead_l1_gas, + max_gas_per_batch, + max_pubdata_per_batch, + } = config; + + // Firstly, we scale the gas price and pubdata price if needed. 
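+    // In summary, the code below computes (with both prices already scaled):
+    //   fair_l2_gas_price  = minimal_l2_gas_price
+    //                        + compute_overhead_part * ceil(l1_gas_price * batch_overhead_l1_gas / max_gas_per_batch)
+    //   fair_pubdata_price = l1_pubdata_price
+    //                        + pubdata_overhead_part * ceil(l1_gas_price * batch_overhead_l1_gas / max_pubdata_per_batch)
+    // i.e. each fee component covers its configured share of the fixed L1 cost of closing a batch.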
+ let l1_gas_price = (l1_gas_price as f64 * l1_gas_price_scale_factor) as u64; + let l1_pubdata_price = (l1_pubdata_price as f64 * l1_pubdata_price_scale_factor) as u64; + + // While the final results of the calculations are not expected to have any overflows, the intermediate computations + // might, so we use U256 for them. + let l1_batch_overhead_wei = U256::from(l1_gas_price) * U256::from(batch_overhead_l1_gas); + + let fair_l2_gas_price = { + // Firstly, we calculate which part of the overall overhead each unit of L2 gas should cover. + let l1_batch_overhead_per_gas = + ceil_div_u256(l1_batch_overhead_wei, U256::from(max_gas_per_batch)); + + // Then, we multiply by the `compute_overhead_part` to get the overhead for the computation for each gas. + // Also, this means that if we almost never close batches because of compute, the `compute_overhead_part` should be zero and so + // it is possible that the computation costs include no overhead. + let gas_overhead_wei = + (l1_batch_overhead_per_gas.as_u64() as f64 * compute_overhead_part) as u64; + + // We sum up the minimal L2 gas price (i.e. the raw prover/compute cost of a single L2 gas) and the overhead for the batch being closed. + minimal_l2_gas_price + gas_overhead_wei + }; + + let fair_pubdata_price = { + // Firstly, we calculate which part of the overall overhead each pubdata byte should cover. + let l1_batch_overhead_per_pubdata = + ceil_div_u256(l1_batch_overhead_wei, U256::from(max_pubdata_per_batch)); + + // Then, we multiply by the `pubdata_overhead_part` to get the overhead for each pubdata byte. + // Also, this means that if we almost never close batches because of pubdata, the `pubdata_overhead_part` should be zero and so + // it is possible that the pubdata costs include no overhead. + let pubdata_overhead_wei = + (l1_batch_overhead_per_pubdata.as_u64() as f64 * pubdata_overhead_part) as u64; + + // We sum up the raw L1 pubdata price (i.e. the expected price of publishing a single pubdata byte) and the overhead for the batch being closed. + l1_pubdata_price + pubdata_overhead_wei + }; + + PubdataIndependentBatchFeeModelInput { + l1_gas_price, + fair_l2_gas_price, + fair_pubdata_price, + } +} + +/// The bootloader places limitations on fair_l2_gas_price and fair_pubdata_price +/// (MAX_ALLOWED_FAIR_L2_GAS_PRICE and MAX_ALLOWED_FAIR_PUBDATA_PRICE in the bootloader code, respectively). +/// The server needs to clip these prices in order to allow the chain to continue operating at a loss. The alternative +/// would be to stop accepting transactions until the conditions improve. +/// TODO (PE-153): to be removed when bootloader limitation is removed +fn clip_batch_fee_model_input_v2( + fee_model: PubdataIndependentBatchFeeModelInput, +) -> PubdataIndependentBatchFeeModelInput { + /// MAX_ALLOWED_FAIR_L2_GAS_PRICE + const MAXIMUM_L2_GAS_PRICE: u64 = 10_000_000_000_000; + /// MAX_ALLOWED_FAIR_PUBDATA_PRICE + const MAXIMUM_PUBDATA_PRICE: u64 = 1_000_000_000_000_000; + PubdataIndependentBatchFeeModelInput { + l1_gas_price: fee_model.l1_gas_price, + fair_l2_gas_price: if fee_model.fair_l2_gas_price < MAXIMUM_L2_GAS_PRICE { + fee_model.fair_l2_gas_price + } else { + tracing::warn!( + "Fair l2 gas price {} exceeds maximum. Limiting to {}", + fee_model.fair_l2_gas_price, + MAXIMUM_L2_GAS_PRICE + ); + MAXIMUM_L2_GAS_PRICE + }, + fair_pubdata_price: if fee_model.fair_pubdata_price < MAXIMUM_PUBDATA_PRICE { + fee_model.fair_pubdata_price + } else { + tracing::warn!( + "Fair pubdata price {} exceeds maximum. 
Limiting to {}", + fee_model.fair_pubdata_price, + MAXIMUM_PUBDATA_PRICE + ); + MAXIMUM_PUBDATA_PRICE + }, + } +} + +#[cfg(test)] +mod tests { + use super::*; + + // To test that overflow never happens, we'll use a giant L1 gas price, i.e. + // an almost realistic but very large value of 100k gwei. Since it is so large, we'll also + // use it for the L1 pubdata price. + const GWEI: u64 = 1_000_000_000; + const GIANT_L1_GAS_PRICE: u64 = 100_000 * GWEI; + + // As a small L1 gas price we'll use the value of 1 wei. + const SMALL_L1_GAS_PRICE: u64 = 1; + + #[test] + fn test_compute_batch_fee_model_input_v2_giant_numbers() { + let config = FeeModelConfigV2 { + minimal_l2_gas_price: GIANT_L1_GAS_PRICE, + // We generally don't expect those values to be larger than 1. Still, in theory the operator + // may need to set higher values in extreme cases. + compute_overhead_part: 5.0, + pubdata_overhead_part: 5.0, + // The batch overhead would likely never grow beyond that + batch_overhead_l1_gas: 1_000_000, + // Let's imagine that for some reason the limit is relatively small + max_gas_per_batch: 50_000_000, + // The pubdata will likely never go below that + max_pubdata_per_batch: 100_000, + }; + + let params = FeeParamsV2::new( + config, + GIANT_L1_GAS_PRICE, + GIANT_L1_GAS_PRICE, + BaseTokenConversionRatio::default(), + ); + + // We'll use a scale factor of 3.0 + let input = compute_batch_fee_model_input_v2(params, 3.0, 3.0); + + assert_eq!(input.l1_gas_price, GIANT_L1_GAS_PRICE * 3); + assert_eq!(input.fair_l2_gas_price, 130_000_000_000_000); + assert_eq!(input.fair_pubdata_price, 15_300_000_000_000_000); + } + + #[test] + fn test_compute_batch_fee_model_input_v2_small_numbers() { + // Here we assume that the operator wants to make transactions as cheap as possible for users. + let config = FeeModelConfigV2 { + minimal_l2_gas_price: SMALL_L1_GAS_PRICE, + compute_overhead_part: 0.0, + pubdata_overhead_part: 0.0, + batch_overhead_l1_gas: 0, + max_gas_per_batch: 50_000_000, + max_pubdata_per_batch: 100_000, + }; + + let params = FeeParamsV2::new( + config, + SMALL_L1_GAS_PRICE, + SMALL_L1_GAS_PRICE, + BaseTokenConversionRatio::default(), + ); + + let input = + clip_batch_fee_model_input_v2(compute_batch_fee_model_input_v2(params, 1.0, 1.0)); + + assert_eq!(input.l1_gas_price, SMALL_L1_GAS_PRICE); + assert_eq!(input.fair_l2_gas_price, SMALL_L1_GAS_PRICE); + assert_eq!(input.fair_pubdata_price, SMALL_L1_GAS_PRICE); + } + + #[test] + fn test_compute_batch_fee_model_input_v2_only_pubdata_overhead() { + // Here we use a sensible config, but one where only pubdata is used to close the batch + let config = FeeModelConfigV2 { + minimal_l2_gas_price: 100_000_000_000, + compute_overhead_part: 0.0, + pubdata_overhead_part: 1.0, + batch_overhead_l1_gas: 700_000, + max_gas_per_batch: 500_000_000, + max_pubdata_per_batch: 100_000, + }; + + let params = FeeParamsV2::new( + config, + GIANT_L1_GAS_PRICE, + GIANT_L1_GAS_PRICE, + BaseTokenConversionRatio::default(), + ); + + let input = + clip_batch_fee_model_input_v2(compute_batch_fee_model_input_v2(params, 1.0, 1.0)); + assert_eq!(input.l1_gas_price, GIANT_L1_GAS_PRICE); + // The fair L2 gas price is identical to the minimal one. + assert_eq!(input.fair_l2_gas_price, 100_000_000_000); + // The fair pubdata price is the minimal one plus the overhead. 
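+        // Worked arithmetic for the assertion below: l1_batch_overhead_wei = 10^14 wei * 700_000 gas = 7 * 10^19 wei;
+        // spread over max_pubdata_per_batch = 100_000 bytes this is 7 * 10^14 wei per byte, and with
+        // pubdata_overhead_part = 1.0 it is added in full to the 10^14 wei base pubdata price, giving 8 * 10^14.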
+        assert_eq!(input.fair_pubdata_price, 800_000_000_000_000);
+    }
+
+    #[test]
+    fn test_compute_batch_fee_model_input_v2_only_compute_overhead() {
+        // Here we use a sensible config, but one where only compute is used to close the batch.
+        let config = FeeModelConfigV2 {
+            minimal_l2_gas_price: 100_000_000_000,
+            compute_overhead_part: 1.0,
+            pubdata_overhead_part: 0.0,
+            batch_overhead_l1_gas: 700_000,
+            max_gas_per_batch: 500_000_000,
+            max_pubdata_per_batch: 100_000,
+        };
+
+        let params = FeeParamsV2::new(
+            config,
+            GIANT_L1_GAS_PRICE,
+            GIANT_L1_GAS_PRICE,
+            BaseTokenConversionRatio::default(),
+        );
+
+        let input = compute_batch_fee_model_input_v2(params, 1.0, 1.0);
+        assert_eq!(input.l1_gas_price, GIANT_L1_GAS_PRICE);
+        // The fair L2 gas price is identical to the minimal one, plus the overhead.
+        assert_eq!(input.fair_l2_gas_price, 240_000_000_000);
+        // The fair pubdata price is equal to the original one.
+        assert_eq!(input.fair_pubdata_price, GIANT_L1_GAS_PRICE);
+    }
+
+    #[test]
+    fn test_compute_batch_fee_model_input_v2_param_tweaking() {
+        // In this test we're generally checking that each param behaves as expected.
+        let base_config = FeeModelConfigV2 {
+            minimal_l2_gas_price: 100_000_000_000,
+            compute_overhead_part: 0.5,
+            pubdata_overhead_part: 0.5,
+            batch_overhead_l1_gas: 700_000,
+            max_gas_per_batch: 500_000_000,
+            max_pubdata_per_batch: 100_000,
+        };
+
+        let base_params = FeeParamsV2::new(
+            base_config,
+            1_000_000_000,
+            1_000_000_000,
+            BaseTokenConversionRatio::default(),
+        );
+
+        let base_input = compute_batch_fee_model_input_v2(base_params, 1.0, 1.0);
+
+        let base_input_larger_l1_gas_price = compute_batch_fee_model_input_v2(
+            FeeParamsV2::new(
+                base_config,
+                2_000_000_000, // double the L1 gas price
+                1_000_000_000,
+                BaseTokenConversionRatio::default(),
+            ),
+            1.0,
+            1.0,
+        );
+        let base_input_scaled_l1_gas_price =
+            compute_batch_fee_model_input_v2(base_params, 2.0, 1.0);
+        assert_eq!(
+            base_input_larger_l1_gas_price, base_input_scaled_l1_gas_price,
+            "Scaling has the correct effect for the L1 gas price"
+        );
+        assert!(
+            base_input.fair_l2_gas_price < base_input_larger_l1_gas_price.fair_l2_gas_price,
+            "L1 gas price increase raises L2 gas price"
+        );
+        assert!(
+            base_input.fair_pubdata_price < base_input_larger_l1_gas_price.fair_pubdata_price,
+            "L1 gas price increase raises pubdata price"
+        );
+
+        let base_input_larger_pubdata_price = compute_batch_fee_model_input_v2(
+            FeeParamsV2::new(
+                base_config,
+                1_000_000_000,
+                2_000_000_000, // double the L1 pubdata price
+                BaseTokenConversionRatio::default(),
+            ),
+            1.0,
+            1.0,
+        );
+        let base_input_scaled_pubdata_price =
+            compute_batch_fee_model_input_v2(base_params, 1.0, 2.0);
+        assert_eq!(
+            base_input_larger_pubdata_price, base_input_scaled_pubdata_price,
+            "Scaling has the correct effect for the pubdata price"
+        );
+        assert_eq!(
+            base_input.fair_l2_gas_price, base_input_larger_pubdata_price.fair_l2_gas_price,
+            "L1 pubdata increase has no effect on L2 gas price"
+        );
+        assert!(
+            base_input.fair_pubdata_price < base_input_larger_pubdata_price.fair_pubdata_price,
+            "Pubdata price increase raises pubdata price"
+        );
+
+        let base_input_larger_max_gas = compute_batch_fee_model_input_v2(
+            FeeParamsV2::new(
+                FeeModelConfigV2 {
+                    max_gas_per_batch: base_config.max_gas_per_batch * 2,
+                    ..base_config
+                },
+                base_params.l1_gas_price(),
+                base_params.l1_pubdata_price(),
+                BaseTokenConversionRatio::default(),
+            ),
+            1.0,
+            1.0,
+        );
+        assert!(
+            base_input.fair_l2_gas_price > base_input_larger_max_gas.fair_l2_gas_price,
"Max gas increase lowers L2 gas price" + ); + assert_eq!( + base_input.fair_pubdata_price, base_input_larger_max_gas.fair_pubdata_price, + "Max gas increase has no effect on pubdata price" + ); + + let base_input_larger_max_pubdata = compute_batch_fee_model_input_v2( + FeeParamsV2::new( + FeeModelConfigV2 { + max_pubdata_per_batch: base_config.max_pubdata_per_batch * 2, + ..base_config + }, + base_params.l1_gas_price(), + base_params.l1_pubdata_price(), + BaseTokenConversionRatio::default(), + ), + 1.0, + 1.0, + ); + assert_eq!( + base_input.fair_l2_gas_price, base_input_larger_max_pubdata.fair_l2_gas_price, + "Max pubdata increase has no effect on L2 gas price" + ); + assert!( + base_input.fair_pubdata_price > base_input_larger_max_pubdata.fair_pubdata_price, + "Max pubdata increase lowers pubdata price" + ); + } + + #[test] + fn test_compute_batch_fee_model_input_v2_gas_price_over_limit_due_to_l1_gas() { + // In this test we check the gas price limit works as expected + let config = FeeModelConfigV2 { + minimal_l2_gas_price: 100 * GWEI, + compute_overhead_part: 0.5, + pubdata_overhead_part: 0.5, + batch_overhead_l1_gas: 700_000, + max_gas_per_batch: 500_000_000, + max_pubdata_per_batch: 100_000, + }; + + let l1_gas_price = 1_000_000_000 * GWEI; + let params = FeeParamsV2::new( + config, + l1_gas_price, + GIANT_L1_GAS_PRICE, + BaseTokenConversionRatio::default(), + ); + + let input = + clip_batch_fee_model_input_v2(compute_batch_fee_model_input_v2(params, 1.0, 1.0)); + assert_eq!(input.l1_gas_price, l1_gas_price); + // The fair L2 gas price is identical to the maximum + assert_eq!(input.fair_l2_gas_price, 10_000 * GWEI); + assert_eq!(input.fair_pubdata_price, 1_000_000 * GWEI); + } + + #[test] + fn test_compute_batch_fee_model_input_v2_gas_price_over_limit_due_to_conversion_rate() { + // In this test we check the gas price limit works as expected + let config = FeeModelConfigV2 { + minimal_l2_gas_price: GWEI, + compute_overhead_part: 0.5, + pubdata_overhead_part: 0.5, + batch_overhead_l1_gas: 700_000, + max_gas_per_batch: 500_000_000, + max_pubdata_per_batch: 100_000, + }; + + let params = FeeParamsV2::new( + config, + GWEI, + 2 * GWEI, + BaseTokenConversionRatio { + numerator: NonZeroU64::new(3_000_000).unwrap(), + denominator: NonZeroU64::new(1).unwrap(), + }, + ); + + let input = + clip_batch_fee_model_input_v2(compute_batch_fee_model_input_v2(params, 1.0, 1.0)); + assert_eq!(input.l1_gas_price, 3_000_000 * GWEI); + // The fair L2 gas price is identical to the maximum + assert_eq!(input.fair_l2_gas_price, 10_000 * GWEI); + assert_eq!(input.fair_pubdata_price, 1_000_000 * GWEI); + } } diff --git a/core/lib/types/src/l2/mod.rs b/core/lib/types/src/l2/mod.rs index 036d2a7a036..48e813e571d 100644 --- a/core/lib/types/src/l2/mod.rs +++ b/core/lib/types/src/l2/mod.rs @@ -216,7 +216,9 @@ impl L2Tx { let raw = req.get_signed_bytes(&sig).context("get_signed_bytes")?; let (req, hash) = TransactionRequest::from_bytes_unverified(&raw).context("from_bytes_unverified()")?; - let mut tx = L2Tx::from_request_unverified(req).context("from_request_unverified()")?; + // Since we allow users to specify `None` recipient, EVM emulation is implicitly enabled. 
+ let mut tx = + L2Tx::from_request_unverified(req, true).context("from_request_unverified()")?; tx.set_input(raw, hash); Ok(tx) } diff --git a/core/lib/types/src/l2_to_l1_log.rs b/core/lib/types/src/l2_to_l1_log.rs index 59ade8873cd..957cfa9a1a6 100644 --- a/core/lib/types/src/l2_to_l1_log.rs +++ b/core/lib/types/src/l2_to_l1_log.rs @@ -1,5 +1,5 @@ use serde::{Deserialize, Serialize}; -use zksync_system_constants::{BLOB1_LINEAR_HASH_KEY, PUBDATA_CHUNK_PUBLISHER_ADDRESS}; +use zksync_system_constants::{BLOB1_LINEAR_HASH_KEY_PRE_GATEWAY, PUBDATA_CHUNK_PUBLISHER_ADDRESS}; use crate::{ blob::{num_blobs_created, num_blobs_required}, @@ -80,10 +80,15 @@ pub fn l2_to_l1_logs_tree_size(protocol_version: ProtocolVersionId) -> usize { } /// Returns the blob hashes parsed out from the system logs -pub fn parse_system_logs_for_blob_hashes( +pub fn parse_system_logs_for_blob_hashes_pre_gateway( protocol_version: &ProtocolVersionId, system_logs: &[SystemL2ToL1Log], ) -> Vec { + assert!( + protocol_version.is_pre_gateway(), + "Cannot parse blob linear hashes from system logs for post gateway" + ); + let num_required_blobs = num_blobs_required(protocol_version) as u32; let num_created_blobs = num_blobs_created(protocol_version) as u32; @@ -95,9 +100,11 @@ pub fn parse_system_logs_for_blob_hashes( .iter() .filter(|log| { log.0.sender == PUBDATA_CHUNK_PUBLISHER_ADDRESS - && log.0.key >= H256::from_low_u64_be(BLOB1_LINEAR_HASH_KEY as u64) + && log.0.key >= H256::from_low_u64_be(BLOB1_LINEAR_HASH_KEY_PRE_GATEWAY as u64) && log.0.key - < H256::from_low_u64_be((BLOB1_LINEAR_HASH_KEY + num_created_blobs) as u64) + < H256::from_low_u64_be( + (BLOB1_LINEAR_HASH_KEY_PRE_GATEWAY + num_created_blobs) as u64, + ) }) .map(|log| (log.0.key, log.0.value)) .collect::>(); diff --git a/core/lib/types/src/lib.rs b/core/lib/types/src/lib.rs index 86b2e3f03d5..69e6e42fd51 100644 --- a/core/lib/types/src/lib.rs +++ b/core/lib/types/src/lib.rs @@ -5,7 +5,7 @@ #![allow(clippy::upper_case_acronyms, clippy::derive_partial_eq_without_eq)] -use std::{fmt, fmt::Debug}; +use std::fmt; use anyhow::Context as _; use fee::encoding_len; @@ -43,7 +43,6 @@ pub mod l2; pub mod l2_to_l1_log; pub mod priority_op_onchain_data; pub mod protocol_upgrade; -pub mod pubdata_da; pub mod snapshots; pub mod storage; pub mod system_contracts; @@ -88,9 +87,16 @@ pub struct Transaction { pub raw_bytes: Option, } -impl std::fmt::Debug for Transaction { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_tuple("Transaction").field(&self.hash()).finish() +impl fmt::Debug for Transaction { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if let Some(hash) = self.hash_for_debugging() { + f.debug_tuple("Transaction").field(&hash).finish() + } else { + f.debug_struct("Transaction") + .field("initiator_account", &self.initiator_account()) + .field("nonce", &self.nonce()) + .finish() + } } } @@ -136,6 +142,15 @@ impl Transaction { } } + fn hash_for_debugging(&self) -> Option { + match &self.common_data { + ExecuteTransactionCommon::L1(data) => Some(data.hash()), + ExecuteTransactionCommon::L2(data) if data.input.is_some() => Some(data.hash()), + ExecuteTransactionCommon::L2(_) => None, + ExecuteTransactionCommon::ProtocolUpgrade(data) => Some(data.hash()), + } + } + /// Returns the account that initiated this transaction. 
pub fn initiator_account(&self) -> Address { match &self.common_data { @@ -315,9 +330,14 @@ impl TryFrom for abi::Transaction { } } -impl TryFrom for Transaction { - type Error = anyhow::Error; - fn try_from(tx: abi::Transaction) -> anyhow::Result { +impl Transaction { + /// Converts a transaction from its ABI representation. + /// + /// # Arguments + /// + /// - `allow_no_target` enables / disables L2 transactions without target (i.e., `to` field). + /// This field can only be absent for EVM deployment transactions. + pub fn from_abi(tx: abi::Transaction, allow_no_target: bool) -> anyhow::Result { Ok(match tx { abi::Transaction::L1 { tx, @@ -389,7 +409,7 @@ impl TryFrom for Transaction { abi::Transaction::L2(raw) => { let (req, hash) = transaction_request::TransactionRequest::from_bytes_unverified(&raw)?; - let mut tx = L2Tx::from_request_unverified(req)?; + let mut tx = L2Tx::from_request_unverified(req, allow_no_target)?; tx.set_input(raw, hash); tx.into() } diff --git a/core/lib/types/src/protocol_upgrade.rs b/core/lib/types/src/protocol_upgrade.rs index ff3030d1b4f..2461db26593 100644 --- a/core/lib/types/src/protocol_upgrade.rs +++ b/core/lib/types/src/protocol_upgrade.rs @@ -64,6 +64,8 @@ pub struct ProtocolUpgrade { pub bootloader_code_hash: Option, /// New default account code hash. pub default_account_code_hash: Option, + /// New EVM emulator code hash + pub evm_emulator_code_hash: Option, /// New verifier params. pub verifier_params: Option, /// New verifier address. @@ -120,17 +122,21 @@ impl ProtocolUpgrade { bootloader_code_hash: (bootloader_hash != H256::zero()).then_some(bootloader_hash), default_account_code_hash: (default_account_hash != H256::zero()) .then_some(default_account_hash), + evm_emulator_code_hash: None, // EVM emulator upgrades are not supported yet verifier_params: (upgrade.verifier_params != abi::VerifierParams::default()) .then_some(upgrade.verifier_params.into()), verifier_address: (upgrade.verifier != Address::zero()).then_some(upgrade.verifier), timestamp: upgrade.upgrade_timestamp.try_into().unwrap(), tx: (upgrade.l2_protocol_upgrade_tx.tx_type != U256::zero()) .then(|| { - Transaction::try_from(abi::Transaction::L1 { - tx: upgrade.l2_protocol_upgrade_tx, - factory_deps: upgrade.factory_deps, - eth_block: 0, - }) + Transaction::from_abi( + abi::Transaction::L1 { + tx: upgrade.l2_protocol_upgrade_tx, + factory_deps: upgrade.factory_deps, + eth_block: 0, + }, + true, + ) .context("Transaction::try_from()")? 
.try_into() .map_err(|err| anyhow::format_err!("try_into::(): {err}")) @@ -169,14 +175,17 @@ pub fn decode_genesis_upgrade_event( .unwrap_or_else(|_| panic!("Version is not supported, packed version: {full_version_id}")); Ok(( protocol_version, - Transaction::try_from(abi::Transaction::L1 { - tx: tx.into(), - eth_block: event - .block_number - .expect("Event block number is missing") - .as_u64(), - factory_deps, - }) + Transaction::from_abi( + abi::Transaction::L1 { + tx: tx.into(), + eth_block: event + .block_number + .expect("Event block number is missing") + .as_u64(), + factory_deps, + }, + true, + ) .unwrap() .try_into() .unwrap(), @@ -321,6 +330,9 @@ impl ProtocolVersion { default_aa: upgrade .default_account_code_hash .unwrap_or(self.base_system_contracts_hashes.default_aa), + evm_emulator: upgrade + .evm_emulator_code_hash + .or(self.base_system_contracts_hashes.evm_emulator), }, tx: upgrade.tx, } diff --git a/core/lib/types/src/storage/mod.rs b/core/lib/types/src/storage/mod.rs index 9626a5515dc..3294168b27d 100644 --- a/core/lib/types/src/storage/mod.rs +++ b/core/lib/types/src/storage/mod.rs @@ -6,7 +6,7 @@ pub use log::*; use serde::{Deserialize, Serialize}; use zksync_basic_types::{web3::keccak256, L2ChainId}; pub use zksync_system_constants::*; -use zksync_utils::address_to_h256; +use zksync_utils::{address_to_h256, u256_to_h256}; use crate::{AccountTreeId, Address, H160, H256, U256}; @@ -90,6 +90,10 @@ pub fn get_code_key(account: &Address) -> StorageKey { StorageKey::new(account_code_storage, address_to_h256(account)) } +pub fn get_evm_code_hash_key(account: &Address) -> StorageKey { + get_deployer_key(get_address_mapping_key(account, u256_to_h256(1.into()))) +} + pub fn get_known_code_key(hash: &H256) -> StorageKey { let known_codes_storage = AccountTreeId::new(KNOWN_CODES_STORAGE_ADDRESS); StorageKey::new(known_codes_storage, *hash) @@ -110,6 +114,11 @@ fn get_immutable_simulator_log_key(key: H256) -> StorageKey { StorageKey::new(immutable_simulator, key) } +pub fn get_deployer_key(key: H256) -> StorageKey { + let deployer_contract = AccountTreeId::new(CONTRACT_DEPLOYER_ADDRESS); + StorageKey::new(deployer_contract, key) +} + pub fn get_is_account_key(account: &Address) -> StorageKey { let deployer = AccountTreeId::new(CONTRACT_DEPLOYER_ADDRESS); diff --git a/core/lib/types/src/storage/witness_block_state.rs b/core/lib/types/src/storage/witness_block_state.rs index bce9cc9034d..7f3195af873 100644 --- a/core/lib/types/src/storage/witness_block_state.rs +++ b/core/lib/types/src/storage/witness_block_state.rs @@ -5,7 +5,7 @@ use serde::{Deserialize, Serialize}; use crate::{StorageKey, StorageValue}; /// Storage data used during Witness Generation. 
-#[derive(Debug, Default, Clone)] +#[derive(Debug, Default, Clone, PartialEq)] pub struct WitnessStorageState { pub read_storage_key: HashMap, pub is_write_initial: HashMap, diff --git a/core/lib/types/src/system_contracts.rs b/core/lib/types/src/system_contracts.rs index 4caf81fd0cf..643aa56a1f1 100644 --- a/core/lib/types/src/system_contracts.rs +++ b/core/lib/types/src/system_contracts.rs @@ -1,11 +1,10 @@ use std::path::PathBuf; -use once_cell::sync::Lazy; use zksync_basic_types::{AccountTreeId, Address, U256}; use zksync_contracts::{read_sys_contract_bytecode, ContractLanguage, SystemContractsRepo}; use zksync_system_constants::{ BOOTLOADER_UTILITIES_ADDRESS, CODE_ORACLE_ADDRESS, COMPRESSOR_ADDRESS, CREATE2_FACTORY_ADDRESS, - EVENT_WRITER_ADDRESS, L2_ASSET_ROUTER_ADDRESS, L2_BRIDGEHUB_ADDRESS, + EVENT_WRITER_ADDRESS, EVM_GAS_MANAGER_ADDRESS, L2_ASSET_ROUTER_ADDRESS, L2_BRIDGEHUB_ADDRESS, L2_GENESIS_UPGRADE_ADDRESS, L2_MESSAGE_ROOT_ADDRESS, L2_NATIVE_TOKEN_VAULT_ADDRESS, P256VERIFY_PRECOMPILE_ADDRESS, PUBDATA_CHUNK_PUBLISHER_ADDRESS, }; @@ -27,7 +26,7 @@ use crate::{ pub const TX_NONCE_INCREMENT: U256 = U256([1, 0, 0, 0]); // 1 pub const DEPLOYMENT_NONCE_INCREMENT: U256 = U256([0, 0, 1, 0]); // 2^128 -static SYSTEM_CONTRACT_LIST: [(&str, &str, Address, ContractLanguage); 30] = [ +static SYSTEM_CONTRACT_LIST: [(&str, &str, Address, ContractLanguage); 31] = [ ( "", "AccountCodeStorage", @@ -149,6 +148,12 @@ static SYSTEM_CONTRACT_LIST: [(&str, &str, Address, ContractLanguage); 30] = [ COMPLEX_UPGRADER_ADDRESS, ContractLanguage::Sol, ), + ( + "", + "EvmGasManager", + EVM_GAS_MANAGER_ADDRESS, + ContractLanguage::Yul, + ), // For now, only zero address and the bootloader address have empty bytecode at the init // In the future, we might want to set all of the system contracts this way. ("", "EmptyContract", Address::zero(), ContractLanguage::Sol), @@ -202,29 +207,40 @@ static SYSTEM_CONTRACT_LIST: [(&str, &str, Address, ContractLanguage); 30] = [ ), ]; -static SYSTEM_CONTRACTS: Lazy> = Lazy::new(|| { +/// Gets default set of system contracts, based on Cargo workspace location. +pub fn get_system_smart_contracts(use_evm_emulator: bool) -> Vec { SYSTEM_CONTRACT_LIST .iter() - .map(|(path, name, address, contract_lang)| DeployedContract { - account_id: AccountTreeId::new(*address), - bytecode: read_sys_contract_bytecode(path, name, contract_lang.clone()), + .filter_map(|(path, name, address, contract_lang)| { + if *name == "EvmGasManager" && !use_evm_emulator { + None + } else { + Some(DeployedContract { + account_id: AccountTreeId::new(*address), + bytecode: read_sys_contract_bytecode(path, name, contract_lang.clone()), + }) + } }) - .collect::>() -}); - -/// Gets default set of system contracts, based on Cargo workspace location. -pub fn get_system_smart_contracts() -> Vec { - SYSTEM_CONTRACTS.clone() + .collect() } /// Loads system contracts from a given directory. 
-pub fn get_system_smart_contracts_from_dir(path: PathBuf) -> Vec { +pub fn get_system_smart_contracts_from_dir( + path: PathBuf, + use_evm_emulator: bool, +) -> Vec { let repo = SystemContractsRepo { root: path }; SYSTEM_CONTRACT_LIST .iter() - .map(|(path, name, address, contract_lang)| DeployedContract { - account_id: AccountTreeId::new(*address), - bytecode: repo.read_sys_contract_bytecode(path, name, contract_lang.clone()), + .filter_map(|(path, name, address, contract_lang)| { + if *name == "EvmGasManager" && !use_evm_emulator { + None + } else { + Some(DeployedContract { + account_id: AccountTreeId::new(*address), + bytecode: repo.read_sys_contract_bytecode(path, name, contract_lang.clone()), + }) + } }) .collect::>() } diff --git a/core/lib/types/src/transaction_request.rs b/core/lib/types/src/transaction_request.rs index 5f26b1d6a6a..a8713f301ba 100644 --- a/core/lib/types/src/transaction_request.rs +++ b/core/lib/types/src/transaction_request.rs @@ -809,6 +809,7 @@ impl TransactionRequest { impl L2Tx { pub(crate) fn from_request_unverified( mut value: TransactionRequest, + allow_no_target: bool, ) -> Result { let fee = value.get_fee_data_checked()?; let nonce = value.get_nonce_checked()?; @@ -817,8 +818,7 @@ impl L2Tx { let meta = value.eip712_meta.take().unwrap_or_default(); validate_factory_deps(&meta.factory_deps)?; - // TODO: Remove this check when evm equivalence gets enabled - if value.to.is_none() { + if value.to.is_none() && !allow_no_target { return Err(SerializationTransactionError::ToAddressIsNull); } @@ -848,11 +848,18 @@ impl L2Tx { Ok(tx) } + /// Converts a request into a transaction. + /// + /// # Arguments + /// + /// - `allow_no_target` enables / disables transactions without target (i.e., `to` field). + /// This field can only be absent for EVM deployment transactions. pub fn from_request( - value: TransactionRequest, + request: TransactionRequest, max_tx_size: usize, + allow_no_target: bool, ) -> Result { - let tx = Self::from_request_unverified(value)?; + let tx = Self::from_request_unverified(request, allow_no_target)?; tx.check_encoded_size(max_tx_size)?; Ok(tx) } @@ -916,11 +923,19 @@ impl From for TransactionRequest { } } -impl TryFrom for L1Tx { - type Error = SerializationTransactionError; - fn try_from(tx: CallRequest) -> Result { +impl L1Tx { + /// Converts a request into a transaction. + /// + /// # Arguments + /// + /// - `allow_no_target` enables / disables transactions without target (i.e., `to` field). + /// This field can only be absent for EVM deployment transactions. + pub fn from_request( + request: CallRequest, + allow_no_target: bool, + ) -> Result { // L1 transactions have no limitations on the transaction size. 
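+        // (Editorial note: `MAX_ENCODED_TX_SIZE` is passed as the size limit below, which
+        // effectively disables the encoded-size check for L1-originated transactions.)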
- let tx: L2Tx = L2Tx::from_request(tx.into(), MAX_ENCODED_TX_SIZE)?; + let tx: L2Tx = L2Tx::from_request(request.into(), MAX_ENCODED_TX_SIZE, allow_no_target)?; // Note, that while the user has theoretically provided the fee for ETH on L1, // the payment to the operator as well as refunds happen on L2 and so all the ETH @@ -1316,7 +1331,7 @@ mod tests { ..Default::default() }; let execute_tx1: Result = - L2Tx::from_request(tx1, usize::MAX); + L2Tx::from_request(tx1, usize::MAX, true); assert!(execute_tx1.is_ok()); let tx2 = TransactionRequest { @@ -1327,7 +1342,7 @@ mod tests { ..Default::default() }; let execute_tx2: Result = - L2Tx::from_request(tx2, usize::MAX); + L2Tx::from_request(tx2, usize::MAX, true); assert_eq!( execute_tx2.unwrap_err(), SerializationTransactionError::TooBigNonce @@ -1344,7 +1359,7 @@ mod tests { ..Default::default() }; let execute_tx1: Result = - L2Tx::from_request(tx1, usize::MAX); + L2Tx::from_request(tx1, usize::MAX, true); assert_eq!( execute_tx1.unwrap_err(), SerializationTransactionError::MaxFeePerGasNotU64 @@ -1358,7 +1373,7 @@ mod tests { ..Default::default() }; let execute_tx2: Result = - L2Tx::from_request(tx2, usize::MAX); + L2Tx::from_request(tx2, usize::MAX, true); assert_eq!( execute_tx2.unwrap_err(), SerializationTransactionError::MaxPriorityFeePerGasNotU64 @@ -1376,7 +1391,7 @@ mod tests { }; let execute_tx3: Result = - L2Tx::from_request(tx3, usize::MAX); + L2Tx::from_request(tx3, usize::MAX, true); assert_eq!( execute_tx3.unwrap_err(), SerializationTransactionError::MaxFeePerPubdataByteNotU64 @@ -1432,7 +1447,7 @@ mod tests { let request = TransactionRequest::from_bytes(data.as_slice(), L2ChainId::from(270)).unwrap(); assert_matches!( - L2Tx::from_request(request.0, random_tx_max_size), + L2Tx::from_request(request.0, random_tx_max_size, true), Err(SerializationTransactionError::OversizedData(_, _)) ) } @@ -1458,7 +1473,7 @@ mod tests { }; let try_to_l2_tx: Result = - L2Tx::from_request(call_request.into(), random_tx_max_size); + L2Tx::from_request(call_request.into(), random_tx_max_size, true); assert_matches!( try_to_l2_tx, @@ -1483,15 +1498,20 @@ mod tests { access_list: None, eip712_meta: None, }; - let l2_tx = L2Tx::from_request(call_request_with_nonce.clone().into(), MAX_ENCODED_TX_SIZE) - .unwrap(); + let l2_tx = L2Tx::from_request( + call_request_with_nonce.clone().into(), + MAX_ENCODED_TX_SIZE, + true, + ) + .unwrap(); assert_eq!(l2_tx.nonce(), Nonce(123u32)); let mut call_request_without_nonce = call_request_with_nonce; call_request_without_nonce.nonce = None; let l2_tx = - L2Tx::from_request(call_request_without_nonce.into(), MAX_ENCODED_TX_SIZE).unwrap(); + L2Tx::from_request(call_request_without_nonce.into(), MAX_ENCODED_TX_SIZE, true) + .unwrap(); assert_eq!(l2_tx.nonce(), Nonce(0u32)); } diff --git a/core/lib/types/src/tx/execute.rs b/core/lib/types/src/tx/execute.rs index c133261bc23..0edece9e46b 100644 --- a/core/lib/types/src/tx/execute.rs +++ b/core/lib/types/src/tx/execute.rs @@ -1,6 +1,7 @@ use once_cell::sync::Lazy; use serde::{Deserialize, Serialize}; -use zksync_utils::ZeroPrefixHexSerde; +use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; +use zksync_utils::{bytecode::hash_bytecode, ZeroPrefixHexSerde}; use crate::{ethabi, Address, EIP712TypedStructure, StructBuilder, H256, U256}; @@ -89,8 +90,7 @@ impl Execute { &self.calldata } - /// Prepares calldata to invoke deployer contract. - /// This method encodes parameters for the `create` method. + /// Prepares calldata to invoke deployer contract. 
This method encodes parameters for the `create` method. pub fn encode_deploy_params_create( salt: H256, contract_hash: H256, @@ -116,4 +116,24 @@ impl Execute { FUNCTION_SIGNATURE.iter().copied().chain(params).collect() } + + /// Creates an instance for deploying the specified bytecode without additional dependencies. If necessary, + /// additional deps can be added to `Self.factory_deps` after this call. + pub fn for_deploy( + salt: H256, + contract_bytecode: Vec, + constructor_input: &[ethabi::Token], + ) -> Self { + let bytecode_hash = hash_bytecode(&contract_bytecode); + Self { + contract_address: Some(CONTRACT_DEPLOYER_ADDRESS), + calldata: Self::encode_deploy_params_create( + salt, + bytecode_hash, + ethabi::encode(constructor_input), + ), + value: 0.into(), + factory_deps: vec![contract_bytecode], + } + } } diff --git a/core/lib/utils/src/bytecode.rs b/core/lib/utils/src/bytecode.rs index 48bdb433020..01cce5bc34d 100644 --- a/core/lib/utils/src/bytecode.rs +++ b/core/lib/utils/src/bytecode.rs @@ -1,5 +1,6 @@ // FIXME: move to basic_types? +use zk_evm::k256::sha2::{Digest, Sha256}; use zksync_basic_types::H256; use crate::bytes_to_chunks; @@ -40,6 +41,7 @@ pub fn validate_bytecode(code: &[u8]) -> Result<(), InvalidBytecodeError> { Ok(()) } +/// Hashes the provided EraVM bytecode. pub fn hash_bytecode(code: &[u8]) -> H256 { let chunked_code = bytes_to_chunks(code); let hash = zk_evm::zkevm_opcode_defs::utils::bytecode_to_code_hash(&chunked_code) @@ -55,3 +57,62 @@ pub fn bytecode_len_in_words(bytecodehash: &H256) -> u16 { pub fn bytecode_len_in_bytes(bytecodehash: H256) -> usize { bytecode_len_in_words(&bytecodehash) as usize * 32 } + +/// Bytecode marker encoded in the first byte of the bytecode hash. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +#[repr(u8)] +pub enum BytecodeMarker { + /// EraVM bytecode marker (1). + EraVm = 1, + /// EVM bytecode marker (2). + Evm = 2, +} + +impl BytecodeMarker { + /// Parses a marker from the bytecode hash. + pub fn new(bytecode_hash: H256) -> Option { + Some(match bytecode_hash.as_bytes()[0] { + val if val == Self::EraVm as u8 => Self::EraVm, + val if val == Self::Evm as u8 => Self::Evm, + _ => return None, + }) + } +} + +/// Hashes the provided EVM bytecode. The bytecode must be padded to an odd number of 32-byte words; +/// bytecodes stored in the known codes storage satisfy this requirement automatically. +pub fn hash_evm_bytecode(bytecode: &[u8]) -> H256 { + validate_bytecode(bytecode).expect("invalid EVM bytecode"); + + let mut hasher = Sha256::new(); + let len = bytecode.len() as u16; + hasher.update(bytecode); + let result = hasher.finalize(); + + let mut output = [0u8; 32]; + output[..].copy_from_slice(result.as_slice()); + output[0] = BytecodeMarker::Evm as u8; + output[1] = 0; + output[2..4].copy_from_slice(&len.to_be_bytes()); + + H256(output) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn bytecode_markers_are_valid() { + let bytecode_hash = hash_bytecode(&[0; 32]); + assert_eq!( + BytecodeMarker::new(bytecode_hash), + Some(BytecodeMarker::EraVm) + ); + let bytecode_hash = hash_evm_bytecode(&[0; 32]); + assert_eq!( + BytecodeMarker::new(bytecode_hash), + Some(BytecodeMarker::Evm) + ); + } +} diff --git a/core/lib/utils/src/env.rs b/core/lib/utils/src/env.rs index 5ae07caf148..8f4aa1da940 100644 --- a/core/lib/utils/src/env.rs +++ b/core/lib/utils/src/env.rs @@ -19,8 +19,8 @@ pub enum Workspace<'a> { Core(&'a Path), /// `prover` folder. Prover(&'a Path), - /// `toolbox` folder. 
- Toolbox(&'a Path), + /// ZK Stack CLI folder. + ZkStackCli(&'a Path), } impl Workspace<'static> { @@ -48,7 +48,7 @@ impl Workspace<'static> { impl<'a> Workspace<'a> { const PROVER_DIRECTORY_NAME: &'static str = "prover"; - const TOOLBOX_DIRECTORY_NAME: &'static str = "zk_toolbox"; + const ZKSTACK_CLI_DIRECTORY_NAME: &'static str = "zkstack_cli"; /// Returns the path of the core workspace. /// For `Workspace::None`, considers the current directory to represent core workspace. @@ -56,7 +56,7 @@ impl<'a> Workspace<'a> { match self { Self::None => PathBuf::from("."), Self::Core(path) => path.into(), - Self::Prover(path) | Self::Toolbox(path) => path.parent().unwrap().into(), + Self::Prover(path) | Self::ZkStackCli(path) => path.parent().unwrap().into(), } } @@ -68,11 +68,11 @@ impl<'a> Workspace<'a> { } } - /// Returns the path of the `zk_toolbox`` workspace. - pub fn toolbox(self) -> PathBuf { + /// Returns the path of the ZK Stack CLI workspace. + pub fn zkstack_cli(self) -> PathBuf { match self { - Self::Toolbox(path) => path.into(), - _ => self.core().join(Self::TOOLBOX_DIRECTORY_NAME), + Self::ZkStackCli(path) => path.into(), + _ => self.core().join(Self::ZKSTACK_CLI_DIRECTORY_NAME), } } } @@ -81,8 +81,8 @@ impl<'a> From<&'a Path> for Workspace<'a> { fn from(path: &'a Path) -> Self { if path.ends_with(Self::PROVER_DIRECTORY_NAME) { Self::Prover(path) - } else if path.ends_with(Self::TOOLBOX_DIRECTORY_NAME) { - Self::Toolbox(path) + } else if path.ends_with(Self::ZKSTACK_CLI_DIRECTORY_NAME) { + Self::ZkStackCli(path) } else { Self::Core(path) } @@ -154,16 +154,16 @@ mod tests { let workspace = Workspace::locate(); assert_matches!(workspace, Workspace::Core(_)); let core_path = workspace.core(); - // Check if prover and toolbox directories exist. + // Check if prover and ZK Stack CLI directories exist. assert!(workspace.prover().exists()); assert_matches!( Workspace::from(workspace.prover().as_path()), Workspace::Prover(_) ); - assert!(workspace.toolbox().exists()); + assert!(workspace.zkstack_cli().exists()); assert_matches!( - Workspace::from(workspace.toolbox().as_path()), - Workspace::Toolbox(_) + Workspace::from(workspace.zkstack_cli().as_path()), + Workspace::ZkStackCli(_) ); // Prover. @@ -181,17 +181,17 @@ mod tests { Workspace::from(workspace.core().as_path()), Workspace::Core(_) ); - assert!(workspace.toolbox().exists()); + assert!(workspace.zkstack_cli().exists()); assert_matches!( - Workspace::from(workspace.toolbox().as_path()), - Workspace::Toolbox(_) + Workspace::from(workspace.zkstack_cli().as_path()), + Workspace::ZkStackCli(_) ); - // Toolbox. 
-        std::env::set_current_dir(workspace.toolbox()).unwrap();
+        // ZK Stack CLI.
+        std::env::set_current_dir(workspace.zkstack_cli()).unwrap();
         let workspace_path = locate_workspace_inner().unwrap();
         let workspace = Workspace::from(workspace_path.as_path());
-        assert_matches!(workspace, Workspace::Toolbox(_));
+        assert_matches!(workspace, Workspace::ZkStackCli(_));
         assert_eq!(workspace.core(), core_path);
         assert_matches!(
             Workspace::from(workspace.core().as_path()),
diff --git a/core/lib/vm_executor/Cargo.toml b/core/lib/vm_executor/Cargo.toml
index 089c2a9bcca..a967aaa969a 100644
--- a/core/lib/vm_executor/Cargo.toml
+++ b/core/lib/vm_executor/Cargo.toml
@@ -23,3 +23,6 @@ tokio.workspace = true
 anyhow.workspace = true
 tracing.workspace = true
 vise.workspace = true
+
+[dev-dependencies]
+assert_matches.workspace = true
diff --git a/core/lib/vm_executor/src/batch/factory.rs b/core/lib/vm_executor/src/batch/factory.rs
index 146f0bb4e5c..5877922b333 100644
--- a/core/lib/vm_executor/src/batch/factory.rs
+++ b/core/lib/vm_executor/src/batch/factory.rs
@@ -6,18 +6,21 @@
 use tokio::sync::mpsc;
 use zksync_multivm::{
     interface::{
         executor::{BatchExecutor, BatchExecutorFactory},
+        pubdata::PubdataBuilder,
         storage::{ReadStorage, StoragePtr, StorageView, StorageViewStats},
         utils::DivergenceHandler,
         BatchTransactionExecutionResult, BytecodeCompressionError, CompressedBytecodeInfo,
         ExecutionResult, FinishedL1Batch, Halt, L1BatchEnv, L2BlockEnv, SystemEnv, VmFactory,
         VmInterface, VmInterfaceHistoryEnabled,
     },
+    is_supported_by_fast_vm,
+    pubdata_builders::pubdata_params_to_builder,
     tracers::CallTracer,
     vm_fast,
     vm_latest::HistoryEnabled,
     FastVmInstance, LegacyVmInstance, MultiVMTracer,
 };
-use zksync_types::{vm::FastVmMode, Transaction};
+use zksync_types::{commitment::PubdataParams, vm::FastVmMode, Transaction};
 
 use super::{
     executor::{Command, MainBatchExecutor},
@@ -115,6 +118,7 @@ impl BatchExecutorFactory
         storage: S,
         l1_batch_params: L1BatchEnv,
         system_env: SystemEnv,
+        pubdata_params: PubdataParams,
     ) -> Box<dyn BatchExecutor<S>> {
         // Since we process `BatchExecutor` commands one-by-one (the next command is never enqueued
         // until a previous command is processed), capacity 1 is enough for the commands channel.
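Editor's note: the hunk above changes `init_batch` to accept `PubdataParams` per batch. A minimal caller-side sketch, not part of the patch; the helper function is hypothetical, and only the `init_batch` signature and trait names are taken from this diff:

```rust
// Hypothetical usage sketch of the reworked batch executor factory API.
use zksync_multivm::interface::{
    executor::BatchExecutorFactory, storage::ReadStorage, L1BatchEnv, SystemEnv,
};
use zksync_types::commitment::PubdataParams;

fn start_batch<S: ReadStorage + Send + 'static>(
    factory: &mut dyn BatchExecutorFactory<S>,
    storage: S,
    l1_batch_env: L1BatchEnv,
    system_env: SystemEnv,
    pubdata_params: PubdataParams,
) {
    // Pubdata parameters are now supplied per batch rather than via `SystemEnv`;
    // the factory converts them into a `PubdataBuilder` with
    // `pubdata_params_to_builder` and hands the builder to `finish_batch`.
    let _executor = factory.init_batch(storage, l1_batch_env, system_env, pubdata_params);
}
```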
@@ -129,8 +133,14 @@ impl BatchExecutorFactory
             _tracer: PhantomData::<Tr>,
         };
 
-        let handle =
-            tokio::task::spawn_blocking(move || executor.run(storage, l1_batch_params, system_env));
+        let handle = tokio::task::spawn_blocking(move || {
+            executor.run(
+                storage,
+                l1_batch_params,
+                system_env,
+                pubdata_params_to_builder(pubdata_params),
+            )
+        });
         Box::new(MainBatchExecutor::new(handle, commands_sender))
     }
 }
@@ -159,6 +169,10 @@ impl BatchVm {
         storage_ptr: StoragePtr<StorageView<S>>,
         mode: FastVmMode,
     ) -> Self {
+        if !is_supported_by_fast_vm(system_env.version) {
+            return Self::Legacy(LegacyVmInstance::new(l1_batch_env, system_env, storage_ptr));
+        }
+
         match mode {
             FastVmMode::Old => {
                 Self::Legacy(LegacyVmInstance::new(l1_batch_env, system_env, storage_ptr))
@@ -178,8 +192,8 @@ impl BatchVm {
         dispatch_batch_vm!(self.start_new_l2_block(l2_block));
     }
 
-    fn finish_batch(&mut self) -> FinishedL1Batch {
-        dispatch_batch_vm!(self.finish_batch())
+    fn finish_batch(&mut self, pubdata_builder: Rc<dyn PubdataBuilder>) -> FinishedL1Batch {
+        dispatch_batch_vm!(self.finish_batch(pubdata_builder))
     }
 
     fn make_snapshot(&mut self) {
@@ -255,6 +269,7 @@ impl CommandReceiver
         storage: S,
         l1_batch_params: L1BatchEnv,
         system_env: SystemEnv,
+        pubdata_builder: Rc<dyn PubdataBuilder>,
     ) -> anyhow::Result<StorageView<S>> {
         tracing::info!("Starting executing L1 batch #{}", &l1_batch_params.number);
@@ -305,7 +320,7 @@ impl CommandReceiver
                 }
             }
             Command::FinishBatch(resp) => {
-                let vm_block_result = self.finish_batch(&mut vm)?;
+                let vm_block_result = self.finish_batch(&mut vm, pubdata_builder)?;
                 if resp.send(vm_block_result).is_err() {
                     break;
                 }
@@ -360,10 +375,14 @@ impl CommandReceiver
         latency.observe();
     }
 
-    fn finish_batch(&self, vm: &mut BatchVm<S, Tr>) -> anyhow::Result<FinishedL1Batch> {
+    fn finish_batch(
+        &self,
+        vm: &mut BatchVm<S, Tr>,
+        pubdata_builder: Rc<dyn PubdataBuilder>,
+    ) -> anyhow::Result<FinishedL1Batch> {
         // The vm execution was paused right after the last transaction was executed.
         // There is some post-processing work that the VM needs to do before the block is fully processed.
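+        // (Editorial note: the `pubdata_builder` threaded through `run` is consumed
+        // here; as of this change, batch finalization needs it to assemble pubdata.)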
-        let result = vm.finish_batch();
+        let result = vm.finish_batch(pubdata_builder);
         anyhow::ensure!(
             !result.block_tip_execution_result.result.is_failed(),
             "VM must not fail when finalizing block: {:#?}",
@@ -443,3 +462,50 @@ impl CommandReceiver {
         }
     }
 }
+
+#[cfg(test)]
+mod tests {
+    use assert_matches::assert_matches;
+    use zksync_multivm::interface::{storage::InMemoryStorage, TxExecutionMode};
+    use zksync_types::ProtocolVersionId;
+
+    use super::*;
+    use crate::testonly::{default_l1_batch_env, default_system_env, FAST_VM_MODES};
+
+    #[test]
+    fn selecting_vm_for_execution() {
+        let l1_batch_env = default_l1_batch_env(1);
+        let mut system_env = SystemEnv {
+            version: ProtocolVersionId::Version22,
+            ..default_system_env(TxExecutionMode::VerifyExecute)
+        };
+        let storage = StorageView::new(InMemoryStorage::default()).to_rc_ptr();
+        for mode in FAST_VM_MODES {
+            let vm = BatchVm::<_, ()>::new(
+                l1_batch_env.clone(),
+                system_env.clone(),
+                storage.clone(),
+                mode,
+            );
+            assert_matches!(vm, BatchVm::Legacy(_));
+        }
+
+        system_env.version = ProtocolVersionId::latest();
+        let vm = BatchVm::<_, ()>::new(
+            l1_batch_env.clone(),
+            system_env.clone(),
+            storage.clone(),
+            FastVmMode::Old,
+        );
+        assert_matches!(vm, BatchVm::Legacy(_));
+        let vm = BatchVm::<_, ()>::new(
+            l1_batch_env.clone(),
+            system_env.clone(),
+            storage.clone(),
+            FastVmMode::New,
+        );
+        assert_matches!(vm, BatchVm::Fast(FastVmInstance::Fast(_)));
+        let vm = BatchVm::<_, ()>::new(l1_batch_env, system_env, storage, FastVmMode::Shadow);
+        assert_matches!(vm, BatchVm::Fast(FastVmInstance::Shadowed(_)));
+    }
+}
diff --git a/core/lib/vm_executor/src/lib.rs b/core/lib/vm_executor/src/lib.rs
index 1a0fbb002df..83edb77fd62 100644
--- a/core/lib/vm_executor/src/lib.rs
+++ b/core/lib/vm_executor/src/lib.rs
@@ -9,3 +9,5 @@ pub mod batch;
 pub mod oneshot;
 mod shared;
 pub mod storage;
+#[cfg(test)]
+mod testonly;
diff --git a/core/lib/vm_executor/src/oneshot/block.rs b/core/lib/vm_executor/src/oneshot/block.rs
index cab64289e5e..d6118f15b98 100644
--- a/core/lib/vm_executor/src/oneshot/block.rs
+++ b/core/lib/vm_executor/src/oneshot/block.rs
@@ -7,7 +7,6 @@ use zksync_multivm::{
 use zksync_types::{
     api,
     block::{unpack_block_info, L2BlockHasher},
-    commitment::PubdataParams,
     fee_model::BatchFeeInput,
     AccountTreeId, L1BatchNumber, L2BlockNumber, ProtocolVersionId, StorageKey, H256,
     SYSTEM_CONTEXT_ADDRESS, SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION,
@@ -15,7 +14,7 @@ use zksync_types::{
 };
 use zksync_utils::{h256_to_u256, time::seconds_since_epoch};
 
-use super::env::OneshotEnvParameters;
+use super::{env::OneshotEnvParameters, ContractsKind};
 
 /// Block information necessary to execute a transaction / call. Unlike [`ResolvedBlockInfo`], this information is *partially* resolved,
 /// which is beneficial for some data workflows.
@@ -134,29 +133,34 @@ impl BlockInfo
         let protocol_version = l2_block_header
             .protocol_version
             .unwrap_or(ProtocolVersionId::last_potentially_undefined());
-
+        // We cannot use the EVM emulator mentioned in the block as-is because of batch vs playground settings etc.
+        // Instead, we just check whether EVM emulation in general is enabled for a block, and store this binary flag for further use.
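+        // (Downstream, this flag is consumed by `MultiVMBaseSystemContracts::base_system_contracts`,
+        // which checks out the latest EVM emulator for post-1.5.0 protocol versions.)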
+ let use_evm_emulator = l2_block_header + .base_system_contracts_hashes + .evm_emulator + .is_some(); Ok(ResolvedBlockInfo { state_l2_block_number, state_l2_block_hash: l2_block_header.hash, vm_l1_batch_number, l1_batch_timestamp, protocol_version, + use_evm_emulator, is_pending: self.is_pending_l2_block(), - pubdata_params: l2_block_header.pubdata_params, }) } } /// Resolved [`BlockInfo`] containing additional data from VM state. -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct ResolvedBlockInfo { state_l2_block_number: L2BlockNumber, state_l2_block_hash: H256, vm_l1_batch_number: L1BatchNumber, l1_batch_timestamp: u64, protocol_version: ProtocolVersionId, + use_evm_emulator: bool, is_pending: bool, - pubdata_params: PubdataParams, } impl ResolvedBlockInfo { @@ -164,9 +168,17 @@ impl ResolvedBlockInfo { pub fn state_l2_block_number(&self) -> L2BlockNumber { self.state_l2_block_number } + + pub fn protocol_version(&self) -> ProtocolVersionId { + self.protocol_version + } + + pub fn use_evm_emulator(&self) -> bool { + self.use_evm_emulator + } } -impl OneshotEnvParameters { +impl OneshotEnvParameters { pub(super) async fn to_env_inner( &self, connection: &mut Connection<'_, Core>, @@ -182,13 +194,16 @@ impl OneshotEnvParameters { ) .await?; - let (system, l1_batch) = self.prepare_env( - execution_mode, - resolved_block_info, - next_block, - fee_input, - enforced_base_fee, - ); + let (system, l1_batch) = self + .prepare_env( + execution_mode, + resolved_block_info, + next_block, + fee_input, + enforced_base_fee, + ) + .await?; + Ok(OneshotEnv { system, l1_batch, @@ -196,14 +211,14 @@ impl OneshotEnvParameters { }) } - fn prepare_env( + async fn prepare_env( &self, execution_mode: TxExecutionMode, resolved_block_info: &ResolvedBlockInfo, next_block: L2BlockEnv, fee_input: BatchFeeInput, enforced_base_fee: Option, - ) -> (SystemEnv, L1BatchEnv) { + ) -> anyhow::Result<(SystemEnv, L1BatchEnv)> { let &Self { operator_account, validation_computational_gas_limit, @@ -216,13 +231,13 @@ impl OneshotEnvParameters { version: resolved_block_info.protocol_version, base_system_smart_contracts: self .base_system_contracts - .get_by_protocol_version(resolved_block_info.protocol_version) - .clone(), + .base_system_contracts(resolved_block_info) + .await + .context("failed getting base system contracts")?, bootloader_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, execution_mode, default_validation_computational_gas_limit: validation_computational_gas_limit, chain_id, - pubdata_params: resolved_block_info.pubdata_params, }; let l1_batch_env = L1BatchEnv { previous_batch_hash: None, @@ -233,7 +248,7 @@ impl OneshotEnvParameters { enforced_base_fee, first_l2_block: next_block, }; - (system_env, l1_batch_env) + Ok((system_env, l1_batch_env)) } } diff --git a/core/lib/vm_executor/src/oneshot/contracts.rs b/core/lib/vm_executor/src/oneshot/contracts.rs index 2a9204f7d0a..6f9f021345c 100644 --- a/core/lib/vm_executor/src/oneshot/contracts.rs +++ b/core/lib/vm_executor/src/oneshot/contracts.rs @@ -1,9 +1,52 @@ +use std::{fmt, marker::PhantomData}; + +use async_trait::async_trait; use zksync_contracts::BaseSystemContracts; use zksync_types::ProtocolVersionId; +use super::ResolvedBlockInfo; +use crate::shared::Sealed; + +/// Kind of base system contracts used as a marker in the [`BaseSystemContractsProvider`] trait. +pub trait ContractsKind: fmt::Debug + Sealed {} + +/// Marker for [`BaseSystemContracts`] used for gas estimation. 
+#[derive(Debug)] +pub struct EstimateGas(()); + +impl Sealed for EstimateGas {} +impl ContractsKind for EstimateGas {} + +/// Marker for [`BaseSystemContracts`] used for calls and transaction execution. +#[derive(Debug)] +pub struct CallOrExecute(()); + +impl Sealed for CallOrExecute {} +impl ContractsKind for CallOrExecute {} + +/// Provider of [`BaseSystemContracts`] for oneshot execution. +/// +/// The main implementation of this trait is [`MultiVMBaseSystemContracts`], which selects contracts +/// based on [`ProtocolVersionId`]. +#[async_trait] +pub trait BaseSystemContractsProvider: fmt::Debug + Send + Sync { + /// Returns base system contracts for executing a transaction on top of the provided block. + /// + /// Implementations are encouraged to cache returned contracts for performance; caching is **not** performed + /// by the caller. + /// + /// # Errors + /// + /// Returned errors are treated as unrecoverable for a particular execution, but further executions are not affected. + async fn base_system_contracts( + &self, + block_info: &ResolvedBlockInfo, + ) -> anyhow::Result; +} + /// System contracts (bootloader and default account abstraction) for all supported VM versions. -#[derive(Debug, Clone)] -pub(super) struct MultiVMBaseSystemContracts { +#[derive(Debug)] +pub struct MultiVMBaseSystemContracts { /// Contracts to be used for pre-virtual-blocks protocol versions. pre_virtual_blocks: BaseSystemContracts, /// Contracts to be used for post-virtual-blocks protocol versions. @@ -22,14 +65,21 @@ pub(super) struct MultiVMBaseSystemContracts { vm_1_5_0_small_memory: BaseSystemContracts, /// Contracts to be used after the 1.5.0 upgrade vm_1_5_0_increased_memory: BaseSystemContracts, + /// Contracts to be used after the protocol defense upgrade + vm_protocol_defense: BaseSystemContracts, /// Contracts to be used after the gateway upgrade gateway: BaseSystemContracts, + // We use `fn() -> C` marker so that the `MultiVMBaseSystemContracts` unconditionally implements `Send + Sync`. + _contracts_kind: PhantomData C>, } -impl MultiVMBaseSystemContracts { - /// Gets contracts for a certain version. - pub fn get_by_protocol_version(&self, version: ProtocolVersionId) -> &BaseSystemContracts { - match version { +impl MultiVMBaseSystemContracts { + fn get_by_protocol_version( + &self, + version: ProtocolVersionId, + use_evm_emulator: bool, + ) -> BaseSystemContracts { + let base = match version { ProtocolVersionId::Version0 | ProtocolVersionId::Version1 | ProtocolVersionId::Version2 @@ -54,11 +104,25 @@ impl MultiVMBaseSystemContracts { ProtocolVersionId::Version21 | ProtocolVersionId::Version22 => &self.post_1_4_2, ProtocolVersionId::Version23 => &self.vm_1_5_0_small_memory, ProtocolVersionId::Version24 => &self.vm_1_5_0_increased_memory, - ProtocolVersionId::Version25 | ProtocolVersionId::Version26 => &self.gateway, + ProtocolVersionId::Version25 | ProtocolVersionId::Version26 => { + &self.vm_protocol_defense + } + ProtocolVersionId::Version27 | ProtocolVersionId::Version28 => &self.gateway, + }; + let base = base.clone(); + + if version.is_post_1_5_0() && use_evm_emulator { + // EVM emulator is not versioned now; the latest version is always checked out + base.with_latest_evm_emulator() + } else { + base } } +} - pub(super) fn load_estimate_gas_blocking() -> Self { +impl MultiVMBaseSystemContracts { + /// Returned system contracts (mainly the bootloader) are tuned to provide accurate execution metrics. 
+ pub fn load_estimate_gas_blocking() -> Self { Self { pre_virtual_blocks: BaseSystemContracts::estimate_gas_pre_virtual_blocks(), post_virtual_blocks: BaseSystemContracts::estimate_gas_post_virtual_blocks(), @@ -71,11 +135,16 @@ impl MultiVMBaseSystemContracts { vm_1_5_0_small_memory: BaseSystemContracts::estimate_gas_1_5_0_small_memory(), vm_1_5_0_increased_memory: BaseSystemContracts::estimate_gas_post_1_5_0_increased_memory(), + vm_protocol_defense: BaseSystemContracts::estimate_gas_post_protocol_defense(), gateway: BaseSystemContracts::estimate_gas_gateway(), + _contracts_kind: PhantomData, } } +} - pub(super) fn load_eth_call_blocking() -> Self { +impl MultiVMBaseSystemContracts { + /// Returned system contracts (mainly the bootloader) are tuned to provide better UX (e.g. revert messages). + pub fn load_eth_call_blocking() -> Self { Self { pre_virtual_blocks: BaseSystemContracts::playground_pre_virtual_blocks(), post_virtual_blocks: BaseSystemContracts::playground_post_virtual_blocks(), @@ -88,7 +157,20 @@ impl MultiVMBaseSystemContracts { vm_1_5_0_small_memory: BaseSystemContracts::playground_1_5_0_small_memory(), vm_1_5_0_increased_memory: BaseSystemContracts::playground_post_1_5_0_increased_memory( ), + vm_protocol_defense: BaseSystemContracts::playground_post_protocol_defense(), gateway: BaseSystemContracts::playground_gateway(), + _contracts_kind: PhantomData, } } } + +#[async_trait] +impl BaseSystemContractsProvider for MultiVMBaseSystemContracts { + async fn base_system_contracts( + &self, + block_info: &ResolvedBlockInfo, + ) -> anyhow::Result { + Ok(self + .get_by_protocol_version(block_info.protocol_version(), block_info.use_evm_emulator())) + } +} diff --git a/core/lib/vm_executor/src/oneshot/env.rs b/core/lib/vm_executor/src/oneshot/env.rs index 51154d561ec..6d70c3cfde9 100644 --- a/core/lib/vm_executor/src/oneshot/env.rs +++ b/core/lib/vm_executor/src/oneshot/env.rs @@ -1,19 +1,12 @@ -use std::marker::PhantomData; +use std::sync::Arc; -use anyhow::Context; use zksync_dal::{Connection, Core}; use zksync_multivm::interface::{OneshotEnv, TxExecutionMode}; use zksync_types::{fee_model::BatchFeeInput, l2::L2Tx, AccountTreeId, L2ChainId}; -use crate::oneshot::{contracts::MultiVMBaseSystemContracts, ResolvedBlockInfo}; - -/// Marker for [`OneshotEnvParameters`] used for gas estimation. -#[derive(Debug)] -pub struct EstimateGas(()); - -/// Marker for [`OneshotEnvParameters`] used for calls and/or transaction execution. -#[derive(Debug)] -pub struct CallOrExecute(()); +use super::{ + BaseSystemContractsProvider, CallOrExecute, ContractsKind, EstimateGas, ResolvedBlockInfo, +}; /// Oneshot environment parameters that are expected to be constant or rarely change during the program lifetime. /// These parameters can be used to create [a full environment](OneshotEnv) for transaction / call execution. @@ -21,15 +14,29 @@ pub struct CallOrExecute(()); /// Notably, these parameters include base system contracts (bootloader and default account abstraction) for all supported /// VM versions. #[derive(Debug)] -pub struct OneshotEnvParameters { +pub struct OneshotEnvParameters { pub(super) chain_id: L2ChainId, - pub(super) base_system_contracts: MultiVMBaseSystemContracts, + pub(super) base_system_contracts: Arc>, pub(super) operator_account: AccountTreeId, pub(super) validation_computational_gas_limit: u32, - _ty: PhantomData, } -impl OneshotEnvParameters { +impl OneshotEnvParameters { + /// Creates env parameters. 
+ pub fn new( + base_system_contracts: Arc>, + chain_id: L2ChainId, + operator_account: AccountTreeId, + validation_computational_gas_limit: u32, + ) -> Self { + Self { + chain_id, + base_system_contracts, + operator_account, + validation_computational_gas_limit, + } + } + /// Returns gas limit for account validation of transactions. pub fn validation_computational_gas_limit(&self) -> u32 { self.validation_computational_gas_limit @@ -37,27 +44,6 @@ impl OneshotEnvParameters { } impl OneshotEnvParameters { - /// Creates env parameters for gas estimation. - /// - /// System contracts (mainly, bootloader) for these params are tuned to provide accurate - /// execution metrics. - pub async fn for_gas_estimation( - chain_id: L2ChainId, - operator_account: AccountTreeId, - ) -> anyhow::Result { - Ok(Self { - chain_id, - base_system_contracts: tokio::task::spawn_blocking( - MultiVMBaseSystemContracts::load_estimate_gas_blocking, - ) - .await - .context("failed loading system contracts for gas estimation")?, - operator_account, - validation_computational_gas_limit: u32::MAX, - _ty: PhantomData, - }) - } - /// Prepares environment for gas estimation. pub async fn to_env( &self, @@ -78,28 +64,6 @@ impl OneshotEnvParameters { } impl OneshotEnvParameters { - /// Creates env parameters for transaction / call execution. - /// - /// System contracts (mainly, bootloader) for these params tuned to provide better UX - /// experience (e.g. revert messages). - pub async fn for_execution( - chain_id: L2ChainId, - operator_account: AccountTreeId, - validation_computational_gas_limit: u32, - ) -> anyhow::Result { - Ok(Self { - chain_id, - base_system_contracts: tokio::task::spawn_blocking( - MultiVMBaseSystemContracts::load_eth_call_blocking, - ) - .await - .context("failed loading system contracts for calls")?, - operator_account, - validation_computational_gas_limit, - _ty: PhantomData, - }) - } - /// Prepares environment for a call. 
pub async fn to_call_env( &self, diff --git a/core/lib/vm_executor/src/oneshot/mock.rs b/core/lib/vm_executor/src/oneshot/mock.rs index 8f3a12603c1..a7363c633c6 100644 --- a/core/lib/vm_executor/src/oneshot/mock.rs +++ b/core/lib/vm_executor/src/oneshot/mock.rs @@ -68,6 +68,7 @@ impl MockOneshotExecutor { logs: Default::default(), statistics: Default::default(), refunds: Default::default(), + new_known_factory_deps: None, } }, ) diff --git a/core/lib/vm_executor/src/oneshot/mod.rs b/core/lib/vm_executor/src/oneshot/mod.rs index cb75f396b5d..5f9e4dd3c6f 100644 --- a/core/lib/vm_executor/src/oneshot/mod.rs +++ b/core/lib/vm_executor/src/oneshot/mod.rs @@ -19,8 +19,9 @@ use zksync_multivm::{ executor::{OneshotExecutor, TransactionValidator}, storage::{ReadStorage, StoragePtr, StorageView, WriteStorage}, tracer::{ValidationError, ValidationParams}, - ExecutionResult, OneshotEnv, OneshotTracingParams, OneshotTransactionExecutionResult, - StoredL2BlockEnv, TxExecutionArgs, TxExecutionMode, VmExecutionMode, VmInterface, + ExecutionResult, InspectExecutionMode, OneshotEnv, OneshotTracingParams, + OneshotTransactionExecutionResult, StoredL2BlockEnv, TxExecutionArgs, TxExecutionMode, + VmInterface, }, tracers::{CallTracer, StorageInvocations, ValidationTracer}, utils::adjust_pubdata_price_for_tx, @@ -40,7 +41,11 @@ use zksync_utils::{h256_to_u256, u256_to_h256}; pub use self::{ block::{BlockInfo, ResolvedBlockInfo}, - env::{CallOrExecute, EstimateGas, OneshotEnvParameters}, + contracts::{ + BaseSystemContractsProvider, CallOrExecute, ContractsKind, EstimateGas, + MultiVMBaseSystemContracts, + }, + env::OneshotEnvParameters, mock::MockOneshotExecutor, }; @@ -165,7 +170,7 @@ where ); let exec_result = executor.apply(|vm, transaction| { vm.push_transaction(transaction); - vm.inspect(&mut tracers.into(), VmExecutionMode::OneTx) + vm.inspect(&mut tracers.into(), InspectExecutionMode::OneTx) }); let validation_result = Arc::make_mut(&mut validation_result) .take() diff --git a/core/lib/vm_executor/src/storage.rs b/core/lib/vm_executor/src/storage.rs index 861ee0649b3..e5a2d404233 100644 --- a/core/lib/vm_executor/src/storage.rs +++ b/core/lib/vm_executor/src/storage.rs @@ -54,7 +54,6 @@ pub fn l1_batch_params( protocol_version: ProtocolVersionId, virtual_blocks: u32, chain_id: L2ChainId, - pubdata_params: PubdataParams, ) -> (SystemEnv, L1BatchEnv) { ( SystemEnv { @@ -65,7 +64,6 @@ pub fn l1_batch_params( execution_mode: TxExecutionMode::VerifyExecute, default_validation_computational_gas_limit: validation_computational_gas_limit, chain_id, - pubdata_params, }, L1BatchEnv { previous_batch_hash: Some(previous_batch_hash), @@ -266,7 +264,7 @@ impl L1BatchParamsProvider { first_l2_block_in_batch: &FirstL2BlockInBatch, validation_computational_gas_limit: u32, chain_id: L2ChainId, - ) -> anyhow::Result<(SystemEnv, L1BatchEnv)> { + ) -> anyhow::Result<(SystemEnv, L1BatchEnv, PubdataParams)> { anyhow::ensure!( first_l2_block_in_batch.l1_batch_number > L1BatchNumber(0), "Loading params for genesis L1 batch not supported" @@ -312,11 +310,15 @@ impl L1BatchParamsProvider { let contract_hashes = first_l2_block_in_batch.header.base_system_contracts_hashes; let base_system_contracts = storage .factory_deps_dal() - .get_base_system_contracts(contract_hashes.bootloader, contract_hashes.default_aa) + .get_base_system_contracts( + contract_hashes.bootloader, + contract_hashes.default_aa, + contract_hashes.evm_emulator, + ) .await .context("failed getting base system contracts")?; - Ok(l1_batch_params( + let (system_env, 
l1_batch_env) = l1_batch_params( first_l2_block_in_batch.l1_batch_number, first_l2_block_in_batch.header.fee_account_address, l1_batch_timestamp, @@ -332,13 +334,12 @@ impl L1BatchParamsProvider { .context("`protocol_version` must be set for L2 block")?, first_l2_block_in_batch.header.virtual_blocks, chain_id, - PubdataParams { - l2_da_validator_address: first_l2_block_in_batch - .header - .pubdata_params - .l2_da_validator_address, - pubdata_type: first_l2_block_in_batch.header.pubdata_params.pubdata_type, - }, + ); + + Ok(( + system_env, + l1_batch_env, + first_l2_block_in_batch.header.pubdata_params, )) } @@ -352,7 +353,7 @@ impl L1BatchParamsProvider { number: L1BatchNumber, validation_computational_gas_limit: u32, chain_id: L2ChainId, - ) -> anyhow::Result> { + ) -> anyhow::Result> { let first_l2_block = self .load_first_l2_block_in_batch(storage, number) .await diff --git a/core/lib/vm_executor/src/testonly.rs b/core/lib/vm_executor/src/testonly.rs new file mode 100644 index 00000000000..5bcd604a432 --- /dev/null +++ b/core/lib/vm_executor/src/testonly.rs @@ -0,0 +1,45 @@ +use once_cell::sync::Lazy; +use zksync_contracts::BaseSystemContracts; +use zksync_multivm::{ + interface::{L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode}, + vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, +}; +use zksync_types::{ + block::L2BlockHasher, fee_model::BatchFeeInput, vm::FastVmMode, Address, L1BatchNumber, + L2BlockNumber, L2ChainId, ProtocolVersionId, H256, ZKPORTER_IS_AVAILABLE, +}; + +static BASE_SYSTEM_CONTRACTS: Lazy = + Lazy::new(BaseSystemContracts::load_from_disk); + +pub(crate) const FAST_VM_MODES: [FastVmMode; 3] = + [FastVmMode::Old, FastVmMode::New, FastVmMode::Shadow]; + +pub(crate) fn default_system_env(execution_mode: TxExecutionMode) -> SystemEnv { + SystemEnv { + zk_porter_available: ZKPORTER_IS_AVAILABLE, + version: ProtocolVersionId::latest(), + base_system_smart_contracts: BASE_SYSTEM_CONTRACTS.clone(), + bootloader_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, + execution_mode, + default_validation_computational_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, + chain_id: L2ChainId::default(), + } +} + +pub(crate) fn default_l1_batch_env(number: u32) -> L1BatchEnv { + L1BatchEnv { + previous_batch_hash: Some(H256::zero()), + number: L1BatchNumber(number), + timestamp: number.into(), + fee_account: Address::repeat_byte(0x22), + enforced_base_fee: None, + first_l2_block: L2BlockEnv { + number, + timestamp: number.into(), + prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(number - 1)), + max_virtual_blocks_to_create: 1, + }, + fee_input: BatchFeeInput::sensible_l1_pegged_default(), + } +} diff --git a/core/lib/vm_interface/src/executor.rs b/core/lib/vm_interface/src/executor.rs index 119f975fecd..60522ba338a 100644 --- a/core/lib/vm_interface/src/executor.rs +++ b/core/lib/vm_interface/src/executor.rs @@ -3,7 +3,7 @@ use std::fmt; use async_trait::async_trait; -use zksync_types::{l2::L2Tx, Transaction}; +use zksync_types::{commitment::PubdataParams, l2::L2Tx, Transaction}; use crate::{ storage::{ReadStorage, StorageView}, @@ -20,6 +20,7 @@ pub trait BatchExecutorFactory: 'static + Send + fmt::Debug { storage: S, l1_batch_params: L1BatchEnv, system_env: SystemEnv, + pubdata_params: PubdataParams, ) -> Box>; } diff --git a/core/lib/vm_interface/src/lib.rs b/core/lib/vm_interface/src/lib.rs index 645e3e7c856..39f949e5d8a 100644 --- a/core/lib/vm_interface/src/lib.rs +++ b/core/lib/vm_interface/src/lib.rs @@ -24,16 +24,16 @@ pub use crate::{ VmRevertReason, 
VmRevertReasonParsingError, }, inputs::{ - L1BatchEnv, L2BlockEnv, OneshotEnv, OneshotTracingParams, StoredL2BlockEnv, SystemEnv, - TxExecutionArgs, TxExecutionMode, VmExecutionMode, + InspectExecutionMode, L1BatchEnv, L2BlockEnv, OneshotEnv, OneshotTracingParams, + StoredL2BlockEnv, SystemEnv, TxExecutionArgs, TxExecutionMode, VmExecutionMode, }, outputs::{ BatchTransactionExecutionResult, BootloaderMemory, Call, CallType, CircuitStatistic, CompressedBytecodeInfo, CurrentExecutionState, DeduplicatedWritesMetrics, - ExecutionResult, FinishedL1Batch, L2Block, OneshotTransactionExecutionResult, Refunds, - TransactionExecutionMetrics, TransactionExecutionResult, TxExecutionStatus, VmEvent, - VmExecutionLogs, VmExecutionMetrics, VmExecutionResultAndLogs, VmExecutionStatistics, - VmMemoryMetrics, + ExecutionResult, FinishedL1Batch, L2Block, OneshotTransactionExecutionResult, + PushTransactionResult, Refunds, TransactionExecutionMetrics, + TransactionExecutionResult, TxExecutionStatus, VmEvent, VmExecutionLogs, + VmExecutionMetrics, VmExecutionResultAndLogs, VmExecutionStatistics, VmMemoryMetrics, }, tracer, }, @@ -41,6 +41,7 @@ pub use crate::{ }; pub mod executor; +pub mod pubdata; pub mod storage; mod types; pub mod utils; diff --git a/core/lib/vm_interface/src/pubdata/mod.rs b/core/lib/vm_interface/src/pubdata/mod.rs new file mode 100644 index 00000000000..f901687b5fa --- /dev/null +++ b/core/lib/vm_interface/src/pubdata/mod.rs @@ -0,0 +1,90 @@ +use zksync_types::{ + l2_to_l1_log::L2ToL1Log, writes::StateDiffRecord, Address, ProtocolVersionId, H256, U256, +}; + +/// Corresponds to the following solidity event: +/// ```solidity +/// struct L2ToL1Log { +/// uint8 l2ShardId; +/// bool isService; +/// uint16 txNumberInBlock; +/// address sender; +/// bytes32 key; +/// bytes32 value; +/// } +/// ``` +#[derive(Debug, Default, Clone, PartialEq)] +pub struct L1MessengerL2ToL1Log { + pub l2_shard_id: u8, + pub is_service: bool, + pub tx_number_in_block: u16, + pub sender: Address, + pub key: U256, + pub value: U256, +} + +impl L1MessengerL2ToL1Log { + pub fn packed_encoding(&self) -> Vec<u8> { + /// Converts `U256` value into bytes array + fn u256_to_bytes_be(value: &U256) -> Vec<u8> { + let mut bytes = vec![0u8; 32]; + value.to_big_endian(bytes.as_mut_slice()); + bytes + } + + let mut res: Vec<u8> = vec![]; + res.push(self.l2_shard_id); + res.push(self.is_service as u8); + res.extend_from_slice(&self.tx_number_in_block.to_be_bytes()); + res.extend_from_slice(self.sender.as_bytes()); + res.extend(u256_to_bytes_be(&self.key)); + res.extend(u256_to_bytes_be(&self.value)); + res + } +} + +impl From<L1MessengerL2ToL1Log> for L2ToL1Log { + fn from(log: L1MessengerL2ToL1Log) -> Self { + fn u256_to_h256(num: U256) -> H256 { + let mut bytes = [0u8; 32]; + num.to_big_endian(&mut bytes); + H256::from_slice(&bytes) + } + + L2ToL1Log { + shard_id: log.l2_shard_id, + is_service: log.is_service, + tx_number_in_block: log.tx_number_in_block, + sender: log.sender, + key: u256_to_h256(log.key), + value: u256_to_h256(log.value), + } + } +} + +/// Struct based on which the pubdata blob is formed +#[derive(Debug, Clone, Default)] +pub struct PubdataInput { + pub user_logs: Vec<L1MessengerL2ToL1Log>, + pub l2_to_l1_messages: Vec<Vec<u8>>, + pub published_bytecodes: Vec<Vec<u8>>, + pub state_diffs: Vec<StateDiffRecord>, +} + +/// Trait that encapsulates pubdata building logic. It is implemented for rollup and validium cases. +/// If a chain needs a custom pubdata format, another implementation should be added.
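For orientation, here is a minimal sketch of what an implementation of the `PubdataBuilder` trait defined below could look like. It is purely illustrative: the `NoopPubdataBuilder` name and its flat encoding are assumptions, not the rollup/validium builders this PR adds elsewhere.

```rust
use zksync_types::{Address, ProtocolVersionId};

/// Hypothetical builder that publishes user logs and L2 -> L1 messages verbatim, with no framing.
#[derive(Debug)]
struct NoopPubdataBuilder {
    l2_da_validator: Address,
}

impl PubdataBuilder for NoopPubdataBuilder {
    fn l2_da_validator(&self) -> Address {
        self.l2_da_validator
    }

    fn l1_messenger_operator_input(
        &self,
        input: &PubdataInput,
        _protocol_version: ProtocolVersionId,
    ) -> Vec<u8> {
        // Real builders length-prefix each section; this sketch just concatenates user logs.
        input
            .user_logs
            .iter()
            .flat_map(L1MessengerL2ToL1Log::packed_encoding)
            .collect()
    }

    fn settlement_layer_pubdata(
        &self,
        input: &PubdataInput,
        _protocol_version: ProtocolVersionId,
    ) -> Vec<u8> {
        input.l2_to_l1_messages.iter().flatten().copied().collect()
    }
}
```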
+pub trait PubdataBuilder: std::fmt::Debug { + fn l2_da_validator(&self) -> Address; + + fn l1_messenger_operator_input( + &self, + input: &PubdataInput, + protocol_version: ProtocolVersionId, + ) -> Vec<u8>; + + fn settlement_layer_pubdata( + &self, + input: &PubdataInput, + protocol_version: ProtocolVersionId, + ) -> Vec<u8>; +} diff --git a/core/lib/vm_interface/src/storage/in_memory.rs b/core/lib/vm_interface/src/storage/in_memory.rs index 9e3e3203d01..6bd1dc8d552 100644 --- a/core/lib/vm_interface/src/storage/in_memory.rs +++ b/core/lib/vm_interface/src/storage/in_memory.rs @@ -36,7 +36,7 @@ impl InMemoryStorage { Self::with_custom_system_contracts_and_chain_id( chain_id, bytecode_hasher, - get_system_smart_contracts(), + get_system_smart_contracts(false), ) } diff --git a/core/lib/vm_interface/src/storage/view.rs b/core/lib/vm_interface/src/storage/view.rs index ec9267609e2..249d584c9f6 100644 --- a/core/lib/vm_interface/src/storage/view.rs +++ b/core/lib/vm_interface/src/storage/view.rs @@ -102,6 +112,16 @@ impl<S: ReadStorage> StorageView<S> { pub fn cache(&self) -> StorageViewCache { self.cache.clone() } + + /// Provides mutable access to the underlying storage. + /// + /// # Warning + /// + /// Mutating the underlying storage directly can easily break implied `StorageView` invariants, so use with care. + #[doc(hidden)] + pub fn inner_mut(&mut self) -> &mut S { + &mut self.storage_handle + } } impl<S: ReadStorage + ?Sized> ReadStorage for Box<S> diff --git a/core/lib/vm_interface/src/types/inputs/execution_mode.rs b/core/lib/vm_interface/src/types/inputs/execution_mode.rs index 41492af6edc..f091a259d30 100644 --- a/core/lib/vm_interface/src/types/inputs/execution_mode.rs +++ b/core/lib/vm_interface/src/types/inputs/execution_mode.rs @@ -13,3 +13,22 @@ pub enum VmExecutionMode { /// Stop after executing the entire bootloader. But before you exit the bootloader. Bootloader, } + +/// Subset of `VmExecutionMode` variants that do not require any additional input +/// and can be invoked with the `inspect()` method. +#[derive(Debug, Copy, Clone)] +pub enum InspectExecutionMode { + /// Stop after executing the next transaction. + OneTx, + /// Stop after executing the entire bootloader. But before you exit the bootloader.
+ Bootloader, +} + +impl From<InspectExecutionMode> for VmExecutionMode { + fn from(mode: InspectExecutionMode) -> Self { + match mode { + InspectExecutionMode::Bootloader => Self::Bootloader, + InspectExecutionMode::OneTx => Self::OneTx, + } + } +} diff --git a/core/lib/vm_interface/src/types/inputs/l1_batch_env.rs b/core/lib/vm_interface/src/types/inputs/l1_batch_env.rs index dbc94247617..0011f0b138b 100644 --- a/core/lib/vm_interface/src/types/inputs/l1_batch_env.rs +++ b/core/lib/vm_interface/src/types/inputs/l1_batch_env.rs @@ -1,5 +1,8 @@ use serde::{Deserialize, Serialize}; -use zksync_types::{fee_model::BatchFeeInput, Address, L1BatchNumber, H256}; +use zksync_types::{ + block::UnsealedL1BatchHeader, fee_model::BatchFeeInput, Address, L1BatchNumber, + ProtocolVersionId, H256, +}; use super::L2BlockEnv; @@ -21,3 +24,18 @@ pub struct L1BatchEnv { pub enforced_base_fee: Option<u64>, pub first_l2_block: L2BlockEnv, } + +impl L1BatchEnv { + pub fn into_unsealed_header( + self, + protocol_version: Option<ProtocolVersionId>, + ) -> UnsealedL1BatchHeader { + UnsealedL1BatchHeader { + number: self.number, + timestamp: self.timestamp, + protocol_version, + fee_address: self.fee_account, + fee_input: self.fee_input, + } + } +} diff --git a/core/lib/vm_interface/src/types/inputs/mod.rs b/core/lib/vm_interface/src/types/inputs/mod.rs index 24f58ae72f1..cb80ba7c138 100644 --- a/core/lib/vm_interface/src/types/inputs/mod.rs +++ b/core/lib/vm_interface/src/types/inputs/mod.rs @@ -3,7 +3,7 @@ use zksync_types::{ }; pub use self::{ - execution_mode::VmExecutionMode, + execution_mode::{InspectExecutionMode, VmExecutionMode}, l1_batch_env::L1BatchEnv, l2_block::{L2BlockEnv, StoredL2BlockEnv}, system_env::{SystemEnv, TxExecutionMode}, diff --git a/core/lib/vm_interface/src/types/inputs/system_env.rs b/core/lib/vm_interface/src/types/inputs/system_env.rs index 67d555f9bc0..5a0496752d5 100644 --- a/core/lib/vm_interface/src/types/inputs/system_env.rs +++ b/core/lib/vm_interface/src/types/inputs/system_env.rs @@ -2,7 +2,7 @@ use std::fmt::Debug; use serde::{Deserialize, Serialize}; use zksync_contracts::BaseSystemContracts; -use zksync_types::{commitment::PubdataParams, L2ChainId, ProtocolVersionId}; +use zksync_types::{L2ChainId, ProtocolVersionId}; /// Params related to the execution process, not the batch itself #[derive(Clone, PartialEq, Serialize, Deserialize)] @@ -15,7 +15,6 @@ pub struct SystemEnv { pub execution_mode: TxExecutionMode, pub default_validation_computational_gas_limit: u32, pub chain_id: L2ChainId, - pub pubdata_params: PubdataParams, } impl Debug for SystemEnv { @@ -34,7 +33,6 @@ impl Debug for SystemEnv { ) .field("execution_mode", &self.execution_mode) .field("chain_id", &self.chain_id) - .field("pubdata_params", &self.pubdata_params) .finish() } } diff --git a/core/lib/vm_interface/src/types/outputs/execution_result.rs b/core/lib/vm_interface/src/types/outputs/execution_result.rs index 3e53aad85f1..018ea075db5 100644 --- a/core/lib/vm_interface/src/types/outputs/execution_result.rs +++ b/core/lib/vm_interface/src/types/outputs/execution_result.rs @@ -1,3 +1,5 @@ +use std::collections::HashMap; + use serde::{Deserialize, Deserializer, Serialize, Serializer}; use zksync_system_constants::{ BOOTLOADER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS, @@ -118,6 +120,10 @@ pub struct VmExecutionResultAndLogs { pub logs: VmExecutionLogs, pub statistics: VmExecutionStatistics, pub refunds: Refunds, + /// Bytecodes decommitted during VM execution. `None` if not computed by the VM.
+ // FIXME: currently, this is only filled up by `vm_latest`; probably makes sense to narrow down + // to *dynamic* factory deps, so that `HashMap::new()` is a valid value for VMs not supporting EVM emulation. + pub new_known_factory_deps: Option<HashMap<H256, Vec<u8>>>, } #[derive(Debug, Clone, PartialEq)] diff --git a/core/lib/vm_interface/src/types/outputs/finished_l1batch.rs b/core/lib/vm_interface/src/types/outputs/finished_l1batch.rs index 27241c2c0fa..8f7c1d4fb0d 100644 --- a/core/lib/vm_interface/src/types/outputs/finished_l1batch.rs +++ b/core/lib/vm_interface/src/types/outputs/finished_l1batch.rs @@ -26,6 +26,7 @@ impl FinishedL1Batch { logs: VmExecutionLogs::default(), statistics: VmExecutionStatistics::default(), refunds: Refunds::default(), + new_known_factory_deps: None, }, final_execution_state: CurrentExecutionState { events: vec![], diff --git a/core/lib/vm_interface/src/types/outputs/mod.rs b/core/lib/vm_interface/src/types/outputs/mod.rs index 1fa1cd5d168..fe25801dd12 100644 --- a/core/lib/vm_interface/src/types/outputs/mod.rs +++ b/core/lib/vm_interface/src/types/outputs/mod.rs @@ -1,3 +1,5 @@ +use std::borrow::Cow; + pub use self::{ bytecode::CompressedBytecodeInfo, execution_result::{ @@ -20,3 +22,14 @@ mod execution_state; mod finished_l1batch; mod l2_block; mod statistic; + +/// Result of pushing a transaction to the VM state without executing it. +#[derive(Debug)] +pub struct PushTransactionResult<'a> { + /// Compressed bytecodes for the transaction. If the VM doesn't support bytecode compression, returns + /// an empty slice. + /// + /// Importantly, these bytecodes are not guaranteed to be published by the transaction; + /// e.g., it may run out of gas during publication. + pub compressed_bytecodes: Cow<'a, [CompressedBytecodeInfo]>, +} diff --git a/core/lib/vm_interface/src/types/outputs/statistic.rs b/core/lib/vm_interface/src/types/outputs/statistic.rs index 095547076d4..f8e3851c832 100644 --- a/core/lib/vm_interface/src/types/outputs/statistic.rs +++ b/core/lib/vm_interface/src/types/outputs/statistic.rs @@ -109,7 +109,8 @@ pub struct VmExecutionStatistics { pub circuit_statistic: CircuitStatistic, } -/// Oracle metrics of the VM. +/// Oracle metrics reported by legacy VMs. +#[derive(Debug, Default)] pub struct VmMemoryMetrics { pub event_sink_inner: usize, pub event_sink_history: usize, diff --git a/core/lib/vm_interface/src/utils/dump.rs b/core/lib/vm_interface/src/utils/dump.rs index 5dc2351dcf7..f23d6f307b8 100644 --- a/core/lib/vm_interface/src/utils/dump.rs +++ b/core/lib/vm_interface/src/utils/dump.rs @@ -1,13 +1,14 @@ -use std::collections::HashMap; +use std::{collections::HashMap, rc::Rc}; use serde::{Deserialize, Serialize}; use zksync_types::{block::L2BlockExecutionData, L1BatchNumber, L2BlockNumber, Transaction, H256}; use crate::{ + pubdata::PubdataBuilder, storage::{ReadStorage, StoragePtr, StorageSnapshot, StorageView}, - BytecodeCompressionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, - VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, - VmMemoryMetrics, VmTrackingContracts, + BytecodeCompressionResult, FinishedL1Batch, InspectExecutionMode, L1BatchEnv, L2BlockEnv, + PushTransactionResult, SystemEnv, VmExecutionResultAndLogs, VmFactory, VmInterface, + VmInterfaceExt, VmInterfaceHistoryEnabled, VmTrackingContracts, }; fn create_storage_snapshot( @@ -48,6 +49,7 @@ } /// VM dump allowing to re-run the VM on the same inputs. Can be (de)serialized.
+/// Note that a dump is not capable of finishing a batch in terms of VM execution. #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct VmDump { pub l1_batch_env: L1BatchEnv, @@ -98,7 +100,6 @@ impl VmDump { } } } - vm.finish_batch(); vm } } @@ -139,18 +140,30 @@ impl<S: ReadStorage, Vm: VmTrackingContracts> DumpingVm<S, Vm> { } } +impl<S: ReadStorage, Vm: VmTrackingContracts> AsRef<Vm> for DumpingVm<S, Vm> { + fn as_ref(&self) -> &Vm { + &self.inner + } +} + +impl<S: ReadStorage, Vm: VmTrackingContracts> AsMut<Vm> for DumpingVm<S, Vm> { + fn as_mut(&mut self) -> &mut Vm { + &mut self.inner + } +} + impl<S: ReadStorage, Vm: VmTrackingContracts> VmInterface for DumpingVm<S, Vm> { type TracerDispatcher = Vm::TracerDispatcher; - fn push_transaction(&mut self, tx: Transaction) { + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult { self.record_transaction(tx.clone()); - self.inner.push_transaction(tx); + self.inner.push_transaction(tx) } fn inspect( &mut self, dispatcher: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { self.inner.inspect(dispatcher, execution_mode) } @@ -177,12 +190,8 @@ impl<S: ReadStorage, Vm: VmTrackingContracts> VmInterface for DumpingVm<S, Vm> { .inspect_transaction_with_bytecode_compression(tracer, tx, with_compression) } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - self.inner.record_vm_memory_metrics() - } - - fn finish_batch(&mut self) -> FinishedL1Batch { - self.inner.finish_batch() + fn finish_batch(&mut self, pubdata_builder: Rc<dyn PubdataBuilder>) -> FinishedL1Batch { + self.inner.finish_batch(pubdata_builder) } } diff --git a/core/lib/vm_interface/src/utils/mod.rs b/core/lib/vm_interface/src/utils/mod.rs index 80a51c7b144..394df7fc9a1 100644 --- a/core/lib/vm_interface/src/utils/mod.rs +++ b/core/lib/vm_interface/src/utils/mod.rs @@ -2,7 +2,9 @@ pub use self::{ dump::VmDump, - shadow::{DivergenceErrors, DivergenceHandler, ShadowVm}, + shadow::{ + CheckDivergence, DivergenceErrors, DivergenceHandler, ShadowMut, ShadowRef, ShadowVm, + }, }; mod dump; diff --git a/core/lib/vm_interface/src/utils/shadow.rs b/core/lib/vm_interface/src/utils/shadow.rs index 2819e54e9a7..d12d85fa2e3 100644 --- a/core/lib/vm_interface/src/utils/shadow.rs +++ b/core/lib/vm_interface/src/utils/shadow.rs @@ -1,7 +1,9 @@ use std::{ + any, cell::RefCell, collections::{BTreeMap, BTreeSet}, fmt, + rc::Rc, sync::Arc, }; @@ -9,10 +11,11 @@ use zksync_types::{StorageKey, StorageLog, StorageLogWithPreviousValue, Transact use super::dump::{DumpingVm, VmDump}; use crate::{ + pubdata::PubdataBuilder, storage::{ReadStorage, StoragePtr, StorageView}, - BytecodeCompressionResult, CurrentExecutionState, FinishedL1Batch, L1BatchEnv, L2BlockEnv, - SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, - VmInterfaceHistoryEnabled, VmMemoryMetrics, VmTrackingContracts, + BytecodeCompressionResult, CurrentExecutionState, FinishedL1Batch, InspectExecutionMode, + L1BatchEnv, L2BlockEnv, PushTransactionResult, SystemEnv, VmExecutionResultAndLogs, VmFactory, + VmInterface, VmInterfaceHistoryEnabled, VmTrackingContracts, }; /// Handler for VM divergences. @@ -65,6 +68,154 @@ impl<Shadow: VmInterface> VmWithReporting<Shadow> { } } +/// Reference to either the main or shadow VM. +#[derive(Debug)] +pub enum ShadowRef<'a, Main, Shadow> { + /// Reference to the main VM. + Main(&'a Main), + /// Reference to the shadow VM. + Shadow(&'a Shadow), +} + +/// Mutable reference to either the main or shadow VM. +#[derive(Debug)] +pub enum ShadowMut<'a, Main, Shadow> { + /// Reference to the main VM. + Main(&'a mut Main), + /// Reference to the shadow VM. + Shadow(&'a mut Shadow), +} + +/// Type that can check divergence between its instances.
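To illustrate the trait introduced below: any type implementing `CheckDivergence` can compare a main-VM value against its shadow-VM counterpart and aggregate mismatches. A minimal usage sketch (assumes `main_result` and `shadow_result` are `VmExecutionResultAndLogs` values from two VM runs; `handle_divergence` is a hypothetical handler):

```rust
// Compare the two results field by field; `DivergenceErrors` collects all mismatches.
let errors = main_result.check_divergence(&shadow_result);
if let Err(err) = errors.into_result() {
    // A divergence was detected; `err` aggregates every mismatched field.
    handle_divergence(err);
}
```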
+pub trait CheckDivergence { + /// Checks divergences and returns a list of divergence errors, if any. + fn check_divergence(&self, other: &Self) -> DivergenceErrors; +} + +#[derive(Debug)] +struct DivergingEq<T>(T); + +impl<T: PartialEq + fmt::Debug + 'static> CheckDivergence for DivergingEq<T> { + fn check_divergence(&self, other: &Self) -> DivergenceErrors { + let mut errors = DivergenceErrors::new(); + errors.check_match(any::type_name::<T>(), &self.0, &other.0); + errors + } +} + +impl CheckDivergence for CurrentExecutionState { + fn check_divergence(&self, other: &Self) -> DivergenceErrors { + let mut errors = DivergenceErrors::new(); + errors.check_match("final_state.events", &self.events, &other.events); + errors.check_match( + "final_state.user_l2_to_l1_logs", + &self.user_l2_to_l1_logs, + &other.user_l2_to_l1_logs, + ); + errors.check_match( + "final_state.system_logs", + &self.system_logs, + &other.system_logs, + ); + errors.check_match( + "final_state.storage_refunds", + &self.storage_refunds, + &other.storage_refunds, + ); + errors.check_match( + "final_state.pubdata_costs", + &self.pubdata_costs, + &other.pubdata_costs, + ); + errors.check_match( + "final_state.used_contract_hashes", + &self.used_contract_hashes.iter().collect::<BTreeSet<_>>(), + &other.used_contract_hashes.iter().collect::<BTreeSet<_>>(), + ); + + let main_deduplicated_logs = DivergenceErrors::gather_logs(&self.deduplicated_storage_logs); + let shadow_deduplicated_logs = + DivergenceErrors::gather_logs(&other.deduplicated_storage_logs); + errors.check_match( + "deduplicated_storage_logs", + &main_deduplicated_logs, + &shadow_deduplicated_logs, + ); + errors + } +} + +impl CheckDivergence for VmExecutionResultAndLogs { + fn check_divergence(&self, other: &Self) -> DivergenceErrors { + let mut errors = DivergenceErrors::new(); + errors.check_match("result", &self.result, &other.result); + errors.check_match("logs.events", &self.logs.events, &other.logs.events); + errors.check_match( + "logs.system_l2_to_l1_logs", + &self.logs.system_l2_to_l1_logs, + &other.logs.system_l2_to_l1_logs, + ); + errors.check_match( + "logs.user_l2_to_l1_logs", + &self.logs.user_l2_to_l1_logs, + &other.logs.user_l2_to_l1_logs, + ); + let main_logs = UniqueStorageLogs::new(&self.logs.storage_logs); + let shadow_logs = UniqueStorageLogs::new(&other.logs.storage_logs); + errors.check_match("logs.storage_logs", &main_logs, &shadow_logs); + errors.check_match("refunds", &self.refunds, &other.refunds); + errors.check_match( + "statistics.circuit_statistic", + &self.statistics.circuit_statistic, + &other.statistics.circuit_statistic, + ); + errors.check_match( + "statistics.pubdata_published", + &self.statistics.pubdata_published, + &other.statistics.pubdata_published, + ); + errors.check_match( + "statistics.gas_remaining", + &self.statistics.gas_remaining, + &other.statistics.gas_remaining, + ); + errors.check_match( + "statistics.gas_used", + &self.statistics.gas_used, + &other.statistics.gas_used, + ); + errors.check_match( + "statistics.computational_gas_used", + &self.statistics.computational_gas_used, + &other.statistics.computational_gas_used, + ); + errors + } +} + +impl CheckDivergence for FinishedL1Batch { + fn check_divergence(&self, other: &Self) -> DivergenceErrors { + let mut errors = DivergenceErrors::new(); + errors.extend( + self.block_tip_execution_result + .check_divergence(&other.block_tip_execution_result), + ); + errors.extend( + self.final_execution_state + .check_divergence(&other.final_execution_state), + ); + + errors.check_match( + "final_bootloader_memory",
&self.final_bootloader_memory, + &other.final_bootloader_memory, + ); + errors.check_match("pubdata_input", &self.pubdata_input, &other.pubdata_input); + errors.check_match("state_diffs", &self.state_diffs, &other.state_diffs); + errors + } +} + /// Shadowed VM that executes 2 VMs for each operation and compares their outputs. /// /// If a divergence is detected, the VM state is dumped using [a pluggable handler](Self::set_dump_handler()), @@ -105,6 +256,66 @@ where pub fn dump_state(&self) -> VmDump { self.main.dump_state() } + + /// Gets the specified value from both the main and shadow VM, checking whether it matches on both. + pub fn get<R>(&self, name: &str, mut action: impl FnMut(ShadowRef<'_, Main, Shadow>) -> R) -> R + where + R: PartialEq + fmt::Debug + 'static, + { + self.get_custom(name, |r| DivergingEq(action(r))).0 + } + + /// Same as [`Self::get()`], but uses custom divergence checks for the type encapsulated in the [`CheckDivergence`] trait. + pub fn get_custom<R: CheckDivergence>( + &self, + name: &str, + mut action: impl FnMut(ShadowRef<'_, Main, Shadow>) -> R, + ) -> R { + let main_output = action(ShadowRef::Main(self.main.as_ref())); + let borrow = self.shadow.borrow(); + if let Some(shadow) = &*borrow { + let shadow_output = action(ShadowRef::Shadow(&shadow.vm)); + let errors = main_output.check_divergence(&shadow_output); + if let Err(err) = errors.into_result() { + drop(borrow); + self.report_shared(err.context(format!("get({name})"))); + } + } + main_output + } + + /// Gets the specified value from both the main and shadow VM, potentially changing their state + /// and checking whether the returned value matches. + pub fn get_mut<R>( + &mut self, + name: &str, + mut action: impl FnMut(ShadowMut<'_, Main, Shadow>) -> R, + ) -> R + where + R: PartialEq + fmt::Debug + 'static, + { + self.get_custom_mut(name, |r| DivergingEq(action(r))).0 + } + + /// Same as [`Self::get_mut()`], but uses custom divergence checks for the type encapsulated in the [`CheckDivergence`] trait. + pub fn get_custom_mut<R>( + &mut self, + name: &str, + mut action: impl FnMut(ShadowMut<'_, Main, Shadow>) -> R, + ) -> R + where + R: CheckDivergence, + { + let main_output = action(ShadowMut::Main(self.main.as_mut())); + if let Some(shadow) = self.shadow.get_mut() { + let shadow_output = action(ShadowMut::Shadow(&mut shadow.vm)); + let errors = main_output.check_divergence(&shadow_output); + if let Err(err) = errors.into_result() { + self.report_shared(err.context(format!("get_mut({name})"))); + } + } + main_output + } } impl<S, Main, Shadow> ShadowVm<S, Main, Shadow> where S: ReadStorage, { @@ -123,7 +334,7 @@ where Shadow: VmFactory<ShadowS>, { - let main = DumpingVm::new(batch_env.clone(), system_env.clone(), storage.clone()); + let main = DumpingVm::new(batch_env.clone(), system_env.clone(), storage); let shadow = Shadow::new(batch_env.clone(), system_env.clone(), shadow_storage); let shadow = VmWithReporting { vm: shadow, @@ -151,7 +362,6 @@ where } } -/// **Important.** This doesn't properly handle tracers; they are not passed to the shadow VM! impl<S, Main, Shadow> VmInterface for ShadowVm<S, Main, Shadow> where S: ReadStorage, @@ -163,24 +373,41 @@ where <Shadow as VmInterface>::TracerDispatcher, ); - fn push_transaction(&mut self, tx: Transaction) { + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult<'_> { + let main_result = self.main.push_transaction(tx.clone()); + // Extend lifetime to `'static` so that the result isn't mutably borrowed from the main VM.
+ // Unfortunately, there's no way to express that this borrow is actually immutable, which would allow not extending the lifetime unless there's a divergence. + let main_result: PushTransactionResult<'static> = PushTransactionResult { + compressed_bytecodes: main_result.compressed_bytecodes.into_owned().into(), + }; + if let Some(shadow) = self.shadow.get_mut() { - shadow.vm.push_transaction(tx.clone()); + let tx_repr = format!("{tx:?}"); // includes little data, so is OK to call proactively + let shadow_result = shadow.vm.push_transaction(tx); + + let mut errors = DivergenceErrors::new(); + errors.check_match( + "bytecodes", + &main_result.compressed_bytecodes, + &shadow_result.compressed_bytecodes, + ); + if let Err(err) = errors.into_result() { + let ctx = format!("pushing transaction {tx_repr}"); + self.report(err.context(ctx)); + } } - self.main.push_transaction(tx); + main_result } fn inspect( &mut self, (main_tracer, shadow_tracer): &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs { let main_result = self.main.inspect(main_tracer, execution_mode); if let Some(shadow) = self.shadow.get_mut() { let shadow_result = shadow.vm.inspect(shadow_tracer, execution_mode); - let mut errors = DivergenceErrors::new(); - errors.check_results_match(&main_result, &shadow_result); - + let errors = main_result.check_divergence(&shadow_result); if let Err(err) = errors.into_result() { let ctx = format!("executing VM with mode {execution_mode:?}"); self.report(err.context(ctx)); @@ -202,7 +429,8 @@ where tx: Transaction, with_compression: bool, ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { - let tx_hash = tx.hash(); + let tx_repr = format!("{tx:?}"); // includes little data, so is OK to call proactively + let (main_bytecodes_result, main_tx_result) = self.main.inspect_transaction_with_bytecode_compression( main_tracer, @@ -220,11 +448,10 @@ where tx, with_compression, ); - let mut errors = DivergenceErrors::new(); - errors.check_results_match(&main_tx_result, &shadow_result.1); + let errors = main_tx_result.check_divergence(&shadow_result.1); if let Err(err) = errors.into_result() { let ctx = format!( - "inspecting transaction {tx_hash:?}, with_compression={with_compression:?}" + "inspecting transaction {tx_repr}, with_compression={with_compression:?}" ); self.report(err.context(ctx)); } @@ -232,39 +459,11 @@ where (main_bytecodes_result, main_tx_result) } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - self.main.record_vm_memory_metrics() - } - - fn finish_batch(&mut self) -> FinishedL1Batch { - let main_batch = self.main.finish_batch(); + fn finish_batch(&mut self, pubdata_builder: Rc<dyn PubdataBuilder>) -> FinishedL1Batch { + let main_batch = self.main.finish_batch(pubdata_builder.clone()); if let Some(shadow) = self.shadow.get_mut() { - let shadow_batch = shadow.vm.finish_batch(); - let mut errors = DivergenceErrors::new(); - errors.check_results_match( - &main_batch.block_tip_execution_result, - &shadow_batch.block_tip_execution_result, - ); - errors.check_final_states_match( - &main_batch.final_execution_state, - &shadow_batch.final_execution_state, - ); - errors.check_match( - "final_bootloader_memory", - &main_batch.final_bootloader_memory, - &shadow_batch.final_bootloader_memory, - ); - errors.check_match( - "pubdata_input", - &main_batch.pubdata_input, - &shadow_batch.pubdata_input, - ); - errors.check_match( - "state_diffs", - &main_batch.state_diffs, - &shadow_batch.state_diffs, - ); - + let
shadow_batch = shadow.vm.finish_batch(pubdata_builder); + let errors = main_batch.check_divergence(&shadow_batch); if let Err(err) = errors.into_result() { self.report(err); } @@ -305,48 +504,15 @@ impl DivergenceErrors { } } + fn extend(&mut self, from: Self) { + self.divergences.extend(from.divergences); + } + fn context(mut self, context: String) -> Self { self.context = Some(context); self } - fn check_results_match( - &mut self, - main_result: &VmExecutionResultAndLogs, - shadow_result: &VmExecutionResultAndLogs, - ) { - self.check_match("result", &main_result.result, &shadow_result.result); - self.check_match( - "logs.events", - &main_result.logs.events, - &shadow_result.logs.events, - ); - self.check_match( - "logs.system_l2_to_l1_logs", - &main_result.logs.system_l2_to_l1_logs, - &shadow_result.logs.system_l2_to_l1_logs, - ); - self.check_match( - "logs.user_l2_to_l1_logs", - &main_result.logs.user_l2_to_l1_logs, - &shadow_result.logs.user_l2_to_l1_logs, - ); - let main_logs = UniqueStorageLogs::new(&main_result.logs.storage_logs); - let shadow_logs = UniqueStorageLogs::new(&shadow_result.logs.storage_logs); - self.check_match("logs.storage_logs", &main_logs, &shadow_logs); - self.check_match("refunds", &main_result.refunds, &shadow_result.refunds); - self.check_match( - "statistics.circuit_statistic", - &main_result.statistics.circuit_statistic, - &shadow_result.statistics.circuit_statistic, - ); - self.check_match( - "gas_remaining", - &main_result.statistics.gas_remaining, - &shadow_result.statistics.gas_remaining, - ); - } - fn check_match<T: fmt::Debug + PartialEq>(&mut self, context: &str, main: &T, shadow: &T) { if main != shadow { let comparison = pretty_assertions::Comparison::new(main, shadow); @@ -355,47 +521,6 @@ impl DivergenceErrors { } } - fn check_final_states_match( - &mut self, - main: &CurrentExecutionState, - shadow: &CurrentExecutionState, - ) { - self.check_match("final_state.events", &main.events, &shadow.events); - self.check_match( - "final_state.user_l2_to_l1_logs", - &main.user_l2_to_l1_logs, - &shadow.user_l2_to_l1_logs, - ); - self.check_match( - "final_state.system_logs", - &main.system_logs, - &shadow.system_logs, - ); - self.check_match( - "final_state.storage_refunds", - &main.storage_refunds, - &shadow.storage_refunds, - ); - self.check_match( - "final_state.pubdata_costs", - &main.pubdata_costs, - &shadow.pubdata_costs, - ); - self.check_match( - "final_state.used_contract_hashes", - &main.used_contract_hashes.iter().collect::<BTreeSet<_>>(), - &shadow.used_contract_hashes.iter().collect::<BTreeSet<_>>(), - ); - - let main_deduplicated_logs = Self::gather_logs(&main.deduplicated_storage_logs); - let shadow_deduplicated_logs = Self::gather_logs(&shadow.deduplicated_storage_logs); - self.check_match( - "deduplicated_storage_logs", - &main_deduplicated_logs, - &shadow_deduplicated_logs, - ); - } - fn gather_logs(logs: &[StorageLog]) -> BTreeMap<StorageKey, &StorageLog> { logs.iter() .filter(|log| log.is_write()) diff --git a/core/lib/vm_interface/src/vm.rs b/core/lib/vm_interface/src/vm.rs index 90ae76be805..2c25d729e31 100644 --- a/core/lib/vm_interface/src/vm.rs +++ b/core/lib/vm_interface/src/vm.rs @@ -11,26 +11,34 @@ //! Generally speaking, in most cases, the tracer dispatcher is a wrapper around `Vec<Box<dyn VmTracer>>`, //! where `VmTracer` is a trait implemented for a specific VM version.
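Taken together, the `VmInterface` changes below reshape the typical batch-execution loop: `push_transaction` now returns preprocessing results, `inspect` takes `InspectExecutionMode`, and `finish_batch` receives the pubdata builder explicitly. A hedged sketch of the resulting call sequence (the `run_batch` helper and its inputs are illustrative, not part of this PR):

```rust
use std::rc::Rc;

fn run_batch<VM: VmInterface>(
    mut vm: VM,
    txs: Vec<Transaction>,
    pubdata_builder: Rc<dyn PubdataBuilder>,
) -> FinishedL1Batch {
    for tx in txs {
        // The push result may borrow from the VM, so consume it before the next call.
        let compression = vm.push_transaction(tx);
        drop(compression);
        // Execute just the pushed transaction.
        vm.inspect(&mut <VM::TracerDispatcher>::default(), InspectExecutionMode::OneTx);
    }
    // Pubdata handling is now supplied at the end of the batch instead of via `SystemEnv`.
    vm.finish_batch(pubdata_builder)
}
```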
+use std::rc::Rc; + use zksync_types::{Transaction, H256}; use crate::{ - storage::StoragePtr, BytecodeCompressionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, - SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmMemoryMetrics, + pubdata::PubdataBuilder, storage::StoragePtr, BytecodeCompressionResult, FinishedL1Batch, + InspectExecutionMode, L1BatchEnv, L2BlockEnv, PushTransactionResult, SystemEnv, + VmExecutionResultAndLogs, }; pub trait VmInterface { /// Lifetime is used to be able to define `Option<&mut _>` as a dispatcher. type TracerDispatcher: Default; - /// Push transaction to bootloader memory. - fn push_transaction(&mut self, tx: Transaction); + /// Pushes a transaction to bootloader memory for future execution with bytecode compression (if it's supported by the VM). + /// + /// # Return value + /// + /// Returns preprocessing results, such as compressed bytecodes. The results may borrow from the VM state, + /// so you may want to inspect them before further operations on the VM, or clone the necessary parts. + fn push_transaction(&mut self, tx: Transaction) -> PushTransactionResult<'_>; /// Executes the next VM step (either next transaction or bootloader or the whole batch) /// with custom tracers. fn inspect( &mut self, dispatcher: &mut Self::TracerDispatcher, - execution_mode: VmExecutionMode, + execution_mode: InspectExecutionMode, ) -> VmExecutionResultAndLogs; /// Start a new L2 block. @@ -44,18 +52,15 @@ pub trait VmInterface { with_compression: bool, ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs); - /// Record VM memory metrics. - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics; - /// Execute batch till the end and return the result, with final execution state /// and bootloader memory. - fn finish_batch(&mut self) -> FinishedL1Batch; + fn finish_batch(&mut self, pubdata_builder: Rc<dyn PubdataBuilder>) -> FinishedL1Batch; } /// Extension trait for [`VmInterface`] that provides some additional methods. pub trait VmInterfaceExt: VmInterface { /// Executes the next VM step (either next transaction or bootloader or the whole batch). - fn execute(&mut self, execution_mode: VmExecutionMode) -> VmExecutionResultAndLogs { + fn execute(&mut self, execution_mode: InspectExecutionMode) -> VmExecutionResultAndLogs { self.inspect(&mut <Self::TracerDispatcher>::default(), execution_mode) } diff --git a/core/lib/web3_decl/src/namespaces/en.rs b/core/lib/web3_decl/src/namespaces/en.rs index 8a4d2db8c6f..0f1fd9d34b8 100644 --- a/core/lib/web3_decl/src/namespaces/en.rs +++ b/core/lib/web3_decl/src/namespaces/en.rs @@ -28,6 +28,12 @@ pub trait EnNamespace { #[method(name = "consensusGlobalConfig")] async fn consensus_global_config(&self) -> RpcResult<Option<en::ConsensusGlobalConfig>>; + #[method(name = "blockMetadata")] + async fn block_metadata( + &self, + block_number: L2BlockNumber, + ) -> RpcResult<Option<en::BlockMetadata>>; + /// Lists all tokens created at or before the specified `block_number`. /// /// This method is used by EN after snapshot recovery in order to recover token records.
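On the client side, the new `en_blockMetadata` method is reachable through the jsonrpsee-generated client trait. A sketch under the assumption that the generated trait is named `EnNamespaceClient` (the macro's usual naming) and is in scope:

```rust
use zksync_types::L2BlockNumber;

// Hypothetical helper fetching metadata for one L2 block on an external node.
async fn fetch_block_metadata<C: EnNamespaceClient + Sync>(
    client: &C,
    number: L2BlockNumber,
) -> anyhow::Result<()> {
    if let Some(metadata) = client.block_metadata(number).await? {
        // `None` means the block is not yet available on the main node.
        println!("fetched metadata for block {number:?}: {metadata:?}");
    }
    Ok(())
}
```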
diff --git a/core/lib/web3_decl/src/namespaces/eth.rs b/core/lib/web3_decl/src/namespaces/eth.rs index c11ea732bd6..4db58a06c59 100644 --- a/core/lib/web3_decl/src/namespaces/eth.rs +++ b/core/lib/web3_decl/src/namespaces/eth.rs @@ -13,7 +13,8 @@ use zksync_types::{ use crate::{ client::{ForWeb3Network, L2}, types::{ - Block, Bytes, Filter, FilterChanges, Index, Log, SyncState, TransactionReceipt, U256, U64, + Block, Bytes, Filter, FilterChanges, Index, Log, SyncState, TransactionReceipt, U64Number, + U256, U64, }, }; @@ -180,10 +181,13 @@ pub trait EthNamespace { #[method(name = "feeHistory")] async fn fee_history( &self, - block_count: U64, + block_count: U64Number, newest_block: BlockNumber, reward_percentiles: Option<Vec<f32>>, ) -> RpcResult<FeeHistory>; + + #[method(name = "maxPriorityFeePerGas")] + async fn max_priority_fee_per_gas(&self) -> RpcResult<U256>; } #[cfg(feature = "server")] diff --git a/core/lib/web3_decl/src/types.rs b/core/lib/web3_decl/src/types.rs index 9994d21107b..36ee48a54a1 100644 --- a/core/lib/web3_decl/src/types.rs +++ b/core/lib/web3_decl/src/types.rs @@ -16,7 +16,9 @@ use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; pub use zksync_types::{ api::{Block, BlockNumber, Log, TransactionReceipt, TransactionRequest}, ethabi, - web3::{BlockHeader, Bytes, CallRequest, FeeHistory, Index, SyncState, TraceFilter, Work}, + web3::{ + BlockHeader, Bytes, CallRequest, FeeHistory, Index, SyncState, TraceFilter, U64Number, Work, + }, Address, Transaction, H160, H256, H64, U256, U64, }; diff --git a/core/lib/zksync_core_leftovers/src/lib.rs b/core/lib/zksync_core_leftovers/src/lib.rs index 46d70396aba..2bdc8094d14 100644 --- a/core/lib/zksync_core_leftovers/src/lib.rs +++ b/core/lib/zksync_core_leftovers/src/lib.rs @@ -42,9 +42,6 @@ pub enum Component { EthTxManager, /// State keeper. StateKeeper, - /// Produces input for the TEE verifier. - /// The blob is later used as input for TEE verifier. - TeeVerifierInputProducer, /// Component for housekeeping task such as cleaning blobs from GCS, reporting metrics etc. Housekeeper, /// Component for exposing APIs to prover for providing proof generation data and accepting proofs.
@@ -87,9 +84,6 @@ impl FromStr for Components { "tree_api" => Ok(Components(vec![Component::TreeApi])), "state_keeper" => Ok(Components(vec![Component::StateKeeper])), "housekeeper" => Ok(Components(vec![Component::Housekeeper])), - "tee_verifier_input_producer" => { - Ok(Components(vec![Component::TeeVerifierInputProducer])) - } "eth" => Ok(Components(vec![ Component::EthWatcher, Component::EthTxAggregator, diff --git a/core/node/api_server/Cargo.toml b/core/node/api_server/Cargo.toml index b9ffb750b81..bc2fd77ae73 100644 --- a/core/node/api_server/Cargo.toml +++ b/core/node/api_server/Cargo.toml @@ -62,4 +62,5 @@ zksync_node_genesis.workspace = true zksync_node_test_utils.workspace = true assert_matches.workspace = true +const-decoder.workspace = true test-casing.workspace = true diff --git a/core/node/api_server/src/execution_sandbox/execute.rs b/core/node/api_server/src/execution_sandbox/execute.rs index d974f2e9aa1..bdd57462588 100644 --- a/core/node/api_server/src/execution_sandbox/execute.rs +++ b/core/node/api_server/src/execution_sandbox/execute.rs @@ -119,9 +119,16 @@ impl SandboxExecutor { } pub(crate) async fn mock(executor: MockOneshotExecutor) -> Self { + Self::custom_mock(executor, SandboxExecutorOptions::mock().await) + } + + pub(crate) fn custom_mock( + executor: MockOneshotExecutor, + options: SandboxExecutorOptions, + ) -> Self { Self { engine: SandboxExecutorEngine::Mock(executor), - options: SandboxExecutorOptions::mock().await, + options, storage_caches: None, } } @@ -175,7 +182,7 @@ impl SandboxExecutor { let initialization_stage = SANDBOX_METRICS.sandbox[&SandboxStage::Initialization].start(); let resolve_started_at = Instant::now(); let resolve_time = resolve_started_at.elapsed(); - let resolved_block_info = block_args.inner.resolve(&mut connection).await?; + let resolved_block_info = &block_args.resolved; // We don't want to emit too many logs. if resolve_time > Duration::from_millis(10) { tracing::debug!("Resolved block numbers (took {resolve_time:?})"); @@ -185,7 +192,7 @@ impl SandboxExecutor { SandboxAction::Execution { fee_input, tx } => { self.options .eth_call - .to_execute_env(&mut connection, &resolved_block_info, *fee_input, tx) + .to_execute_env(&mut connection, resolved_block_info, *fee_input, tx) .await? } &SandboxAction::Call { @@ -197,7 +204,7 @@ impl SandboxExecutor { .eth_call .to_call_env( &mut connection, - &resolved_block_info, + resolved_block_info, fee_input, enforced_base_fee, ) @@ -210,7 +217,7 @@ impl SandboxExecutor { } => { self.options .estimate_gas - .to_env(&mut connection, &resolved_block_info, fee_input, base_fee) + .to_env(&mut connection, resolved_block_info, fee_input, base_fee) .await? } }; diff --git a/core/node/api_server/src/execution_sandbox/mod.rs b/core/node/api_server/src/execution_sandbox/mod.rs index 36f10b8e9b0..b560d161ab5 100644 --- a/core/node/api_server/src/execution_sandbox/mod.rs +++ b/core/node/api_server/src/execution_sandbox/mod.rs @@ -10,7 +10,7 @@ use zksync_multivm::utils::get_eth_call_gas_limit; use zksync_types::{ api, fee_model::BatchFeeInput, L1BatchNumber, L2BlockNumber, ProtocolVersionId, U256, }; -use zksync_vm_executor::oneshot::BlockInfo; +use zksync_vm_executor::oneshot::{BlockInfo, ResolvedBlockInfo}; use self::vm_metrics::SandboxStage; pub(super) use self::{ @@ -285,21 +285,32 @@ pub enum BlockArgsError { } /// Information about a block provided to VM. 
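The point of the change to the struct below: `BlockArgs` now resolves block info once at construction and caches the `ResolvedBlockInfo`, so protocol-version and EVM-emulator lookups no longer hit Postgres. A sketch of the intended access pattern (hypothetical surrounding code; `connection` assumed in scope):

```rust
// Resolve once when the request arrives...
let block_args = BlockArgs::pending(&mut connection).await?;
// ...then read the cached info with no further DB round trips.
let version = block_args.protocol_version();
let with_evm_emulator = block_args.use_evm_emulator();
```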
-#[derive(Debug, Clone, Copy)] +#[derive(Debug, Clone)] pub(crate) struct BlockArgs { inner: BlockInfo, + resolved: ResolvedBlockInfo, block_id: api::BlockId, } impl BlockArgs { pub async fn pending(connection: &mut Connection<'_, Core>) -> anyhow::Result<Self> { let inner = BlockInfo::pending(connection).await?; + let resolved = inner.resolve(connection).await?; Ok(Self { inner, + resolved, block_id: api::BlockId::Number(api::BlockNumber::Pending), }) } + pub fn protocol_version(&self) -> ProtocolVersionId { + self.resolved.protocol_version() + } + + pub fn use_evm_emulator(&self) -> bool { + self.resolved.use_evm_emulator() + } + /// Loads block information from DB. pub async fn new( connection: &mut Connection<'_, Core>, @@ -326,8 +337,10 @@ impl BlockArgs { return Err(BlockArgsError::Missing); }; + let inner = BlockInfo::for_existing_block(connection, block_number).await?; Ok(Self { - inner: BlockInfo::for_existing_block(connection, block_number).await?, + inner, + resolved: inner.resolve(connection).await?, block_id, }) } diff --git a/core/node/api_server/src/execution_sandbox/tests.rs b/core/node/api_server/src/execution_sandbox/tests.rs index 38115b5251f..e342f2d73de 100644 --- a/core/node/api_server/src/execution_sandbox/tests.rs +++ b/core/node/api_server/src/execution_sandbox/tests.rs @@ -5,15 +5,13 @@ use std::collections::HashMap; use assert_matches::assert_matches; use test_casing::test_casing; use zksync_dal::ConnectionPool; -use zksync_multivm::{ - interface::{ExecutionResult, Halt, VmRevertReason}, - utils::derive_base_fee_and_gas_per_pubdata, -}; +use zksync_multivm::{interface::ExecutionResult, utils::derive_base_fee_and_gas_per_pubdata}; use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; -use zksync_node_test_utils::{create_l2_block, prepare_recovery_snapshot}; +use zksync_node_test_utils::{create_l1_batch, create_l2_block, prepare_recovery_snapshot}; use zksync_state::PostgresStorageCaches; use zksync_types::{ api::state_override::{OverrideAccount, StateOverride}, + fee::Fee, fee_model::BatchFeeInput, K256PrivateKey, ProtocolVersionId, Transaction, U256, }; @@ -95,17 +93,6 @@ async fn creating_block_args_after_snapshot_recovery() { let snapshot_recovery = prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), L2BlockNumber(42), &[]).await; - let pending_block_args = BlockArgs::pending(&mut storage).await.unwrap(); - assert_eq!( - pending_block_args.block_id, - api::BlockId::Number(api::BlockNumber::Pending) - ); - assert_eq!( - pending_block_args.resolved_block_number(), - snapshot_recovery.l2_block_number + 1 - ); - assert!(pending_block_args.is_pending()); - let start_info = BlockStartInfo::new(&mut storage, Duration::MAX) .await .unwrap(); @@ -124,6 +111,35 @@ .unwrap_err(); assert_matches!(err, BlockArgsError::Missing); + // Ensure there is a batch in the storage.
+ let l2_block = create_l2_block(snapshot_recovery.l2_block_number.0 + 1); + storage + .blocks_dal() + .insert_l2_block(&l2_block) + .await + .unwrap(); + storage + .blocks_dal() + .insert_mock_l1_batch(&create_l1_batch(snapshot_recovery.l1_batch_number.0 + 1)) + .await + .unwrap(); + storage + .blocks_dal() + .mark_l2_blocks_as_executed_in_l1_batch(snapshot_recovery.l1_batch_number + 1) + .await + .unwrap(); + + let pending_block_args = BlockArgs::pending(&mut storage).await.unwrap(); + assert_eq!( + pending_block_args.block_id, + api::BlockId::Number(api::BlockNumber::Pending) + ); + assert_eq!( + pending_block_args.resolved_block_number(), + snapshot_recovery.l2_block_number + 2 + ); + assert!(pending_block_args.is_pending()); + let pruned_blocks = [ api::BlockNumber::Earliest, 0.into(), @@ -149,13 +165,6 @@ async fn creating_block_args_after_snapshot_recovery() { assert_matches!(err, BlockArgsError::Missing); } - let l2_block = create_l2_block(snapshot_recovery.l2_block_number.0 + 1); - storage - .blocks_dal() - .insert_l2_block(&l2_block) - .await - .unwrap(); - let latest_block_args = BlockArgs::new(&mut storage, latest_block, &start_info) .await .unwrap(); @@ -213,11 +222,16 @@ async fn test_instantiating_vm(connection: Connection<'static, Core>, block_args let fee_input = BatchFeeInput::l1_pegged(55, 555); let (base_fee, gas_per_pubdata) = derive_base_fee_and_gas_per_pubdata(fee_input, ProtocolVersionId::latest().into()); - let tx = Transaction::from(K256PrivateKey::random().create_transfer( + let tx = K256PrivateKey::random().create_transfer_with_fee( 0.into(), - base_fee, - gas_per_pubdata, - )); + Fee { + gas_limit: 200_000.into(), + max_fee_per_gas: base_fee.into(), + max_priority_fee_per_gas: 0.into(), + gas_per_pubdata_limit: gas_per_pubdata.into(), + }, + ); + let tx = Transaction::from(tx); let (limiter, _) = VmConcurrencyLimiter::new(1); let vm_permit = limiter.acquire().await.unwrap(); @@ -256,7 +270,15 @@ async fn validating_transaction(set_balance: bool) { let fee_input = BatchFeeInput::l1_pegged(55, 555); let (base_fee, gas_per_pubdata) = derive_base_fee_and_gas_per_pubdata(fee_input, ProtocolVersionId::latest().into()); - let tx = K256PrivateKey::random().create_transfer(0.into(), base_fee, gas_per_pubdata); + let tx = K256PrivateKey::random().create_transfer_with_fee( + 0.into(), + Fee { + gas_limit: 200_000.into(), + max_fee_per_gas: base_fee.into(), + max_priority_fee_per_gas: 0.into(), + gas_per_pubdata_limit: gas_per_pubdata.into(), + }, + ); let (limiter, _) = VmConcurrencyLimiter::new(1); let vm_permit = limiter.acquire().await.unwrap(); @@ -285,18 +307,6 @@ async fn validating_transaction(set_balance: bool) { if set_balance { assert_matches!(result, ExecutionResult::Success { .. }); } else { - // FIXME: maybe provide a better way to encode it? - let expected_reason = VmRevertReason::Unknown { - function_selector: vec![3, 235, 139, 84], - data: vec![ - 3, 235, 139, 84, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 6, 157, 185, 192, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - }; - assert_matches!( - result, - ExecutionResult::Halt { reason: Halt::ValidationFailed(reason) } if reason == expected_reason - ); + assert_matches!(result, ExecutionResult::Halt { .. 
}); } } diff --git a/core/node/api_server/src/testonly.rs b/core/node/api_server/src/testonly.rs index 5ee9cfb8ef1..c2f900484ba 100644 --- a/core/node/api_server/src/testonly.rs +++ b/core/node/api_server/src/testonly.rs @@ -1,53 +1,68 @@ //! Test utils shared among multiple modules. -use std::iter; +use std::{collections::HashMap, iter}; +use const_decoder::Decoder; use zk_evm_1_5_0::zkevm_opcode_defs::decoding::{EncodingModeProduction, VmEncodingMode}; use zksync_contracts::{ - get_loadnext_contract, load_contract, read_bytecode, + eth_contract, get_loadnext_contract, load_contract, read_bytecode, test_contracts::LoadnextContractExecutionParams, }; +use zksync_dal::{Connection, Core, CoreDal}; +use zksync_multivm::utils::derive_base_fee_and_gas_per_pubdata; +use zksync_system_constants::L2_BASE_TOKEN_ADDRESS; use zksync_types::{ - ethabi::Token, fee::Fee, l2::L2Tx, transaction_request::PaymasterParams, Address, - K256PrivateKey, L2ChainId, Nonce, H256, U256, + api::state_override::{Bytecode, OverrideAccount, OverrideState, StateOverride}, + ethabi, + ethabi::Token, + fee::Fee, + fee_model::FeeParams, + get_code_key, get_known_code_key, + l2::L2Tx, + transaction_request::{CallRequest, PaymasterParams}, + utils::storage_key_for_eth_balance, + AccountTreeId, Address, K256PrivateKey, L2BlockNumber, L2ChainId, Nonce, ProtocolVersionId, + StorageKey, StorageLog, H256, U256, }; +use zksync_utils::{address_to_u256, u256_to_h256}; -pub(crate) const LOAD_TEST_ADDRESS: Address = Address::repeat_byte(1); +pub(crate) const RAW_EVM_BYTECODE: &[u8] = &const_decoder::decode!( + Decoder::Hex, + b"00000000000000000000000000000000000000000000000000000000000001266080604052348015\ + 600e575f80fd5b50600436106030575f3560e01c8063816898ff146034578063fb5343f314604c57\ + 5b5f80fd5b604a60048036038101906046919060a6565b6066565b005b6052606f565b604051605d\ + 919060d9565b60405180910390f35b805f8190555050565b5f5481565b5f80fd5b5f819050919050\ + 565b6088816078565b81146091575f80fd5b50565b5f8135905060a0816081565b92915050565b5f\ + 6020828403121560b85760b76074565b5b5f60c3848285016094565b91505092915050565b60d381\ + 6078565b82525050565b5f60208201905060ea5f83018460cc565b9291505056fea2646970667358\ + 221220caca1247066da378f2ec77c310f2ae51576272367b4fa11cc4350af4e9ce4d0964736f6c63\ + 4300081a00330000000000000000000000000000000000000000000000000000" +); +pub(crate) const PROCESSED_EVM_BYTECODE: &[u8] = &const_decoder::decode!( + Decoder::Hex, + b"6080604052348015600e575f80fd5b50600436106030575f3560e01c8063816898ff146034578063\ + fb5343f314604c575b5f80fd5b604a60048036038101906046919060a6565b6066565b005b605260\ + 6f565b604051605d919060d9565b60405180910390f35b805f8190555050565b5f5481565b5f80fd\ + 5b5f819050919050565b6088816078565b81146091575f80fd5b50565b5f8135905060a081608156\ + 5b92915050565b5f6020828403121560b85760b76074565b5b5f60c3848285016094565b91505092\ + 915050565b60d3816078565b82525050565b5f60208201905060ea5f83018460cc565b9291505056\ + fea2646970667358221220caca1247066da378f2ec77c310f2ae51576272367b4fa11cc4350af4e9\ + ce4d0964736f6c634300081a0033" +); const EXPENSIVE_CONTRACT_PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/expensive/expensive.sol/Expensive.json"; -pub(crate) const EXPENSIVE_CONTRACT_ADDRESS: Address = Address::repeat_byte(2); - const PRECOMPILES_CONTRACT_PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/precompiles/precompiles.sol/Precompiles.json"; -pub(crate) const PRECOMPILES_CONTRACT_ADDRESS: Address = Address::repeat_byte(3); - const COUNTER_CONTRACT_PATH: &str = 
"etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json"; -pub(crate) const COUNTER_CONTRACT_ADDRESS: Address = Address::repeat_byte(4); - const INFINITE_LOOP_CONTRACT_PATH: &str = "etc/contracts-test-data/artifacts-zk/contracts/infinite/infinite.sol/InfiniteLoop.json"; -pub(crate) const INFINITE_LOOP_CONTRACT_ADDRESS: Address = Address::repeat_byte(5); - -pub(crate) fn read_expensive_contract_bytecode() -> Vec { - read_bytecode(EXPENSIVE_CONTRACT_PATH) -} - -pub(crate) fn read_precompiles_contract_bytecode() -> Vec { - read_bytecode(PRECOMPILES_CONTRACT_PATH) -} - -pub(crate) fn read_counter_contract_bytecode() -> Vec { - read_bytecode(COUNTER_CONTRACT_PATH) -} - -pub(crate) fn read_infinite_loop_contract_bytecode() -> Vec { - read_bytecode(INFINITE_LOOP_CONTRACT_PATH) -} +const MULTICALL3_CONTRACT_PATH: &str = + "contracts/l2-contracts/artifacts-zk/contracts/dev-contracts/Multicall3.sol/Multicall3.json"; /// Inflates the provided bytecode by appending the specified amount of NOP instructions at the end. -pub(crate) fn inflate_bytecode(bytecode: &mut Vec, nop_count: usize) { +fn inflate_bytecode(bytecode: &mut Vec, nop_count: usize) { bytecode.extend( iter::repeat(EncodingModeProduction::nop_encoding().to_be_bytes()) .take(nop_count) @@ -56,25 +71,266 @@ pub(crate) fn inflate_bytecode(bytecode: &mut Vec, nop_count: usize) { } fn default_fee() -> Fee { + let fee_input = FeeParams::sensible_v1_default().scale(1.0, 1.0); + let (max_fee_per_gas, gas_per_pubdata_limit) = + derive_base_fee_and_gas_per_pubdata(fee_input, ProtocolVersionId::default().into()); Fee { - gas_limit: 200_000.into(), - max_fee_per_gas: 55.into(), + gas_limit: 10_000_000.into(), + max_fee_per_gas: max_fee_per_gas.into(), max_priority_fee_per_gas: 0_u64.into(), - gas_per_pubdata_limit: 555.into(), + gas_per_pubdata_limit: gas_per_pubdata_limit.into(), } } +#[derive(Debug, Default)] +pub(crate) struct StateBuilder { + inner: HashMap, +} + +impl StateBuilder { + pub(crate) const LOAD_TEST_ADDRESS: Address = Address::repeat_byte(1); + pub(crate) const EXPENSIVE_CONTRACT_ADDRESS: Address = Address::repeat_byte(2); + pub(crate) const PRECOMPILES_CONTRACT_ADDRESS: Address = Address::repeat_byte(3); + const COUNTER_CONTRACT_ADDRESS: Address = Address::repeat_byte(4); + const INFINITE_LOOP_CONTRACT_ADDRESS: Address = Address::repeat_byte(5); + const MULTICALL3_ADDRESS: Address = Address::repeat_byte(6); + + pub fn with_contract(mut self, address: Address, bytecode: Vec) -> Self { + self.inner.insert( + address, + OverrideAccount { + code: Some(Bytecode::new(bytecode).unwrap()), + ..OverrideAccount::default() + }, + ); + self + } + + pub fn inflate_bytecode(mut self, address: Address, nop_count: usize) -> Self { + let account_override = self.inner.get_mut(&address).expect("no contract"); + let bytecode = account_override.code.take().expect("no code override"); + let mut bytecode = bytecode.into_bytes(); + inflate_bytecode(&mut bytecode, nop_count); + account_override.code = Some(Bytecode::new(bytecode).unwrap()); + self + } + + pub fn with_load_test_contract(mut self) -> Self { + // Set the array length in the load test contract to 100, so that reads don't fail. 
+ let state = HashMap::from([(H256::zero(), H256::from_low_u64_be(100))]); + self.inner.insert( + Self::LOAD_TEST_ADDRESS, + OverrideAccount { + code: Some(Bytecode::new(get_loadnext_contract().bytecode).unwrap()), + state: Some(OverrideState::State(state)), + ..OverrideAccount::default() + }, + ); + self + } + + pub fn with_balance(mut self, address: Address, balance: U256) -> Self { + self.inner.entry(address).or_default().balance = Some(balance); + self + } + + pub fn with_expensive_contract(self) -> Self { + self.with_contract( + Self::EXPENSIVE_CONTRACT_ADDRESS, + read_bytecode(EXPENSIVE_CONTRACT_PATH), + ) + } + + pub fn with_precompiles_contract(self) -> Self { + self.with_contract( + Self::PRECOMPILES_CONTRACT_ADDRESS, + read_bytecode(PRECOMPILES_CONTRACT_PATH), + ) + } + + pub fn with_counter_contract(self, initial_value: u64) -> Self { + let mut this = self.with_contract( + Self::COUNTER_CONTRACT_ADDRESS, + read_bytecode(COUNTER_CONTRACT_PATH), + ); + if initial_value != 0 { + let state = HashMap::from([(H256::zero(), H256::from_low_u64_be(initial_value))]); + this.inner + .get_mut(&Self::COUNTER_CONTRACT_ADDRESS) + .unwrap() + .state = Some(OverrideState::State(state)); + } + this + } + + pub fn with_infinite_loop_contract(self) -> Self { + self.with_contract( + Self::INFINITE_LOOP_CONTRACT_ADDRESS, + read_bytecode(INFINITE_LOOP_CONTRACT_PATH), + ) + } + + pub fn with_multicall3_contract(self) -> Self { + self.with_contract( + Self::MULTICALL3_ADDRESS, + read_bytecode(MULTICALL3_CONTRACT_PATH), + ) + } + + pub fn build(self) -> StateOverride { + StateOverride::new(self.inner) + } + + /// Applies these state overrides to Postgres storage, which is assumed to be empty (other than genesis data). + pub async fn apply(self, connection: &mut Connection<'_, Core>) { + let mut storage_logs = vec![]; + let mut factory_deps = HashMap::new(); + for (address, account) in self.inner { + if let Some(balance) = account.balance { + let balance_key = storage_key_for_eth_balance(&address); + storage_logs.push(StorageLog::new_write_log( + balance_key, + u256_to_h256(balance), + )); + } + if let Some(code) = account.code { + let code_hash = code.hash(); + storage_logs.extend([ + StorageLog::new_write_log(get_code_key(&address), code_hash), + StorageLog::new_write_log( + get_known_code_key(&code_hash), + H256::from_low_u64_be(1), + ), + ]); + factory_deps.insert(code_hash, code.into_bytes()); + } + if let Some(state) = account.state { + let state_slots = match state { + OverrideState::State(slots) | OverrideState::StateDiff(slots) => slots, + }; + let state_logs = state_slots.into_iter().map(|(key, value)| { + let key = StorageKey::new(AccountTreeId::new(address), key); + StorageLog::new_write_log(key, value) + }); + storage_logs.extend(state_logs); + } + } + + connection + .storage_logs_dal() + .append_storage_logs(L2BlockNumber(0), &storage_logs) + .await + .unwrap(); + connection + .factory_deps_dal() + .insert_factory_deps(L2BlockNumber(0), &factory_deps) + .await + .unwrap(); + } +} + +#[derive(Debug)] +pub(crate) struct Call3Value { + target: Address, + allow_failure: bool, + value: U256, + calldata: Vec<u8>, +} + +impl Call3Value { + pub fn allow_failure(mut self) -> Self { + self.allow_failure = true; + self + } + + fn to_token(&self) -> Token { + Token::Tuple(vec![ + Token::Address(self.target), + Token::Bool(self.allow_failure), + Token::Uint(self.value), + Token::Bytes(self.calldata.clone()), + ]) + } +} + +impl From<CallRequest> for Call3Value { + fn from(req: CallRequest) -> Self { + Self { + target:
req.to.unwrap(), + allow_failure: false, + value: req.value.unwrap_or_default(), + calldata: req.data.unwrap_or_default().0, + } + } +} + +impl From<L2Tx> for Call3Value { + fn from(tx: L2Tx) -> Self { + Self { + target: tx.recipient_account().unwrap(), + allow_failure: false, + value: tx.execute.value, + calldata: tx.execute.calldata, + } + } +} + +#[derive(Debug)] +pub(crate) struct Call3Result { + pub success: bool, + pub return_data: Vec<u8>, +} + +impl Call3Result { + pub fn parse(raw: &[u8]) -> Vec<Self> { + let mut tokens = load_contract(MULTICALL3_CONTRACT_PATH) + .function("aggregate3Value") + .expect("no `aggregate3Value` function") + .decode_output(raw) + .expect("failed decoding `aggregate3Value` output"); + assert_eq!(tokens.len(), 1, "Invalid output length"); + let Token::Array(results) = tokens.pop().unwrap() else { + panic!("Invalid token type, expected an array"); + }; + results.into_iter().map(Self::parse_single).collect() + } + + fn parse_single(token: Token) -> Self { + let Token::Tuple(mut tokens) = token else { + panic!("Invalid token type, expected a tuple"); + }; + assert_eq!(tokens.len(), 2); + let return_data = tokens.pop().unwrap().into_bytes().expect("expected bytes"); + let success = tokens.pop().unwrap().into_bool().expect("expected bool"); + Self { + success, + return_data, + } + } + + pub fn as_u256(&self) -> U256 { + decode_u256_output(&self.return_data) + } +} + +pub(crate) fn decode_u256_output(raw_output: &[u8]) -> U256 { + let mut tokens = ethabi::decode_whole(&[ethabi::ParamType::Uint(256)], raw_output) + .expect("unexpected return data"); + assert_eq!(tokens.len(), 1); + tokens.pop().unwrap().into_uint().unwrap() +} + pub(crate) trait TestAccount { - fn create_transfer(&self, value: U256, fee_per_gas: u64, gas_per_pubdata: u64) -> L2Tx { + fn create_transfer(&self, value: U256) -> L2Tx { let fee = Fee { gas_limit: 200_000.into(), - max_fee_per_gas: fee_per_gas.into(), - max_priority_fee_per_gas: 0_u64.into(), - gas_per_pubdata_limit: gas_per_pubdata.into(), + ..default_fee() }; self.create_transfer_with_fee(value, fee) } + fn query_base_token_balance(&self) -> CallRequest; + fn create_transfer_with_fee(&self, value: U256, fee: Fee) -> L2Tx; fn create_load_test_tx(&self, params: LoadnextContractExecutionParams) -> L2Tx; @@ -85,9 +341,13 @@ pub(crate) trait TestAccount { fn create_code_oracle_tx(&self, bytecode_hash: H256, expected_keccak_hash: H256) -> L2Tx; - fn create_reverting_counter_tx(&self) -> L2Tx; + fn create_counter_tx(&self, increment: U256, revert: bool) -> L2Tx; + + fn query_counter_value(&self) -> CallRequest; fn create_infinite_loop_tx(&self) -> L2Tx; + + fn multicall_with_value(&self, value: U256, calls: &[Call3Value]) -> CallRequest; } impl TestAccount for K256PrivateKey { @@ -106,9 +366,23 @@ impl TestAccount for K256PrivateKey { .unwrap() } + fn query_base_token_balance(&self) -> CallRequest { + let data = eth_contract() + .function("balanceOf") + .expect("No `balanceOf` function in contract") + .encode_input(&[Token::Uint(address_to_u256(&self.address()))]) + .expect("failed encoding `balanceOf` function"); + CallRequest { + from: Some(self.address()), + to: Some(L2_BASE_TOKEN_ADDRESS), + data: Some(data.into()), + ..CallRequest::default() + } + } + fn create_load_test_tx(&self, params: LoadnextContractExecutionParams) -> L2Tx { L2Tx::new_signed( - Some(LOAD_TEST_ADDRESS), + Some(StateBuilder::LOAD_TEST_ADDRESS), params.to_bytes(), Nonce(0), default_fee(), @@ -132,7 +406,7 @@
@@ -132,7 +406,7 @@ impl TestAccount for K256PrivateKey {
             .encode_input(&[Token::Uint(write_count.into())])
             .expect("failed encoding `expensive` function");
         L2Tx::new_signed(
-            Some(EXPENSIVE_CONTRACT_ADDRESS),
+            Some(StateBuilder::EXPENSIVE_CONTRACT_ADDRESS),
             calldata,
             Nonce(0),
             default_fee(),
@@ -152,7 +426,7 @@ impl TestAccount for K256PrivateKey {
             .encode_input(&[])
             .expect("failed encoding `cleanUp` input");
         L2Tx::new_signed(
-            Some(EXPENSIVE_CONTRACT_ADDRESS),
+            Some(StateBuilder::EXPENSIVE_CONTRACT_ADDRESS),
             calldata,
             Nonce(0),
             default_fee(),
@@ -175,7 +449,7 @@ impl TestAccount for K256PrivateKey {
             ])
             .expect("failed encoding `callCodeOracle` input");
         L2Tx::new_signed(
-            Some(PRECOMPILES_CONTRACT_ADDRESS),
+            Some(StateBuilder::PRECOMPILES_CONTRACT_ADDRESS),
             calldata,
             Nonce(0),
             default_fee(),
@@ -188,14 +462,14 @@ impl TestAccount for K256PrivateKey {
         .unwrap()
     }
 
-    fn create_reverting_counter_tx(&self) -> L2Tx {
+    fn create_counter_tx(&self, increment: U256, revert: bool) -> L2Tx {
         let calldata = load_contract(COUNTER_CONTRACT_PATH)
             .function("incrementWithRevert")
             .expect("no `incrementWithRevert` function")
-            .encode_input(&[Token::Uint(1.into()), Token::Bool(true)])
+            .encode_input(&[Token::Uint(increment), Token::Bool(revert)])
             .expect("failed encoding `incrementWithRevert` input");
         L2Tx::new_signed(
-            Some(COUNTER_CONTRACT_ADDRESS),
+            Some(StateBuilder::COUNTER_CONTRACT_ADDRESS),
             calldata,
             Nonce(0),
             default_fee(),
@@ -208,6 +482,20 @@ impl TestAccount for K256PrivateKey {
         .unwrap()
     }
 
+    fn query_counter_value(&self) -> CallRequest {
+        let calldata = load_contract(COUNTER_CONTRACT_PATH)
+            .function("get")
+            .expect("no `get` function")
+            .encode_input(&[])
+            .expect("failed encoding `get` input");
+        CallRequest {
+            from: Some(self.address()),
+            to: Some(StateBuilder::COUNTER_CONTRACT_ADDRESS),
+            data: Some(calldata.into()),
+            ..CallRequest::default()
+        }
+    }
+
     fn create_infinite_loop_tx(&self) -> L2Tx {
         let calldata = load_contract(INFINITE_LOOP_CONTRACT_PATH)
             .function("infiniteLoop")
@@ -215,7 +503,7 @@ impl TestAccount for K256PrivateKey {
             .encode_input(&[])
             .expect("failed encoding `infiniteLoop` input");
         L2Tx::new_signed(
-            Some(INFINITE_LOOP_CONTRACT_ADDRESS),
+            Some(StateBuilder::INFINITE_LOOP_CONTRACT_ADDRESS),
             calldata,
             Nonce(0),
             default_fee(),
@@ -227,4 +515,20 @@ impl TestAccount for K256PrivateKey {
         )
         .unwrap()
     }
+
+    fn multicall_with_value(&self, value: U256, calls: &[Call3Value]) -> CallRequest {
+        let call_tokens = calls.iter().map(Call3Value::to_token).collect();
+        let calldata = load_contract(MULTICALL3_CONTRACT_PATH)
+            .function("aggregate3Value")
+            .expect("no `aggregate3Value` function")
+            .encode_input(&[Token::Array(call_tokens)])
+            .expect("failed encoding `aggregate3Value` input");
+        CallRequest {
+            from: Some(self.address()),
+            to: Some(StateBuilder::MULTICALL3_ADDRESS),
+            value: Some(value),
+            data: Some(calldata.into()),
+            ..CallRequest::default()
+        }
+    }
 }
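// Illustrative sketch (hypothetical usage, not patch content): a round trip through the
// multicall helpers defined above — bundle a transfer plus a balance query into one call
// request, then decode the per-call results. `raw_output` stands in for whatever
// `TxSender::eth_call` returned; the helper names are real, the surrounding bindings are assumed.
//
//     let multicall = alice.multicall_with_value(
//         transfer_value,
//         &[transfer.into(), alice.query_base_token_balance().into()],
//     );
//     // ...execute `multicall` via `eth_call`, obtaining `raw_output`...
//     let results = Call3Result::parse(&raw_output);
//     assert!(results.iter().all(|result| result.success));
//     let balance_after_transfer = results[1].as_u256();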
diff --git a/core/node/api_server/src/tx_sender/gas_estimation.rs b/core/node/api_server/src/tx_sender/gas_estimation.rs
index f5e42875a3d..b4a05a0756b 100644
--- a/core/node/api_server/src/tx_sender/gas_estimation.rs
+++ b/core/node/api_server/src/tx_sender/gas_estimation.rs
@@ -44,13 +44,14 @@ impl TxSender {
     pub async fn get_txs_fee_in_wei(
         &self,
         tx: Transaction,
+        block_args: BlockArgs,
         estimated_fee_scale_factor: f64,
         acceptable_overestimation: u64,
         state_override: Option<StateOverride>,
         kind: BinarySearchKind,
     ) -> Result<Fee, SubmitTxError> {
         let estimation_started_at = Instant::now();
-        let mut estimator = GasEstimator::new(self, tx, state_override).await?;
+        let mut estimator = GasEstimator::new(self, tx, block_args, state_override).await?;
         estimator.adjust_transaction_fee();
         let initial_estimate = estimator.initialize().await?;
@@ -130,10 +131,7 @@ impl TxSender {
         if let Some(pivot) = initial_pivot {
             let iteration_started_at = Instant::now();
-            let (result, _) = estimator
-                .step(pivot)
-                .await
-                .context("estimate_gas step failed")?;
+            let (result, _) = estimator.step(pivot).await?;
             Self::adjust_search_bounds(&mut lower_bound, &mut upper_bound, pivot, &result);
             tracing::trace!(
@@ -150,10 +148,7 @@ impl TxSender {
             // or normal execution errors, so we just hope that increasing the
             // gas limit will make the transaction successful
             let iteration_started_at = Instant::now();
-            let (result, _) = estimator
-                .step(mid)
-                .await
-                .context("estimate_gas step failed")?;
+            let (result, _) = estimator.step(mid).await?;
             Self::adjust_search_bounds(&mut lower_bound, &mut upper_bound, mid, &result);
             tracing::trace!(
@@ -205,7 +200,11 @@ impl TxSender {
                 tx.initiator_account(),
                 tx.execute.value
             );
-            return Err(SubmitTxError::InsufficientFundsForTransfer);
+            return Err(SubmitTxError::NotEnoughBalanceForFeeValue(
+                balance,
+                0.into(),
+                tx.execute.value,
+            ));
         }
     }
     Ok(())
@@ -309,16 +308,10 @@ impl<'a> GasEstimator<'a> {
     pub(super) async fn new(
         sender: &'a TxSender,
         mut transaction: Transaction,
+        block_args: BlockArgs,
         state_override: Option<StateOverride>,
     ) -> Result<Self, SubmitTxError> {
-        let mut connection = sender.acquire_replica_connection().await?;
-        let block_args = BlockArgs::pending(&mut connection).await?;
-        let protocol_version = connection
-            .blocks_dal()
-            .pending_protocol_version()
-            .await
-            .context("failed getting pending protocol version")?;
-        drop(connection);
+        let protocol_version = block_args.protocol_version();
         let max_gas_limit = get_max_batch_gas_limit(protocol_version.into());
         let fee_input = adjust_pubdata_price_for_tx(
@@ -398,10 +391,7 @@ impl<'a> GasEstimator<'a> {
         // For L2 transactions, we estimate the amount of gas needed to cover for the pubdata by creating a transaction with infinite gas limit,
         // and getting how much pubdata it used.
-        let (result, _) = self
-            .unadjusted_step(self.max_gas_limit)
-            .await
-            .context("estimate_gas step failed")?;
+        let (result, _) = self.unadjusted_step(self.max_gas_limit).await?;
         // If the transaction has failed with such a large gas limit, we return an API error here right away,
         // since the inferred gas bounds would be unreliable in this case.
         result.check_api_call_result()?;
@@ -435,7 +425,7 @@ impl<'a> GasEstimator<'a> {
     async fn step(
         &self,
         tx_gas_limit: u64,
-    ) -> anyhow::Result<(VmExecutionResultAndLogs, TransactionExecutionMetrics)> {
+    ) -> Result<(VmExecutionResultAndLogs, TransactionExecutionMetrics), SubmitTxError> {
         let gas_limit_with_overhead = tx_gas_limit + self.tx_overhead(tx_gas_limit);
         // We need to ensure that we never use a gas limit that is higher than the maximum allowed
         let forced_gas_limit =
@@ -446,13 +436,16 @@ impl<'a> GasEstimator<'a> {
     pub(super) async fn unadjusted_step(
         &self,
         forced_gas_limit: u64,
-    ) -> anyhow::Result<(VmExecutionResultAndLogs, TransactionExecutionMetrics)> {
+    ) -> Result<(VmExecutionResultAndLogs, TransactionExecutionMetrics), SubmitTxError> {
         let mut tx = self.transaction.clone();
         match &mut tx.common_data {
             ExecuteTransactionCommon::L1(l1_common_data) => {
                 l1_common_data.gas_limit = forced_gas_limit.into();
-                let required_funds =
-                    l1_common_data.gas_limit * l1_common_data.max_fee_per_gas + tx.execute.value;
+                // Since `tx.execute.value` is supplied by the client and is not checked against the current balance (unlike for L2 transactions),
+                // we may hit an integer overflow. Ditto for protocol upgrade transactions below.
+                let required_funds = (l1_common_data.gas_limit * l1_common_data.max_fee_per_gas)
+                    .checked_add(tx.execute.value)
+                    .ok_or(SubmitTxError::MintedAmountOverflow)?;
                 l1_common_data.to_mint = required_funds;
             }
             ExecuteTransactionCommon::L2(l2_common_data) => {
@@ -460,8 +453,9 @@ impl<'a> GasEstimator<'a> {
             }
             ExecuteTransactionCommon::ProtocolUpgrade(common_data) => {
                 common_data.gas_limit = forced_gas_limit.into();
-                let required_funds =
-                    common_data.gas_limit * common_data.max_fee_per_gas + tx.execute.value;
+                let required_funds = (common_data.gas_limit * common_data.max_fee_per_gas)
+                    .checked_add(tx.execute.value)
+                    .ok_or(SubmitTxError::MintedAmountOverflow)?;
                 common_data.to_mint = required_funds;
             }
         }
@@ -490,10 +484,7 @@ impl<'a> GasEstimator<'a> {
         suggested_gas_limit: u64,
         estimated_fee_scale_factor: f64,
     ) -> Result<Fee, SubmitTxError> {
-        let (result, tx_metrics) = self
-            .step(suggested_gas_limit)
-            .await
-            .context("final estimate_gas step failed")?;
+        let (result, tx_metrics) = self.step(suggested_gas_limit).await?;
         result.into_api_call_result()?;
         self.sender
             .ensure_tx_executable(&self.transaction, &tx_metrics, false)?;
diff --git a/core/node/api_server/src/tx_sender/mod.rs b/core/node/api_server/src/tx_sender/mod.rs
index ad8e38ef3cc..38794fe7137 100644
--- a/core/node/api_server/src/tx_sender/mod.rs
+++ b/core/node/api_server/src/tx_sender/mod.rs
@@ -29,7 +29,9 @@ use zksync_types::{
     MAX_NEW_FACTORY_DEPS, U256,
 };
 use zksync_utils::h256_to_u256;
-use zksync_vm_executor::oneshot::{CallOrExecute, EstimateGas, OneshotEnvParameters};
+use zksync_vm_executor::oneshot::{
+    CallOrExecute, EstimateGas, MultiVMBaseSystemContracts, OneshotEnvParameters,
+};
 
 pub(super) use self::{gas_estimation::BinarySearchKind, result::SubmitTxError};
 use self::{master_pool_sink::MasterPoolSink, result::ApiCallResult, tx_sink::TxSink};
@@ -102,15 +104,28 @@ impl SandboxExecutorOptions {
         operator_account: AccountTreeId,
         validation_computational_gas_limit: u32,
     ) -> anyhow::Result<Self> {
+        let estimate_gas_contracts =
+            tokio::task::spawn_blocking(MultiVMBaseSystemContracts::load_estimate_gas_blocking)
+                .await
+                .context("failed loading base contracts for gas estimation")?;
+        let call_contracts =
+            tokio::task::spawn_blocking(MultiVMBaseSystemContracts::load_eth_call_blocking)
+                .await
+
.context("failed loading base contracts for calls / tx execution")?; + Ok(Self { - estimate_gas: OneshotEnvParameters::for_gas_estimation(chain_id, operator_account) - .await?, - eth_call: OneshotEnvParameters::for_execution( + estimate_gas: OneshotEnvParameters::new( + Arc::new(estimate_gas_contracts), + chain_id, + operator_account, + u32::MAX, + ), + eth_call: OneshotEnvParameters::new( + Arc::new(call_contracts), chain_id, operator_account, validation_computational_gas_limit, - ) - .await?, + ), }) } @@ -280,13 +295,11 @@ impl TxSender { pub async fn submit_tx( &self, tx: L2Tx, + block_args: BlockArgs, ) -> Result<(L2TxSubmissionResult, VmExecutionResultAndLogs), SubmitTxError> { let tx_hash = tx.hash(); let stage_latency = SANDBOX_METRICS.start_tx_submit_stage(tx_hash, SubmitTxStage::Validate); - let mut connection = self.acquire_replica_connection().await?; - let protocol_version = connection.blocks_dal().pending_protocol_version().await?; - drop(connection); - self.validate_tx(&tx, protocol_version).await?; + self.validate_tx(&tx, block_args.protocol_version()).await?; stage_latency.observe(); let stage_latency = SANDBOX_METRICS.start_tx_submit_stage(tx_hash, SubmitTxStage::DryRun); @@ -305,9 +318,7 @@ impl TxSender { tx: tx.clone(), }; let vm_permit = vm_permit.ok_or(SubmitTxError::ServerShuttingDown)?; - let mut connection = self.acquire_replica_connection().await?; - let block_args = BlockArgs::pending(&mut connection).await?; - + let connection = self.acquire_replica_connection().await?; let execution_output = self .0 .executor diff --git a/core/node/api_server/src/tx_sender/result.rs b/core/node/api_server/src/tx_sender/result.rs index a49313f0dd6..e2a51ae8e9a 100644 --- a/core/node/api_server/src/tx_sender/result.rs +++ b/core/node/api_server/src/tx_sender/result.rs @@ -24,8 +24,6 @@ pub enum SubmitTxError { GasLimitIsTooBig, #[error("{0}")] Unexecutable(String), - #[error("too many transactions")] - RateLimitExceeded, #[error("server shutting down")] ServerShuttingDown, #[error("failed to include transaction in the system. reason: {0}")] @@ -49,29 +47,23 @@ pub enum SubmitTxError { that caused this error. Error description: {0}" )] UnexpectedVMBehavior(String), - #[error("pubdata price limit is too low, ensure that the price limit is correct")] - UnrealisticPubdataPriceLimit, #[error( "too many factory dependencies in the transaction. {0} provided, while only {1} allowed" )] TooManyFactoryDependencies(usize, usize), - #[error("max fee per gas higher than 2^32")] - FeePerGasTooHigh, - #[error("max fee per pubdata byte higher than 2^32")] - FeePerPubdataByteTooHigh, - /// InsufficientFundsForTransfer is returned if the transaction sender doesn't - /// have enough funds for transfer. - #[error("insufficient balance for transfer")] - InsufficientFundsForTransfer, /// IntrinsicGas is returned if the transaction is specified to use less gas /// than required to start the invocation. #[error("intrinsic gas too low")] IntrinsicGas, - /// Error returned from main node - #[error("{0}")] - ProxyError(#[from] EnrichedClientError), #[error("not enough gas to publish compressed bytecodes")] FailedToPublishCompressedBytecodes, + /// Currently only triggered during gas estimation for L1 and protocol upgrade transactions. + #[error("integer overflow computing base token amount to mint")] + MintedAmountOverflow, + + /// Error returned from main node. 
+ #[error("{0}")] + ProxyError(#[from] EnrichedClientError), /// Catch-all internal error (e.g., database error) that should not be exposed to the caller. #[error("internal error")] Internal(#[from] anyhow::Error), @@ -88,7 +80,6 @@ impl SubmitTxError { Self::ExecutionReverted(_, _) => "execution-reverted", Self::GasLimitIsTooBig => "gas-limit-is-too-big", Self::Unexecutable(_) => "unexecutable", - Self::RateLimitExceeded => "rate-limit-exceeded", Self::ServerShuttingDown => "shutting-down", Self::BootloaderFailure(_) => "bootloader-failure", Self::ValidationFailed(_) => "validation-failed", @@ -99,14 +90,11 @@ impl SubmitTxError { Self::MaxFeePerGasTooLow => "max-fee-per-gas-too-low", Self::MaxPriorityFeeGreaterThanMaxFee => "max-priority-fee-greater-than-max-fee", Self::UnexpectedVMBehavior(_) => "unexpected-vm-behavior", - Self::UnrealisticPubdataPriceLimit => "unrealistic-pubdata-price-limit", Self::TooManyFactoryDependencies(_, _) => "too-many-factory-dependencies", - Self::FeePerGasTooHigh => "gas-price-limit-too-high", - Self::FeePerPubdataByteTooHigh => "pubdata-price-limit-too-high", - Self::InsufficientFundsForTransfer => "insufficient-funds-for-transfer", Self::IntrinsicGas => "intrinsic-gas", - Self::ProxyError(_) => "proxy-error", Self::FailedToPublishCompressedBytecodes => "failed-to-publish-compressed-bytecodes", + Self::MintedAmountOverflow => "minted-amount-overflow", + Self::ProxyError(_) => "proxy-error", Self::Internal(_) => "internal", } } diff --git a/core/node/api_server/src/tx_sender/tests.rs b/core/node/api_server/src/tx_sender/tests.rs deleted file mode 100644 index 36c95fa5db0..00000000000 --- a/core/node/api_server/src/tx_sender/tests.rs +++ /dev/null @@ -1,805 +0,0 @@ -//! Tests for the transaction sender. - -use std::{collections::HashMap, time::Duration}; - -use assert_matches::assert_matches; -use test_casing::{test_casing, Product, TestCases}; -use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams}; -use zksync_multivm::interface::ExecutionResult; -use zksync_node_fee_model::MockBatchFeeParamsProvider; -use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; -use zksync_node_test_utils::{create_l2_block, create_l2_transaction, prepare_recovery_snapshot}; -use zksync_system_constants::CODE_ORACLE_ADDRESS; -use zksync_types::{ - api, - api::state_override::{Bytecode, OverrideAccount, OverrideState}, - get_nonce_key, - web3::keccak256, - K256PrivateKey, L1BatchNumber, L2BlockNumber, StorageLog, -}; -use zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; -use zksync_vm_executor::oneshot::MockOneshotExecutor; - -use super::{gas_estimation::GasEstimator, *}; -use crate::{ - execution_sandbox::BlockStartInfo, - testonly::{ - inflate_bytecode, read_counter_contract_bytecode, read_expensive_contract_bytecode, - read_infinite_loop_contract_bytecode, read_precompiles_contract_bytecode, TestAccount, - COUNTER_CONTRACT_ADDRESS, EXPENSIVE_CONTRACT_ADDRESS, INFINITE_LOOP_CONTRACT_ADDRESS, - LOAD_TEST_ADDRESS, PRECOMPILES_CONTRACT_ADDRESS, - }, - web3::testonly::create_test_tx_sender, -}; - -/// Initial pivot multiplier empirically sufficient for most tx types. 
-const DEFAULT_MULTIPLIER: f64 = 64.0 / 63.0;
-
-#[tokio::test]
-async fn getting_nonce_for_account() {
-    let l2_chain_id = L2ChainId::default();
-    let test_address = Address::repeat_byte(1);
-    let pool = ConnectionPool::<Core>::test_pool().await;
-    let mut storage = pool.connection().await.unwrap();
-    insert_genesis_batch(&mut storage, &GenesisParams::mock())
-        .await
-        .unwrap();
-    // Manually insert a nonce for the address.
-    let nonce_key = get_nonce_key(&test_address);
-    let nonce_log = StorageLog::new_write_log(nonce_key, H256::from_low_u64_be(123));
-    storage
-        .storage_logs_dal()
-        .append_storage_logs(L2BlockNumber(0), &[nonce_log])
-        .await
-        .unwrap();
-
-    let tx_executor = MockOneshotExecutor::default();
-    let tx_executor = SandboxExecutor::mock(tx_executor).await;
-    let (tx_sender, _) = create_test_tx_sender(pool.clone(), l2_chain_id, tx_executor).await;
-
-    let nonce = tx_sender.get_expected_nonce(test_address).await.unwrap();
-    assert_eq!(nonce, Nonce(123));
-
-    // Insert another L2 block with a new nonce log.
-    storage
-        .blocks_dal()
-        .insert_l2_block(&create_l2_block(1))
-        .await
-        .unwrap();
-    let nonce_log = StorageLog {
-        value: H256::from_low_u64_be(321),
-        ..nonce_log
-    };
-    storage
-        .storage_logs_dal()
-        .insert_storage_logs(L2BlockNumber(1), &[nonce_log])
-        .await
-        .unwrap();
-
-    let nonce = tx_sender.get_expected_nonce(test_address).await.unwrap();
-    assert_eq!(nonce, Nonce(321));
-    let missing_address = Address::repeat_byte(0xff);
-    let nonce = tx_sender.get_expected_nonce(missing_address).await.unwrap();
-    assert_eq!(nonce, Nonce(0));
-}
-
-#[tokio::test]
-async fn getting_nonce_for_account_after_snapshot_recovery() {
-    const SNAPSHOT_L2_BLOCK_NUMBER: L2BlockNumber = L2BlockNumber(42);
-
-    let pool = ConnectionPool::<Core>::test_pool().await;
-    let mut storage = pool.connection().await.unwrap();
-    let test_address = Address::repeat_byte(1);
-    let other_address = Address::repeat_byte(2);
-    let nonce_logs = [
-        StorageLog::new_write_log(get_nonce_key(&test_address), H256::from_low_u64_be(123)),
-        StorageLog::new_write_log(get_nonce_key(&other_address), H256::from_low_u64_be(25)),
-    ];
-    prepare_recovery_snapshot(
-        &mut storage,
-        L1BatchNumber(23),
-        SNAPSHOT_L2_BLOCK_NUMBER,
-        &nonce_logs,
-    )
-    .await;
-
-    let l2_chain_id = L2ChainId::default();
-    let tx_executor = MockOneshotExecutor::default();
-    let tx_executor = SandboxExecutor::mock(tx_executor).await;
-    let (tx_sender, _) = create_test_tx_sender(pool.clone(), l2_chain_id, tx_executor).await;
-
-    storage
-        .blocks_dal()
-        .insert_l2_block(&create_l2_block(SNAPSHOT_L2_BLOCK_NUMBER.0 + 1))
-        .await
-        .unwrap();
-    let new_nonce_logs = vec![StorageLog::new_write_log(
-        get_nonce_key(&test_address),
-        H256::from_low_u64_be(321),
-    )];
-    storage
-        .storage_logs_dal()
-        .insert_storage_logs(SNAPSHOT_L2_BLOCK_NUMBER + 1, &new_nonce_logs)
-        .await
-        .unwrap();
-
-    let nonce = tx_sender.get_expected_nonce(test_address).await.unwrap();
-    assert_eq!(nonce, Nonce(321));
-    let nonce = tx_sender.get_expected_nonce(other_address).await.unwrap();
-    assert_eq!(nonce, Nonce(25));
-    let missing_address = Address::repeat_byte(0xff);
-    let nonce = tx_sender.get_expected_nonce(missing_address).await.unwrap();
-    assert_eq!(nonce, Nonce(0));
-}
-
-#[tokio::test]
-async fn submitting_tx_requires_one_connection() {
-    let pool = ConnectionPool::<Core>::constrained_test_pool(1).await;
-    let mut storage = pool.connection().await.unwrap();
-    insert_genesis_batch(&mut storage, &GenesisParams::mock())
-        .await
-        .unwrap();
-
-    let l2_chain_id = L2ChainId::default();
-    let fee_input = MockBatchFeeParamsProvider::default()
-        .get_batch_fee_input_scaled(1.0, 1.0)
-        .await
-        .unwrap();
-    let (base_fee, gas_per_pubdata) =
-        derive_base_fee_and_gas_per_pubdata(fee_input, ProtocolVersionId::latest().into());
-    let tx = create_l2_transaction(base_fee, gas_per_pubdata);
-    let tx_hash = tx.hash();
-
-    // Manually set sufficient balance for the tx initiator.
-    let balance_key = storage_key_for_eth_balance(&tx.initiator_account());
-    let storage_log = StorageLog::new_write_log(balance_key, u256_to_h256(U256::one() << 64));
-    storage
-        .storage_logs_dal()
-        .append_storage_logs(L2BlockNumber(0), &[storage_log])
-        .await
-        .unwrap();
-    drop(storage);
-
-    let mut tx_executor = MockOneshotExecutor::default();
-    tx_executor.set_tx_responses(move |received_tx, _| {
-        assert_eq!(received_tx.hash(), tx_hash);
-        ExecutionResult::Success { output: vec![] }
-    });
-    let tx_executor = SandboxExecutor::mock(tx_executor).await;
-    let (tx_sender, _) = create_test_tx_sender(pool.clone(), l2_chain_id, tx_executor).await;
-
-    let submission_result = tx_sender.submit_tx(tx).await.unwrap();
-    assert_matches!(submission_result.0, L2TxSubmissionResult::Added);
-
-    let mut storage = pool.connection().await.unwrap();
-    storage
-        .transactions_web3_dal()
-        .get_transaction_by_hash(tx_hash, l2_chain_id)
-        .await
-        .unwrap()
-        .expect("transaction is not persisted");
-}
-
-#[tokio::test]
-async fn eth_call_requires_single_connection() {
-    let pool = ConnectionPool::<Core>::constrained_test_pool(1).await;
-    let mut storage = pool.connection().await.unwrap();
-    let genesis_params = GenesisParams::mock();
-    insert_genesis_batch(&mut storage, &genesis_params)
-        .await
-        .unwrap();
-    let start_info = BlockStartInfo::new(&mut storage, Duration::MAX)
-        .await
-        .unwrap();
-    let block_id = api::BlockId::Number(api::BlockNumber::Latest);
-    let block_args = BlockArgs::new(&mut storage, block_id, &start_info)
-        .await
-        .unwrap();
-    drop(storage);
-
-    let tx = create_l2_transaction(10, 100);
-    let tx_hash = tx.hash();
-
-    let mut tx_executor = MockOneshotExecutor::default();
-    tx_executor.set_call_responses(move |received_tx, _| {
-        assert_eq!(received_tx.hash(), tx_hash);
-        ExecutionResult::Success {
-            output: b"success!".to_vec(),
-        }
-    });
-    let tx_executor = SandboxExecutor::mock(tx_executor).await;
-    let (tx_sender, _) = create_test_tx_sender(
-        pool.clone(),
-        genesis_params.config().l2_chain_id,
-        tx_executor,
-    )
-    .await;
-    let call_overrides = CallOverrides {
-        enforced_base_fee: None,
-    };
-    let output = tx_sender
-        .eth_call(block_args, call_overrides, tx, None)
-        .await
-        .unwrap();
-    assert_eq!(output, b"success!");
-}
-
-async fn create_real_tx_sender() -> TxSender {
-    let pool = ConnectionPool::<Core>::constrained_test_pool(1).await;
-    let mut storage = pool.connection().await.unwrap();
-    let genesis_params = GenesisParams::mock();
-    insert_genesis_batch(&mut storage, &genesis_params)
-        .await
-        .unwrap();
-    drop(storage);
-
-    let genesis_config = genesis_params.config();
-    let executor_options = SandboxExecutorOptions::new(
-        genesis_config.l2_chain_id,
-        AccountTreeId::new(genesis_config.fee_account),
-        u32::MAX,
-    )
-    .await
-    .unwrap();
-
-    let pg_caches = PostgresStorageCaches::new(1, 1);
-    let tx_executor = SandboxExecutor::real(executor_options, pg_caches, usize::MAX);
-    create_test_tx_sender(pool, genesis_params.config().l2_chain_id, tx_executor)
-        .await
-        .0
-}
-
-#[tokio::test]
-async fn initial_gas_estimation_is_somewhat_accurate() {
-    let tx_sender = create_real_tx_sender().await;
-
-    let alice = K256PrivateKey::random();
-    let transfer_value = U256::from(1_000_000_000);
-    let account_overrides = OverrideAccount {
-        balance: Some(transfer_value * 2),
-        ..OverrideAccount::default()
-    };
-    let state_override = StateOverride::new(HashMap::from([(alice.address(), account_overrides)]));
-    // fee params don't matter; we adjust via `adjust_transaction_fee()`
-    let tx = alice.create_transfer(transfer_value, 55, 555);
-
-    let mut estimator = GasEstimator::new(&tx_sender, tx.into(), Some(state_override))
-        .await
-        .unwrap();
-    estimator.adjust_transaction_fee();
-    let initial_estimate = estimator.initialize().await.unwrap();
-    assert!(initial_estimate.gas_charged_for_pubdata > 0);
-    assert!(initial_estimate.operator_overhead > 0);
-    let total_gas_charged = initial_estimate.total_gas_charged.unwrap();
-    assert!(
-        total_gas_charged
-            > initial_estimate.gas_charged_for_pubdata + initial_estimate.operator_overhead,
-        "{initial_estimate:?}"
-    );
-
-    // Check that a transaction fails if supplied with the lower bound.
-    let lower_bound = initial_estimate.lower_gas_bound_without_overhead().unwrap()
-        + initial_estimate.operator_overhead;
-    assert!(lower_bound < total_gas_charged, "{initial_estimate:?}");
-    let (vm_result, _) = estimator.unadjusted_step(lower_bound).await.unwrap();
-    assert!(vm_result.result.is_failed(), "{:?}", vm_result.result);
-
-    // A slightly larger limit should work.
-    let initial_pivot = total_gas_charged * 64 / 63;
-    let (vm_result, _) = estimator.unadjusted_step(initial_pivot).await.unwrap();
-    assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result);
-}
-
-const LOAD_TEST_CASES: TestCases<LoadnextContractExecutionParams> = test_casing::cases! {[
-    LoadnextContractExecutionParams::default(),
-    // No storage modification
-    LoadnextContractExecutionParams {
-        writes: 0,
-        events: 0,
-        ..LoadnextContractExecutionParams::default()
-    },
-    // Moderately deep recursion (very deep recursion is tested separately)
-    LoadnextContractExecutionParams {
-        recursive_calls: 10,
-        ..LoadnextContractExecutionParams::default()
-    },
-    // No deploys
-    LoadnextContractExecutionParams {
-        deploys: 0,
-        ..LoadnextContractExecutionParams::default()
-    },
-    // Lots of deploys
-    LoadnextContractExecutionParams {
-        deploys: 10,
-        ..LoadnextContractExecutionParams::default()
-    },
-]};
-
-#[test_casing(5, LOAD_TEST_CASES)]
-#[tokio::test]
-async fn initial_estimate_for_load_test_transaction(tx_params: LoadnextContractExecutionParams) {
-    let alice = K256PrivateKey::random();
-    // Set the array length in the load test contract to 100, so that reads don't fail.
- let load_test_state = HashMap::from([(H256::zero(), H256::from_low_u64_be(100))]); - let load_test_overrides = OverrideAccount { - code: Some(Bytecode::new(get_loadnext_contract().bytecode).unwrap()), - state: Some(OverrideState::State(load_test_state)), - ..OverrideAccount::default() - }; - let state_override = - StateOverride::new(HashMap::from([(LOAD_TEST_ADDRESS, load_test_overrides)])); - let tx = alice.create_load_test_tx(tx_params); - - test_initial_estimate(state_override, tx, DEFAULT_MULTIPLIER).await; -} - -#[test_casing(2, [false, true])] -#[tokio::test] -async fn initial_estimate_for_deep_recursion(with_reads: bool) { - let alice = K256PrivateKey::random(); - let load_test_state = HashMap::from([(H256::zero(), H256::from_low_u64_be(100))]); - let load_test_overrides = OverrideAccount { - code: Some(Bytecode::new(get_loadnext_contract().bytecode).unwrap()), - state: Some(OverrideState::State(load_test_state)), - ..OverrideAccount::default() - }; - let state_override = - StateOverride::new(HashMap::from([(LOAD_TEST_ADDRESS, load_test_overrides)])); - - // Reads are chosen because they represent the worst case. Reads don't influence the amount of pubdata; - // i.e., they don't make it easier to execute a transaction because of additional gas reserved for pubdata. - // OTOH, reads still increase the amount of computational gas used on each nested call. - // - // Initial pivot multipliers below are the smallest ones with 0.1 precision. `DEFAULT_MULTIPLIER` works for smaller - // recursion depths because the transaction emits enough pubdata to cover gas deductions due to the 63/64 rule. - let depths_and_multipliers: &[_] = if with_reads { - &[(25, DEFAULT_MULTIPLIER), (50, 1.2), (75, 1.4), (100, 1.7)] - } else { - &[ - (50, DEFAULT_MULTIPLIER), - (75, 1.2), - (100, 1.4), - (125, 1.7), - (150, 2.1), - ] - }; - for &(recursion_depth, multiplier) in depths_and_multipliers { - println!("Testing recursion depth {recursion_depth}"); - let tx = alice.create_load_test_tx(LoadnextContractExecutionParams { - recursive_calls: recursion_depth, - reads: if with_reads { 10 } else { 0 }, - ..LoadnextContractExecutionParams::empty() - }); - test_initial_estimate(state_override.clone(), tx, multiplier).await; - } -} - -#[tokio::test] -async fn initial_estimate_for_deep_recursion_with_large_bytecode() { - let alice = K256PrivateKey::random(); - let mut contract_bytecode = get_loadnext_contract().bytecode; - inflate_bytecode(&mut contract_bytecode, 50_000); - let load_test_overrides = OverrideAccount { - code: Some(Bytecode::new(contract_bytecode).unwrap()), - ..OverrideAccount::default() - }; - let state_override = - StateOverride::new(HashMap::from([(LOAD_TEST_ADDRESS, load_test_overrides)])); - let tx = alice.create_load_test_tx(LoadnextContractExecutionParams { - recursive_calls: 100, - ..LoadnextContractExecutionParams::empty() - }); - - test_initial_estimate(state_override, tx, 1.35).await; -} - -/// Tests the lower bound and initial pivot extracted from the initial estimate (one with effectively infinite gas amount). -/// Returns the VM result for a VM run with the initial pivot. 
-async fn test_initial_estimate( - state_override: StateOverride, - tx: L2Tx, - initial_pivot_multiplier: f64, -) -> VmExecutionResultAndLogs { - let tx_sender = create_real_tx_sender().await; - let mut estimator = GasEstimator::new(&tx_sender, tx.into(), Some(state_override)) - .await - .unwrap(); - estimator.adjust_transaction_fee(); - let initial_estimate = estimator.initialize().await.unwrap(); - - let lower_bound = initial_estimate.lower_gas_bound_without_overhead().unwrap() - + initial_estimate.operator_overhead; - let (vm_result, _) = estimator.unadjusted_step(lower_bound).await.unwrap(); - assert!(vm_result.result.is_failed(), "{:?}", vm_result.result); - - // A slightly larger limit should work. - let initial_pivot = - (initial_estimate.total_gas_charged.unwrap() as f64 * initial_pivot_multiplier) as u64; - let (vm_result, _) = estimator.unadjusted_step(initial_pivot).await.unwrap(); - assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); - vm_result -} - -async fn test_initial_estimate_error(state_override: StateOverride, tx: L2Tx) -> SubmitTxError { - let tx_sender = create_real_tx_sender().await; - let mut estimator = GasEstimator::new(&tx_sender, tx.into(), Some(state_override)) - .await - .unwrap(); - estimator.adjust_transaction_fee(); - estimator.initialize().await.unwrap_err() -} - -/// Estimates both transactions with initial writes and cleanup. -#[test_casing(4, [10, 50, 200, 1_000])] -#[tokio::test] -async fn initial_estimate_for_expensive_contract(write_count: usize) { - let alice = K256PrivateKey::random(); - let contract_bytecode = read_expensive_contract_bytecode(); - let mut contract_overrides = OverrideAccount { - code: Some(Bytecode::new(contract_bytecode).unwrap()), - ..OverrideAccount::default() - }; - let state_override = StateOverride::new(HashMap::from([( - EXPENSIVE_CONTRACT_ADDRESS, - contract_overrides.clone(), - )])); - let tx = alice.create_expensive_tx(write_count); - - let vm_result = test_initial_estimate(state_override, tx, DEFAULT_MULTIPLIER).await; - - let contract_logs = vm_result.logs.storage_logs.into_iter().filter_map(|log| { - (*log.log.key.address() == EXPENSIVE_CONTRACT_ADDRESS) - .then_some((*log.log.key.key(), log.log.value)) - }); - let contract_logs: HashMap<_, _> = contract_logs.collect(); - assert!(contract_logs.len() >= write_count, "{contract_logs:?}"); - contract_overrides.state = Some(OverrideState::StateDiff(contract_logs)); - - let state_override = StateOverride::new(HashMap::from([( - EXPENSIVE_CONTRACT_ADDRESS, - contract_overrides, - )])); - let tx = alice.create_expensive_cleanup_tx(); - - test_initial_estimate(state_override, tx, DEFAULT_MULTIPLIER).await; -} - -#[tokio::test] -async fn initial_estimate_for_code_oracle_tx() { - let alice = K256PrivateKey::random(); - let contract_bytecode = read_precompiles_contract_bytecode(); - let contract_bytecode_hash = hash_bytecode(&contract_bytecode); - let contract_keccak_hash = H256(keccak256(&contract_bytecode)); - let contract_overrides = OverrideAccount { - code: Some(Bytecode::new(contract_bytecode).unwrap()), - ..OverrideAccount::default() - }; - - // Add another contract that is never executed, but has a large bytecode. 
- let huge_contact_address = Address::repeat_byte(23); - let huge_contract_bytecode = vec![0_u8; 10_001 * 32]; - let huge_contract_bytecode_hash = hash_bytecode(&huge_contract_bytecode); - let huge_contract_keccak_hash = H256(keccak256(&huge_contract_bytecode)); - let huge_contract_overrides = OverrideAccount { - code: Some(Bytecode::new(huge_contract_bytecode).unwrap()), - ..OverrideAccount::default() - }; - - let state_override = StateOverride::new(HashMap::from([ - (PRECOMPILES_CONTRACT_ADDRESS, contract_overrides), - (huge_contact_address, huge_contract_overrides), - ])); - - // Test contracts that are already decommitted when requested from the precompiles test contract. - let genesis_params = GenesisParams::mock(); - let code_oracle_bytecode = genesis_params - .system_contracts() - .iter() - .find_map(|contract| { - (*contract.account_id.address() == CODE_ORACLE_ADDRESS).then_some(&contract.bytecode) - }) - .expect("no code oracle"); - let code_oracle_bytecode_hash = hash_bytecode(code_oracle_bytecode); - let code_oracle_keccak_hash = H256(keccak256(code_oracle_bytecode)); - - let warm_bytecode_hashes = [ - (code_oracle_bytecode_hash, code_oracle_keccak_hash), - (contract_bytecode_hash, contract_keccak_hash), - ]; - let mut decomitter_stats = 0.0; - for (hash, keccak_hash) in warm_bytecode_hashes { - println!("Testing bytecode: {hash:?}"); - let tx = alice.create_code_oracle_tx(hash, keccak_hash); - let vm_result = test_initial_estimate(state_override.clone(), tx, DEFAULT_MULTIPLIER).await; - let stats = &vm_result.statistics.circuit_statistic; - decomitter_stats = stats.code_decommitter.max(decomitter_stats); - } - assert!(decomitter_stats > 0.0); - - println!("Testing large bytecode"); - let tx = alice.create_code_oracle_tx(huge_contract_bytecode_hash, huge_contract_keccak_hash); - let vm_result = test_initial_estimate(state_override, tx, 1.05).await; - // Sanity check: the transaction should spend significantly more on decommitment compared to previous ones - let new_decomitter_stats = vm_result.statistics.circuit_statistic.code_decommitter; - assert!( - new_decomitter_stats > decomitter_stats * 1.5, - "old={decomitter_stats}, new={new_decomitter_stats}" - ); -} - -#[tokio::test] -async fn initial_estimate_with_large_free_bytecode() { - let alice = K256PrivateKey::random(); - let mut contract_bytecode = read_precompiles_contract_bytecode(); - inflate_bytecode(&mut contract_bytecode, 50_000); - let contract_bytecode_hash = hash_bytecode(&contract_bytecode); - let contract_keccak_hash = H256(keccak256(&contract_bytecode)); - let contract_overrides = OverrideAccount { - code: Some(Bytecode::new(contract_bytecode).unwrap()), - ..OverrideAccount::default() - }; - - let state_override = StateOverride::new(HashMap::from([( - PRECOMPILES_CONTRACT_ADDRESS, - contract_overrides, - )])); - // Ask the test contract to decommit itself. This should refund the decommit costs, but it will be charged at first. 
- let tx = alice.create_code_oracle_tx(contract_bytecode_hash, contract_keccak_hash); - test_initial_estimate(state_override, tx, 1.05).await; -} - -#[tokio::test] -async fn revert_during_initial_estimate() { - let alice = K256PrivateKey::random(); - let contract_bytecode = read_counter_contract_bytecode(); - let contract_overrides = OverrideAccount { - code: Some(Bytecode::new(contract_bytecode).unwrap()), - ..OverrideAccount::default() - }; - let state_override = StateOverride::new(HashMap::from([( - COUNTER_CONTRACT_ADDRESS, - contract_overrides, - )])); - - let tx = alice.create_reverting_counter_tx(); - let err = test_initial_estimate_error(state_override, tx).await; - let SubmitTxError::ExecutionReverted(err, _) = err else { - panic!("Unexpected error: {err:?}"); - }; - assert_eq!(err, "This method always reverts"); -} - -#[tokio::test] -async fn out_of_gas_during_initial_estimate() { - let alice = K256PrivateKey::random(); - let contract_bytecode = read_infinite_loop_contract_bytecode(); - let contract_overrides = OverrideAccount { - code: Some(Bytecode::new(contract_bytecode).unwrap()), - ..OverrideAccount::default() - }; - let state_override = StateOverride::new(HashMap::from([( - INFINITE_LOOP_CONTRACT_ADDRESS, - contract_overrides, - )])); - - let tx = alice.create_infinite_loop_tx(); - let err = test_initial_estimate_error(state_override, tx).await; - // Unfortunately, we don't provide human-readable out-of-gas errors at the time - assert_matches!(err, SubmitTxError::ExecutionReverted(msg, _) if msg.is_empty()); -} - -#[tokio::test] -async fn insufficient_funds_error_for_transfer() { - let tx_sender = create_real_tx_sender().await; - - let alice = K256PrivateKey::random(); - let transfer_value = 1_000_000_000.into(); - // fee params don't matter; they should be overwritten by the estimation logic - let tx = alice.create_transfer(transfer_value, 55, 555); - let fee_scale_factor = 1.0; - // Without overrides, the transaction should fail because of insufficient balance. 
- let err = tx_sender - .get_txs_fee_in_wei( - tx.clone().into(), - fee_scale_factor, - 1_000, - None, - BinarySearchKind::Full, - ) - .await - .unwrap_err(); - assert_matches!(err, SubmitTxError::InsufficientFundsForTransfer); -} - -async fn test_estimating_gas( - state_override: StateOverride, - tx: L2Tx, - acceptable_overestimation: u64, -) { - let tx_sender = create_real_tx_sender().await; - - let fee_scale_factor = 1.0; - let fee = tx_sender - .get_txs_fee_in_wei( - tx.clone().into(), - fee_scale_factor, - acceptable_overestimation, - Some(state_override.clone()), - BinarySearchKind::Full, - ) - .await - .unwrap(); - // Sanity-check gas limit - let gas_limit_after_full_search = u64::try_from(fee.gas_limit).unwrap(); - assert!( - (10_000..10_000_000).contains(&gas_limit_after_full_search), - "{fee:?}" - ); - - let fee = tx_sender - .get_txs_fee_in_wei( - tx.into(), - fee_scale_factor, - acceptable_overestimation, - Some(state_override.clone()), - BinarySearchKind::Optimized, - ) - .await - .unwrap(); - let gas_limit_after_optimized_search = u64::try_from(fee.gas_limit).unwrap(); - - let diff = gas_limit_after_full_search.abs_diff(gas_limit_after_optimized_search); - assert!( - diff <= acceptable_overestimation, - "full={gas_limit_after_full_search}, optimized={gas_limit_after_optimized_search}" - ); -} - -#[test_casing(3, [0, 100, 1_000])] -#[tokio::test] -async fn estimating_gas_for_transfer(acceptable_overestimation: u64) { - let alice = K256PrivateKey::random(); - let transfer_value = 1_000_000_000.into(); - let account_overrides = OverrideAccount { - balance: Some(transfer_value * 2), - ..OverrideAccount::default() - }; - let state_override = StateOverride::new(HashMap::from([(alice.address(), account_overrides)])); - // fee params don't matter; they should be overwritten by the estimation logic - let tx = alice.create_transfer(transfer_value, 55, 555); - - test_estimating_gas(state_override, tx, acceptable_overestimation).await; -} - -#[test_casing(10, Product((LOAD_TEST_CASES, [0, 100])))] -#[tokio::test] -async fn estimating_gas_for_load_test_tx( - tx_params: LoadnextContractExecutionParams, - acceptable_overestimation: u64, -) { - let alice = K256PrivateKey::random(); - // Set the array length in the load test contract to 100, so that reads don't fail. 
- let load_test_state = HashMap::from([(H256::zero(), H256::from_low_u64_be(100))]); - let load_test_overrides = OverrideAccount { - code: Some(Bytecode::new(get_loadnext_contract().bytecode).unwrap()), - state: Some(OverrideState::State(load_test_state)), - ..OverrideAccount::default() - }; - let state_override = - StateOverride::new(HashMap::from([(LOAD_TEST_ADDRESS, load_test_overrides)])); - let tx = alice.create_load_test_tx(tx_params); - - test_estimating_gas(state_override, tx, acceptable_overestimation).await; -} - -#[test_casing(4, [10, 50, 100, 200])] -#[tokio::test] -async fn estimating_gas_for_expensive_txs(write_count: usize) { - let alice = K256PrivateKey::random(); - let contract_bytecode = read_expensive_contract_bytecode(); - let contract_overrides = OverrideAccount { - code: Some(Bytecode::new(contract_bytecode).unwrap()), - ..OverrideAccount::default() - }; - let state_override = StateOverride::new(HashMap::from([( - EXPENSIVE_CONTRACT_ADDRESS, - contract_overrides.clone(), - )])); - let tx = alice.create_expensive_tx(write_count); - - test_estimating_gas(state_override, tx, 0).await; -} - -#[tokio::test] -async fn estimating_gas_for_code_oracle_tx() { - let alice = K256PrivateKey::random(); - let contract_bytecode = read_precompiles_contract_bytecode(); - let contract_overrides = OverrideAccount { - code: Some(Bytecode::new(contract_bytecode).unwrap()), - ..OverrideAccount::default() - }; - - // Add another contract that is never executed, but has a large bytecode. - let huge_contact_address = Address::repeat_byte(23); - let huge_contract_bytecode = vec![0_u8; 10_001 * 32]; - let huge_contract_bytecode_hash = hash_bytecode(&huge_contract_bytecode); - let huge_contract_keccak_hash = H256(keccak256(&huge_contract_bytecode)); - let huge_contract_overrides = OverrideAccount { - code: Some(Bytecode::new(huge_contract_bytecode).unwrap()), - ..OverrideAccount::default() - }; - - let state_override = StateOverride::new(HashMap::from([ - (PRECOMPILES_CONTRACT_ADDRESS, contract_overrides), - (huge_contact_address, huge_contract_overrides), - ])); - let tx = alice.create_code_oracle_tx(huge_contract_bytecode_hash, huge_contract_keccak_hash); - - test_estimating_gas(state_override, tx, 0).await; -} - -#[tokio::test] -async fn estimating_gas_for_reverting_tx() { - let alice = K256PrivateKey::random(); - let contract_bytecode = read_counter_contract_bytecode(); - let contract_overrides = OverrideAccount { - code: Some(Bytecode::new(contract_bytecode).unwrap()), - ..OverrideAccount::default() - }; - let state_override = StateOverride::new(HashMap::from([( - COUNTER_CONTRACT_ADDRESS, - contract_overrides, - )])); - - let tx = alice.create_reverting_counter_tx(); - let tx_sender = create_real_tx_sender().await; - - let fee_scale_factor = 1.0; - let acceptable_overestimation = 0; - for binary_search_kind in [BinarySearchKind::Full, BinarySearchKind::Optimized] { - let err = tx_sender - .get_txs_fee_in_wei( - tx.clone().into(), - fee_scale_factor, - acceptable_overestimation, - Some(state_override.clone()), - binary_search_kind, - ) - .await - .unwrap_err(); - assert_matches!(err, SubmitTxError::ExecutionReverted(..)); - } -} - -#[tokio::test] -async fn estimating_gas_for_infinite_loop_tx() { - let alice = K256PrivateKey::random(); - let contract_bytecode = read_infinite_loop_contract_bytecode(); - let contract_overrides = OverrideAccount { - code: Some(Bytecode::new(contract_bytecode).unwrap()), - ..OverrideAccount::default() - }; - let state_override = 
StateOverride::new(HashMap::from([(
-        INFINITE_LOOP_CONTRACT_ADDRESS,
-        contract_overrides,
-    )]));
-
-    let tx = alice.create_infinite_loop_tx();
-    let tx_sender = create_real_tx_sender().await;
-
-    let fee_scale_factor = 1.0;
-    let acceptable_overestimation = 0;
-    for binary_search_kind in [BinarySearchKind::Full, BinarySearchKind::Optimized] {
-        let err = tx_sender
-            .get_txs_fee_in_wei(
-                tx.clone().into(),
-                fee_scale_factor,
-                acceptable_overestimation,
-                Some(state_override.clone()),
-                binary_search_kind,
-            )
-            .await
-            .unwrap_err();
-        assert_matches!(err, SubmitTxError::ExecutionReverted(msg, _) if msg.is_empty());
-    }
-}
diff --git a/core/node/api_server/src/tx_sender/tests/call.rs b/core/node/api_server/src/tx_sender/tests/call.rs
new file mode 100644
index 00000000000..e43f55b2b9a
--- /dev/null
+++ b/core/node/api_server/src/tx_sender/tests/call.rs
@@ -0,0 +1,253 @@
+//! Tests for `eth_call`.
+
+use std::collections::HashMap;
+
+use assert_matches::assert_matches;
+use zksync_multivm::interface::ExecutionResult;
+use zksync_node_test_utils::create_l2_transaction;
+use zksync_types::{
+    api::state_override::OverrideAccount, transaction_request::CallRequest, K256PrivateKey,
+};
+
+use super::*;
+use crate::testonly::{decode_u256_output, Call3Result, Call3Value, StateBuilder, TestAccount};
+
+#[tokio::test]
+async fn eth_call_requires_single_connection() {
+    let pool = ConnectionPool::<Core>::constrained_test_pool(1).await;
+    let mut storage = pool.connection().await.unwrap();
+    let genesis_params = GenesisParams::mock();
+    insert_genesis_batch(&mut storage, &genesis_params)
+        .await
+        .unwrap();
+    let block_args = BlockArgs::pending(&mut storage).await.unwrap();
+    drop(storage);
+
+    let tx = create_l2_transaction(10, 100);
+    let tx_hash = tx.hash();
+
+    let mut tx_executor = MockOneshotExecutor::default();
+    tx_executor.set_call_responses(move |received_tx, _| {
+        assert_eq!(received_tx.hash(), tx_hash);
+        ExecutionResult::Success {
+            output: b"success!".to_vec(),
+        }
+    });
+    let tx_executor = SandboxExecutor::mock(tx_executor).await;
+    let (tx_sender, _) = create_test_tx_sender(
+        pool.clone(),
+        genesis_params.config().l2_chain_id,
+        tx_executor,
+    )
+    .await;
+    let call_overrides = CallOverrides {
+        enforced_base_fee: None,
+    };
+    let output = tx_sender
+        .eth_call(block_args, call_overrides, tx, None)
+        .await
+        .unwrap();
+    assert_eq!(output, b"success!");
+}
+
+async fn test_call(
+    tx_sender: &TxSender,
+    state_override: StateOverride,
+    mut call: CallRequest,
+) -> Result<Vec<u8>, SubmitTxError> {
+    call.gas = call.gas.max(Some(10_000_000.into()));
+    let call = L2Tx::from_request(call.into(), usize::MAX, true).unwrap();
+
+    let mut storage = tx_sender
+        .0
+        .replica_connection_pool
+        .connection()
+        .await
+        .unwrap();
+    let block_args = BlockArgs::pending(&mut storage).await.unwrap();
+    drop(storage);
+    let call_overrides = CallOverrides {
+        enforced_base_fee: None,
+    };
+
+    tx_sender
+        .eth_call(block_args, call_overrides, call, Some(state_override))
+        .await
+}
+
+#[tokio::test]
+async fn eth_call_with_balance() {
+    let alice = K256PrivateKey::random();
+    let initial_balance = 123_456_789.into();
+    let account_overrides = OverrideAccount {
+        balance: Some(initial_balance),
+        ..OverrideAccount::default()
+    };
+    let state_override = StateOverride::new(HashMap::from([(alice.address(), account_overrides)]));
+
+    let pool = ConnectionPool::<Core>::constrained_test_pool(1).await;
+    let tx_sender = create_real_tx_sender(pool).await;
+    let call = alice.query_base_token_balance();
+    let output = test_call(&tx_sender, state_override, call).await.unwrap();
+    assert_eq!(decode_u256_output(&output), initial_balance);
+}
+
+#[tokio::test]
+async fn eth_call_with_transfer() {
+    let alice = K256PrivateKey::random();
+    let transfer_value = 1_000_000_000.into();
+    let initial_balance = transfer_value * 5 / 3;
+    let state_override = StateBuilder::default()
+        .with_multicall3_contract()
+        .with_balance(alice.address(), initial_balance)
+        .build();
+
+    let transfer = alice.create_transfer(transfer_value);
+    let multicall = alice.multicall_with_value(
+        transfer_value,
+        &[transfer.into(), alice.query_base_token_balance().into()],
+    );
+
+    let pool = ConnectionPool::<Core>::constrained_test_pool(1).await;
+    let tx_sender = create_real_tx_sender(pool).await;
+    let output = test_call(&tx_sender, state_override, multicall)
+        .await
+        .unwrap();
+    let call_results = Call3Result::parse(&output);
+    assert_eq!(call_results.len(), 2);
+    assert!(
+        call_results[0].success && call_results[1].success,
+        "{call_results:?}"
+    );
+    assert!(call_results[0].return_data.is_empty(), "{call_results:?}");
+
+    let balance = call_results[1].as_u256();
+    // The bootloader doesn't compute gas refunds in the call mode, so the equality is exact
+    assert_eq!(balance, initial_balance - transfer_value);
+}
+
+#[tokio::test]
+async fn eth_call_with_counter() {
+    let alice = K256PrivateKey::random();
+    let state_override = StateBuilder::default().with_counter_contract(42).build();
+
+    let pool = ConnectionPool::<Core>::constrained_test_pool(1).await;
+    let tx_sender = create_real_tx_sender(pool).await;
+    let output = test_call(
+        &tx_sender,
+        state_override.clone(),
+        alice.query_counter_value(),
+    )
+    .await
+    .unwrap();
+    assert_eq!(decode_u256_output(&output), 42.into());
+
+    let tx_as_call = alice.create_counter_tx(3.into(), false).into();
+    let output = test_call(&tx_sender, state_override.clone(), tx_as_call)
+        .await
+        .unwrap();
+    assert_eq!(decode_u256_output(&output), 45.into());
+
+    let tx_as_call = alice.create_counter_tx(3.into(), true).into();
+    let err = test_call(&tx_sender, state_override, tx_as_call)
+        .await
+        .unwrap_err();
+    assert_matches!(
+        err,
+        SubmitTxError::ExecutionReverted(msg, _) if msg.contains("This method always reverts")
+    );
+}
+
+#[tokio::test]
+async fn eth_call_with_counter_transactions() {
+    let alice = K256PrivateKey::random();
+    let state_override = StateBuilder::default()
+        .with_multicall3_contract()
+        .with_counter_contract(0)
+        .build();
+
+    let multicall = alice.multicall_with_value(
+        0.into(),
+        &[
+            alice.create_counter_tx(1.into(), false).into(),
+            Call3Value::from(alice.create_counter_tx(2.into(), true)).allow_failure(),
+            alice.query_counter_value().into(),
+            alice.create_counter_tx(3.into(), false).into(),
+        ],
+    );
+    let pool = ConnectionPool::<Core>::constrained_test_pool(1).await;
+    let tx_sender = create_real_tx_sender(pool).await;
+    let output = test_call(&tx_sender, state_override, multicall)
+        .await
+        .unwrap();
+    let call_results = Call3Result::parse(&output);
+
+    assert_eq!(
+        call_results
+            .iter()
+            .map(|result| result.success)
+            .collect::<Vec<_>>(),
+        [true, false, true, true]
+    );
+    let counter_values: Vec<_> = call_results
+        .iter()
+        .filter_map(|result| {
+            if !result.success {
+                return None;
+            }
+            Some(decode_u256_output(&result.return_data).as_u32())
+        })
+        .collect();
+    assert_eq!(counter_values, [1, 1, 4]);
+}
+
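// Note (not patch content): why `counter_values == [1, 1, 4]` above. State changes persist
// across the subcalls bundled into a single `eth_call`: the first increment succeeds
// (counter = 1; `incrementWithRevert` returns the new value), the second subcall reverts and
// is rolled back (tolerated thanks to `allow_failure`), the `get` query then still observes 1,
// and the final increment by 3 succeeds, returning 1 + 3 = 4.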
+#[tokio::test]
+async fn eth_call_out_of_gas() {
+    let alice = K256PrivateKey::random();
+    let state_override = StateBuilder::default()
+        .with_infinite_loop_contract()
+        .build();
+
+    let pool = ConnectionPool::<Core>::constrained_test_pool(1).await;
+    let tx_sender = create_real_tx_sender(pool).await;
+    let tx_as_call = alice.create_infinite_loop_tx().into();
+    let err = test_call(&tx_sender, state_override, tx_as_call)
+        .await
+        .unwrap_err();
+    assert_matches!(err, SubmitTxError::ExecutionReverted(..));
+}
+
+#[tokio::test]
+async fn eth_call_with_load_test_transactions() {
+    let alice = K256PrivateKey::random();
+    let state_override = StateBuilder::default().with_load_test_contract().build();
+
+    let pool = ConnectionPool::<Core>::constrained_test_pool(1).await;
+    let tx_sender = create_real_tx_sender(pool).await;
+
+    // Deploys (naturally) don't work for calls, hence a separate set of test cases.
+    let load_test_cases_for_call = [
+        LoadnextContractExecutionParams {
+            deploys: 0,
+            ..LoadnextContractExecutionParams::default()
+        },
+        LoadnextContractExecutionParams {
+            deploys: 0,
+            recursive_calls: 20,
+            ..LoadnextContractExecutionParams::default()
+        },
+        LoadnextContractExecutionParams {
+            reads: 100,
+            writes: 100,
+            ..LoadnextContractExecutionParams::empty()
+        },
+    ];
+
+    for tx_params in load_test_cases_for_call {
+        println!("Executing {tx_params:?}");
+        let tx_as_call = alice.create_load_test_tx(tx_params).into();
+        test_call(&tx_sender, state_override.clone(), tx_as_call)
+            .await
+            .unwrap();
+    }
+}
diff --git a/core/node/api_server/src/tx_sender/tests/gas_estimation.rs b/core/node/api_server/src/tx_sender/tests/gas_estimation.rs
new file mode 100644
index 00000000000..4528d9cda12
--- /dev/null
+++ b/core/node/api_server/src/tx_sender/tests/gas_estimation.rs
@@ -0,0 +1,483 @@
+//! Tests for gas estimation (mostly with the real oneshot VM executor).
+
+use std::collections::HashMap;
+
+use assert_matches::assert_matches;
+use test_casing::{test_casing, Product};
+use zksync_system_constants::CODE_ORACLE_ADDRESS;
+use zksync_types::{
+    api::state_override::{OverrideAccount, OverrideState},
+    web3::keccak256,
+    K256PrivateKey,
+};
+use zksync_utils::bytecode::hash_bytecode;
+
+use super::*;
+use crate::{
+    testonly::{StateBuilder, TestAccount},
+    tx_sender::gas_estimation::GasEstimator,
+};
+
+/// Initial pivot multiplier empirically sufficient for most tx types.
+const DEFAULT_MULTIPLIER: f64 = 64.0 / 63.0;
+
+#[tokio::test]
+async fn initial_gas_estimation_is_somewhat_accurate() {
+    let pool = ConnectionPool::<Core>::constrained_test_pool(1).await;
+    let tx_sender = create_real_tx_sender(pool).await;
+    let block_args = pending_block_args(&tx_sender).await;
+
+    let alice = K256PrivateKey::random();
+    let transfer_value = U256::from(1_000_000_000);
+    let account_overrides = OverrideAccount {
+        balance: Some(transfer_value * 2),
+        ..OverrideAccount::default()
+    };
+    let state_override = StateOverride::new(HashMap::from([(alice.address(), account_overrides)]));
+    let tx = alice.create_transfer(transfer_value);
+
+    let mut estimator = GasEstimator::new(&tx_sender, tx.into(), block_args, Some(state_override))
+        .await
+        .unwrap();
+    estimator.adjust_transaction_fee();
+    let initial_estimate = estimator.initialize().await.unwrap();
+    assert!(initial_estimate.gas_charged_for_pubdata > 0);
+    assert!(initial_estimate.operator_overhead > 0);
+    let total_gas_charged = initial_estimate.total_gas_charged.unwrap();
+    assert!(
+        total_gas_charged
+            > initial_estimate.gas_charged_for_pubdata + initial_estimate.operator_overhead,
+        "{initial_estimate:?}"
+    );
+
+    // Check that a transaction fails if supplied with the lower bound.
+ let lower_bound = initial_estimate.lower_gas_bound_without_overhead().unwrap() + + initial_estimate.operator_overhead; + assert!(lower_bound < total_gas_charged, "{initial_estimate:?}"); + let (vm_result, _) = estimator.unadjusted_step(lower_bound).await.unwrap(); + assert!(vm_result.result.is_failed(), "{:?}", vm_result.result); + + // A slightly larger limit should work. + let initial_pivot = total_gas_charged * 64 / 63; + let (vm_result, _) = estimator.unadjusted_step(initial_pivot).await.unwrap(); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); +} + +#[test_casing(5, LOAD_TEST_CASES)] +#[tokio::test] +async fn initial_estimate_for_load_test_transaction(tx_params: LoadnextContractExecutionParams) { + let alice = K256PrivateKey::random(); + // Set the array length in the load test contract to 100, so that reads don't fail. + let state_override = StateBuilder::default().with_load_test_contract().build(); + let tx = alice.create_load_test_tx(tx_params); + + test_initial_estimate(state_override, tx, DEFAULT_MULTIPLIER).await; +} + +#[test_casing(2, [false, true])] +#[tokio::test] +async fn initial_estimate_for_deep_recursion(with_reads: bool) { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default().with_load_test_contract().build(); + + // Reads are chosen because they represent the worst case. Reads don't influence the amount of pubdata; + // i.e., they don't make it easier to execute a transaction because of additional gas reserved for pubdata. + // OTOH, reads still increase the amount of computational gas used on each nested call. + // + // Initial pivot multipliers below are the smallest ones with 0.1 precision. `DEFAULT_MULTIPLIER` works for smaller + // recursion depths because the transaction emits enough pubdata to cover gas deductions due to the 63/64 rule. + let depths_and_multipliers: &[_] = if with_reads { + &[(25, DEFAULT_MULTIPLIER), (50, 1.2), (75, 1.4), (100, 1.7)] + } else { + &[ + (50, DEFAULT_MULTIPLIER), + (75, 1.2), + (100, 1.4), + (125, 1.7), + (150, 2.1), + ] + }; + for &(recursion_depth, multiplier) in depths_and_multipliers { + println!("Testing recursion depth {recursion_depth}"); + let tx = alice.create_load_test_tx(LoadnextContractExecutionParams { + recursive_calls: recursion_depth, + reads: if with_reads { 10 } else { 0 }, + ..LoadnextContractExecutionParams::empty() + }); + test_initial_estimate(state_override.clone(), tx, multiplier).await; + } +} + +#[tokio::test] +async fn initial_estimate_for_deep_recursion_with_large_bytecode() { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default() + .with_load_test_contract() + .inflate_bytecode(StateBuilder::LOAD_TEST_ADDRESS, 50_000) + .build(); + let tx = alice.create_load_test_tx(LoadnextContractExecutionParams { + recursive_calls: 100, + ..LoadnextContractExecutionParams::empty() + }); + + test_initial_estimate(state_override, tx, 1.35).await; +} + +/// Tests the lower bound and initial pivot extracted from the initial estimate (one with effectively infinite gas amount). +/// Returns the VM result for a VM run with the initial pivot. 
+async fn test_initial_estimate(
+    state_override: StateOverride,
+    tx: L2Tx,
+    initial_pivot_multiplier: f64,
+) -> VmExecutionResultAndLogs {
+    let pool = ConnectionPool::<Core>::constrained_test_pool(1).await;
+    let tx_sender = create_real_tx_sender(pool).await;
+    let block_args = pending_block_args(&tx_sender).await;
+    let mut estimator = GasEstimator::new(&tx_sender, tx.into(), block_args, Some(state_override))
+        .await
+        .unwrap();
+    estimator.adjust_transaction_fee();
+    let initial_estimate = estimator.initialize().await.unwrap();
+
+    let lower_bound = initial_estimate.lower_gas_bound_without_overhead().unwrap()
+        + initial_estimate.operator_overhead;
+    let (vm_result, _) = estimator.unadjusted_step(lower_bound).await.unwrap();
+    assert!(vm_result.result.is_failed(), "{:?}", vm_result.result);
+
+    // A slightly larger limit should work.
+    let initial_pivot =
+        (initial_estimate.total_gas_charged.unwrap() as f64 * initial_pivot_multiplier) as u64;
+    let (vm_result, _) = estimator.unadjusted_step(initial_pivot).await.unwrap();
+    assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result);
+    vm_result
+}
+
+async fn test_initial_estimate_error(state_override: StateOverride, tx: L2Tx) -> SubmitTxError {
+    let pool = ConnectionPool::<Core>::constrained_test_pool(1).await;
+    let tx_sender = create_real_tx_sender(pool).await;
+    let block_args = pending_block_args(&tx_sender).await;
+    let mut estimator = GasEstimator::new(&tx_sender, tx.into(), block_args, Some(state_override))
+        .await
+        .unwrap();
+    estimator.adjust_transaction_fee();
+    estimator.initialize().await.unwrap_err()
+}
+
+/// Estimates both transactions with initial writes and cleanup.
+#[test_casing(4, [10, 50, 200, 1_000])]
+#[tokio::test]
+async fn initial_estimate_for_expensive_contract(write_count: usize) {
+    let alice = K256PrivateKey::random();
+    let mut state_override = StateBuilder::default().with_expensive_contract().build();
+    let tx = alice.create_expensive_tx(write_count);
+
+    let vm_result = test_initial_estimate(state_override.clone(), tx, DEFAULT_MULTIPLIER).await;
+
+    let contract_logs = vm_result.logs.storage_logs.into_iter().filter_map(|log| {
+        (*log.log.key.address() == StateBuilder::EXPENSIVE_CONTRACT_ADDRESS)
+            .then_some((*log.log.key.key(), log.log.value))
+    });
+    let contract_logs: HashMap<_, _> = contract_logs.collect();
+    assert!(contract_logs.len() >= write_count, "{contract_logs:?}");
+
+    state_override
+        .get_mut(&StateBuilder::EXPENSIVE_CONTRACT_ADDRESS)
+        .unwrap()
+        .state = Some(OverrideState::StateDiff(contract_logs));
+    let tx = alice.create_expensive_cleanup_tx();
+    test_initial_estimate(state_override, tx, DEFAULT_MULTIPLIER).await;
+}
+
+#[tokio::test]
+async fn initial_estimate_for_code_oracle_tx() {
+    let alice = K256PrivateKey::random();
+    // Add another contract that is never executed, but has a large bytecode.
+    let huge_contract_address = Address::repeat_byte(23);
+    let huge_contract_bytecode = vec![0_u8; 10_001 * 32];
+    let huge_contract_bytecode_hash = hash_bytecode(&huge_contract_bytecode);
+    let huge_contract_keccak_hash = H256(keccak256(&huge_contract_bytecode));
+
+    let state_override = StateBuilder::default()
+        .with_precompiles_contract()
+        .with_contract(huge_contract_address, huge_contract_bytecode)
+        .build();
+
+    let contract_override = state_override
+        .get(&StateBuilder::PRECOMPILES_CONTRACT_ADDRESS)
+        .unwrap();
+    let contract_bytecode = contract_override.code.as_ref().unwrap();
+    let contract_bytecode_hash = contract_bytecode.hash();
+    let contract_keccak_hash = H256(keccak256(contract_bytecode.as_ref()));
+
+    // Test contracts that are already decommitted when requested from the precompiles test contract.
+    let genesis_params = GenesisParams::mock();
+    let code_oracle_bytecode = genesis_params
+        .system_contracts()
+        .iter()
+        .find_map(|contract| {
+            (*contract.account_id.address() == CODE_ORACLE_ADDRESS).then_some(&contract.bytecode)
+        })
+        .expect("no code oracle");
+    let code_oracle_bytecode_hash = hash_bytecode(code_oracle_bytecode);
+    let code_oracle_keccak_hash = H256(keccak256(code_oracle_bytecode));
+
+    let warm_bytecode_hashes = [
+        (code_oracle_bytecode_hash, code_oracle_keccak_hash),
+        (contract_bytecode_hash, contract_keccak_hash),
+    ];
+    let mut decommitter_stats = 0.0;
+    for (hash, keccak_hash) in warm_bytecode_hashes {
+        println!("Testing bytecode: {hash:?}");
+        let tx = alice.create_code_oracle_tx(hash, keccak_hash);
+        let vm_result = test_initial_estimate(state_override.clone(), tx, DEFAULT_MULTIPLIER).await;
+        let stats = &vm_result.statistics.circuit_statistic;
+        decommitter_stats = stats.code_decommitter.max(decommitter_stats);
+    }
+    assert!(decommitter_stats > 0.0);
+
+    println!("Testing large bytecode");
+    let tx = alice.create_code_oracle_tx(huge_contract_bytecode_hash, huge_contract_keccak_hash);
+    let vm_result = test_initial_estimate(state_override, tx, 1.05).await;
+    // Sanity check: the transaction should spend significantly more on decommitment compared to the previous ones
+    let new_decommitter_stats = vm_result.statistics.circuit_statistic.code_decommitter;
+    assert!(
+        new_decommitter_stats > decommitter_stats * 1.5,
+        "old={decommitter_stats}, new={new_decommitter_stats}"
+    );
+}
+
+#[tokio::test]
+async fn initial_estimate_with_large_free_bytecode() {
+    let alice = K256PrivateKey::random();
+    let state_override = StateBuilder::default()
+        .with_precompiles_contract()
+        .inflate_bytecode(StateBuilder::PRECOMPILES_CONTRACT_ADDRESS, 50_000)
+        .build();
+    let contract_override = state_override
+        .get(&StateBuilder::PRECOMPILES_CONTRACT_ADDRESS)
+        .unwrap();
+    let contract_bytecode = contract_override.code.as_ref().unwrap();
+    let contract_bytecode_hash = contract_bytecode.hash();
+    let contract_keccak_hash = H256(keccak256(contract_bytecode.as_ref()));
+
+    // Ask the test contract to decommit itself. This should refund the decommit costs, but it will be charged at first.
+    let tx = alice.create_code_oracle_tx(contract_bytecode_hash, contract_keccak_hash);
+    test_initial_estimate(state_override, tx, 1.05).await;
+}
+
+#[tokio::test]
+async fn revert_during_initial_estimate() {
+    let alice = K256PrivateKey::random();
+    let state_override = StateBuilder::default().with_counter_contract(0).build();
+
+    let tx = alice.create_counter_tx(1.into(), true);
+    let err = test_initial_estimate_error(state_override, tx).await;
+    let SubmitTxError::ExecutionReverted(err, _) = err else {
+        panic!("Unexpected error: {err:?}");
+    };
+    assert_eq!(err, "This method always reverts");
+}
+
+#[tokio::test]
+async fn out_of_gas_during_initial_estimate() {
+    let alice = K256PrivateKey::random();
+    let state_override = StateBuilder::default()
+        .with_infinite_loop_contract()
+        .build();
+
+    let tx = alice.create_infinite_loop_tx();
+    let err = test_initial_estimate_error(state_override, tx).await;
+    // Unfortunately, we don't provide human-readable out-of-gas errors at the moment
+    assert_matches!(err, SubmitTxError::ExecutionReverted(msg, _) if msg.is_empty());
+}
+
+#[tokio::test]
+async fn insufficient_funds_error_for_transfer() {
+    let pool = ConnectionPool::<Core>::constrained_test_pool(1).await;
+    let tx_sender = create_real_tx_sender(pool).await;
+    let block_args = pending_block_args(&tx_sender).await;
+
+    let alice = K256PrivateKey::random();
+    let transferred_value = 1_000_000_000.into();
+    let tx = alice.create_transfer(transferred_value);
+    let fee_scale_factor = 1.0;
+    // Without overrides, the transaction should fail because of insufficient balance.
+    let err = tx_sender
+        .get_txs_fee_in_wei(
+            tx.clone().into(),
+            block_args,
+            fee_scale_factor,
+            1_000,
+            None,
+            BinarySearchKind::Full,
+        )
+        .await
+        .unwrap_err();
+    assert_matches!(
+        err,
+        SubmitTxError::NotEnoughBalanceForFeeValue(balance, fee, value)
+            if balance.is_zero() && fee.is_zero() && value == transferred_value
+    );
+}
+
+async fn test_estimating_gas(
+    state_override: StateOverride,
+    tx: L2Tx,
+    acceptable_overestimation: u64,
+) {
+    let pool = ConnectionPool::<Core>::constrained_test_pool(1).await;
+    let tx_sender = create_real_tx_sender(pool).await;
+    let block_args = pending_block_args(&tx_sender).await;
+
+    let fee_scale_factor = 1.0;
+    let fee = tx_sender
+        .get_txs_fee_in_wei(
+            tx.clone().into(),
+            block_args.clone(),
+            fee_scale_factor,
+            acceptable_overestimation,
+            Some(state_override.clone()),
+            BinarySearchKind::Full,
+        )
+        .await
+        .unwrap();
+    // Sanity-check gas limit
+    let gas_limit_after_full_search = u64::try_from(fee.gas_limit).unwrap();
+    assert!(
+        (10_000..10_000_000).contains(&gas_limit_after_full_search),
+        "{fee:?}"
+    );
+
+    let fee = tx_sender
+        .get_txs_fee_in_wei(
+            tx.into(),
+            block_args,
+            fee_scale_factor,
+            acceptable_overestimation,
+            Some(state_override.clone()),
+            BinarySearchKind::Optimized,
+        )
+        .await
+        .unwrap();
+    let gas_limit_after_optimized_search = u64::try_from(fee.gas_limit).unwrap();
+
+    let diff = gas_limit_after_full_search.abs_diff(gas_limit_after_optimized_search);
+    assert!(
+        diff <= acceptable_overestimation,
+        "full={gas_limit_after_full_search}, optimized={gas_limit_after_optimized_search}"
+    );
+}
+
+#[test_casing(3, [0, 100, 1_000])]
+#[tokio::test]
+async fn estimating_gas_for_transfer(acceptable_overestimation: u64) {
+    let alice = K256PrivateKey::random();
+    let transfer_value = 1_000_000_000.into();
+    let account_overrides = OverrideAccount {
+        balance: Some(transfer_value * 2),
+        ..OverrideAccount::default()
+    };
+    let state_override =
StateOverride::new(HashMap::from([(alice.address(), account_overrides)]));
+    let tx = alice.create_transfer(transfer_value);
+
+    test_estimating_gas(state_override, tx, acceptable_overestimation).await;
+}
+
+#[test_casing(10, Product((LOAD_TEST_CASES, [0, 100])))]
+#[tokio::test]
+async fn estimating_gas_for_load_test_tx(
+    tx_params: LoadnextContractExecutionParams,
+    acceptable_overestimation: u64,
+) {
+    let alice = K256PrivateKey::random();
+    let state_override = StateBuilder::default().with_load_test_contract().build();
+    let tx = alice.create_load_test_tx(tx_params);
+
+    test_estimating_gas(state_override, tx, acceptable_overestimation).await;
+}
+
+#[test_casing(4, [10, 50, 100, 200])]
+#[tokio::test]
+async fn estimating_gas_for_expensive_txs(write_count: usize) {
+    let alice = K256PrivateKey::random();
+    let state_override = StateBuilder::default().with_expensive_contract().build();
+    let tx = alice.create_expensive_tx(write_count);
+
+    test_estimating_gas(state_override, tx, 0).await;
+}
+
+#[tokio::test]
+async fn estimating_gas_for_code_oracle_tx() {
+    let alice = K256PrivateKey::random();
+    // Add another contract that is never executed, but has a large bytecode.
+    let huge_contract_address = Address::repeat_byte(23);
+    let huge_contract_bytecode = vec![0_u8; 10_001 * 32];
+    let huge_contract_bytecode_hash = hash_bytecode(&huge_contract_bytecode);
+    let huge_contract_keccak_hash = H256(keccak256(&huge_contract_bytecode));
+
+    let state_override = StateBuilder::default()
+        .with_precompiles_contract()
+        .with_contract(huge_contract_address, huge_contract_bytecode)
+        .build();
+    let tx = alice.create_code_oracle_tx(huge_contract_bytecode_hash, huge_contract_keccak_hash);
+
+    test_estimating_gas(state_override, tx, 0).await;
+}
+
+#[tokio::test]
+async fn estimating_gas_for_reverting_tx() {
+    let alice = K256PrivateKey::random();
+    let state_override = StateBuilder::default().with_counter_contract(0).build();
+
+    let tx = alice.create_counter_tx(1.into(), true);
+    let pool = ConnectionPool::<Core>::constrained_test_pool(1).await;
+    let tx_sender = create_real_tx_sender(pool).await;
+    let block_args = pending_block_args(&tx_sender).await;
+
+    let fee_scale_factor = 1.0;
+    let acceptable_overestimation = 0;
+    for binary_search_kind in [BinarySearchKind::Full, BinarySearchKind::Optimized] {
+        let err = tx_sender
+            .get_txs_fee_in_wei(
+                tx.clone().into(),
+                block_args.clone(),
+                fee_scale_factor,
+                acceptable_overestimation,
+                Some(state_override.clone()),
+                binary_search_kind,
+            )
+            .await
+            .unwrap_err();
+        assert_matches!(err, SubmitTxError::ExecutionReverted(..));
+    }
+}
+
+#[tokio::test]
+async fn estimating_gas_for_infinite_loop_tx() {
+    let alice = K256PrivateKey::random();
+    let state_override = StateBuilder::default()
+        .with_infinite_loop_contract()
+        .build();
+
+    let tx = alice.create_infinite_loop_tx();
+    let pool = ConnectionPool::<Core>::constrained_test_pool(1).await;
+    let tx_sender = create_real_tx_sender(pool).await;
+    let block_args = pending_block_args(&tx_sender).await;
+
+    let fee_scale_factor = 1.0;
+    let acceptable_overestimation = 0;
+    for binary_search_kind in [BinarySearchKind::Full, BinarySearchKind::Optimized] {
+        let err = tx_sender
+            .get_txs_fee_in_wei(
+                tx.clone().into(),
+                block_args.clone(),
+                fee_scale_factor,
+                acceptable_overestimation,
+                Some(state_override.clone()),
+                binary_search_kind,
+            )
+            .await
+            .unwrap_err();
+        assert_matches!(err, SubmitTxError::ExecutionReverted(msg, _) if msg.is_empty());
+    }
+}
diff --git
a/core/node/api_server/src/tx_sender/tests/mod.rs b/core/node/api_server/src/tx_sender/tests/mod.rs new file mode 100644 index 00000000000..cacd616202d --- /dev/null +++ b/core/node/api_server/src/tx_sender/tests/mod.rs @@ -0,0 +1,166 @@ +//! Tests for the transaction sender. + +use test_casing::TestCases; +use zksync_contracts::test_contracts::LoadnextContractExecutionParams; +use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; +use zksync_node_test_utils::{create_l2_block, prepare_recovery_snapshot}; +use zksync_types::{get_nonce_key, L1BatchNumber, L2BlockNumber, StorageLog}; +use zksync_vm_executor::oneshot::MockOneshotExecutor; + +use super::*; +use crate::web3::testonly::create_test_tx_sender; + +mod call; +mod gas_estimation; +mod send_tx; + +const LOAD_TEST_CASES: TestCases = test_casing::cases! {[ + LoadnextContractExecutionParams::default(), + // No storage modification + LoadnextContractExecutionParams { + writes: 0, + events: 0, + ..LoadnextContractExecutionParams::default() + }, + // Moderately deep recursion (very deep recursion is tested separately) + LoadnextContractExecutionParams { + recursive_calls: 10, + ..LoadnextContractExecutionParams::default() + }, + // No deploys + LoadnextContractExecutionParams { + deploys: 0, + ..LoadnextContractExecutionParams::default() + }, + // Lots of deploys + LoadnextContractExecutionParams { + deploys: 10, + ..LoadnextContractExecutionParams::default() + }, +]}; + +#[tokio::test] +async fn getting_nonce_for_account() { + let l2_chain_id = L2ChainId::default(); + let test_address = Address::repeat_byte(1); + let pool = ConnectionPool::::test_pool().await; + let mut storage = pool.connection().await.unwrap(); + insert_genesis_batch(&mut storage, &GenesisParams::mock()) + .await + .unwrap(); + // Manually insert a nonce for the address. + let nonce_key = get_nonce_key(&test_address); + let nonce_log = StorageLog::new_write_log(nonce_key, H256::from_low_u64_be(123)); + storage + .storage_logs_dal() + .append_storage_logs(L2BlockNumber(0), &[nonce_log]) + .await + .unwrap(); + + let tx_executor = MockOneshotExecutor::default(); + let tx_executor = SandboxExecutor::mock(tx_executor).await; + let (tx_sender, _) = create_test_tx_sender(pool.clone(), l2_chain_id, tx_executor).await; + + let nonce = tx_sender.get_expected_nonce(test_address).await.unwrap(); + assert_eq!(nonce, Nonce(123)); + + // Insert another L2 block with a new nonce log. 
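The `Nonce(123)` round-trip just asserted relies on the L2 storage layout for nonces: `get_nonce_key` maps an account address to its slot in the nonce holder system contract, and the account nonce occupies the low bytes of the stored 32-byte value (the deployment nonce is packed into the upper half of the same slot). The decoding step, sketched (the helper name is illustrative, not from this diff):

use zksync_types::H256; // the same type the surrounding tests use

fn decode_account_nonce(value: H256) -> u32 {
    // Take the low 8 bytes of the big-endian 32-byte value.
    u64::from_be_bytes(value.0[24..].try_into().unwrap()) as u32
}

fn main() {
    assert_eq!(decode_account_nonce(H256::from_low_u64_be(123)), 123);
    assert_eq!(decode_account_nonce(H256::zero()), 0); // missing accounts read as nonce 0
}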
+ storage + .blocks_dal() + .insert_l2_block(&create_l2_block(1)) + .await + .unwrap(); + let nonce_log = StorageLog { + value: H256::from_low_u64_be(321), + ..nonce_log + }; + storage + .storage_logs_dal() + .insert_storage_logs(L2BlockNumber(1), &[nonce_log]) + .await + .unwrap(); + + let nonce = tx_sender.get_expected_nonce(test_address).await.unwrap(); + assert_eq!(nonce, Nonce(321)); + let missing_address = Address::repeat_byte(0xff); + let nonce = tx_sender.get_expected_nonce(missing_address).await.unwrap(); + assert_eq!(nonce, Nonce(0)); +} + +#[tokio::test] +async fn getting_nonce_for_account_after_snapshot_recovery() { + const SNAPSHOT_L2_BLOCK_NUMBER: L2BlockNumber = L2BlockNumber(42); + + let pool = ConnectionPool::::test_pool().await; + let mut storage = pool.connection().await.unwrap(); + let test_address = Address::repeat_byte(1); + let other_address = Address::repeat_byte(2); + let nonce_logs = [ + StorageLog::new_write_log(get_nonce_key(&test_address), H256::from_low_u64_be(123)), + StorageLog::new_write_log(get_nonce_key(&other_address), H256::from_low_u64_be(25)), + ]; + prepare_recovery_snapshot( + &mut storage, + L1BatchNumber(23), + SNAPSHOT_L2_BLOCK_NUMBER, + &nonce_logs, + ) + .await; + + let l2_chain_id = L2ChainId::default(); + let tx_executor = MockOneshotExecutor::default(); + let tx_executor = SandboxExecutor::mock(tx_executor).await; + let (tx_sender, _) = create_test_tx_sender(pool.clone(), l2_chain_id, tx_executor).await; + + storage + .blocks_dal() + .insert_l2_block(&create_l2_block(SNAPSHOT_L2_BLOCK_NUMBER.0 + 1)) + .await + .unwrap(); + let new_nonce_logs = vec![StorageLog::new_write_log( + get_nonce_key(&test_address), + H256::from_low_u64_be(321), + )]; + storage + .storage_logs_dal() + .insert_storage_logs(SNAPSHOT_L2_BLOCK_NUMBER + 1, &new_nonce_logs) + .await + .unwrap(); + + let nonce = tx_sender.get_expected_nonce(test_address).await.unwrap(); + assert_eq!(nonce, Nonce(321)); + let nonce = tx_sender.get_expected_nonce(other_address).await.unwrap(); + assert_eq!(nonce, Nonce(25)); + let missing_address = Address::repeat_byte(0xff); + let nonce = tx_sender.get_expected_nonce(missing_address).await.unwrap(); + assert_eq!(nonce, Nonce(0)); +} + +async fn create_real_tx_sender(pool: ConnectionPool) -> TxSender { + let mut storage = pool.connection().await.unwrap(); + let genesis_params = GenesisParams::mock(); + insert_genesis_batch(&mut storage, &genesis_params) + .await + .unwrap(); + drop(storage); + + let genesis_config = genesis_params.config(); + let executor_options = SandboxExecutorOptions::new( + genesis_config.l2_chain_id, + AccountTreeId::new(genesis_config.fee_account), + u32::MAX, + ) + .await + .unwrap(); + + let pg_caches = PostgresStorageCaches::new(1, 1); + let tx_executor = SandboxExecutor::real(executor_options, pg_caches, usize::MAX); + create_test_tx_sender(pool, genesis_params.config().l2_chain_id, tx_executor) + .await + .0 +} + +async fn pending_block_args(tx_sender: &TxSender) -> BlockArgs { + let mut storage = tx_sender.acquire_replica_connection().await.unwrap(); + BlockArgs::pending(&mut storage).await.unwrap() +} diff --git a/core/node/api_server/src/tx_sender/tests/send_tx.rs b/core/node/api_server/src/tx_sender/tests/send_tx.rs new file mode 100644 index 00000000000..fdd63254cf0 --- /dev/null +++ b/core/node/api_server/src/tx_sender/tests/send_tx.rs @@ -0,0 +1,300 @@ +//! Tests for sending raw transactions. 
+ +use assert_matches::assert_matches; +use test_casing::test_casing; +use zksync_multivm::interface::ExecutionResult; +use zksync_node_fee_model::MockBatchFeeParamsProvider; +use zksync_node_test_utils::create_l2_transaction; +use zksync_types::K256PrivateKey; + +use super::*; +use crate::testonly::{StateBuilder, TestAccount}; + +#[tokio::test] +async fn submitting_tx_requires_one_connection() { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let mut storage = pool.connection().await.unwrap(); + insert_genesis_batch(&mut storage, &GenesisParams::mock()) + .await + .unwrap(); + + let l2_chain_id = L2ChainId::default(); + let fee_input = MockBatchFeeParamsProvider::default() + .get_batch_fee_input_scaled(1.0, 1.0) + .await + .unwrap(); + let (base_fee, gas_per_pubdata) = + derive_base_fee_and_gas_per_pubdata(fee_input, ProtocolVersionId::latest().into()); + let tx = create_l2_transaction(base_fee, gas_per_pubdata); + let tx_hash = tx.hash(); + + // Manually set sufficient balance for the tx initiator. + StateBuilder::default() + .with_balance(tx.initiator_account(), u64::MAX.into()) + .apply(&mut storage) + .await; + drop(storage); + + let mut tx_executor = MockOneshotExecutor::default(); + tx_executor.set_tx_responses(move |received_tx, _| { + assert_eq!(received_tx.hash(), tx_hash); + ExecutionResult::Success { output: vec![] } + }); + let tx_executor = SandboxExecutor::mock(tx_executor).await; + let (tx_sender, _) = create_test_tx_sender(pool.clone(), l2_chain_id, tx_executor).await; + let block_args = pending_block_args(&tx_sender).await; + + let submission_result = tx_sender.submit_tx(tx, block_args).await.unwrap(); + assert_matches!(submission_result.0, L2TxSubmissionResult::Added); + + let mut storage = pool.connection().await.unwrap(); + storage + .transactions_web3_dal() + .get_transaction_by_hash(tx_hash, l2_chain_id) + .await + .unwrap() + .expect("transaction is not persisted"); +} + +#[tokio::test] +async fn nonce_validation_errors() { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let mut storage = pool.connection().await.unwrap(); + insert_genesis_batch(&mut storage, &GenesisParams::mock()) + .await + .unwrap(); + drop(storage); + + let l2_chain_id = L2ChainId::default(); + let tx_executor = SandboxExecutor::mock(MockOneshotExecutor::default()).await; + let (tx_sender, _) = create_test_tx_sender(pool.clone(), l2_chain_id, tx_executor).await; + let mut tx = create_l2_transaction(55, 555); + + tx_sender.validate_account_nonce(&tx).await.unwrap(); + // There should be some leeway with the nonce validation. 
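The checks that follow pin down the validation window this comment refers to: a nonce at or slightly ahead of the expected one is accepted, anything further ahead fails with `NonceIsTooHigh`, and anything behind fails with `NonceIsTooLow`. Sketched as a predicate (the size of the window is a config value not shown in this diff; 200 below is an arbitrary stand-in):

enum NonceCheck {
    Ok,
    TooLow { expected: u32 },
    TooHigh { max_allowed: u32 },
}

fn validate_nonce(expected: u32, got: u32, max_nonce_ahead: u32) -> NonceCheck {
    if got < expected {
        NonceCheck::TooLow { expected }
    } else if got > expected + max_nonce_ahead {
        NonceCheck::TooHigh { max_allowed: expected + max_nonce_ahead }
    } else {
        NonceCheck::Ok
    }
}

fn main() {
    // Mirrors the assertions below: expected nonce 0, then 42 after the storage write.
    assert!(matches!(validate_nonce(0, 1, 200), NonceCheck::Ok));
    assert!(matches!(validate_nonce(0, 10_000, 200), NonceCheck::TooHigh { .. }));
    assert!(matches!(validate_nonce(42, 5, 200), NonceCheck::TooLow { .. }));
}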
+ tx.common_data.nonce = Nonce(1); + tx_sender.validate_account_nonce(&tx).await.unwrap(); + + tx.common_data.nonce = Nonce(10_000); + let err = tx_sender.validate_account_nonce(&tx).await.unwrap_err(); + assert_matches!( + err, + SubmitTxError::NonceIsTooHigh(from, _, actual) if actual == 10_000 && from == 0 + ); + + let mut storage = pool.connection().await.unwrap(); + let nonce_key = get_nonce_key(&tx.initiator_account()); + let nonce_log = StorageLog::new_write_log(nonce_key, H256::from_low_u64_be(42)); + storage + .storage_logs_dal() + .append_storage_logs(L2BlockNumber(0), &[nonce_log]) + .await + .unwrap(); + drop(storage); + + let err = tx_sender.validate_account_nonce(&tx).await.unwrap_err(); + assert_matches!( + err, + SubmitTxError::NonceIsTooHigh(from, _, actual) if actual == 10_000 && from == 42 + ); + + tx.common_data.nonce = Nonce(5); + let err = tx_sender.validate_account_nonce(&tx).await.unwrap_err(); + assert_matches!( + err, + SubmitTxError::NonceIsTooLow(from, _, actual) if actual == 5 && from == 42 + ); +} + +#[tokio::test] +async fn fee_validation_errors() { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let mut storage = pool.connection().await.unwrap(); + insert_genesis_batch(&mut storage, &GenesisParams::mock()) + .await + .unwrap(); + + let l2_chain_id = L2ChainId::default(); + let tx_executor = SandboxExecutor::mock(MockOneshotExecutor::default()).await; + let (tx_sender, _) = create_test_tx_sender(pool.clone(), l2_chain_id, tx_executor).await; + let fee_input = MockBatchFeeParamsProvider::default() + .get_batch_fee_input_scaled(1.0, 1.0) + .await + .unwrap(); + let (base_fee, gas_per_pubdata) = + derive_base_fee_and_gas_per_pubdata(fee_input, ProtocolVersionId::latest().into()); + let tx = create_l2_transaction(base_fee, gas_per_pubdata); + + StateBuilder::default() + .with_balance(tx.initiator_account(), u64::MAX.into()) + .apply(&mut storage) + .await; + drop(storage); + + // Sanity check: validation should succeed with reasonable fee params. + tx_sender + .validate_tx(&tx, ProtocolVersionId::latest()) + .await + .unwrap(); + + { + let mut tx = tx.clone(); + tx.common_data.fee.gas_limit = 100.into(); + let err = tx_sender + .validate_tx(&tx, ProtocolVersionId::latest()) + .await + .unwrap_err(); + assert_matches!(err, SubmitTxError::IntrinsicGas); + } + { + let mut tx = tx.clone(); + tx.common_data.fee.gas_limit = u64::MAX.into(); + let err = tx_sender + .validate_tx(&tx, ProtocolVersionId::latest()) + .await + .unwrap_err(); + assert_matches!(err, SubmitTxError::GasLimitIsTooBig); + } + { + let mut tx = tx.clone(); + tx.common_data.fee.max_fee_per_gas = 1.into(); + let err = tx_sender + .validate_tx(&tx, ProtocolVersionId::latest()) + .await + .unwrap_err(); + assert_matches!(err, SubmitTxError::MaxFeePerGasTooLow); + } + { + let mut tx = tx.clone(); + tx.common_data.fee.max_priority_fee_per_gas = tx.common_data.fee.max_fee_per_gas * 2; + let err = tx_sender + .validate_tx(&tx, ProtocolVersionId::latest()) + .await + .unwrap_err(); + assert_matches!(err, SubmitTxError::MaxPriorityFeeGreaterThanMaxFee); + } +} + +#[tokio::test] +async fn sending_transfer() { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; + let alice = K256PrivateKey::random(); + + // Manually set sufficient balance for the tx initiator. 
+ let mut storage = tx_sender.acquire_replica_connection().await.unwrap(); + StateBuilder::default() + .with_balance(alice.address(), u64::MAX.into()) + .apply(&mut storage) + .await; + drop(storage); + + let transfer = alice.create_transfer(1_000_000_000.into()); + let (sub_result, vm_result) = tx_sender.submit_tx(transfer, block_args).await.unwrap(); + assert_matches!(sub_result, L2TxSubmissionResult::Added); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); +} + +#[tokio::test] +async fn sending_transfer_with_insufficient_balance() { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; + let alice = K256PrivateKey::random(); + let transfer_value = 1_000_000_000.into(); + + let transfer = alice.create_transfer(transfer_value); + let err = tx_sender.submit_tx(transfer, block_args).await.unwrap_err(); + assert_matches!( + err, + SubmitTxError::NotEnoughBalanceForFeeValue(balance, _, value) if balance.is_zero() + && value == transfer_value + ); +} + +#[tokio::test] +async fn sending_transfer_with_incorrect_signature() { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; + let alice = K256PrivateKey::random(); + let transfer_value = 1_000_000_000.into(); + + let mut storage = tx_sender.acquire_replica_connection().await.unwrap(); + StateBuilder::default() + .with_balance(alice.address(), u64::MAX.into()) + .apply(&mut storage) + .await; + drop(storage); + + let mut transfer = alice.create_transfer(transfer_value); + transfer.execute.value = transfer_value / 2; // This should invalidate tx signature + let err = tx_sender.submit_tx(transfer, block_args).await.unwrap_err(); + assert_matches!(err, SubmitTxError::ValidationFailed(_)); +} + +#[test_casing(5, LOAD_TEST_CASES)] +#[tokio::test] +async fn sending_load_test_transaction(tx_params: LoadnextContractExecutionParams) { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; + let alice = K256PrivateKey::random(); + + let mut storage = tx_sender.acquire_replica_connection().await.unwrap(); + StateBuilder::default() + .with_load_test_contract() + .with_balance(alice.address(), u64::MAX.into()) + .apply(&mut storage) + .await; + drop(storage); + + let tx = alice.create_load_test_tx(tx_params); + let (sub_result, vm_result) = tx_sender.submit_tx(tx, block_args).await.unwrap(); + assert_matches!(sub_result, L2TxSubmissionResult::Added); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); +} + +#[tokio::test] +async fn sending_reverting_transaction() { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let block_args = pending_block_args(&tx_sender).await; + let alice = K256PrivateKey::random(); + + let mut storage = tx_sender.acquire_replica_connection().await.unwrap(); + StateBuilder::default() + .with_counter_contract(0) + .with_balance(alice.address(), u64::MAX.into()) + .apply(&mut storage) + .await; + drop(storage); + + let tx = alice.create_counter_tx(1.into(), true); + let (_, vm_result) = tx_sender.submit_tx(tx, block_args).await.unwrap(); + assert_matches!( + vm_result.result, + ExecutionResult::Revert { output } if output.to_string().contains("This method always reverts") + ); +} + 
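Both insufficient-balance tests (here and in the gas-estimation module above) exercise the same pre-check behind `NotEnoughBalanceForFeeValue`: the initiator must be able to cover the fee plus the transferred value. The predicate, sketched under that assumption:

use zksync_types::U256; // as used throughout these tests

/// The balance pre-check behind `NotEnoughBalanceForFeeValue`, sketched:
/// the sender must cover both the max fee and the transferred value.
fn covers_fee_and_value(balance: U256, max_fee: U256, value: U256) -> bool {
    balance >= max_fee + value
}

fn main() {
    let value = U256::from(1_000_000_000_u64);
    // A fresh random account has zero balance, so even with a zero fee the
    // transfer above is rejected.
    assert!(!covers_fee_and_value(U256::zero(), U256::zero(), value));
}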
+#[tokio::test]
+async fn sending_transaction_out_of_gas() {
+    let pool = ConnectionPool::<Core>::constrained_test_pool(1).await;
+    let tx_sender = create_real_tx_sender(pool).await;
+    let block_args = pending_block_args(&tx_sender).await;
+    let alice = K256PrivateKey::random();
+
+    let mut storage = tx_sender.acquire_replica_connection().await.unwrap();
+    StateBuilder::default()
+        .with_infinite_loop_contract()
+        .with_balance(alice.address(), u64::MAX.into())
+        .apply(&mut storage)
+        .await;
+    drop(storage);
+
+    let tx = alice.create_infinite_loop_tx();
+    let (_, vm_result) = tx_sender.submit_tx(tx, block_args).await.unwrap();
+    assert_matches!(vm_result.result, ExecutionResult::Revert { .. });
+}
diff --git a/core/node/api_server/src/utils.rs b/core/node/api_server/src/utils.rs
index 6769e773dc7..c7a1134682b 100644
--- a/core/node/api_server/src/utils.rs
+++ b/core/node/api_server/src/utils.rs
@@ -6,9 +6,33 @@ use std::{
     time::{Duration, Instant},
 };
 
+use anyhow::Context;
 use zksync_dal::{Connection, Core, DalError};
+use zksync_multivm::circuit_sequencer_api_latest::boojum::ethereum_types::U256;
 use zksync_web3_decl::error::Web3Error;
 
+pub(crate) fn prepare_evm_bytecode(raw: &[u8]) -> anyhow::Result<Vec<u8>> {
+    // EVM bytecodes are prefixed with a big-endian `U256` bytecode length.
+    let bytecode_len_bytes = raw.get(..32).context("length < 32")?;
+    let bytecode_len = U256::from_big_endian(bytecode_len_bytes);
+    let bytecode_len: usize = bytecode_len
+        .try_into()
+        .map_err(|_| anyhow::anyhow!("length ({bytecode_len}) overflow"))?;
+    let bytecode = raw.get(32..(32 + bytecode_len)).with_context(|| {
+        format!(
+            "prefixed length ({bytecode_len}) exceeds real length ({})",
+            raw.len() - 32
+        )
+    })?;
+    // Since slicing above succeeded, this one is safe.
+    let padding = &raw[(32 + bytecode_len)..];
+    anyhow::ensure!(
+        padding.iter().all(|&b| b == 0),
+        "bytecode padding contains non-zero bytes"
+    );
+    Ok(bytecode.to_vec())
+}
+
 /// Opens a readonly transaction over the specified connection.
 pub(crate) async fn open_readonly_transaction<'r>(
     conn: &'r mut Connection<'_, Core>,
@@ -66,3 +90,15 @@ macro_rules!
report_filter { ReportFilter::new($interval, &LAST_TIMESTAMP) }}; } + +#[cfg(test)] +mod tests { + use super::*; + use crate::testonly::{PROCESSED_EVM_BYTECODE, RAW_EVM_BYTECODE}; + + #[test] + fn preparing_evm_bytecode() { + let prepared = prepare_evm_bytecode(RAW_EVM_BYTECODE).unwrap(); + assert_eq!(prepared, PROCESSED_EVM_BYTECODE); + } +} diff --git a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/en.rs b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/en.rs index de763526373..9f5e54a5f4f 100644 --- a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/en.rs +++ b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/en.rs @@ -37,6 +37,15 @@ impl EnNamespaceServer for EnNamespace { .map_err(|err| self.current_method().map_err(err)) } + async fn block_metadata( + &self, + block_number: L2BlockNumber, + ) -> RpcResult> { + self.block_metadata_impl(block_number) + .await + .map_err(|err| self.current_method().map_err(err)) + } + async fn sync_tokens(&self, block_number: Option) -> RpcResult> { self.sync_tokens_impl(block_number) .await diff --git a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/eth.rs b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/eth.rs index 3fdba8e78ce..93f0205c77f 100644 --- a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/eth.rs +++ b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/eth.rs @@ -4,7 +4,7 @@ use zksync_types::{ Log, Transaction, TransactionId, TransactionReceipt, TransactionVariant, }, transaction_request::CallRequest, - web3::{Bytes, Index, SyncState}, + web3::{Bytes, Index, SyncState, U64Number}, Address, H256, U256, U64, }; use zksync_web3_decl::{ @@ -260,16 +260,20 @@ impl EthNamespaceServer for EthNamespace { async fn fee_history( &self, - block_count: U64, + block_count: U64Number, newest_block: BlockNumber, reward_percentiles: Option>, ) -> RpcResult { self.fee_history_impl( - block_count, + block_count.into(), newest_block, reward_percentiles.unwrap_or_default(), ) .await .map_err(|err| self.current_method().map_err(err)) } + + async fn max_priority_fee_per_gas(&self) -> RpcResult { + Ok(self.max_priority_fee_per_gas_impl()) + } } diff --git a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/zks.rs b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/zks.rs index b55c6ca5946..6ae1fec5b94 100644 --- a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/zks.rs +++ b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/zks.rs @@ -55,7 +55,7 @@ impl ZksNamespaceServer for ZksNamespace { } async fn get_bridge_contracts(&self) -> RpcResult { - Ok(self.get_bridge_contracts_impl()) + Ok(self.get_bridge_contracts_impl().await) } async fn l1_chain_id(&self) -> RpcResult { diff --git a/core/node/api_server/src/web3/mod.rs b/core/node/api_server/src/web3/mod.rs index bad1b493a5f..620e9185078 100644 --- a/core/node/api_server/src/web3/mod.rs +++ b/core/node/api_server/src/web3/mod.rs @@ -47,6 +47,7 @@ use self::{ use crate::{ execution_sandbox::{BlockStartInfo, VmConcurrencyBarrier}, tx_sender::TxSender, + web3::state::BridgeAddressesHandle, }; pub mod backend_jsonrpsee; @@ -143,7 +144,6 @@ struct OptionalApiParams { #[derive(Debug)] pub struct ApiServer { pool: ConnectionPool, - updaters_pool: ConnectionPool, health_updater: Arc, config: InternalApiConfig, transport: ApiTransport, @@ -153,18 +153,21 @@ pub struct ApiServer { namespaces: Vec, method_tracer: Arc, optional: OptionalApiParams, + bridge_addresses_handle: BridgeAddressesHandle, + 
sealed_l2_block_handle: SealedL2BlockNumber, } #[derive(Debug)] pub struct ApiBuilder { pool: ConnectionPool, - updaters_pool: ConnectionPool, config: InternalApiConfig, polling_interval: Duration, pruning_info_refresh_interval: Duration, // Mandatory params that must be set using builder methods. transport: Option, tx_sender: Option, + bridge_addresses_handle: Option, + sealed_l2_block_handle: Option, // Optional params that may or may not be set using builder methods. We treat `namespaces` // specially because we want to output a warning if they are not set. namespaces: Option>, @@ -178,13 +181,14 @@ impl ApiBuilder { pub fn jsonrpsee_backend(config: InternalApiConfig, pool: ConnectionPool) -> Self { Self { - updaters_pool: pool.clone(), pool, config, polling_interval: Self::DEFAULT_POLLING_INTERVAL, pruning_info_refresh_interval: Self::DEFAULT_PRUNING_INFO_REFRESH_INTERVAL, transport: None, tx_sender: None, + bridge_addresses_handle: None, + sealed_l2_block_handle: None, namespaces: None, method_tracer: Arc::new(MethodTracer::default()), optional: OptionalApiParams::default(), @@ -201,15 +205,6 @@ impl ApiBuilder { self } - /// Configures a dedicated DB pool to be used for updating different information, - /// such as last mined block number or account nonces. This pool is used to execute - /// in a background task. If not called, the main pool will be used. If the API server is under high load, - /// it may make sense to supply a single-connection pool to reduce pool contention with the API methods. - pub fn with_updaters_pool(mut self, pool: ConnectionPool) -> Self { - self.updaters_pool = pool; - self - } - pub fn with_tx_sender(mut self, tx_sender: TxSender) -> Self { self.tx_sender = Some(tx_sender); self @@ -285,6 +280,22 @@ impl ApiBuilder { self } + pub fn with_sealed_l2_block_handle( + mut self, + sealed_l2_block_handle: SealedL2BlockNumber, + ) -> Self { + self.sealed_l2_block_handle = Some(sealed_l2_block_handle); + self + } + + pub fn with_bridge_addresses_handle( + mut self, + bridge_addresses_handle: BridgeAddressesHandle, + ) -> Self { + self.bridge_addresses_handle = Some(bridge_addresses_handle); + self + } + // Intended for tests only. 
#[doc(hidden)] fn with_pub_sub_events(mut self, sender: mpsc::UnboundedSender) -> Self { @@ -312,7 +323,6 @@ impl ApiBuilder { Ok(ApiServer { pool: self.pool, health_updater: Arc::new(health_updater), - updaters_pool: self.updaters_pool, config: self.config, transport, tx_sender: self.tx_sender.context("Transaction sender not set")?, @@ -326,6 +336,12 @@ impl ApiBuilder { }), method_tracer: self.method_tracer, optional: self.optional, + sealed_l2_block_handle: self + .sealed_l2_block_handle + .context("Sealed l2 block handle not set")?, + bridge_addresses_handle: self + .bridge_addresses_handle + .context("Bridge addresses handle not set")?, }) } } @@ -335,11 +351,8 @@ impl ApiServer { self.health_updater.subscribe() } - async fn build_rpc_state( - self, - last_sealed_l2_block: SealedL2BlockNumber, - ) -> anyhow::Result { - let mut storage = self.updaters_pool.connection_tagged("api").await?; + async fn build_rpc_state(self) -> anyhow::Result { + let mut storage = self.pool.connection_tagged("api").await?; let start_info = BlockStartInfo::new(&mut storage, self.pruning_info_refresh_interval).await?; drop(storage); @@ -363,7 +376,8 @@ impl ApiServer { api_config: self.config, start_info, mempool_cache: self.optional.mempool_cache, - last_sealed_l2_block, + last_sealed_l2_block: self.sealed_l2_block_handle, + bridge_addresses_handle: self.bridge_addresses_handle, tree_api: self.optional.tree_api, }) } @@ -371,11 +385,10 @@ impl ApiServer { async fn build_rpc_module( self, pub_sub: Option, - last_sealed_l2_block: SealedL2BlockNumber, ) -> anyhow::Result> { let namespaces = self.namespaces.clone(); let zksync_network_id = self.config.l2_chain_id; - let rpc_state = self.build_rpc_state(last_sealed_l2_block).await?; + let rpc_state = self.build_rpc_state().await?; // Collect all the methods into a single RPC module. let mut rpc = RpcModule::new(()); @@ -473,21 +486,9 @@ impl ApiServer { self, stop_receiver: watch::Receiver, ) -> anyhow::Result { - // Chosen to be significantly smaller than the interval between L2 blocks, but larger than - // the latency of getting the latest sealed L2 block number from Postgres. If the API server - // processes enough requests, information about the latest sealed L2 block will be updated - // by reporting block difference metrics, so the actual update lag would be much smaller than this value. - const SEALED_L2_BLOCK_UPDATE_INTERVAL: Duration = Duration::from_millis(25); - let transport = self.transport; + let mut tasks = vec![]; - let (last_sealed_l2_block, sealed_l2_block_update_task) = SealedL2BlockNumber::new( - self.updaters_pool.clone(), - SEALED_L2_BLOCK_UPDATE_INTERVAL, - stop_receiver.clone(), - ); - - let mut tasks = vec![tokio::spawn(sealed_l2_block_update_task)]; let pub_sub = if matches!(transport, ApiTransport::WebSocket(_)) && self.namespaces.contains(&Namespace::Pubsub) { @@ -510,12 +511,8 @@ impl ApiServer { // framework it'll no longer be needed. 
         let health_check = self.health_updater.subscribe();
         let (local_addr_sender, local_addr) = oneshot::channel();
-        let server_task = tokio::spawn(self.run_jsonrpsee_server(
-            stop_receiver,
-            pub_sub,
-            last_sealed_l2_block,
-            local_addr_sender,
-        ));
+        let server_task =
+            tokio::spawn(self.run_jsonrpsee_server(stop_receiver, pub_sub, local_addr_sender));
         tasks.push(server_task);
         Ok(ApiServerHandles {
@@ -584,7 +581,6 @@ impl ApiServer {
         self,
         mut stop_receiver: watch::Receiver<bool>,
         pub_sub: Option,
-        last_sealed_l2_block: SealedL2BlockNumber,
         local_addr_sender: oneshot::Sender,
     ) -> anyhow::Result<()> {
         let transport = self.transport;
@@ -640,7 +636,7 @@ impl ApiServer {
             tracing::info!("Enabled extended call tracing for {transport_str} API server; this might negatively affect performance");
         }
 
-        let rpc = self.build_rpc_module(pub_sub, last_sealed_l2_block).await?;
+        let rpc = self.build_rpc_module(pub_sub).await?;
         let registered_method_names = Arc::new(rpc.method_names().collect::>());
         tracing::debug!(
             "Built RPC module for {transport_str} server with {} methods: {registered_method_names:?}",
diff --git a/core/node/api_server/src/web3/namespaces/debug.rs b/core/node/api_server/src/web3/namespaces/debug.rs
index 71560e4ddb8..726f35ac29a 100644
--- a/core/node/api_server/src/web3/namespaces/debug.rs
+++ b/core/node/api_server/src/web3/namespaces/debug.rs
@@ -7,7 +7,7 @@ use zksync_types::{
         BlockId, BlockNumber, CallTracerBlockResult, CallTracerResult, DebugCall, DebugCallType,
         ResultDebugCall, SupportedTracers, TracerConfig,
     },
-    debug_flat_call::{Action, CallResult, DebugCallFlat},
+    debug_flat_call::{Action, CallResult, CallTraceMeta, DebugCallFlat, ResultDebugCallFlat},
     l2::L2Tx,
     transaction_request::CallRequest,
     web3, H256, U256,
@@ -31,8 +31,7 @@ impl DebugNamespace {
     pub(crate) fn map_call(
         call: Call,
-        index: usize,
-        transaction_hash: H256,
+        meta: CallTraceMeta,
         tracer_option: TracerConfig,
     ) -> CallTracerResult {
         match tracer_option.tracer {
@@ -42,14 +41,13 @@
             )),
             SupportedTracers::FlatCallTracer => {
                 let mut calls = vec![];
-                let mut traces = vec![index];
+                let mut traces = vec![meta.index_in_block];
                 Self::flatten_call(
                     call,
                     &mut calls,
                     &mut traces,
                     tracer_option.tracer_config.only_top_call,
-                    index,
-                    transaction_hash,
+                    &meta,
                 );
                 CallTracerResult::FlatCallTrace(calls)
             }
@@ -89,8 +87,7 @@ impl DebugNamespace {
         calls: &mut Vec<DebugCallFlat>,
         trace_address: &mut Vec<usize>,
         only_top_call: bool,
-        transaction_position: usize,
-        transaction_hash: H256,
+        meta: &CallTraceMeta,
     ) {
         let subtraces = call.calls.len();
         let debug_type = match call.r#type {
@@ -120,22 +117,17 @@
             result,
             subtraces,
             trace_address: trace_address.clone(), // Clone the current trace address
-            transaction_position,
-            transaction_hash,
+            transaction_position: meta.index_in_block,
+            transaction_hash: meta.tx_hash,
+            block_number: meta.block_number,
+            block_hash: meta.block_hash,
             r#type: DebugCallType::Call,
         });
         if !only_top_call {
             for (number, call) in call.calls.into_iter().enumerate() {
                 trace_address.push(number);
-                Self::flatten_call(
-                    call,
-                    calls,
-                    trace_address,
-                    false,
-                    transaction_position,
-                    transaction_hash,
-                );
+                Self::flatten_call(call, calls, trace_address, false, meta);
                 trace_address.pop();
             }
         }
@@ -158,6 +150,6 @@ impl DebugNamespace {
         let mut connection = self.state.acquire_connection().await?;
         let block_number = self.state.resolve_block(&mut connection, block_id).await?;
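`flatten_call` above is a depth-first walk that serializes the call tree into flat entries, each carrying its position as a `trace_address` path ([0] for the top-level call, [0, 0] for its first child, and so on) plus a `subtraces` child count; the `TraceBlockFlatTest` assertions later in this diff check exactly that shape. A toy version of the traversal (simplified types, not the real `DebugCallFlat`):

struct Call {
    calls: Vec<Call>,
}

struct FlatCall {
    trace_address: Vec<usize>,
    subtraces: usize,
}

fn flatten(call: &Call, out: &mut Vec<FlatCall>, path: &mut Vec<usize>) {
    out.push(FlatCall {
        trace_address: path.clone(),
        subtraces: call.calls.len(),
    });
    for (i, child) in call.calls.iter().enumerate() {
        path.push(i);
        flatten(child, out, path);
        path.pop();
    }
}

fn main() {
    // A top-level call with two nested calls, like the first tx in the test.
    let call = Call { calls: vec![Call { calls: vec![] }, Call { calls: vec![] }] };
    let mut flat = vec![];
    // The initial path [0] mirrors `vec![meta.index_in_block]` for the first tx.
    flatten(&call, &mut flat, &mut vec![0]);
    assert_eq!(flat.len(), 3);
    assert_eq!(flat[0].trace_address, [0]);
    assert_eq!(flat[0].subtraces, 2);
    assert_eq!(flat[1].trace_address, [0, 0]);
    assert_eq!(flat[2].trace_address, [0, 1]);
}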
         self.current_method()
             .set_block_diff(self.state.last_sealed_l2_block.diff(block_number));
@@ -172,25 +165,31 @@
             SupportedTracers::CallTracer => CallTracerBlockResult::CallTrace(
                 call_traces
                     .into_iter()
-                    .map(|(call, _, _)| ResultDebugCall {
+                    .map(|(call, _)| ResultDebugCall {
                         result: Self::map_default_call(call, options.tracer_config.only_top_call),
                     })
                     .collect(),
             ),
             SupportedTracers::FlatCallTracer => {
-                let mut flat_calls = vec![];
-                for (call, tx_hash, tx_index) in call_traces {
-                    let mut traces = vec![tx_index];
-                    Self::flatten_call(
-                        call,
-                        &mut flat_calls,
-                        &mut traces,
-                        options.tracer_config.only_top_call,
-                        tx_index,
-                        tx_hash,
-                    );
-                }
-                CallTracerBlockResult::FlatCallTrace(flat_calls)
+                let res = call_traces
+                    .into_iter()
+                    .map(|(call, meta)| {
+                        let mut traces = vec![meta.index_in_block];
+                        let mut flat_calls = vec![];
+                        Self::flatten_call(
+                            call,
+                            &mut flat_calls,
+                            &mut traces,
+                            options.tracer_config.only_top_call,
+                            &meta,
+                        );
+                        ResultDebugCallFlat {
+                            tx_hash: meta.tx_hash,
+                            result: flat_calls,
+                        }
+                    })
+                    .collect();
+                CallTracerBlockResult::FlatCallTrace(res)
             }
         };
         Ok(result)
@@ -207,13 +206,8 @@
             .get_call_trace(tx_hash)
             .await
             .map_err(DalError::generalize)?;
-        Ok(call_trace.map(|(call_trace, index_in_block)| {
-            Self::map_call(
-                call_trace,
-                index_in_block,
-                tx_hash,
-                options.unwrap_or_default(),
-            )
+        Ok(call_trace.map(|(call_trace, meta)| {
+            Self::map_call(call_trace, meta, options.unwrap_or_default())
         }))
     }
@@ -259,7 +253,11 @@
         };
         let call_overrides = request.get_call_overrides()?;
-        let call = L2Tx::from_request(request.into(), MAX_ENCODED_TX_SIZE)?;
+        let call = L2Tx::from_request(
+            request.into(),
+            MAX_ENCODED_TX_SIZE,
+            block_args.use_evm_emulator(),
+        )?;
 
         let vm_permit = self
             .state
@@ -301,8 +299,6 @@
                 ))
             }
         };
-        // It's a call request, it's safe to keep it zero
-        let hash = H256::zero();
         let call = Call::new_high_level(
             call.common_data.fee.gas_limit.as_u64(),
             result.vm.statistics.gas_used,
@@ -312,6 +308,12 @@
             revert_reason,
             result.call_traces,
         );
-        Ok(Self::map_call(call, 0, hash, options))
+        let number = block_args.resolved_block_number();
+        let meta = CallTraceMeta {
+            block_number: number.0,
+            // It's a call request; it's safe to keep everything else as default
+            ..Default::default()
+        };
+        Ok(Self::map_call(call, meta, options))
     }
 }
diff --git a/core/node/api_server/src/web3/namespaces/en.rs b/core/node/api_server/src/web3/namespaces/en.rs
index a412c064fac..a09a0cb92fc 100644
--- a/core/node/api_server/src/web3/namespaces/en.rs
+++ b/core/node/api_server/src/web3/namespaces/en.rs
@@ -1,5 +1,6 @@
 use anyhow::Context as _;
 use zksync_config::{configs::EcosystemContracts, GenesisConfig};
+use zksync_consensus_roles::validator;
 use zksync_dal::{CoreDal, DalError};
 use zksync_types::{
     api::en, protocol_version::ProtocolSemanticVersion, tokens::TokenInfo, Address, L1BatchNumber,
@@ -86,6 +87,36 @@ impl EnNamespace {
         )))
     }
 
+    #[tracing::instrument(skip(self))]
+    pub async fn block_metadata_impl(
+        &self,
+        block_number: L2BlockNumber,
+    ) -> Result<Option<en::BlockMetadata>, Web3Error> {
+        let Some(meta) = self
+            .state
+            .acquire_connection()
+            .await?
+            // unwrap is ok, because we start the outermost transaction.
+            .transaction_builder()
+            .unwrap()
+            // Run a readonly transaction to perform consistent reads.
+            .set_readonly()
+            .build()
+            .await
+            .context("TransactionBuilder::build()")?
+            .consensus_dal()
+            .block_metadata(validator::BlockNumber(block_number.0.into()))
+            .await?
+        else {
+            return Ok(None);
+        };
+        Ok(Some(en::BlockMetadata(
+            zksync_protobuf::serde::Serialize
+                .proto_fmt(&meta, serde_json::value::Serializer)
+                .unwrap(),
+        )))
+    }
+
     pub(crate) fn current_method(&self) -> &MethodTracer {
         &self.state.current_method
     }
@@ -177,6 +208,10 @@ impl EnNamespace {
             genesis_commitment: Some(genesis_batch.metadata.commitment),
             bootloader_hash: Some(genesis_batch.header.base_system_contracts_hashes.bootloader),
             default_aa_hash: Some(genesis_batch.header.base_system_contracts_hashes.default_aa),
+            evm_emulator_hash: genesis_batch
+                .header
+                .base_system_contracts_hashes
+                .evm_emulator,
             l1_chain_id: self.state.api_config.l1_chain_id,
             sl_chain_id: Some(self.state.api_config.l1_chain_id.into()),
             l2_chain_id: self.state.api_config.l2_chain_id,
diff --git a/core/node/api_server/src/web3/namespaces/eth.rs b/core/node/api_server/src/web3/namespaces/eth.rs
index 65de6cee7fa..ee37cb989f1 100644
--- a/core/node/api_server/src/web3/namespaces/eth.rs
+++ b/core/node/api_server/src/web3/namespaces/eth.rs
@@ -12,15 +12,16 @@ use zksync_types::{
     web3::{self, Bytes, SyncInfo, SyncState},
     AccountTreeId, L2BlockNumber, StorageKey, H256, L2_BASE_TOKEN_ADDRESS, U256,
 };
-use zksync_utils::u256_to_h256;
+use zksync_utils::{bytecode::BytecodeMarker, u256_to_h256};
 use zksync_web3_decl::{
     error::Web3Error,
     types::{Address, Block, Filter, FilterChanges, Log, U64},
 };
 
 use crate::{
+    execution_sandbox::BlockArgs,
     tx_sender::BinarySearchKind,
-    utils::open_readonly_transaction,
+    utils::{open_readonly_transaction, prepare_evm_bytecode},
     web3::{backend_jsonrpsee::MethodTracer, metrics::API_METRICS, state::RpcState, TypedFilter},
 };
@@ -77,7 +78,12 @@ impl EthNamespace {
         drop(connection);
 
         let call_overrides = request.get_call_overrides()?;
-        let tx = L2Tx::from_request(request.into(), self.state.api_config.max_tx_size)?;
+        let tx = L2Tx::from_request(
+            request.into(),
+            self.state.api_config.max_tx_size,
+            block_args.use_evm_emulator(),
+        )?;
 
+        // It is assumed that the previous checks have already enforced that the `max_fee_per_gas` is at most u64.
         let call_result: Vec<u8> = self
             .state
@@ -107,10 +113,13 @@
         let is_eip712 = request_with_gas_per_pubdata_overridden
             .eip712_meta
             .is_some();
-
+        let mut connection = self.state.acquire_connection().await?;
+        let block_args = BlockArgs::pending(&mut connection).await?;
+        drop(connection);
         let mut tx: L2Tx = L2Tx::from_request(
             request_with_gas_per_pubdata_overridden.into(),
             self.state.api_config.max_tx_size,
+            block_args.use_evm_emulator(),
         )?;
 
         // The user may not include the proper transaction type during the estimation of
@@ -136,6 +145,7 @@
             .tx_sender
             .get_txs_fee_in_wei(
                 tx.into(),
+                block_args,
                 scale_factor,
                 acceptable_overestimation as u64,
                 state_override,
@@ -387,7 +397,22 @@
             .get_contract_code_unchecked(address, block_number)
             .await
             .map_err(DalError::generalize)?;
-        Ok(contract_code.unwrap_or_default().into())
+        let Some(contract_code) = contract_code else {
+            return Ok(Bytes::default());
+        };
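The branch below reverses the framing that `prepare_evm_bytecode` (added to `utils.rs` earlier in this diff) documents: raw stored EVM bytecode is a 32-byte big-endian length prefix, then the code itself, then zero padding. The encoding side of that layout, sketched (the helper is illustrative; how much padding is used is up to the storage layer):

fn frame_evm_bytecode(bytecode: &[u8], padded_len: usize) -> Vec<u8> {
    assert!(padded_len >= bytecode.len());
    let mut raw = vec![0_u8; 32];
    // Big-endian length in the tail of the 32-byte `U256` prefix.
    raw[24..32].copy_from_slice(&(bytecode.len() as u64).to_be_bytes());
    raw.extend_from_slice(bytecode);
    raw.resize(32 + padded_len, 0); // zero padding
    raw
}

fn main() {
    let bytecode = b"\x60\x00\x60\x00\xfd"; // PUSH1 0, PUSH1 0, REVERT
    let raw = frame_evm_bytecode(bytecode, 32);
    assert_eq!(raw.len(), 64);
    assert_eq!(&raw[32..32 + bytecode.len()], bytecode);
    // `prepare_evm_bytecode(&raw)` would recover `bytecode` and reject any
    // non-zero padding byte.
}

+        // Check if the bytecode is an EVM bytecode, and if so, pre-process it accordingly.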
+ let marker = BytecodeMarker::new(contract_code.bytecode_hash); + let prepared_bytecode = if marker == Some(BytecodeMarker::Evm) { + prepare_evm_bytecode(&contract_code.bytecode).with_context(|| { + format!( + "malformed EVM bytecode at address {address:?}, hash = {:?}", + contract_code.bytecode_hash + ) + })? + } else { + contract_code.bytecode + }; + Ok(prepared_bytecode.into()) } pub fn chain_id_impl(&self) -> U64 { @@ -618,10 +643,15 @@ impl EthNamespace { } pub async fn send_raw_transaction_impl(&self, tx_bytes: Bytes) -> Result { - let (mut tx, hash) = self.state.parse_transaction_bytes(&tx_bytes.0)?; + let mut connection = self.state.acquire_connection().await?; + let block_args = BlockArgs::pending(&mut connection).await?; + drop(connection); + let (mut tx, hash) = self + .state + .parse_transaction_bytes(&tx_bytes.0, &block_args)?; tx.set_input(tx_bytes.0, hash); - let submit_result = self.state.tx_sender.submit_tx(tx).await; + let submit_result = self.state.tx_sender.submit_tx(tx, block_args).await; submit_result.map(|_| hash).map_err(|err| { tracing::debug!("Send raw transaction error: {err}"); API_METRICS.submit_tx_error[&err.prom_error_code()].inc(); @@ -653,7 +683,7 @@ impl EthNamespace { pub async fn fee_history_impl( &self, - block_count: U64, + block_count: u64, newest_block: BlockNumber, reward_percentiles: Vec, ) -> Result { @@ -661,10 +691,7 @@ impl EthNamespace { .set_block_id(BlockId::Number(newest_block)); // Limit `block_count`. - let block_count = block_count - .as_u64() - .min(self.state.api_config.fee_history_limit) - .max(1); + let block_count = block_count.clamp(1, self.state.api_config.fee_history_limit); let mut connection = self.state.acquire_connection().await?; let newest_l2_block = self @@ -836,6 +863,11 @@ impl EthNamespace { } }) } + + pub fn max_priority_fee_per_gas_impl(&self) -> U256 { + // ZKsync does not require priority fee. + 0u64.into() + } } // Bogus methods. diff --git a/core/node/api_server/src/web3/namespaces/zks.rs b/core/node/api_server/src/web3/namespaces/zks.rs index 34010785c52..f8b374b35ba 100644 --- a/core/node/api_server/src/web3/namespaces/zks.rs +++ b/core/node/api_server/src/web3/namespaces/zks.rs @@ -1,4 +1,4 @@ -use std::{collections::HashMap, convert::TryInto}; +use std::collections::HashMap; use anyhow::Context as _; use once_cell::sync::Lazy; @@ -37,6 +37,7 @@ use zksync_web3_decl::{ }; use crate::{ + execution_sandbox::BlockArgs, tx_sender::BinarySearchKind, utils::open_readonly_transaction, web3::{backend_jsonrpsee::MethodTracer, metrics::API_METRICS, RpcState}, @@ -81,16 +82,21 @@ impl ZksNamespace { eip712_meta.gas_per_pubdata = U256::from(DEFAULT_L2_TX_GAS_PER_PUBDATA_BYTE); } + let mut connection = self.state.acquire_connection().await?; + let block_args = BlockArgs::pending(&mut connection).await?; + drop(connection); let mut tx = L2Tx::from_request( request_with_gas_per_pubdata_overridden.into(), self.state.api_config.max_tx_size, + block_args.use_evm_emulator(), )?; // When we're estimating fee, we are trying to deduce values related to fee, so we should // not consider provided ones. 
tx.common_data.fee.max_priority_fee_per_gas = 0u64.into(); tx.common_data.fee.gas_per_pubdata_limit = U256::from(DEFAULT_L2_TX_GAS_PER_PUBDATA_BYTE); - self.estimate_fee(tx.into(), state_override).await + self.estimate_fee(tx.into(), block_args, state_override) + .await } pub async fn estimate_l1_to_l2_gas_impl( @@ -107,17 +113,25 @@ impl ZksNamespace { } } - let tx: L1Tx = request_with_gas_per_pubdata_overridden - .try_into() - .map_err(Web3Error::SerializationError)?; - - let fee = self.estimate_fee(tx.into(), state_override).await?; + let mut connection = self.state.acquire_connection().await?; + let block_args = BlockArgs::pending(&mut connection).await?; + drop(connection); + let tx = L1Tx::from_request( + request_with_gas_per_pubdata_overridden, + block_args.use_evm_emulator(), + ) + .map_err(Web3Error::SerializationError)?; + + let fee = self + .estimate_fee(tx.into(), block_args, state_override) + .await?; Ok(fee.gas_limit) } async fn estimate_fee( &self, tx: Transaction, + block_args: BlockArgs, state_override: Option, ) -> Result { let scale_factor = self.state.api_config.estimate_gas_scale_factor; @@ -130,6 +144,7 @@ impl ZksNamespace { .tx_sender .get_txs_fee_in_wei( tx, + block_args, scale_factor, acceptable_overestimation as u64, state_override, @@ -150,8 +165,8 @@ impl ZksNamespace { self.state.api_config.l2_testnet_paymaster_addr } - pub fn get_bridge_contracts_impl(&self) -> BridgeAddresses { - self.state.api_config.bridge_addresses.clone() + pub async fn get_bridge_contracts_impl(&self) -> BridgeAddresses { + self.state.bridge_addresses_handle.read().await } pub fn l1_chain_id_impl(&self) -> U64 { @@ -1153,10 +1168,15 @@ impl ZksNamespace { &self, tx_bytes: Bytes, ) -> Result<(H256, VmExecutionResultAndLogs), Web3Error> { - let (mut tx, hash) = self.state.parse_transaction_bytes(&tx_bytes.0)?; + let mut connection = self.state.acquire_connection().await?; + let block_args = BlockArgs::pending(&mut connection).await?; + drop(connection); + let (mut tx, hash) = self + .state + .parse_transaction_bytes(&tx_bytes.0, &block_args)?; tx.set_input(tx_bytes.0, hash); - let submit_result = self.state.tx_sender.submit_tx(tx).await; + let submit_result = self.state.tx_sender.submit_tx(tx, block_args).await; submit_result.map(|result| (hash, result.1)).map_err(|err| { tracing::debug!("Send raw transaction error: {err}"); API_METRICS.submit_tx_error[&err.prom_error_code()].inc(); diff --git a/core/node/api_server/src/web3/state.rs b/core/node/api_server/src/web3/state.rs index 2ebc1b0c9fa..ff9f7af4a87 100644 --- a/core/node/api_server/src/web3/state.rs +++ b/core/node/api_server/src/web3/state.rs @@ -4,13 +4,13 @@ use std::{ atomic::{AtomicU32, Ordering}, Arc, }, - time::{Duration, Instant}, + time::Instant, }; use anyhow::Context as _; use futures::TryFutureExt; use lru::LruCache; -use tokio::sync::{watch, Mutex}; +use tokio::sync::{Mutex, RwLock}; use vise::GaugeGuard; use zksync_config::{ configs::{api::Web3JsonRpcConfig, ContractsConfig}, @@ -195,51 +195,16 @@ impl InternalApiConfig { /// Thread-safe updatable information about the last sealed L2 block number. /// /// The information may be temporarily outdated and thus should only be used where this is OK -/// (e.g., for metrics reporting). The value is updated by [`Self::diff()`] and [`Self::diff_with_block_args()`] -/// and on an interval specified when creating an instance. -#[derive(Debug, Clone)] -pub(crate) struct SealedL2BlockNumber(Arc); +/// (e.g., for metrics reporting). 
The value is updated by [`Self::diff()`] and [`Self::diff_with_block_args()`]. +#[derive(Debug, Clone, Default)] +pub struct SealedL2BlockNumber(Arc); impl SealedL2BlockNumber { - /// Creates a handle to the last sealed L2 block number together with a task that will update - /// it on a schedule. - pub fn new( - connection_pool: ConnectionPool, - update_interval: Duration, - stop_receiver: watch::Receiver, - ) -> (Self, impl Future>) { - let this = Self(Arc::default()); - let number_updater = this.clone(); - - let update_task = async move { - loop { - if *stop_receiver.borrow() { - tracing::debug!("Stopping latest sealed L2 block updates"); - return Ok(()); - } - - let mut connection = connection_pool.connection_tagged("api").await.unwrap(); - let Some(last_sealed_l2_block) = - connection.blocks_dal().get_sealed_l2_block_number().await? - else { - tokio::time::sleep(update_interval).await; - continue; - }; - drop(connection); - - number_updater.update(last_sealed_l2_block); - tokio::time::sleep(update_interval).await; - } - }; - - (this, update_task) - } - /// Potentially updates the last sealed L2 block number by comparing it to the provided /// sealed L2 block number (not necessarily the last one). /// /// Returns the last sealed L2 block number after the update. - fn update(&self, maybe_newer_l2_block_number: L2BlockNumber) -> L2BlockNumber { + pub fn update(&self, maybe_newer_l2_block_number: L2BlockNumber) -> L2BlockNumber { let prev_value = self .0 .fetch_max(maybe_newer_l2_block_number.0, Ordering::Relaxed); @@ -253,7 +218,7 @@ impl SealedL2BlockNumber { /// Returns the difference between the latest L2 block number and the resolved L2 block number /// from `block_args`. - pub fn diff_with_block_args(&self, block_args: &BlockArgs) -> u32 { + pub(crate) fn diff_with_block_args(&self, block_args: &BlockArgs) -> u32 { // We compute the difference in any case, since it may update the stored value. let diff = self.diff(block_args.resolved_block_number()); @@ -265,6 +230,23 @@ impl SealedL2BlockNumber { } } +#[derive(Debug, Clone)] +pub struct BridgeAddressesHandle(Arc>); + +impl BridgeAddressesHandle { + pub fn new(bridge_addresses: api::BridgeAddresses) -> Self { + Self(Arc::new(RwLock::new(bridge_addresses))) + } + + pub async fn update(&self, bridge_addresses: api::BridgeAddresses) { + *self.0.write().await = bridge_addresses; + } + + pub async fn read(&self) -> api::BridgeAddresses { + self.0.read().await.clone() + } +} + /// Holder for the data required for the API to be functional. 
 #[derive(Debug, Clone)]
 pub(crate) struct RpcState {
@@ -280,15 +262,24 @@ pub(crate) struct RpcState {
     pub(super) start_info: BlockStartInfo,
     pub(super) mempool_cache: Option<MempoolCache>,
     pub(super) last_sealed_l2_block: SealedL2BlockNumber,
+    pub(super) bridge_addresses_handle: BridgeAddressesHandle,
 }
 
 impl RpcState {
-    pub fn parse_transaction_bytes(&self, bytes: &[u8]) -> Result<(L2Tx, H256), Web3Error> {
+    pub fn parse_transaction_bytes(
+        &self,
+        bytes: &[u8],
+        block_args: &BlockArgs,
+    ) -> Result<(L2Tx, H256), Web3Error> {
         let chain_id = self.api_config.l2_chain_id;
         let (tx_request, hash) = api::TransactionRequest::from_bytes(bytes, chain_id)?;
 
         Ok((
-            L2Tx::from_request(tx_request, self.api_config.max_tx_size)?,
+            L2Tx::from_request(
+                tx_request,
+                self.api_config.max_tx_size,
+                block_args.use_evm_emulator(),
+            )?,
             hash,
         ))
     }
diff --git a/core/node/api_server/src/web3/testonly.rs b/core/node/api_server/src/web3/testonly.rs
index 93309fc09cf..2d642b9a04b 100644
--- a/core/node/api_server/src/web3/testonly.rs
+++ b/core/node/api_server/src/web3/testonly.rs
@@ -13,7 +13,10 @@ use zksync_types::L2ChainId;
 use zksync_vm_executor::oneshot::MockOneshotExecutor;
 
 use super::{metrics::ApiTransportLabel, *};
-use crate::{execution_sandbox::SandboxExecutor, tx_sender::TxSenderConfig};
+use crate::{
+    execution_sandbox::SandboxExecutor,
+    tx_sender::{SandboxExecutorOptions, TxSenderConfig},
+};
 
 const TEST_TIMEOUT: Duration = Duration::from_secs(90);
 const POLL_INTERVAL: Duration = Duration::from_millis(50);
@@ -103,6 +106,7 @@ pub struct TestServerBuilder {
     pool: ConnectionPool<Core>,
     api_config: InternalApiConfig,
     tx_executor: MockOneshotExecutor,
+    executor_options: Option<SandboxExecutorOptions>,
     method_tracer: Arc<MethodTracer>,
 }
 
@@ -113,6 +117,7 @@ impl TestServerBuilder {
             api_config,
             pool,
             tx_executor: MockOneshotExecutor::default(),
+            executor_options: None,
             method_tracer: Arc::default(),
         }
     }
@@ -131,19 +136,17 @@
         self
     }
 
+    #[must_use]
+    pub fn with_executor_options(mut self, options: SandboxExecutorOptions) -> Self {
+        self.executor_options = Some(options);
+        self
+    }
+
     /// Builds an HTTP server.
     pub async fn build_http(self, stop_receiver: watch::Receiver<bool>) -> ApiServerHandles {
-        spawn_server(
-            ApiTransportLabel::Http,
-            self.api_config,
-            self.pool,
-            None,
-            self.tx_executor,
-            self.method_tracer,
-            stop_receiver,
-        )
-        .await
-        .0
+        self.spawn_server(ApiTransportLabel::Http, None, stop_receiver)
+            .await
+            .0
     }
 
     /// Builds a WS server.
@@ -152,60 +155,73 @@ impl TestServerBuilder { websocket_requests_per_minute_limit: Option, stop_receiver: watch::Receiver, ) -> (ApiServerHandles, mpsc::UnboundedReceiver) { - spawn_server( + self.spawn_server( ApiTransportLabel::Ws, - self.api_config, - self.pool, websocket_requests_per_minute_limit, - self.tx_executor, - self.method_tracer, stop_receiver, ) .await } -} -async fn spawn_server( - transport: ApiTransportLabel, - api_config: InternalApiConfig, - pool: ConnectionPool, - websocket_requests_per_minute_limit: Option, - tx_executor: MockOneshotExecutor, - method_tracer: Arc, - stop_receiver: watch::Receiver, -) -> (ApiServerHandles, mpsc::UnboundedReceiver) { - let tx_executor = SandboxExecutor::mock(tx_executor).await; - let (tx_sender, vm_barrier) = - create_test_tx_sender(pool.clone(), api_config.l2_chain_id, tx_executor).await; - let (pub_sub_events_sender, pub_sub_events_receiver) = mpsc::unbounded_channel(); - - let mut namespaces = Namespace::DEFAULT.to_vec(); - namespaces.extend([Namespace::Debug, Namespace::Snapshots, Namespace::Unstable]); - - let server_builder = match transport { - ApiTransportLabel::Http => ApiBuilder::jsonrpsee_backend(api_config, pool).http(0), - ApiTransportLabel::Ws => { - let mut builder = ApiBuilder::jsonrpsee_backend(api_config, pool) - .ws(0) - .with_subscriptions_limit(100); - if let Some(websocket_requests_per_minute_limit) = websocket_requests_per_minute_limit { - builder = builder - .with_websocket_requests_per_minute_limit(websocket_requests_per_minute_limit); + async fn spawn_server( + self, + transport: ApiTransportLabel, + websocket_requests_per_minute_limit: Option, + stop_receiver: watch::Receiver, + ) -> (ApiServerHandles, mpsc::UnboundedReceiver) { + let Self { + tx_executor, + executor_options, + pool, + api_config, + method_tracer, + } = self; + + let tx_executor = if let Some(options) = executor_options { + SandboxExecutor::custom_mock(tx_executor, options) + } else { + SandboxExecutor::mock(tx_executor).await + }; + let (tx_sender, vm_barrier) = + create_test_tx_sender(pool.clone(), api_config.l2_chain_id, tx_executor).await; + let (pub_sub_events_sender, pub_sub_events_receiver) = mpsc::unbounded_channel(); + + let mut namespaces = Namespace::DEFAULT.to_vec(); + namespaces.extend([Namespace::Debug, Namespace::Snapshots, Namespace::Unstable]); + let sealed_l2_block_handle = SealedL2BlockNumber::default(); + let bridge_addresses_handle = + BridgeAddressesHandle::new(api_config.bridge_addresses.clone()); + + let server_builder = match transport { + ApiTransportLabel::Http => ApiBuilder::jsonrpsee_backend(api_config, pool).http(0), + ApiTransportLabel::Ws => { + let mut builder = ApiBuilder::jsonrpsee_backend(api_config, pool) + .ws(0) + .with_subscriptions_limit(100); + if let Some(websocket_requests_per_minute_limit) = + websocket_requests_per_minute_limit + { + builder = builder.with_websocket_requests_per_minute_limit( + websocket_requests_per_minute_limit, + ); + } + builder } - builder - } - }; - let server_handles = server_builder - .with_polling_interval(POLL_INTERVAL) - .with_tx_sender(tx_sender) - .with_vm_barrier(vm_barrier) - .with_pub_sub_events(pub_sub_events_sender) - .with_method_tracer(method_tracer) - .enable_api_namespaces(namespaces) - .build() - .expect("Unable to build API server") - .run(stop_receiver) - .await - .expect("Failed spawning JSON-RPC server"); - (server_handles, pub_sub_events_receiver) + }; + let server_handles = server_builder + .with_polling_interval(POLL_INTERVAL) + 
            .with_tx_sender(tx_sender)
+            .with_vm_barrier(vm_barrier)
+            .with_pub_sub_events(pub_sub_events_sender)
+            .with_method_tracer(method_tracer)
+            .enable_api_namespaces(namespaces)
+            .with_sealed_l2_block_handle(sealed_l2_block_handle)
+            .with_bridge_addresses_handle(bridge_addresses_handle)
+            .build()
+            .expect("Unable to build API server")
+            .run(stop_receiver)
+            .await
+            .expect("Failed spawning JSON-RPC server");
+        (server_handles, pub_sub_events_receiver)
+    }
 }
diff --git a/core/node/api_server/src/web3/tests/debug.rs b/core/node/api_server/src/web3/tests/debug.rs
index 4f021b777ae..28a22511fa9 100644
--- a/core/node/api_server/src/web3/tests/debug.rs
+++ b/core/node/api_server/src/web3/tests/debug.rs
@@ -139,32 +139,27 @@ impl HttpTest for TraceBlockFlatTest {
             .await?
             .unwrap_flat();

-        // A transaction with 2 nested calls will convert into 3 Flattened calls.
-        // Also in this test, all tx have the same # of nested calls
-        assert_eq!(
-            block_traces.len(),
-            tx_results.len() * (tx_results[0].call_traces.len() + 1)
-        );
+        assert_eq!(block_traces.len(), tx_results.len());
+
+        let tx_traces = &block_traces.first().unwrap().result;

         // First tx has 2 nested calls, thus 2 sub-traces
-        assert_eq!(block_traces[0].subtraces, 2);
-        assert_eq!(block_traces[0].trace_address, [0]);
+        assert_eq!(tx_traces[0].subtraces, 2);
+        assert_eq!(tx_traces[0].trace_address, [0]);

         // The second flat call (i.e., the first nested call) does not have nested calls
-        assert_eq!(block_traces[1].subtraces, 0);
-        assert_eq!(block_traces[1].trace_address, [0, 0]);
+        assert_eq!(tx_traces[1].subtraces, 0);
+        assert_eq!(tx_traces[1].trace_address, [0, 0]);

-        let top_level_call_indexes = [0, 3, 6];
+        let top_level_call_indexes = [0, 1, 2];
         let top_level_traces = top_level_call_indexes
             .iter()
             .map(|&i| block_traces[i].clone());

         for (top_level_trace, tx_result) in top_level_traces.zip(&tx_results) {
-            assert_eq!(top_level_trace.action.from, Address::zero());
-            assert_eq!(top_level_trace.action.to, BOOTLOADER_ADDRESS);
-            assert_eq!(
-                top_level_trace.action.gas,
-                tx_result.transaction.gas_limit()
-            );
+            let trace = top_level_trace.result.first().unwrap();
+            assert_eq!(trace.action.from, Address::zero());
+            assert_eq!(trace.action.to, BOOTLOADER_ADDRESS);
+            assert_eq!(trace.action.gas, tx_result.transaction.gas_limit());
         }
         // TODO: test inner calls
     }
diff --git a/core/node/api_server/src/web3/tests/filters.rs b/core/node/api_server/src/web3/tests/filters.rs
index 7342ce7e979..c865526815d 100644
--- a/core/node/api_server/src/web3/tests/filters.rs
+++ b/core/node/api_server/src/web3/tests/filters.rs
@@ -23,7 +23,7 @@ impl HttpTest for BasicFilterChangesTest {
         if self.snapshot_recovery {
             StorageInitialization::empty_recovery()
         } else {
-            StorageInitialization::Genesis
+            StorageInitialization::genesis()
         }
     }
@@ -109,7 +109,7 @@ impl HttpTest for LogFilterChangesTest {
         if self.snapshot_recovery {
             StorageInitialization::empty_recovery()
         } else {
-            StorageInitialization::Genesis
+            StorageInitialization::genesis()
         }
     }
diff --git a/core/node/api_server/src/web3/tests/mod.rs b/core/node/api_server/src/web3/tests/mod.rs
index 632e263c653..27932931880 100644
--- a/core/node/api_server/src/web3/tests/mod.rs
+++ b/core/node/api_server/src/web3/tests/mod.rs
@@ -16,6 +16,7 @@ use zksync_config::{
     },
     GenesisConfig,
 };
+use zksync_contracts::BaseSystemContracts;
 use zksync_dal::{transactions_dal::L2TxSubmissionResult, Connection, ConnectionPool, CoreDal};
 use zksync_multivm::interface::{
     TransactionExecutionMetrics, TransactionExecutionResult, TxExecutionStatus,
VmEvent, @@ -31,17 +32,22 @@ use zksync_system_constants::{ }; use zksync_types::{ api, - block::{pack_block_info, L2BlockHeader}, + block::{pack_block_info, L2BlockHasher, L2BlockHeader}, + fee_model::{BatchFeeInput, FeeParams}, get_nonce_key, l2::L2Tx, storage::get_code_key, + system_contracts::get_system_smart_contracts, tokens::{TokenInfo, TokenMetadata}, tx::IncludedTxLocation, utils::{storage_key_for_eth_balance, storage_key_for_standard_token_balance}, AccountTreeId, Address, L1BatchNumber, Nonce, ProtocolVersionId, StorageKey, StorageLog, H256, U256, U64, }; -use zksync_utils::u256_to_h256; +use zksync_utils::{ + bytecode::{hash_bytecode, hash_evm_bytecode}, + u256_to_h256, +}; use zksync_vm_executor::oneshot::MockOneshotExecutor; use zksync_web3_decl::{ client::{Client, DynClient, L2}, @@ -50,7 +56,7 @@ use zksync_web3_decl::{ http_client::HttpClient, rpc_params, types::{ - error::{ErrorCode, OVERSIZED_RESPONSE_CODE}, + error::{ErrorCode, INVALID_PARAMS_CODE, OVERSIZED_RESPONSE_CODE}, ErrorObjectOwned, }, }, @@ -58,7 +64,11 @@ use zksync_web3_decl::{ }; use super::*; -use crate::web3::testonly::TestServerBuilder; +use crate::{ + testonly::{PROCESSED_EVM_BYTECODE, RAW_EVM_BYTECODE}, + tx_sender::SandboxExecutorOptions, + web3::testonly::TestServerBuilder, +}; mod debug; mod filters; @@ -134,13 +144,18 @@ async fn setting_response_size_limits() { trait HttpTest: Send + Sync { /// Prepares the storage before the server is started. The default implementation performs genesis. fn storage_initialization(&self) -> StorageInitialization { - StorageInitialization::Genesis + StorageInitialization::genesis() } fn transaction_executor(&self) -> MockOneshotExecutor { MockOneshotExecutor::default() } + /// Allows to override sandbox executor options. + fn executor_options(&self) -> Option { + None + } + fn method_tracer(&self) -> Arc { Arc::default() } @@ -157,7 +172,9 @@ trait HttpTest: Send + Sync { /// Storage initialization strategy. #[derive(Debug)] enum StorageInitialization { - Genesis, + Genesis { + evm_emulator: bool, + }, Recovery { logs: Vec, factory_deps: HashMap>, @@ -168,6 +185,16 @@ impl StorageInitialization { const SNAPSHOT_RECOVERY_BATCH: L1BatchNumber = L1BatchNumber(23); const SNAPSHOT_RECOVERY_BLOCK: L2BlockNumber = L2BlockNumber(23); + const fn genesis() -> Self { + Self::Genesis { + evm_emulator: false, + } + } + + const fn genesis_with_evm() -> Self { + Self::Genesis { evm_emulator: true } + } + fn empty_recovery() -> Self { Self::Recovery { logs: vec![], @@ -181,12 +208,29 @@ impl StorageInitialization { storage: &mut Connection<'_, Core>, ) -> anyhow::Result<()> { match self { - Self::Genesis => { - let params = GenesisParams::load_genesis_params(GenesisConfig { + Self::Genesis { evm_emulator } => { + let mut config = GenesisConfig { l2_chain_id: network_config.zksync_network_id, ..mock_genesis_config() - }) + }; + let mut base_system_contracts = BaseSystemContracts::load_from_disk(); + if evm_emulator { + config.evm_emulator_hash = Some(config.default_aa_hash.unwrap()); + base_system_contracts.evm_emulator = + Some(base_system_contracts.default_aa.clone()); + } else { + assert!(config.evm_emulator_hash.is_none()); + } + + let params = GenesisParams::from_genesis_config( + config, + base_system_contracts, + // We cannot load system contracts with EVM emulator yet because these contracts are missing. + // This doesn't matter for tests because the EVM emulator won't be invoked. 
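Background on the repeated `StorageInitialization::genesis()` change in the test files: once the `Genesis` variant gained the `evm_emulator` field, call sites could no longer name it as a bare value, so the patch adds `const fn` constructors as shorthands. A trimmed-down sketch of the idea:

#[derive(Debug)]
enum StorageInit {
    Genesis { evm_emulator: bool },
    EmptyRecovery,
}

impl StorageInit {
    /// Plain genesis, no EVM emulator; replaces the former bare `Genesis` value.
    const fn genesis() -> Self {
        Self::Genesis {
            evm_emulator: false,
        }
    }

    /// Genesis with the EVM emulator enabled.
    const fn genesis_with_evm() -> Self {
        Self::Genesis { evm_emulator: true }
    }
}

fn main() {
    println!("{:?}", StorageInit::genesis());
    println!("{:?}", StorageInit::genesis_with_evm());
    println!("{:?}", StorageInit::EmptyRecovery);
}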
+ get_system_smart_contracts(false), + ) .unwrap(); + if storage.blocks_dal().is_genesis_needed().await? { insert_genesis_batch(storage, ¶ms).await?; } @@ -245,11 +289,13 @@ async fn test_http_server(test: impl HttpTest) { let genesis = GenesisConfig::for_tests(); let mut api_config = InternalApiConfig::new(&web3_config, &contracts_config, &genesis); api_config.filters_disabled = test.filters_disabled(); - let mut server_handles = TestServerBuilder::new(pool.clone(), api_config) + let mut server_builder = TestServerBuilder::new(pool.clone(), api_config) .with_tx_executor(test.transaction_executor()) - .with_method_tracer(test.method_tracer()) - .build_http(stop_receiver) - .await; + .with_method_tracer(test.method_tracer()); + if let Some(executor_options) = test.executor_options() { + server_builder = server_builder.with_executor_options(executor_options); + } + let mut server_handles = server_builder.build_http(stop_receiver).await; let local_addr = server_handles.wait_until_ready().await; let client = Client::http(format!("http://{local_addr}/").parse().unwrap()) @@ -428,6 +474,10 @@ async fn store_events( Ok((tx_location, events)) } +fn scaled_sensible_fee_input(scale: f64) -> BatchFeeInput { + FeeParams::sensible_v1_default().scale(scale, scale) +} + #[derive(Debug)] struct HttpServerBasicsTest; @@ -625,7 +675,7 @@ impl HttpTest for StorageAccessWithSnapshotRecovery { fn storage_initialization(&self) -> StorageInitialization { let address = Address::repeat_byte(1); let code_key = get_code_key(&address); - let code_hash = H256::repeat_byte(2); + let code_hash = hash_bytecode(&[0; 32]); let balance_key = storage_key_for_eth_balance(&address); let logs = vec![ StorageLog::new_write_log(code_key, code_hash), @@ -1102,3 +1152,241 @@ impl HttpTest for GenesisConfigTest { async fn tracing_genesis_config() { test_http_server(GenesisConfigTest).await; } + +#[derive(Debug)] +struct GetBytecodeTest; + +impl GetBytecodeTest { + async fn insert_evm_bytecode( + connection: &mut Connection<'_, Core>, + at_block: L2BlockNumber, + address: Address, + ) -> anyhow::Result<()> { + let evm_bytecode_hash = hash_evm_bytecode(RAW_EVM_BYTECODE); + let code_log = StorageLog::new_write_log(get_code_key(&address), evm_bytecode_hash); + connection + .storage_logs_dal() + .append_storage_logs(at_block, &[code_log]) + .await?; + + let factory_deps = HashMap::from([(evm_bytecode_hash, RAW_EVM_BYTECODE.to_vec())]); + connection + .factory_deps_dal() + .insert_factory_deps(at_block, &factory_deps) + .await?; + Ok(()) + } +} + +#[async_trait] +impl HttpTest for GetBytecodeTest { + async fn test( + &self, + client: &DynClient, + pool: &ConnectionPool, + ) -> anyhow::Result<()> { + let genesis_evm_address = Address::repeat_byte(1); + let mut connection = pool.connection().await?; + Self::insert_evm_bytecode(&mut connection, L2BlockNumber(0), genesis_evm_address).await?; + + for contract in get_system_smart_contracts(false) { + let bytecode = client + .get_code(*contract.account_id.address(), None) + .await?; + assert_eq!(bytecode.0, contract.bytecode); + } + + let bytecode = client.get_code(genesis_evm_address, None).await?; + assert_eq!(bytecode.0, PROCESSED_EVM_BYTECODE); + + let latest_block_variants = [ + api::BlockNumber::Pending, + api::BlockNumber::Latest, + api::BlockNumber::Committed, + ]; + let latest_block_variants = latest_block_variants.map(api::BlockIdVariant::BlockNumber); + + let genesis_block_variants = [ + api::BlockIdVariant::BlockNumber(api::BlockNumber::Earliest), + 
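`insert_evm_bytecode` above mirrors the two-level lookup behind `eth_getCode`: a storage log maps the account address to a bytecode hash, and the factory-deps table maps that hash to the bytecode itself. A conceptual standalone sketch with simplified stand-in types (not the crate's real API):

use std::collections::HashMap;

/// Resolves an account's code: address -> code hash -> bytecode.
fn get_code(
    address: [u8; 20],
    code_keys: &HashMap<[u8; 20], [u8; 32]>,   // storage logs under the code key
    factory_deps: &HashMap<[u8; 32], Vec<u8>>, // hash -> deployed bytecode
) -> Vec<u8> {
    code_keys
        .get(&address)
        .and_then(|hash| factory_deps.get(hash))
        .cloned()
        .unwrap_or_default() // accounts without code read as empty bytes
}

fn main() {
    let mut code_keys = HashMap::new();
    let mut factory_deps = HashMap::new();
    code_keys.insert([1; 20], [0xaa; 32]);
    factory_deps.insert([0xaa; 32], vec![0xfe, 0xca]);

    assert_eq!(get_code([1; 20], &code_keys, &factory_deps), [0xfe, 0xca]);
    assert!(get_code([9; 20], &code_keys, &factory_deps).is_empty());
}

This also explains the block-scoped assertions in the test: the code is visible only from the block whose storage logs contain the hash onwards.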
api::BlockIdVariant::BlockNumber(api::BlockNumber::Number(0.into())), + api::BlockIdVariant::BlockHashObject(api::BlockHashObject { + block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), + }), + ]; + for at_block in latest_block_variants + .into_iter() + .chain(genesis_block_variants) + { + println!("Testing {at_block:?} with genesis EVM code, latest block: 0"); + let bytecode = client.get_code(genesis_evm_address, Some(at_block)).await?; + assert_eq!(bytecode.0, PROCESSED_EVM_BYTECODE); + } + + // Create another block with an EVM bytecode. + let new_bytecode_address = Address::repeat_byte(2); + let mut connection = pool.connection().await?; + let block_header = store_l2_block(&mut connection, L2BlockNumber(1), &[]).await?; + Self::insert_evm_bytecode(&mut connection, L2BlockNumber(1), new_bytecode_address).await?; + + let bytecode = client.get_code(genesis_evm_address, None).await?; + assert_eq!(bytecode.0, PROCESSED_EVM_BYTECODE); + let bytecode = client.get_code(new_bytecode_address, None).await?; + assert_eq!(bytecode.0, PROCESSED_EVM_BYTECODE); + + let new_block_variants = [ + api::BlockIdVariant::BlockNumber(api::BlockNumber::Number(1.into())), + api::BlockIdVariant::BlockHashObject(api::BlockHashObject { + block_hash: block_header.hash, + }), + ]; + for at_block in latest_block_variants.into_iter().chain(new_block_variants) { + println!("Testing {at_block:?} with new EVM code, latest block: 1"); + let bytecode = client + .get_code(new_bytecode_address, Some(at_block)) + .await?; + assert_eq!(bytecode.0, PROCESSED_EVM_BYTECODE); + } + for at_block in genesis_block_variants { + println!("Testing {at_block:?} with new EVM code, latest block: 1"); + let bytecode = client + .get_code(new_bytecode_address, Some(at_block)) + .await?; + assert!(bytecode.0.is_empty()); + } + + for at_block in latest_block_variants + .into_iter() + .chain(new_block_variants) + .chain(genesis_block_variants) + { + println!("Testing {at_block:?} with genesis EVM code, latest block: 1"); + let bytecode = client.get_code(genesis_evm_address, Some(at_block)).await?; + assert_eq!(bytecode.0, PROCESSED_EVM_BYTECODE); + } + Ok(()) + } +} + +#[tokio::test] +async fn getting_bytecodes() { + test_http_server(GetBytecodeTest).await; +} + +#[derive(Debug)] +struct FeeHistoryTest; + +#[async_trait] +impl HttpTest for FeeHistoryTest { + async fn test( + &self, + client: &DynClient, + pool: &ConnectionPool, + ) -> anyhow::Result<()> { + let mut connection = pool.connection().await?; + let block1 = L2BlockHeader { + batch_fee_input: scaled_sensible_fee_input(1.0), + base_fee_per_gas: 100, + ..create_l2_block(1) + }; + store_custom_l2_block(&mut connection, &block1, &[]).await?; + let block2 = L2BlockHeader { + batch_fee_input: scaled_sensible_fee_input(2.0), + base_fee_per_gas: 200, + ..create_l2_block(2) + }; + store_custom_l2_block(&mut connection, &block2, &[]).await?; + + let all_pubdata_prices = [ + 0, + block1.batch_fee_input.fair_pubdata_price(), + block2.batch_fee_input.fair_pubdata_price(), + ] + .map(U256::from); + + let history = client + .fee_history(1_000.into(), api::BlockNumber::Latest, Some(vec![])) + .await?; + assert_eq!(history.inner.oldest_block, 0.into()); + assert_eq!( + history.inner.base_fee_per_gas, + [0, 100, 200, 200].map(U256::from) // The latest value is duplicated + ); + assert_eq!(history.l2_pubdata_price, all_pubdata_prices); + // Values below are not filled. 
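The shapes asserted in this fee-history test follow the `eth_feeHistory` convention: `base_fee_per_gas` covers the requested blocks plus one extra entry for the next block, which this server approximates by duplicating the latest known value. A small illustrative helper (not the node's code) reproducing the asserted shape:

/// Builds the `base_fee_per_gas` array for a full history query:
/// one entry per block plus a trailing next-block estimate.
fn base_fee_history(per_block: &[u64]) -> Vec<u64> {
    let mut history = per_block.to_vec();
    if let Some(&latest) = per_block.last() {
        history.push(latest); // next-block estimate duplicates the latest value
    }
    history
}

fn main() {
    // Blocks 0..=2 with base fees 0 (genesis), 100, 200, as in the test above.
    assert_eq!(base_fee_history(&[0, 100, 200]), [0, 100, 200, 200]);
}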
+ assert_eq!(history.inner.gas_used_ratio, [0.0; 3]); + assert_eq!(history.inner.base_fee_per_blob_gas, [U256::zero(); 4]); + assert_eq!(history.inner.blob_gas_used_ratio, [0.0; 3]); + + // Check supplying hexadecimal block count + let hex_history: api::FeeHistory = client + .request( + "eth_feeHistory", + rpc_params!["0xaa", "latest", [] as [f64; 0]], + ) + .await?; + assert_eq!(hex_history, history); + + // ...and explicitly decimal count (which should've been supplied in the first call) for exhaustiveness + let dec_history: api::FeeHistory = client + .request( + "eth_feeHistory", + rpc_params![1_000, "latest", [] as [f64; 0]], + ) + .await?; + assert_eq!(dec_history, history); + + // Check partial histories: blocks 0..=1 + let history = client + .fee_history( + 1_000.into(), + api::BlockNumber::Number(1.into()), + Some(vec![]), + ) + .await?; + assert_eq!(history.inner.oldest_block, 0.into()); + assert_eq!( + history.inner.base_fee_per_gas, + [0, 100, 100].map(U256::from) + ); + assert_eq!(history.l2_pubdata_price, all_pubdata_prices[..2]); + + // Blocks 1..=2 + let history = client + .fee_history(2.into(), api::BlockNumber::Latest, Some(vec![])) + .await?; + assert_eq!(history.inner.oldest_block, 1.into()); + assert_eq!( + history.inner.base_fee_per_gas, + [100, 200, 200].map(U256::from) + ); + assert_eq!(history.l2_pubdata_price, all_pubdata_prices[1..]); + + // Blocks 1..=1 + let history = client + .fee_history(1.into(), api::BlockNumber::Number(1.into()), Some(vec![])) + .await?; + assert_eq!(history.inner.oldest_block, 1.into()); + assert_eq!(history.inner.base_fee_per_gas, [100, 100].map(U256::from)); + assert_eq!(history.l2_pubdata_price, all_pubdata_prices[1..2]); + + // Non-existing newest block. + let err = client + .fee_history( + 1000.into(), + api::BlockNumber::Number(100.into()), + Some(vec![]), + ) + .await + .unwrap_err(); + assert_matches!( + err, + ClientError::Call(err) if err.code() == INVALID_PARAMS_CODE + ); + Ok(()) + } +} + +#[tokio::test] +async fn getting_fee_history() { + test_http_server(FeeHistoryTest).await; +} diff --git a/core/node/api_server/src/web3/tests/unstable.rs b/core/node/api_server/src/web3/tests/unstable.rs index 1d425f8b951..e814081afa0 100644 --- a/core/node/api_server/src/web3/tests/unstable.rs +++ b/core/node/api_server/src/web3/tests/unstable.rs @@ -27,14 +27,9 @@ impl HttpTest for GetTeeProofsTest { assert!(proof.is_empty()); - let mut storage = pool.connection().await.unwrap(); - storage - .tee_verifier_input_producer_dal() - .create_tee_verifier_input_producer_job(batch_no) - .await?; - let pubkey = vec![0xDE, 0xAD, 0xBE, 0xEF]; let attestation = vec![0xC0, 0xFF, 0xEE]; + let mut storage = pool.connection().await.unwrap(); let mut tee_proof_generation_dal = storage.tee_proof_generation_dal(); tee_proof_generation_dal .save_attestation(&pubkey, &attestation) diff --git a/core/node/api_server/src/web3/tests/vm.rs b/core/node/api_server/src/web3/tests/vm.rs index d8086c6c6ad..45128f579cd 100644 --- a/core/node/api_server/src/web3/tests/vm.rs +++ b/core/node/api_server/src/web3/tests/vm.rs @@ -9,20 +9,21 @@ use std::{ }; use api::state_override::{OverrideAccount, StateOverride}; +use test_casing::test_casing; +use zksync_contracts::{BaseSystemContracts, BaseSystemContractsHashes}; use zksync_multivm::interface::{ - ExecutionResult, VmExecutionLogs, VmExecutionResultAndLogs, VmRevertReason, + ExecutionResult, OneshotEnv, VmExecutionLogs, VmExecutionResultAndLogs, VmRevertReason, }; -use zksync_node_fee_model::BatchFeeModelInputProvider; use 
zksync_types::{ - api::ApiStorageLog, - fee_model::{BatchFeeInput, FeeParams}, - get_intrinsic_constants, - transaction_request::CallRequest, - K256PrivateKey, L2ChainId, PackedEthSignature, StorageLogKind, StorageLogWithPreviousValue, - U256, + api::ApiStorageLog, fee_model::BatchFeeInput, get_intrinsic_constants, + transaction_request::CallRequest, K256PrivateKey, L2ChainId, PackedEthSignature, + StorageLogKind, StorageLogWithPreviousValue, Transaction, U256, }; use zksync_utils::u256_to_h256; -use zksync_vm_executor::oneshot::MockOneshotExecutor; +use zksync_vm_executor::oneshot::{ + BaseSystemContractsProvider, ContractsKind, MockOneshotExecutor, OneshotEnvParameters, + ResolvedBlockInfo, +}; use zksync_web3_decl::namespaces::DebugNamespaceClient; use super::*; @@ -42,11 +43,7 @@ impl ExpectedFeeInput { fn expect_for_block(&self, number: api::BlockNumber, scale: f64) { *self.0.lock().unwrap() = match number { api::BlockNumber::Number(number) => create_l2_block(number.as_u32()).batch_fee_input, - _ => ::default_batch_fee_input_scaled( - FeeParams::sensible_v1_default(), - scale, - scale, - ), + _ => scaled_sensible_fee_input(scale), }; } @@ -69,6 +66,59 @@ impl ExpectedFeeInput { } } +/// Mock base contracts provider. Necessary to use with EVM emulator because bytecode of the real emulator is not available yet. +#[derive(Debug)] +struct BaseContractsWithMockEvmEmulator(BaseSystemContracts); + +impl Default for BaseContractsWithMockEvmEmulator { + fn default() -> Self { + let mut contracts = BaseSystemContracts::load_from_disk(); + contracts.evm_emulator = Some(contracts.default_aa.clone()); + Self(contracts) + } +} + +#[async_trait] +impl BaseSystemContractsProvider for BaseContractsWithMockEvmEmulator { + async fn base_system_contracts( + &self, + block_info: &ResolvedBlockInfo, + ) -> anyhow::Result { + assert!(block_info.use_evm_emulator()); + Ok(self.0.clone()) + } +} + +fn executor_options_with_evm_emulator() -> SandboxExecutorOptions { + let base_contracts = Arc::::default(); + SandboxExecutorOptions { + estimate_gas: OneshotEnvParameters::new( + base_contracts.clone(), + L2ChainId::default(), + AccountTreeId::default(), + u32::MAX, + ), + eth_call: OneshotEnvParameters::new( + base_contracts, + L2ChainId::default(), + AccountTreeId::default(), + u32::MAX, + ), + } +} + +/// Fetches base contract hashes from the genesis block. +async fn genesis_contract_hashes( + connection: &mut Connection<'_, Core>, +) -> anyhow::Result { + Ok(connection + .blocks_dal() + .get_l2_block_header(L2BlockNumber(0)) + .await? + .context("no genesis block")? + .base_system_contracts_hashes) +} + #[derive(Debug, Default)] struct CallTest { fee_input: ExpectedFeeInput, @@ -165,28 +215,108 @@ impl HttpTest for CallTest { // Check that the method handler fetches fee inputs for recent blocks. To do that, we create a new block // with a large fee input; it should be loaded by `ApiFeeInputProvider` and override the input provided by the wrapped mock provider. 
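`BaseContractsWithMockEvmEmulator` above is an instance of the usual "provider trait plus canned value" test double: the trait abstracts where base contracts come from, and the mock always serves a preloaded copy. A minimal self-contained sketch of the same shape, assuming the `async-trait` and `tokio` crates are available (types here are stand-ins, not the crate's):

use std::sync::Arc;

use async_trait::async_trait;

#[async_trait]
trait ContractsProvider: Send + Sync {
    async fn base_contracts(&self) -> Vec<u8>;
}

/// Test double that always serves the same preloaded bytes.
struct CannedContracts(Vec<u8>);

#[async_trait]
impl ContractsProvider for CannedContracts {
    async fn base_contracts(&self) -> Vec<u8> {
        self.0.clone()
    }
}

#[tokio::main]
async fn main() {
    let provider: Arc<dyn ContractsProvider> = Arc::new(CannedContracts(vec![0xfe]));
    assert_eq!(provider.base_contracts().await, vec![0xfe]);
}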
let mut block_header = create_l2_block(2); - block_header.batch_fee_input = - ::default_batch_fee_input_scaled( - FeeParams::sensible_v1_default(), - 2.5, - 2.5, - ); + block_header.batch_fee_input = scaled_sensible_fee_input(2.5); store_custom_l2_block(&mut connection, &block_header, &[]).await?; // Fee input is not scaled further as per `ApiFeeInputProvider` implementation self.fee_input.expect_custom(block_header.batch_fee_input); - let call_request = CallTest::call_request(b"block=3"); - let call_result = client.call(call_request.clone(), None, None).await?; + let call_request = Self::call_request(b"block=3"); + let call_result = client.call(call_request, None, None).await?; assert_eq!(call_result.0, b"output"); + let call_request_without_target = CallRequest { + to: None, + ..Self::call_request(b"block=3") + }; + let err = client + .call(call_request_without_target, None, None) + .await + .unwrap_err(); + assert_null_to_address_error(&err); + Ok(()) } } +fn assert_null_to_address_error(error: &ClientError) { + if let ClientError::Call(error) = error { + assert_eq!(error.code(), 3); + assert!(error.message().contains("toAddressIsNull"), "{error:?}"); + assert!(error.data().is_none(), "{error:?}"); + } else { + panic!("Unexpected error: {error:?}"); + } +} + #[tokio::test] async fn call_method_basics() { test_http_server(CallTest::default()).await; } +fn evm_emulator_responses(tx: &Transaction, env: &OneshotEnv) -> ExecutionResult { + assert!(env + .system + .base_system_smart_contracts + .evm_emulator + .is_some()); + match tx.execute.calldata.as_slice() { + b"no_target" => assert_eq!(tx.recipient_account(), None), + _ => assert!(tx.recipient_account().is_some()), + } + ExecutionResult::Success { + output: b"output".to_vec(), + } +} + +#[derive(Debug)] +struct CallTestWithEvmEmulator; + +#[async_trait] +impl HttpTest for CallTestWithEvmEmulator { + fn storage_initialization(&self) -> StorageInitialization { + StorageInitialization::genesis_with_evm() + } + + fn transaction_executor(&self) -> MockOneshotExecutor { + let mut executor = MockOneshotExecutor::default(); + executor.set_call_responses(evm_emulator_responses); + executor + } + + fn executor_options(&self) -> Option { + Some(executor_options_with_evm_emulator()) + } + + async fn test( + &self, + client: &DynClient, + pool: &ConnectionPool, + ) -> anyhow::Result<()> { + // Store an additional L2 block because L2 block #0 has some special processing making it work incorrectly. 
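`evm_emulator_responses` plugs into `MockOneshotExecutor`'s closure-based mocking: the test registers a response function that inspects the transaction (here, its calldata) and returns a canned execution result. An illustrative standalone sketch with a simplified signature (byte slices instead of transactions and environments):

/// Mock that dispatches on the input via a registered closure.
struct MockExecutor {
    respond: Box<dyn Fn(&[u8]) -> Vec<u8> + Send + Sync>,
}

impl MockExecutor {
    fn set_responses<F>(&mut self, responses: F)
    where
        F: Fn(&[u8]) -> Vec<u8> + Send + Sync + 'static,
    {
        self.respond = Box::new(responses);
    }

    fn execute(&self, calldata: &[u8]) -> Vec<u8> {
        (self.respond)(calldata)
    }
}

fn main() {
    let mut executor = MockExecutor {
        respond: Box::new(|_| vec![]),
    };
    // Mirrors the calldata-based dispatch in `evm_emulator_responses`.
    executor.set_responses(|calldata| match calldata {
        b"no_target" => b"handled without target".to_vec(),
        _ => b"output".to_vec(),
    });
    assert_eq!(executor.execute(b"no_target"), b"handled without target");
    assert_eq!(executor.execute(b"anything else"), b"output");
}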
+ let mut connection = pool.connection().await?; + let block_header = L2BlockHeader { + base_system_contracts_hashes: genesis_contract_hashes(&mut connection).await?, + ..create_l2_block(1) + }; + store_custom_l2_block(&mut connection, &block_header, &[]).await?; + + let call_result = client.call(CallTest::call_request(&[]), None, None).await?; + assert_eq!(call_result.0, b"output"); + + let call_request_without_target = CallRequest { + to: None, + ..CallTest::call_request(b"no_target") + }; + let call_result = client.call(call_request_without_target, None, None).await?; + assert_eq!(call_result.0, b"output"); + Ok(()) + } +} + +#[tokio::test] +async fn call_method_with_evm_emulator() { + test_http_server(CallTestWithEvmEmulator).await; +} + #[derive(Debug, Default)] struct CallTestAfterSnapshotRecovery { fee_input: ExpectedFeeInput, @@ -257,16 +387,20 @@ struct SendRawTransactionTest { } impl SendRawTransactionTest { - fn transaction_bytes_and_hash() -> (Vec, H256) { + fn transaction_bytes_and_hash(include_to: bool) -> (Vec, H256) { let private_key = Self::private_key(); let tx_request = api::TransactionRequest { chain_id: Some(L2ChainId::default().as_u64()), from: Some(private_key.address()), - to: Some(Address::repeat_byte(2)), + to: include_to.then(|| Address::repeat_byte(2)), value: 123_456.into(), gas: (get_intrinsic_constants().l2_tx_intrinsic_gas * 2).into(), gas_price: StateKeeperConfig::for_tests().minimal_l2_gas_price.into(), - input: vec![1, 2, 3, 4].into(), + input: if include_to { + vec![1, 2, 3, 4].into() + } else { + b"no_target".to_vec().into() + }, ..api::TransactionRequest::default() }; let data = tx_request.get_rlp().unwrap(); @@ -301,7 +435,7 @@ impl HttpTest for SendRawTransactionTest { factory_deps: HashMap::default(), } } else { - StorageInitialization::Genesis + StorageInitialization::genesis() } } @@ -313,7 +447,7 @@ impl HttpTest for SendRawTransactionTest { L2BlockNumber(1) }; tx_executor.set_tx_responses(move |tx, env| { - assert_eq!(tx.hash(), Self::transaction_bytes_and_hash().1); + assert_eq!(tx.hash(), Self::transaction_bytes_and_hash(true).1); assert_eq!(env.l1_batch.first_l2_block.number, pending_block.0); ExecutionResult::Success { output: vec![] } }); @@ -334,7 +468,7 @@ impl HttpTest for SendRawTransactionTest { .await?; } - let (tx_bytes, tx_hash) = Self::transaction_bytes_and_hash(); + let (tx_bytes, tx_hash) = Self::transaction_bytes_and_hash(true); let send_result = client.send_raw_transaction(tx_bytes.into()).await?; assert_eq!(send_result, tx_hash); Ok(()) @@ -357,6 +491,90 @@ async fn send_raw_transaction_after_snapshot_recovery() { .await; } +#[derive(Debug)] +struct SendRawTransactionWithoutToAddressTest; + +#[async_trait] +impl HttpTest for SendRawTransactionWithoutToAddressTest { + async fn test( + &self, + client: &DynClient, + pool: &ConnectionPool, + ) -> anyhow::Result<()> { + let mut storage = pool.connection().await?; + storage + .storage_logs_dal() + .append_storage_logs( + L2BlockNumber(0), + &[SendRawTransactionTest::balance_storage_log()], + ) + .await?; + + let (tx_bytes, _) = SendRawTransactionTest::transaction_bytes_and_hash(false); + let err = client + .send_raw_transaction(tx_bytes.into()) + .await + .unwrap_err(); + assert_null_to_address_error(&err); + Ok(()) + } +} + +#[tokio::test] +async fn send_raw_transaction_fails_without_to_address() { + test_http_server(SendRawTransactionWithoutToAddressTest).await; +} + +#[derive(Debug)] +struct SendRawTransactionTestWithEvmEmulator; + +#[async_trait] +impl HttpTest for 
SendRawTransactionTestWithEvmEmulator { + fn storage_initialization(&self) -> StorageInitialization { + StorageInitialization::genesis_with_evm() + } + + fn transaction_executor(&self) -> MockOneshotExecutor { + let mut executor = MockOneshotExecutor::default(); + executor.set_tx_responses(evm_emulator_responses); + executor + } + + fn executor_options(&self) -> Option { + Some(executor_options_with_evm_emulator()) + } + + async fn test( + &self, + client: &DynClient, + pool: &ConnectionPool, + ) -> anyhow::Result<()> { + // Manually set sufficient balance for the transaction account. + let mut storage = pool.connection().await?; + storage + .storage_logs_dal() + .append_storage_logs( + L2BlockNumber(0), + &[SendRawTransactionTest::balance_storage_log()], + ) + .await?; + + let (tx_bytes, tx_hash) = SendRawTransactionTest::transaction_bytes_and_hash(true); + let send_result = client.send_raw_transaction(tx_bytes.into()).await?; + assert_eq!(send_result, tx_hash); + + let (tx_bytes, tx_hash) = SendRawTransactionTest::transaction_bytes_and_hash(false); + let send_result = client.send_raw_transaction(tx_bytes.into()).await?; + assert_eq!(send_result, tx_hash); + Ok(()) + } +} + +#[tokio::test] +async fn send_raw_transaction_with_evm_emulator() { + test_http_server(SendRawTransactionTestWithEvmEmulator).await; +} + #[derive(Debug)] struct SendTransactionWithDetailedOutputTest; @@ -405,7 +623,7 @@ impl SendTransactionWithDetailedOutputTest { impl HttpTest for SendTransactionWithDetailedOutputTest { fn transaction_executor(&self) -> MockOneshotExecutor { let mut tx_executor = MockOneshotExecutor::default(); - let tx_bytes_and_hash = SendRawTransactionTest::transaction_bytes_and_hash(); + let tx_bytes_and_hash = SendRawTransactionTest::transaction_bytes_and_hash(true); let vm_execution_logs = VmExecutionLogs { storage_logs: self.storage_logs(), events: self.vm_events(), @@ -423,6 +641,7 @@ impl HttpTest for SendTransactionWithDetailedOutputTest { logs: vm_execution_logs.clone(), statistics: Default::default(), refunds: Default::default(), + new_known_factory_deps: None, } }); tx_executor @@ -443,7 +662,7 @@ impl HttpTest for SendTransactionWithDetailedOutputTest { ) .await?; - let (tx_bytes, tx_hash) = SendRawTransactionTest::transaction_bytes_and_hash(); + let (tx_bytes, tx_hash) = SendRawTransactionTest::transaction_bytes_and_hash(true); let send_result = client .send_raw_transaction_with_detailed_output(tx_bytes.into()) .await?; @@ -562,12 +781,7 @@ impl HttpTest for TraceCallTest { // Check that the method handler fetches fee inputs for recent blocks. To do that, we create a new block // with a large fee input; it should be loaded by `ApiFeeInputProvider` and override the input provided by the wrapped mock provider. 
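A small aside on `transaction_bytes_and_hash(include_to)` above: the optional `to` field is built with `bool::then`, which yields `Some(value)` when the flag is set and `None` otherwise, avoiding an `if`/`else`:

fn main() {
    let include_to = false;
    let to: Option<[u8; 20]> = include_to.then(|| [2u8; 20]);
    assert!(to.is_none());
    assert_eq!(true.then(|| 7), Some(7));
}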
let mut block_header = create_l2_block(2); - block_header.batch_fee_input = - ::default_batch_fee_input_scaled( - FeeParams::sensible_v1_default(), - 3.0, - 3.0, - ); + block_header.batch_fee_input = scaled_sensible_fee_input(3.0); store_custom_l2_block(&mut connection, &block_header, &[]).await?; // Fee input is not scaled further as per `ApiFeeInputProvider` implementation self.fee_input.expect_custom(block_header.batch_fee_input); @@ -575,6 +789,16 @@ impl HttpTest for TraceCallTest { let call_result = client.trace_call(call_request.clone(), None, None).await?; Self::assert_debug_call(&call_request, &call_result.unwrap_default()); + let call_request_without_target = CallRequest { + to: None, + ..CallTest::call_request(b"block=3") + }; + let err = client + .call(call_request_without_target, None, None) + .await + .unwrap_err(); + assert_null_to_address_error(&err); + Ok(()) } } @@ -651,16 +875,96 @@ async fn trace_call_after_snapshot_recovery() { test_http_server(TraceCallTestAfterSnapshotRecovery::default()).await; } +#[derive(Debug)] +struct TraceCallTestWithEvmEmulator; + +#[async_trait] +impl HttpTest for TraceCallTestWithEvmEmulator { + fn storage_initialization(&self) -> StorageInitialization { + StorageInitialization::genesis_with_evm() + } + + fn transaction_executor(&self) -> MockOneshotExecutor { + let mut executor = MockOneshotExecutor::default(); + executor.set_call_responses(evm_emulator_responses); + executor + } + + fn executor_options(&self) -> Option { + Some(executor_options_with_evm_emulator()) + } + + async fn test( + &self, + client: &DynClient, + pool: &ConnectionPool, + ) -> anyhow::Result<()> { + // Store an additional L2 block because L2 block #0 has some special processing making it work incorrectly. + let mut connection = pool.connection().await?; + let block_header = L2BlockHeader { + base_system_contracts_hashes: genesis_contract_hashes(&mut connection).await?, + ..create_l2_block(1) + }; + store_custom_l2_block(&mut connection, &block_header, &[]).await?; + + client + .trace_call(CallTest::call_request(&[]), None, None) + .await?; + + let call_request_without_target = CallRequest { + to: None, + ..CallTest::call_request(b"no_target") + }; + client + .trace_call(call_request_without_target, None, None) + .await?; + Ok(()) + } +} + +#[tokio::test] +async fn trace_call_method_with_evm_emulator() { + test_http_server(TraceCallTestWithEvmEmulator).await; +} + +#[derive(Debug, Clone, Copy)] +enum EstimateMethod { + EthEstimateGas, + ZksEstimateFee, + ZksEstimateGasL1ToL2, +} + +impl EstimateMethod { + const ALL: [Self; 3] = [ + Self::EthEstimateGas, + Self::ZksEstimateFee, + Self::ZksEstimateGasL1ToL2, + ]; + + async fn query(self, client: &DynClient, req: CallRequest) -> Result { + match self { + Self::EthEstimateGas => client.estimate_gas(req, None, None).await, + Self::ZksEstimateFee => client + .estimate_fee(req, None) + .await + .map(|fee| fee.gas_limit), + Self::ZksEstimateGasL1ToL2 => client.estimate_gas_l1_to_l2(req, None).await, + } + } +} + #[derive(Debug)] struct EstimateGasTest { gas_limit_threshold: Arc, + method: EstimateMethod, snapshot_recovery: bool, } impl EstimateGasTest { - fn new(snapshot_recovery: bool) -> Self { + fn new(method: EstimateMethod, snapshot_recovery: bool) -> Self { Self { gas_limit_threshold: Arc::default(), + method, snapshot_recovery, } } @@ -681,9 +985,12 @@ impl HttpTest for EstimateGasTest { L2BlockNumber(1) }; let gas_limit_threshold = self.gas_limit_threshold.clone(); + let should_set_nonce = !matches!(self.method, 
EstimateMethod::ZksEstimateGasL1ToL2); tx_executor.set_tx_responses(move |tx, env| { assert_eq!(tx.execute.calldata(), [] as [u8; 0]); - assert_eq!(tx.nonce(), Some(Nonce(0))); + if should_set_nonce { + assert_eq!(tx.nonce(), Some(Nonce(0))); + } assert_eq!(env.l1_batch.first_l2_block.number, pending_block_number.0); let gas_limit_threshold = gas_limit_threshold.load(Ordering::SeqCst); @@ -706,8 +1013,9 @@ impl HttpTest for EstimateGasTest { let l2_transaction = create_l2_transaction(10, 100); for threshold in [10_000, 50_000, 100_000, 1_000_000] { self.gas_limit_threshold.store(threshold, Ordering::Relaxed); - let output = client - .estimate_gas(l2_transaction.clone().into(), None, None) + let output = self + .method + .query(client, l2_transaction.clone().into()) .await?; assert!( output >= U256::from(threshold), @@ -732,19 +1040,17 @@ impl HttpTest for EstimateGasTest { let mut call_request = CallRequest::from(l2_transaction); call_request.from = Some(SendRawTransactionTest::private_key().address()); call_request.value = Some(1_000_000.into()); - client - .estimate_gas(call_request.clone(), None, None) - .await?; + + self.method.query(client, call_request.clone()).await?; call_request.value = Some(U256::max_value()); - let error = client - .estimate_gas(call_request, None, None) - .await - .unwrap_err(); + let error = self.method.query(client, call_request).await.unwrap_err(); if let ClientError::Call(error) = error { let error_msg = error.message(); + // L1 and L2 transactions have differing error messages in this case. assert!( - error_msg.to_lowercase().contains("insufficient"), + error_msg.to_lowercase().contains("insufficient") + || error_msg.to_lowercase().contains("overflow"), "{error_msg}" ); } else { @@ -754,14 +1060,16 @@ impl HttpTest for EstimateGasTest { } } +#[test_casing(3, EstimateMethod::ALL)] #[tokio::test] -async fn estimate_gas_basics() { - test_http_server(EstimateGasTest::new(false)).await; +async fn estimate_gas_basics(method: EstimateMethod) { + test_http_server(EstimateGasTest::new(method, false)).await; } +#[test_casing(3, EstimateMethod::ALL)] #[tokio::test] -async fn estimate_gas_after_snapshot_recovery() { - test_http_server(EstimateGasTest::new(true)).await; +async fn estimate_gas_after_snapshot_recovery(method: EstimateMethod) { + test_http_server(EstimateGasTest::new(method, true)).await; } #[derive(Debug)] @@ -818,9 +1126,7 @@ impl HttpTest for EstimateGasWithStateOverrideTest { if let ClientError::Call(error) = error { let error_msg = error.message(); assert!( - error_msg - .to_lowercase() - .contains("insufficient balance for transfer"), + error_msg.to_lowercase().contains("insufficient funds"), "{error_msg}" ); } else { @@ -832,6 +1138,87 @@ impl HttpTest for EstimateGasWithStateOverrideTest { #[tokio::test] async fn estimate_gas_with_state_override() { - let inner = EstimateGasTest::new(false); + let inner = EstimateGasTest::new(EstimateMethod::EthEstimateGas, false); test_http_server(EstimateGasWithStateOverrideTest { inner }).await; } + +#[derive(Debug)] +struct EstimateGasWithoutToAddressTest { + method: EstimateMethod, +} + +#[async_trait] +impl HttpTest for EstimateGasWithoutToAddressTest { + async fn test( + &self, + client: &DynClient, + _pool: &ConnectionPool, + ) -> anyhow::Result<()> { + let mut l2_transaction = create_l2_transaction(10, 100); + l2_transaction.execute.contract_address = None; + l2_transaction.common_data.signature = vec![]; // Remove invalidated signature so that it doesn't trip estimation logic + let err = self + .method + 
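`EstimateMethod` lets a single test body exercise three RPC entry points, and `#[test_casing(3, EstimateMethod::ALL)]` expands each annotated test into three cases, one per variant. A trimmed-down sketch of the enum-dispatch idea (stand-in arithmetic instead of real RPC calls):

#[derive(Clone, Copy, Debug)]
enum Method {
    EstimateGas,
    EstimateFee,
}

impl Method {
    const ALL: [Self; 2] = [Self::EstimateGas, Self::EstimateFee];

    /// One entry point per variant; the test body stays method-agnostic.
    fn query(self, input: u64) -> u64 {
        match self {
            Self::EstimateGas => input * 2,     // stand-in for `eth_estimateGas`
            Self::EstimateFee => input * 2 + 1, // stand-in for `zks_estimateFee`
        }
    }
}

fn main() {
    for method in Method::ALL {
        assert!(method.query(10) >= 20, "{method:?}");
    }
}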
.query(client, l2_transaction.into()) + .await + .unwrap_err(); + assert_null_to_address_error(&err); + Ok(()) + } +} + +#[test_casing(3, EstimateMethod::ALL)] +#[tokio::test] +async fn estimate_gas_fails_without_to_address(method: EstimateMethod) { + test_http_server(EstimateGasWithoutToAddressTest { method }).await; +} + +#[derive(Debug)] +struct EstimateGasTestWithEvmEmulator { + method: EstimateMethod, +} + +#[async_trait] +impl HttpTest for EstimateGasTestWithEvmEmulator { + fn storage_initialization(&self) -> StorageInitialization { + StorageInitialization::genesis_with_evm() + } + + fn transaction_executor(&self) -> MockOneshotExecutor { + let mut executor = MockOneshotExecutor::default(); + executor.set_tx_responses(evm_emulator_responses); + executor + } + + fn executor_options(&self) -> Option { + Some(executor_options_with_evm_emulator()) + } + + async fn test( + &self, + client: &DynClient, + _pool: &ConnectionPool, + ) -> anyhow::Result<()> { + let call_request = CallRequest { + from: Some(Address::repeat_byte(1)), + to: Some(Address::repeat_byte(2)), + ..CallRequest::default() + }; + self.method.query(client, call_request).await?; + + let call_request = CallRequest { + from: Some(Address::repeat_byte(1)), + to: None, + data: Some(b"no_target".to_vec().into()), + ..CallRequest::default() + }; + self.method.query(client, call_request).await?; + Ok(()) + } +} + +#[test_casing(3, EstimateMethod::ALL)] +#[tokio::test] +async fn estimate_gas_with_evm_emulator(method: EstimateMethod) { + test_http_server(EstimateGasTestWithEvmEmulator { method }).await; +} diff --git a/core/node/api_server/src/web3/tests/ws.rs b/core/node/api_server/src/web3/tests/ws.rs index 28b2e2beb55..008747a63bc 100644 --- a/core/node/api_server/src/web3/tests/ws.rs +++ b/core/node/api_server/src/web3/tests/ws.rs @@ -147,7 +147,7 @@ async fn notifiers_start_after_snapshot_recovery() { trait WsTest: Send + Sync { /// Prepares the storage before the server is started. The default implementation performs genesis. fn storage_initialization(&self) -> StorageInitialization { - StorageInitialization::Genesis + StorageInitialization::genesis() } async fn test( @@ -234,7 +234,7 @@ impl WsTest for BasicSubscriptionsTest { if self.snapshot_recovery { StorageInitialization::empty_recovery() } else { - StorageInitialization::Genesis + StorageInitialization::genesis() } } @@ -403,7 +403,7 @@ impl WsTest for LogSubscriptionsTest { if self.snapshot_recovery { StorageInitialization::empty_recovery() } else { - StorageInitialization::Genesis + StorageInitialization::genesis() } } diff --git a/core/node/base_token_adjuster/src/base_token_l1_behaviour.rs b/core/node/base_token_adjuster/src/base_token_l1_behaviour.rs index 0199b06ebd6..0922101e59d 100644 --- a/core/node/base_token_adjuster/src/base_token_l1_behaviour.rs +++ b/core/node/base_token_adjuster/src/base_token_l1_behaviour.rs @@ -6,7 +6,7 @@ use std::{ }; use anyhow::Context; -use bigdecimal::{num_bigint::ToBigInt, BigDecimal, Zero}; +use bigdecimal::{BigDecimal, Zero}; use zksync_config::BaseTokenAdjusterConfig; use zksync_eth_client::{BoundEthInterface, CallFunctionArgs, Options}; use zksync_node_fee_model::l1_gas_price::TxParamsProvider; @@ -57,7 +57,7 @@ impl BaseTokenL1Behaviour { self.update_last_persisted_l1_ratio(prev_ratio.clone()); tracing::info!( "Fetched current base token ratio from the L1: {}", - prev_ratio.to_bigint().unwrap() + prev_ratio ); prev_ratio }; @@ -71,7 +71,7 @@ impl BaseTokenL1Behaviour { "Skipping L1 update. 
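On the logging change in `base_token_l1_behaviour.rs` here: `BigDecimal` implements `Display`, so the value can be logged directly; the old `to_bigint().unwrap()` dropped the fractional part of the logged ratio and needed an `unwrap`. A tiny demonstration, assuming the `bigdecimal` crate is available:

use std::str::FromStr;

use bigdecimal::BigDecimal;

fn main() {
    let deviation = BigDecimal::from_str("1.25").unwrap();
    // Logging via Display keeps the fractional part intact.
    println!("deviation = {deviation}"); // prints `deviation = 1.25`, not `1`
}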
current_ratio {}, previous_ratio {}, deviation {}", current_ratio, prev_ratio, - deviation.to_bigint().unwrap() + deviation ); return Ok(()); } @@ -98,7 +98,7 @@ impl BaseTokenL1Behaviour { new_ratio.denominator.get(), base_fee_per_gas, priority_fee_per_gas, - deviation.to_bigint().unwrap() + deviation ); METRICS .l1_gas_used diff --git a/core/node/base_token_adjuster/src/base_token_ratio_persister.rs b/core/node/base_token_adjuster/src/base_token_ratio_persister.rs index 220f100e5dc..785c9c4dfd7 100644 --- a/core/node/base_token_adjuster/src/base_token_ratio_persister.rs +++ b/core/node/base_token_adjuster/src/base_token_ratio_persister.rs @@ -90,6 +90,9 @@ impl BaseTokenRatioPersister { result: OperationResult::Success, }] .observe(start_time.elapsed()); + METRICS + .ratio + .set((ratio.numerator.get() as f64) / (ratio.denominator.get() as f64)); return Ok(ratio); } Err(err) => { diff --git a/core/node/base_token_adjuster/src/base_token_ratio_provider.rs b/core/node/base_token_adjuster/src/base_token_ratio_provider.rs index e16ea16ff0f..b613e5219dd 100644 --- a/core/node/base_token_adjuster/src/base_token_ratio_provider.rs +++ b/core/node/base_token_adjuster/src/base_token_ratio_provider.rs @@ -81,7 +81,7 @@ impl DBBaseTokenRatioProvider { // Though the DB should be populated very soon after the server starts, it is possible // to have no ratios in the DB right after genesis. Having initial ratios in the DB // from the genesis stage will eliminate this possibility. - tracing::error!("No latest price found in the database. Using default ratio."); + tracing::warn!("No latest price found in the database. Using default ratio."); BaseTokenConversionRatio::default() } Err(err) => anyhow::bail!("Failed to get latest base token ratio: {:?}", err), diff --git a/core/node/base_token_adjuster/src/metrics.rs b/core/node/base_token_adjuster/src/metrics.rs index d84e4da0c0c..17a48c1b5c3 100644 --- a/core/node/base_token_adjuster/src/metrics.rs +++ b/core/node/base_token_adjuster/src/metrics.rs @@ -18,6 +18,7 @@ pub(crate) struct OperationResultLabels { #[metrics(prefix = "base_token_adjuster")] pub(crate) struct BaseTokenAdjusterMetrics { pub l1_gas_used: Gauge, + pub ratio: Gauge, #[metrics(buckets = Buckets::LATENCIES)] pub external_price_api_latency: Family>, #[metrics(buckets = Buckets::LATENCIES)] diff --git a/core/node/block_reverter/src/tests.rs b/core/node/block_reverter/src/tests.rs index e54b0490aa1..b2c4ee6465f 100644 --- a/core/node/block_reverter/src/tests.rs +++ b/core/node/block_reverter/src/tests.rs @@ -59,7 +59,6 @@ async fn setup_storage(storage: &mut Connection<'_, Core>, storage_logs: &[Stora l1_tx_count: 0, l2_tx_count: 0, fee_account_address: Address::default(), - pubdata_params: Default::default(), base_fee_per_gas: 0, batch_fee_input: Default::default(), gas_per_pubdata_limit: 0, @@ -68,6 +67,7 @@ async fn setup_storage(storage: &mut Connection<'_, Core>, storage_logs: &[Stora virtual_blocks: 1, gas_limit: 0, logs_bloom: Default::default(), + pubdata_params: Default::default(), }; storage .blocks_dal() @@ -88,6 +88,7 @@ async fn setup_storage(storage: &mut Connection<'_, Core>, storage_logs: &[Stora system_logs: vec![], protocol_version: Some(ProtocolVersionId::latest()), pubdata_input: None, + fee_address: Default::default(), }; storage .blocks_dal() diff --git a/core/node/commitment_generator/Cargo.toml b/core/node/commitment_generator/Cargo.toml index 5ec8410124f..1f4645414cb 100644 --- a/core/node/commitment_generator/Cargo.toml +++ 
b/core/node/commitment_generator/Cargo.toml @@ -20,6 +20,7 @@ zksync_utils.workspace = true zksync_eth_client.workspace = true zksync_contracts.workspace = true zksync_multivm.workspace = true +zksync_system_constants.workspace = true circuit_sequencer_api_1_4_0.workspace = true circuit_sequencer_api_1_4_1.workspace = true circuit_sequencer_api_1_5_0.workspace = true diff --git a/core/node/commitment_generator/src/lib.rs b/core/node/commitment_generator/src/lib.rs index d592845e6df..294b6c50985 100644 --- a/core/node/commitment_generator/src/lib.rs +++ b/core/node/commitment_generator/src/lib.rs @@ -10,7 +10,7 @@ use zksync_multivm::zk_evm_latest::ethereum_types::U256; use zksync_types::{ blob::num_blobs_required, commitment::{ - AuxCommitments, CommitmentCommonInput, CommitmentInput, L1BatchAuxiliaryOutput, + AuxCommitments, BlobHash, CommitmentCommonInput, CommitmentInput, L1BatchAuxiliaryOutput, L1BatchCommitment, L1BatchCommitmentArtifacts, L1BatchCommitmentMode, }, writes::{InitialStorageWrite, RepeatedStorageWrite, StateDiffRecord}, @@ -180,6 +180,7 @@ impl CommitmentGenerator { rollup_root_hash: tree_data.hash, bootloader_code_hash: header.base_system_contracts_hashes.bootloader, default_aa_code_hash: header.base_system_contracts_hashes.default_aa, + evm_emulator_code_hash: header.base_system_contracts_hashes.evm_emulator, protocol_version, }; let touched_slots = connection @@ -266,7 +267,7 @@ impl CommitmentGenerator { } state_diffs.sort_unstable_by_key(|rec| (rec.address, rec.key)); - let (blob_commitments, blob_linear_hashes) = if protocol_version.is_post_1_4_2() { + let blob_hashes = if protocol_version.is_post_1_4_2() { let pubdata_input = header.pubdata_input.with_context(|| { format!("`pubdata_input` is missing for L1 batch #{l1_batch_number}") })?; @@ -280,28 +281,35 @@ impl CommitmentGenerator { pubdata_input, ); - (commitments, linear_hashes) + commitments + .into_iter() + .zip(linear_hashes) + .map(|(commitment, linear_hash)| BlobHash { + commitment, + linear_hash, + }) + .collect::>() } else { - ( - vec![H256::zero(); num_blobs_required(&protocol_version)], - vec![H256::zero(); num_blobs_required(&protocol_version)], - ) + vec![Default::default(); num_blobs_required(&protocol_version)] }; - let mut connection = self - .connection_pool - .connection_tagged("commitment_generator") - .await?; - let aggregated_root = read_aggregation_root(&mut connection, l1_batch_number).await?; + let aggregation_root = if protocol_version.is_pre_gateway() { + H256::zero() + } else { + let mut connection = self + .connection_pool + .connection_tagged("commitment_generator") + .await?; + read_aggregation_root(&mut connection, l1_batch_number).await? + }; CommitmentInput::PostBoojum { common, system_logs: header.system_logs, state_diffs, aux_commitments, - blob_commitments, - blob_linear_hashes, - aggregated_root, + blob_hashes, + aggregation_root, } }; @@ -380,13 +388,10 @@ impl CommitmentGenerator { (L1BatchCommitmentMode::Rollup, _) => { // Do nothing } - ( - L1BatchCommitmentMode::Validium, - CommitmentInput::PostBoojum { - blob_commitments, .. - }, - ) => { - blob_commitments.fill(H256::zero()); + (L1BatchCommitmentMode::Validium, CommitmentInput::PostBoojum { blob_hashes, .. 
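The `BlobHash` refactoring above replaces two parallel vectors (commitments and linear hashes) with a single vector of paired structs, so the two can no longer drift out of sync or disagree in length. A standalone sketch of the zip-and-collect step (`[u8; 32]` stands in for `H256`):

#[derive(Clone, Copy, Debug, Default, PartialEq)]
struct BlobHash {
    commitment: [u8; 32],
    linear_hash: [u8; 32],
}

fn pair_up(commitments: Vec<[u8; 32]>, linear_hashes: Vec<[u8; 32]>) -> Vec<BlobHash> {
    commitments
        .into_iter()
        .zip(linear_hashes) // stops at the shorter input; equal lengths expected here
        .map(|(commitment, linear_hash)| BlobHash {
            commitment,
            linear_hash,
        })
        .collect()
}

fn main() {
    let paired = pair_up(vec![[1; 32]], vec![[2; 32]]);
    assert_eq!(paired.len(), 1);
    assert_eq!(paired[0].linear_hash, [2; 32]);
}

It also simplifies the Validium handling visible in this hunk: zeroing only the commitment now happens per pair instead of on a separate vector.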
}) => { + for hashes in blob_hashes { + hashes.commitment = H256::zero(); + } } (L1BatchCommitmentMode::Validium, _) => { /* Do nothing */ } } @@ -396,14 +401,9 @@ impl CommitmentGenerator { match (self.commitment_mode, &mut commitment.auxiliary_output) { ( L1BatchCommitmentMode::Validium, - L1BatchAuxiliaryOutput::PostBoojum { - blob_linear_hashes, - blob_commitments, - .. - }, + L1BatchAuxiliaryOutput::PostBoojum { blob_hashes, .. }, ) => { - blob_linear_hashes.fill(H256::zero()); - blob_commitments.fill(H256::zero()); + blob_hashes.fill(Default::default()); } _ => { /* Do nothing */ } } diff --git a/core/node/commitment_generator/src/utils.rs b/core/node/commitment_generator/src/utils.rs index 9ed6682733c..d405a1256a2 100644 --- a/core/node/commitment_generator/src/utils.rs +++ b/core/node/commitment_generator/src/utils.rs @@ -19,6 +19,7 @@ use zk_evm_1_5_0::{ use zksync_dal::{Connection, Core, CoreDal}; use zksync_l1_contract_interface::i_executor::commit::kzg::ZK_SYNC_BYTES_PER_BLOB; use zksync_multivm::{interface::VmEvent, utils::get_used_bootloader_memory_bytes}; +use zksync_system_constants::message_root::{AGG_TREE_HEIGHT_KEY, AGG_TREE_NODES_KEY}; use zksync_types::{ vm::VmVersion, web3::keccak256, @@ -249,9 +250,11 @@ pub(crate) fn pubdata_to_blob_linear_hashes( // Now, we need to calculate the linear hashes of the blobs. // Firstly, let's pad the pubdata to the size of the blob. if pubdata_input.len() % ZK_SYNC_BYTES_PER_BLOB != 0 { - let padding = - vec![0u8; ZK_SYNC_BYTES_PER_BLOB - pubdata_input.len() % ZK_SYNC_BYTES_PER_BLOB]; - pubdata_input.extend(padding); + pubdata_input.resize( + pubdata_input.len() + + (ZK_SYNC_BYTES_PER_BLOB - pubdata_input.len() % ZK_SYNC_BYTES_PER_BLOB), + 0, + ); } let mut result = vec![H256::zero(); blobs_required]; @@ -270,12 +273,6 @@ pub(crate) async fn read_aggregation_root( connection: &mut Connection<'_, Core>, l1_batch_number: L1BatchNumber, ) -> anyhow::Result { - // Position of `FullTree::_height` in `MessageRoot`'s storage layout. - const AGG_TREE_HEIGHT_KEY: usize = 3; - - // Position of `FullTree::nodes` in `MessageRoot`'s storage layout. - const AGG_TREE_NODES_KEY: usize = 5; - let (_, last_l2_block) = connection .blocks_dal() .get_l2_block_range_of_l1_batch(l1_batch_number) @@ -284,7 +281,7 @@ pub(crate) async fn read_aggregation_root( let agg_tree_height_slot = StorageKey::new( AccountTreeId::new(L2_MESSAGE_ROOT_ADDRESS), - u256_to_h256(AGG_TREE_HEIGHT_KEY.into()), + H256::from_low_u64_be(AGG_TREE_HEIGHT_KEY as u64), ); let agg_tree_height = connection diff --git a/core/node/consensus/src/batch.rs b/core/node/consensus/src/batch.rs deleted file mode 100644 index 08246c4e5c0..00000000000 --- a/core/node/consensus/src/batch.rs +++ /dev/null @@ -1,275 +0,0 @@ -//! L1 Batch representation for sending over p2p network. -use anyhow::Context as _; -use zksync_concurrency::{ctx, error::Wrap as _}; -use zksync_consensus_roles::validator; -use zksync_dal::consensus_dal::Payload; -use zksync_l1_contract_interface::i_executor; -use zksync_metadata_calculator::api_server::{TreeApiClient, TreeEntryWithProof}; -use zksync_system_constants as constants; -use zksync_types::{ - abi, - block::{unpack_block_info, L2BlockHasher}, - AccountTreeId, L1BatchNumber, L2BlockNumber, ProtocolVersionId, StorageKey, Transaction, H256, - U256, -}; -use zksync_utils::{h256_to_u256, u256_to_h256}; - -use crate::storage::ConnectionPool; - -/// Commitment to the last block of a batch. 
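The padding change in `pubdata_to_blob_linear_hashes` above swaps an allocate-then-extend for an in-place `Vec::resize` up to the next blob boundary, avoiding the temporary zero-filled vector. An equivalent standalone sketch (illustrative constant; the real one is `ZK_SYNC_BYTES_PER_BLOB`):

const BYTES_PER_BLOB: usize = 8; // illustrative; the real constant is much larger

/// Pads `pubdata` with zeros up to the next multiple of the blob size.
fn pad_to_blob_boundary(pubdata: &mut Vec<u8>) {
    let remainder = pubdata.len() % BYTES_PER_BLOB;
    if remainder != 0 {
        pubdata.resize(pubdata.len() + (BYTES_PER_BLOB - remainder), 0);
    }
}

fn main() {
    let mut data = vec![0xab; 5];
    pad_to_blob_boundary(&mut data);
    assert_eq!(data.len(), 8);
    assert_eq!(data[5..], [0, 0, 0]);
}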
-pub(crate) struct LastBlockCommit { - /// Hash of the `StoredBatchInfo` which is stored on L1. - /// The hashed `StoredBatchInfo` contains a `root_hash` of the L2 state, - /// which contains state of the `SystemContext` contract, - /// which contains enough data to reconstruct the hash - /// of the last L2 block of the batch. - pub(crate) info: H256, -} - -/// Witness proving what is the last block of a batch. -/// Contains the hash and the number of the last block. -pub(crate) struct LastBlockWitness { - info: i_executor::structures::StoredBatchInfo, - protocol_version: ProtocolVersionId, - - current_l2_block_info: TreeEntryWithProof, - tx_rolling_hash: TreeEntryWithProof, - l2_block_hash_entry: TreeEntryWithProof, -} - -/// Commitment to an L1 batch. -pub(crate) struct L1BatchCommit { - pub(crate) number: L1BatchNumber, - pub(crate) this_batch: LastBlockCommit, - pub(crate) prev_batch: LastBlockCommit, -} - -/// L1Batch with witness that can be -/// verified against `L1BatchCommit`. -pub struct L1BatchWithWitness { - pub(crate) blocks: Vec, - pub(crate) this_batch: LastBlockWitness, - pub(crate) prev_batch: LastBlockWitness, -} - -impl LastBlockWitness { - /// Address of the SystemContext contract. - fn system_context_addr() -> AccountTreeId { - AccountTreeId::new(constants::SYSTEM_CONTEXT_ADDRESS) - } - - /// Storage key of the `SystemContext.current_l2_block_info` field. - fn current_l2_block_info_key() -> U256 { - StorageKey::new( - Self::system_context_addr(), - constants::SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - ) - .hashed_key_u256() - } - - /// Storage key of the `SystemContext.tx_rolling_hash` field. - fn tx_rolling_hash_key() -> U256 { - StorageKey::new( - Self::system_context_addr(), - constants::SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, - ) - .hashed_key_u256() - } - - /// Storage key of the entry of the `SystemContext.l2BlockHash[]` array, corresponding to l2 - /// block with number i. - fn l2_block_hash_entry_key(i: L2BlockNumber) -> U256 { - let key = h256_to_u256(constants::SYSTEM_CONTEXT_CURRENT_L2_BLOCK_HASHES_POSITION) - + U256::from(i.0) % U256::from(constants::SYSTEM_CONTEXT_STORED_L2_BLOCK_HASHES); - StorageKey::new(Self::system_context_addr(), u256_to_h256(key)).hashed_key_u256() - } - - /// Loads a `LastBlockWitness` from storage. - async fn load( - ctx: &ctx::Ctx, - n: L1BatchNumber, - pool: &ConnectionPool, - tree: &dyn TreeApiClient, - ) -> ctx::Result { - let mut conn = pool.connection(ctx).await.wrap("pool.connection()")?; - let batch = conn - .batch(ctx, n) - .await - .wrap("batch()")? - .context("batch not in storage")?; - - let proofs = tree - .get_proofs( - n, - vec![ - Self::current_l2_block_info_key(), - Self::tx_rolling_hash_key(), - ], - ) - .await - .context("get_proofs()")?; - if proofs.len() != 2 { - return Err(anyhow::format_err!("proofs.len()!=2").into()); - } - let current_l2_block_info = proofs[0].clone(); - let tx_rolling_hash = proofs[1].clone(); - let (block_number, _) = unpack_block_info(current_l2_block_info.value.as_bytes().into()); - let prev = L2BlockNumber( - block_number - .checked_sub(1) - .context("L2BlockNumber underflow")? 
- .try_into() - .context("L2BlockNumber overflow")?, - ); - let proofs = tree - .get_proofs(n, vec![Self::l2_block_hash_entry_key(prev)]) - .await - .context("get_proofs()")?; - if proofs.len() != 1 { - return Err(anyhow::format_err!("proofs.len()!=1").into()); - } - let l2_block_hash_entry = proofs[0].clone(); - Ok(Self { - info: i_executor::structures::StoredBatchInfo::from(&batch), - protocol_version: batch - .header - .protocol_version - .context("missing protocol_version")?, - - current_l2_block_info, - tx_rolling_hash, - l2_block_hash_entry, - }) - } - - /// Verifies the proof against the commit and returns the hash - /// of the last L2 block. - pub(crate) fn verify(&self, comm: &LastBlockCommit) -> anyhow::Result<(L2BlockNumber, H256)> { - // Verify info. - anyhow::ensure!(comm.info == self.info.hash()); - - // Check the protocol version. - anyhow::ensure!( - self.protocol_version >= ProtocolVersionId::Version13, - "unsupported protocol version" - ); - - let (block_number, block_timestamp) = - unpack_block_info(self.current_l2_block_info.value.as_bytes().into()); - let prev = L2BlockNumber( - block_number - .checked_sub(1) - .context("L2BlockNumber underflow")? - .try_into() - .context("L2BlockNumber overflow")?, - ); - - // Verify merkle paths. - self.current_l2_block_info - .verify(Self::current_l2_block_info_key(), self.info.batch_hash) - .context("invalid merkle path for current_l2_block_info")?; - self.tx_rolling_hash - .verify(Self::tx_rolling_hash_key(), self.info.batch_hash) - .context("invalid merkle path for tx_rolling_hash")?; - self.l2_block_hash_entry - .verify(Self::l2_block_hash_entry_key(prev), self.info.batch_hash) - .context("invalid merkle path for l2_block_hash entry")?; - - let block_number = L2BlockNumber(block_number.try_into().context("block_number overflow")?); - // Derive hash of the last block - Ok(( - block_number, - L2BlockHasher::hash( - block_number, - block_timestamp, - self.l2_block_hash_entry.value, - self.tx_rolling_hash.value, - self.protocol_version, - ), - )) - } - - /// Last L2 block of the batch. - pub fn last_block(&self) -> validator::BlockNumber { - let (n, _) = unpack_block_info(self.current_l2_block_info.value.as_bytes().into()); - validator::BlockNumber(n) - } -} - -impl L1BatchWithWitness { - /// Loads an `L1BatchWithWitness` from storage. - pub(crate) async fn load( - ctx: &ctx::Ctx, - number: L1BatchNumber, - pool: &ConnectionPool, - tree: &dyn TreeApiClient, - ) -> ctx::Result { - let prev_batch = LastBlockWitness::load(ctx, number - 1, pool, tree) - .await - .with_wrap(|| format!("LastBlockWitness::make({})", number - 1))?; - let this_batch = LastBlockWitness::load(ctx, number, pool, tree) - .await - .with_wrap(|| format!("LastBlockWitness::make({number})"))?; - let mut conn = pool.connection(ctx).await.wrap("connection()")?; - let this = Self { - blocks: conn - .payloads( - ctx, - std::ops::Range { - start: prev_batch.last_block() + 1, - end: this_batch.last_block() + 1, - }, - ) - .await - .wrap("payloads()")?, - prev_batch, - this_batch, - }; - Ok(this) - } - - /// Verifies the L1Batch and witness against the commitment. 
- /// WARNING: the following fields of the payload are not currently verified: - /// * `l1_gas_price` - /// * `l2_fair_gas_price` - /// * `fair_pubdata_price` - /// * `virtual_blocks` - /// * `operator_address` - /// * `protocol_version` (present both in payload and witness, but neither has a commitment) - pub(crate) fn verify(&self, comm: &L1BatchCommit) -> anyhow::Result<()> { - let (last_number, last_hash) = self.this_batch.verify(&comm.this_batch)?; - let (mut prev_number, mut prev_hash) = self.prev_batch.verify(&comm.prev_batch)?; - anyhow::ensure!( - self.prev_batch - .info - .batch_number - .checked_add(1) - .context("batch_number overflow")? - == u64::from(comm.number.0) - ); - anyhow::ensure!(self.this_batch.info.batch_number == u64::from(comm.number.0)); - for (i, b) in self.blocks.iter().enumerate() { - anyhow::ensure!(b.l1_batch_number == comm.number); - anyhow::ensure!(b.protocol_version == self.this_batch.protocol_version); - anyhow::ensure!(b.last_in_batch == (i + 1 == self.blocks.len())); - prev_number += 1; - let mut hasher = L2BlockHasher::new(prev_number, b.timestamp, prev_hash); - for t in &b.transactions { - // Reconstruct transaction by converting it back and forth to `abi::Transaction`. - // This allows us to verify that the transaction actually matches the transaction - // hash. - // TODO: make consensus payload contain `abi::Transaction` instead. - // TODO: currently the payload doesn't contain the block number, which is - // annoying. Consider adding it to payload. - let t2: Transaction = abi::Transaction::try_from(t.clone())?.try_into()?; - anyhow::ensure!(t == &t2); - hasher.push_tx_hash(t.hash()); - } - prev_hash = hasher.finalize(self.this_batch.protocol_version); - anyhow::ensure!(prev_hash == b.hash); - } - anyhow::ensure!(prev_hash == last_hash); - anyhow::ensure!(prev_number == last_number); - Ok(()) - } -} diff --git a/core/node/consensus/src/config.rs b/core/node/consensus/src/config.rs index 3584d533f66..4ad7a551ab4 100644 --- a/core/node/consensus/src/config.rs +++ b/core/node/consensus/src/config.rs @@ -169,7 +169,6 @@ pub(super) fn executor( server_addr: cfg.server_addr, public_addr: net::Host(cfg.public_addr.0.clone()), max_payload_size: cfg.max_payload_size, - max_batch_size: cfg.max_batch_size, node_key: node_key(secrets) .context("node_key")? .context("missing node_key")?, @@ -184,6 +183,5 @@ pub(super) fn executor( gossip_static_outbound, rpc, debug_page, - batch_poll_interval: time::Duration::seconds(1), }) } diff --git a/core/node/consensus/src/en.rs b/core/node/consensus/src/en.rs index e4be8d9d687..5e9aadc8f37 100644 --- a/core/node/consensus/src/en.rs +++ b/core/node/consensus/src/en.rs @@ -4,7 +4,7 @@ use anyhow::Context as _; use zksync_concurrency::{ctx, error::Wrap as _, scope, time}; use zksync_consensus_executor::{self as executor, attestation}; use zksync_consensus_roles::{attester, validator}; -use zksync_consensus_storage::{BatchStore, BlockStore}; +use zksync_consensus_storage::{BlockStore, PersistentBlockStore as _}; use zksync_dal::consensus_dal; use zksync_node_sync::{fetcher::FetchedBlock, sync_action::ActionQueueSender, SyncState}; use zksync_types::L2BlockNumber; @@ -21,6 +21,10 @@ use crate::{ storage::{self, ConnectionPool}, }; +/// If less than TEMPORARY_FETCHER_THRESHOLD certificates are missing, +/// the temporary fetcher will stop fetching blocks. +pub(crate) const TEMPORARY_FETCHER_THRESHOLD: u64 = 10; + /// External node. 
pub(super) struct EN {
 pub(super) pool: ConnectionPool,
@@ -32,8 +36,13 @@ impl EN {
 /// Task running a consensus node for the external node.
 /// It may be a validator, but it cannot be a leader (cannot propose blocks).
 ///
- /// NOTE: Before starting the consensus node it fetches all the blocks
+ /// If `enable_pregenesis` is false,
+ /// before starting the consensus node it fetches all the blocks
 /// older than consensus genesis from the main node using json RPC.
+ /// NOTE: currently `enable_pregenesis` is hardcoded to `false` in `era.rs`;
+ /// `true` is used only in tests. Once the `block_metadata` RPC is enabled everywhere,
+ /// this flag should be removed and pregenesis blocks will always be fetched
+ /// over the gossip network.
 pub async fn run(
 self,
 ctx: &ctx::Ctx,
@@ -41,6 +50,7 @@ impl EN {
 cfg: ConsensusConfig,
 secrets: ConsensusSecrets,
 build_version: Option<semver::Version>,
+ enable_pregenesis: bool,
 ) -> anyhow::Result<()> {
 let attester = config::attester_key(&secrets).context("attester_key")?;
@@ -72,13 +82,15 @@ impl EN {
 drop(conn);

 // Fetch blocks before the genesis.
- self.fetch_blocks(
- ctx,
- &mut payload_queue,
- Some(global_config.genesis.first_block),
- )
- .await
- .wrap("fetch_blocks()")?;
+ if !enable_pregenesis {
+ self.fetch_blocks(
+ ctx,
+ &mut payload_queue,
+ Some(global_config.genesis.first_block),
+ )
+ .await
+ .wrap("fetch_blocks()")?;
+ }

 // Monitor the genesis of the main node.
 // If it changes, it means that a hard fork occurred and we need to reset the consensus state.
@@ -88,7 +100,12 @@ impl EN {
 let old = old;
 loop {
 if let Ok(new) = self.fetch_global_config(ctx).await {
- if new != old {
+ // We verify the transition here to work around the situation
+ // where the `consensus_global_config()` RPC fails randomly and the
+ // fallback to the `consensus_genesis()` RPC activates.
+ if new != old
+ && consensus_dal::verify_config_transition(&old, &new).is_ok()
+ {
 return Err(anyhow::format_err!(
 "global config changed: old {old:?}, new {new:?}"
 )
@@ -102,21 +119,35 @@ impl EN {

 // Run consensus component.
 // External nodes have a payload queue which they use to fetch data from the main node.
- let (store, runner) = Store::new(ctx, self.pool.clone(), Some(payload_queue))
- .await
- .wrap("Store::new()")?;
+ let (store, runner) = Store::new(
+ ctx,
+ self.pool.clone(),
+ Some(payload_queue),
+ Some(self.client.clone()),
+ )
+ .await
+ .wrap("Store::new()")?;
 s.spawn_bg(async { Ok(runner.run(ctx).await?) });

+ // Run the temporary fetcher until the certificates are backfilled.
+ // The temporary fetcher should be removed once json RPC syncing is fully deprecated.
+ s.spawn_bg({
+ let store = store.clone();
+ async {
+ let store = store;
+ self.temporary_block_fetcher(ctx, &store).await?;
+ tracing::info!(
+ "temporary block fetcher finished, switching to p2p fetching only"
+ );
+ Ok(())
+ }
+ });
+
 let (block_store, runner) = BlockStore::new(ctx, Box::new(store.clone()))
 .await
 .wrap("BlockStore::new()")?;
 s.spawn_bg(async { Ok(runner.run(ctx).await?) });

- let (batch_store, runner) = BatchStore::new(ctx, Box::new(store.clone()))
- .await
- .wrap("BatchStore::new()")?;
- s.spawn_bg(async { Ok(runner.run(ctx).await?) });
-
 let attestation = Arc::new(attestation::Controller::new(attester));
 s.spawn_bg(self.run_attestation_controller(
 ctx,
@@ -127,7 +158,6 @@ impl EN {
 let executor = executor::Executor {
 config: config::executor(&cfg, &secrets, &global_config, build_version)?,
 block_store,
- batch_store,
 validator: config::validator_key(&secrets)
 .context("validator_key")?
.map(|key| executor::Validator { @@ -192,7 +222,11 @@ impl EN { let mut next = attester::BatchNumber(0); loop { let status = loop { - match self.fetch_attestation_status(ctx).await { + match self + .fetch_attestation_status(ctx) + .await + .wrap("fetch_attestation_status()") + { Err(err) => tracing::warn!("{err:#}"), Ok(status) => { if status.genesis != cfg.genesis.hash() { @@ -210,10 +244,13 @@ impl EN { "waiting for hash of batch {:?}", status.next_batch_to_attest ); - let hash = self - .pool - .wait_for_batch_hash(ctx, status.next_batch_to_attest) - .await?; + let hash = consensus_dal::batch_hash( + &self + .pool + .wait_for_batch_info(ctx, status.next_batch_to_attest, POLL_INTERVAL) + .await + .wrap("wait_for_batch_info()")?, + ); let Some(committee) = registry .attester_committee_for( ctx, @@ -348,8 +385,42 @@ impl EN { } } + /// Fetches blocks from the main node directly, until the certificates + /// are backfilled. This allows for smooth transition from json RPC to p2p block syncing. + pub(crate) async fn temporary_block_fetcher( + &self, + ctx: &ctx::Ctx, + store: &Store, + ) -> ctx::Result<()> { + const MAX_CONCURRENT_REQUESTS: usize = 30; + scope::run!(ctx, |ctx, s| async { + let (send, mut recv) = ctx::channel::bounded(MAX_CONCURRENT_REQUESTS); + s.spawn(async { + let Some(mut next) = store.next_block(ctx).await? else { + return Ok(()); + }; + while store.persisted().borrow().next().0 + TEMPORARY_FETCHER_THRESHOLD < next.0 { + let n = L2BlockNumber(next.0.try_into().context("overflow")?); + self.sync_state.wait_for_main_node_block(ctx, n).await?; + send.send(ctx, s.spawn(self.fetch_block(ctx, n))).await?; + next = next.next(); + } + drop(send); + Ok(()) + }); + while let Ok(block) = recv.recv_or_disconnected(ctx).await? { + store + .queue_next_fetched_block(ctx, block.join(ctx).await?) + .await + .wrap("queue_next_fetched_block()")?; + } + Ok(()) + }) + .await + } + /// Fetches blocks from the main node in range `[cursor.next()..end)`. - pub(super) async fn fetch_blocks( + async fn fetch_blocks( &self, ctx: &ctx::Ctx, queue: &mut storage::PayloadQueue, @@ -363,7 +434,7 @@ impl EN { s.spawn(async { let send = send; while end.map_or(true, |end| next < end) { - let n = L2BlockNumber(next.0.try_into().unwrap()); + let n = L2BlockNumber(next.0.try_into().context("overflow")?); self.sync_state.wait_for_main_node_block(ctx, n).await?; send.send(ctx, s.spawn(self.fetch_block(ctx, n))).await?; next = next.next(); @@ -372,7 +443,7 @@ impl EN { }); while end.map_or(true, |end| queue.next() < end) { let block = recv.recv(ctx).await?.join(ctx).await?; - queue.send(block).await?; + queue.send(block).await.context("queue.send()")?; } Ok(()) }) @@ -381,7 +452,8 @@ impl EN { if first < queue.next() { self.pool .wait_for_payload(ctx, queue.next().prev().unwrap()) - .await?; + .await + .wrap("wait_for_payload()")?; } Ok(()) } diff --git a/core/node/consensus/src/era.rs b/core/node/consensus/src/era.rs index 3150f839680..916b7cdd89a 100644 --- a/core/node/consensus/src/era.rs +++ b/core/node/consensus/src/era.rs @@ -59,8 +59,18 @@ pub async fn run_external_node( is_validator = secrets.validator_key.is_some(), "running external node" ); - en.run(ctx, actions, cfg, secrets, Some(build_version)) - .await + // We will enable it once the main node on all envs supports + // `block_metadata()` JSON RPC method. 
+ let enable_pregenesis = false; + en.run( + ctx, + actions, + cfg, + secrets, + Some(build_version), + enable_pregenesis, + ) + .await } None => { tracing::info!("running fetcher"); diff --git a/core/node/consensus/src/lib.rs b/core/node/consensus/src/lib.rs index ff9cdf86528..8bf078120aa 100644 --- a/core/node/consensus/src/lib.rs +++ b/core/node/consensus/src/lib.rs @@ -6,10 +6,6 @@ use zksync_config::configs::consensus::{ConsensusConfig, ConsensusSecrets}; mod abi; -// Currently `batch` module is only used in tests, -// but will be used in production once batch syncing is implemented in consensus. -#[allow(unused)] -mod batch; mod config; mod en; pub mod era; diff --git a/core/node/consensus/src/mn.rs b/core/node/consensus/src/mn.rs index f80bfe58954..2a280b2f161 100644 --- a/core/node/consensus/src/mn.rs +++ b/core/node/consensus/src/mn.rs @@ -5,12 +5,12 @@ use zksync_concurrency::{ctx, error::Wrap as _, scope, time}; use zksync_config::configs::consensus::{ConsensusConfig, ConsensusSecrets}; use zksync_consensus_executor::{self as executor, attestation}; use zksync_consensus_roles::{attester, validator}; -use zksync_consensus_storage::{BatchStore, BlockStore}; +use zksync_consensus_storage::BlockStore; use zksync_dal::consensus_dal; use crate::{ config, registry, - storage::{ConnectionPool, InsertCertificateError, Store}, + storage::{ConnectionPool, Store}, }; /// Task running a consensus validator for the main node. @@ -43,7 +43,7 @@ pub async fn run_main_node( } // The main node doesn't have a payload queue as it produces all the L2 blocks itself. - let (store, runner) = Store::new(ctx, pool.clone(), None) + let (store, runner) = Store::new(ctx, pool.clone(), None, None) .await .wrap("Store::new()")?; s.spawn_bg(runner.run(ctx)); @@ -67,11 +67,6 @@ pub async fn run_main_node( .wrap("BlockStore::new()")?; s.spawn_bg(runner.run(ctx)); - let (batch_store, runner) = BatchStore::new(ctx, Box::new(store.clone())) - .await - .wrap("BatchStore::new()")?; - s.spawn_bg(runner.run(ctx)); - let attestation = Arc::new(attestation::Controller::new(attester)); s.spawn_bg(run_attestation_controller( ctx, @@ -83,7 +78,6 @@ pub async fn run_main_node( let executor = executor::Executor { config: config::executor(&cfg, &secrets, &global_config, None)?, block_store, - batch_store, validator: Some(executor::Validator { key: validator_key, replica_store: Box::new(store.clone()), @@ -135,9 +129,10 @@ async fn run_attestation_controller( "waiting for hash of batch {:?}", status.next_batch_to_attest ); - let hash = pool - .wait_for_batch_hash(ctx, status.next_batch_to_attest) + let info = pool + .wait_for_batch_info(ctx, status.next_batch_to_attest, POLL_INTERVAL) .await?; + let hash = consensus_dal::batch_hash(&info); let Some(committee) = registry .attester_committee_for(ctx, registry_addr, status.next_batch_to_attest) .await @@ -184,10 +179,7 @@ async fn run_attestation_controller( .wrap("connection()")? 
.insert_batch_certificate(ctx, &qc) .await - .map_err(|err| match err { - InsertCertificateError::Canceled(err) => ctx::Error::Canceled(err), - InsertCertificateError::Inner(err) => ctx::Error::Internal(err.into()), - })?; + .wrap("insert_batch_certificate()")?; } } .await; diff --git a/core/node/consensus/src/registry/abi.rs b/core/node/consensus/src/registry/abi.rs index 55cc7f9264f..57c65b10ce5 100644 --- a/core/node/consensus/src/registry/abi.rs +++ b/core/node/consensus/src/registry/abi.rs @@ -19,7 +19,8 @@ impl AsRef for ConsensusRegistry { } impl ConsensusRegistry { - const FILE: &'static str = "contracts/l2-contracts/artifacts-zk/contracts/ConsensusRegistry.sol/ConsensusRegistry.json"; + const FILE: &'static str = + "contracts/l2-contracts/artifacts-zk/contracts/ConsensusRegistry.sol/ConsensusRegistry.json"; /// Loads bytecode of the contract. #[cfg(test)] diff --git a/core/node/consensus/src/registry/tests.rs b/core/node/consensus/src/registry/tests.rs index 773a1fbbee7..89afc20e1d5 100644 --- a/core/node/consensus/src/registry/tests.rs +++ b/core/node/consensus/src/registry/tests.rs @@ -1,5 +1,5 @@ use rand::Rng as _; -use zksync_concurrency::{ctx, scope}; +use zksync_concurrency::{ctx, scope, time}; use zksync_consensus_roles::{attester, validator::testonly::Setup}; use zksync_test_account::Account; use zksync_types::ProtocolVersionId; @@ -7,7 +7,9 @@ use zksync_types::ProtocolVersionId; use super::*; use crate::storage::ConnectionPool; -// Test checking that parsing logic matches the abi specified in the json file. +const POLL_INTERVAL: time::Duration = time::Duration::milliseconds(500); + +/// Test checking that parsing logic matches the abi specified in the json file. #[test] fn test_consensus_registry_abi() { zksync_concurrency::testonly::abort_on_panic(); @@ -73,10 +75,12 @@ async fn test_attester_committee() { node.push_block(&txs).await; node.seal_batch().await; - pool.wait_for_batch(ctx, node.last_batch()).await?; + pool.wait_for_batch_info(ctx, node.last_batch(), POLL_INTERVAL) + .await + .wrap("wait_for_batch_info()")?; // Read the attester committee using the vm. 
- let batch = attester::BatchNumber(node.last_batch().0.into());
+ let batch = attester::BatchNumber(node.last_batch().0);
 assert_eq!(
 Some(committee),
 registry
diff --git a/core/node/consensus/src/storage/connection.rs b/core/node/consensus/src/storage/connection.rs
index 0f9d7c8527f..6ec5794e968 100644
--- a/core/node/consensus/src/storage/connection.rs
+++ b/core/node/consensus/src/storage/connection.rs
@@ -1,18 +1,18 @@
 use anyhow::Context as _;
 use zksync_concurrency::{ctx, error::Wrap as _, time};
-use zksync_consensus_crypto::keccak256::Keccak256;
 use zksync_consensus_roles::{attester, attester::BatchNumber, validator};
-use zksync_consensus_storage::{self as storage, BatchStoreState};
-use zksync_dal::{consensus_dal, consensus_dal::Payload, Core, CoreDal, DalError};
+use zksync_consensus_storage as storage;
+use zksync_dal::{
+ consensus_dal::{AttestationStatus, BlockMetadata, GlobalConfig, Payload},
+ Core, CoreDal, DalError,
+};
 use zksync_l1_contract_interface::i_executor::structures::StoredBatchInfo;
 use zksync_node_sync::{fetcher::IoCursorExt as _, ActionQueueSender, SyncState};
 use zksync_state_keeper::io::common::IoCursor;
-use zksync_types::{
- commitment::L1BatchWithMetadata, fee_model::BatchFeeInput, L1BatchNumber, L2BlockNumber,
-};
+use zksync_types::{fee_model::BatchFeeInput, L1BatchNumber, L2BlockNumber};
 use zksync_vm_executor::oneshot::{BlockInfo, ResolvedBlockInfo};

-use super::{InsertCertificateError, PayloadQueue};
+use super::PayloadQueue;
 use crate::config;

 /// Context-aware `zksync_dal::ConnectionPool` wrapper.
@@ -54,24 +54,24 @@ impl ConnectionPool {
 /// Waits for the `number` L1 batch hash.
 #[tracing::instrument(skip_all)]
- pub async fn wait_for_batch_hash(
+ pub async fn wait_for_batch_info(
 &self,
 ctx: &ctx::Ctx,
 number: attester::BatchNumber,
- ) -> ctx::Result<attester::BatchHash> {
- const POLL_INTERVAL: time::Duration = time::Duration::milliseconds(500);
+ interval: time::Duration,
+ ) -> ctx::Result<StoredBatchInfo> {
 loop {
- if let Some(hash) = self
+ if let Some(info) = self
 .connection(ctx)
 .await
 .wrap("connection()")?
- .batch_hash(ctx, number)
+ .batch_info(ctx, number)
 .await
- .with_wrap(|| format!("batch_hash({number})"))?
+ .with_wrap(|| format!("batch_info({number})"))?
 {
- return Ok(hash);
+ return Ok(info);
 }
- ctx.sleep(POLL_INTERVAL).await?;
+ ctx.sleep(interval).await?;
 }
 }
 }
@@ -109,16 +109,23 @@ impl<'a> Connection<'a> {
 .map_err(DalError::generalize)?)
 }

- /// Wrapper for `consensus_dal().block_payloads()`.
- pub async fn payloads(
+ pub async fn batch_info(
 &mut self,
 ctx: &ctx::Ctx,
- numbers: std::ops::Range<validator::BlockNumber>,
- ) -> ctx::Result<Vec<Payload>> {
+ n: attester::BatchNumber,
+ ) -> ctx::Result<Option<StoredBatchInfo>> {
+ Ok(ctx.wait(self.0.consensus_dal().batch_info(n)).await??)
+ }
+
+ /// Wrapper for `consensus_dal().block_metadata()`.
+ pub async fn block_metadata(
+ &mut self,
+ ctx: &ctx::Ctx,
+ number: validator::BlockNumber,
+ ) -> ctx::Result<Option<BlockMetadata>> {
 Ok(ctx
- .wait(self.0.consensus_dal().block_payloads(numbers))
- .await?
- .map_err(DalError::generalize)?)
+ .wait(self.0.consensus_dal().block_metadata(number))
+ .await??)
 }

 /// Wrapper for `consensus_dal().block_certificate()`.
@@ -138,7 +145,7 @@ impl<'a> Connection<'a> {
 &mut self,
 ctx: &ctx::Ctx,
 cert: &validator::CommitQC,
- ) -> Result<(), InsertCertificateError> {
+ ) -> Result<(), super::InsertCertificateError> {
 Ok(ctx
 .wait(self.0.consensus_dal().insert_block_certificate(cert))
 .await??)
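// The wrappers above all share one shape: `ctx.wait(fut)` makes a blocking DAL
// future cancellation-aware, and the double `?` first propagates cancellation,
// then the database error. A minimal sketch of that pattern (illustrative only;
// `fetch_value` is a hypothetical stand-in for a `consensus_dal()` call):

use zksync_concurrency::ctx;

// Hypothetical DAL-style call returning `anyhow::Result<T>`.
async fn fetch_value() -> anyhow::Result<u64> {
    Ok(42)
}

// Cancellation-aware wrapper in the style of `Connection`: the outer `?`
// propagates ctx cancellation, the inner `?` propagates the DAL error.
async fn wrapped(ctx: &ctx::Ctx) -> ctx::Result<u64> {
    Ok(ctx.wait(fetch_value()).await??)
}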
@@ -151,20 +158,10 @@ impl<'a> Connection<'a> {
 &mut self,
 ctx: &ctx::Ctx,
 cert: &attester::BatchQC,
- ) -> Result<(), InsertCertificateError> {
- use consensus_dal::InsertCertificateError as E;
- let want_hash = self
- .batch_hash(ctx, cert.message.number)
- .await
- .wrap("batch_hash()")?
- .ok_or(E::MissingPayload)?;
- if want_hash != cert.message.hash {
- return Err(E::PayloadMismatch.into());
- }
+ ) -> ctx::Result<()> {
 Ok(ctx
 .wait(self.0.consensus_dal().insert_batch_certificate(cert))
- .await?
- .map_err(E::Other)?)
+ .await??)
 }

 /// Wrapper for `consensus_dal().upsert_attester_committee()`.
@@ -203,37 +200,6 @@ impl<'a> Connection<'a> {
 .context("sqlx")?)
 }

- /// Wrapper for `consensus_dal().batch_hash()`.
- pub async fn batch_hash(
- &mut self,
- ctx: &ctx::Ctx,
- number: attester::BatchNumber,
- ) -> ctx::Result<Option<attester::BatchHash>> {
- let n = L1BatchNumber(number.0.try_into().context("overflow")?);
- let Some(meta) = ctx
- .wait(self.0.blocks_dal().get_l1_batch_metadata(n))
- .await?
- .context("get_l1_batch_metadata()")?
- else {
- return Ok(None);
- };
- Ok(Some(attester::BatchHash(Keccak256::from_bytes(
- StoredBatchInfo::from(&meta).hash().0,
- ))))
- }
-
- /// Wrapper for `blocks_dal().get_l1_batch_metadata()`.
- pub async fn batch(
- &mut self,
- ctx: &ctx::Ctx,
- number: L1BatchNumber,
- ) -> ctx::Result<Option<L1BatchWithMetadata>> {
- Ok(ctx
- .wait(self.0.blocks_dal().get_l1_batch_metadata(number))
- .await?
- .context("get_l1_batch_metadata()")?)
- }
-
 /// Wrapper for `FetcherCursor::new()`.
 pub async fn new_payload_queue(
 &mut self,
@@ -249,10 +215,7 @@ impl<'a> Connection<'a> {
 }

 /// Wrapper for `consensus_dal().global_config()`.
- pub async fn global_config(
- &mut self,
- ctx: &ctx::Ctx,
- ) -> ctx::Result<Option<consensus_dal::GlobalConfig>> {
+ pub async fn global_config(&mut self, ctx: &ctx::Ctx) -> ctx::Result<Option<GlobalConfig>> {
 Ok(ctx.wait(self.0.consensus_dal().global_config()).await??)
 }

@@ -260,7 +223,7 @@ impl<'a> Connection<'a> {
 pub async fn try_update_global_config(
 &mut self,
 ctx: &ctx::Ctx,
- cfg: &consensus_dal::GlobalConfig,
+ cfg: &GlobalConfig,
 ) -> ctx::Result<()> {
 Ok(ctx
 .wait(self.0.consensus_dal().try_update_global_config(cfg))
@@ -273,14 +236,14 @@ impl<'a> Connection<'a> {
 Ok(ctx.wait(self.0.consensus_dal().next_block()).await??)
 }

- /// Wrapper for `consensus_dal().block_certificates_range()`.
+ /// Wrapper for `consensus_dal().block_store_state()`.
 #[tracing::instrument(skip_all)]
- pub(crate) async fn block_certificates_range(
+ pub(crate) async fn block_store_state(
 &mut self,
 ctx: &ctx::Ctx,
 ) -> ctx::Result<storage::BlockStoreState> {
 Ok(ctx
- .wait(self.0.consensus_dal().block_certificates_range())
+ .wait(self.0.consensus_dal().block_store_state())
 .await??)
 }

@@ -305,7 +268,7 @@ impl<'a> Connection<'a> {
 }

 tracing::info!("Performing a hard fork of consensus.");
- let new = consensus_dal::GlobalConfig {
+ let new = GlobalConfig {
 genesis: validator::GenesisRaw {
 chain_id: spec.chain_id,
 fork_number: old.as_ref().map_or(validator::ForkNumber(0), |old| {
@@ -334,38 +297,35 @@ impl<'a> Connection<'a> {
 &mut self,
 ctx: &ctx::Ctx,
 number: validator::BlockNumber,
- ) -> ctx::Result<Option<validator::FinalBlock>> {
- let Some(justification) = self
- .block_certificate(ctx, number)
- .await
- .wrap("block_certificate()")?
- else {
+ ) -> ctx::Result<Option<validator::Block>> {
+ let Some(payload) = self.payload(ctx, number).await.wrap("payload()")? else {
 return Ok(None);
 };
- let payload = self
- .payload(ctx, number)
+ if let Some(justification) = self
+ .block_certificate(ctx, number)
 .await
- .wrap("payload()")?
- .context("L2 block disappeared from storage")?; - - Ok(Some(validator::FinalBlock { - payload: payload.encode(), - justification, - })) - } + .wrap("block_certificate()")? + { + return Ok(Some( + validator::FinalBlock { + payload: payload.encode(), + justification, + } + .into(), + )); + } - /// Wrapper for `blocks_dal().get_sealed_l1_batch_number()`. - #[tracing::instrument(skip_all)] - pub async fn get_last_batch_number( - &mut self, - ctx: &ctx::Ctx, - ) -> ctx::Result> { - Ok(ctx - .wait(self.0.blocks_dal().get_sealed_l1_batch_number()) - .await? - .context("get_sealed_l1_batch_number()")? - .map(|nr| attester::BatchNumber(nr.0 as u64))) + Ok(Some( + validator::PreGenesisBlock { + number, + payload: payload.encode(), + // We won't use justification until it is possible to verify + // payload against the L1 batch commitment. + justification: validator::Justification(vec![]), + } + .into(), + )) } /// Wrapper for `blocks_dal().get_l2_block_range_of_l1_batch()`. @@ -388,83 +348,11 @@ impl<'a> Connection<'a> { })) } - /// Construct the [attester::SyncBatch] for a given batch number. - pub async fn get_batch( - &mut self, - ctx: &ctx::Ctx, - number: attester::BatchNumber, - ) -> ctx::Result> { - let Some((min, max)) = self - .get_l2_block_range_of_l1_batch(ctx, number) - .await - .context("get_l2_block_range_of_l1_batch()")? - else { - return Ok(None); - }; - - let payloads = self.payloads(ctx, min..max).await.wrap("payloads()")?; - let payloads = payloads.into_iter().map(|p| p.encode()).collect(); - - // TODO: Fill out the proof when we have the stateless L1 batch validation story finished. - // It is supposed to be a Merkle proof that the rolling hash of the batch has been included - // in the L1 system contract state tree. It is *not* the Ethereum state root hash, so producing - // it can be done without an L1 client, which is only required for validation. - let batch = attester::SyncBatch { - number, - payloads, - proof: Vec::new(), - }; - - Ok(Some(batch)) - } - - /// Construct the [storage::BatchStoreState] which contains the earliest batch and the last available [attester::SyncBatch]. - #[tracing::instrument(skip_all)] - pub async fn batches_range(&mut self, ctx: &ctx::Ctx) -> ctx::Result { - let first = self - .0 - .blocks_dal() - .get_earliest_l1_batch_number() - .await - .context("get_earliest_l1_batch_number()")?; - - let first = if first.is_some() { - first - } else { - self.0 - .snapshot_recovery_dal() - .get_applied_snapshot_status() - .await - .context("get_earliest_l1_batch_number()")? - .map(|s| s.l1_batch_number) - }; - - // TODO: In the future when we start filling in the `SyncBatch::proof` field, - // we can only run `get_batch` expecting `Some` result on numbers where the - // L1 state root hash is already available, so that we can produce some - // Merkle proof that the rolling hash of the L2 blocks in the batch has - // been included in the L1 state tree. At that point we probably can't - // call `get_last_batch_number` here, but something that indicates that - // the hashes/commitments on the L1 batch are ready and the thing has - // been included in L1; that potentially requires an API client as well. - let last = self - .get_last_batch_number(ctx) - .await - .context("get_last_batch_number()")?; - - Ok(BatchStoreState { - first: first - .map(|n| attester::BatchNumber(n.0 as u64)) - .unwrap_or(attester::BatchNumber(0)), - last, - }) - } - /// Wrapper for `consensus_dal().attestation_status()`. 
 pub async fn attestation_status(
 &mut self,
 ctx: &ctx::Ctx,
- ) -> ctx::Result<Option<consensus_dal::AttestationStatus>> {
+ ) -> ctx::Result<Option<AttestationStatus>> {
 Ok(ctx
 .wait(self.0.consensus_dal().attestation_status())
 .await?
diff --git a/core/node/consensus/src/storage/store.rs b/core/node/consensus/src/storage/store.rs
index 96a47f5abe7..154509e97b1 100644
--- a/core/node/consensus/src/storage/store.rs
+++ b/core/node/consensus/src/storage/store.rs
@@ -1,15 +1,18 @@
 use std::sync::Arc;

 use anyhow::Context as _;
-use tokio::sync::watch::Sender;
 use tracing::Instrument;
 use zksync_concurrency::{ctx, error::Wrap as _, scope, sync, time};
 use zksync_consensus_bft::PayloadManager;
-use zksync_consensus_roles::{attester, attester::BatchNumber, validator};
-use zksync_consensus_storage::{self as storage, BatchStoreState};
+use zksync_consensus_roles::validator;
+use zksync_consensus_storage::{self as storage};
 use zksync_dal::consensus_dal::{self, Payload};
 use zksync_node_sync::fetcher::{FetchedBlock, FetchedTransaction};
 use zksync_types::L2BlockNumber;
+use zksync_web3_decl::{
+ client::{DynClient, L2},
+ namespaces::EnNamespaceClient as _,
+};

 use super::{Connection, PayloadQueue};
 use crate::storage::{ConnectionPool, InsertCertificateError};
@@ -35,7 +38,7 @@ fn to_fetched_block(
 l1_gas_price: payload.l1_gas_price,
 l2_fair_gas_price: payload.l2_fair_gas_price,
 fair_pubdata_price: payload.fair_pubdata_price,
- pubdata_params: payload.pubdata_params.unwrap_or_default(),
+ pubdata_params: payload.pubdata_params,
 virtual_blocks: payload.virtual_blocks,
 operator_address: payload.operator_address,
 transactions: payload
@@ -47,7 +50,7 @@ fn to_fetched_block(
 }

 /// Wrapper of `ConnectionPool` implementing `ReplicaStore`, `PayloadManager`,
-/// `PersistentBlockStore` and `PersistentBatchStore`.
+/// `PersistentBlockStore`.
 ///
 /// Contains queues to save Quorum Certificates received over gossip to the store
 /// as and when the payload they are over becomes available.
@@ -60,8 +63,8 @@
 block_certificates: ctx::channel::UnboundedSender<validator::CommitQC>,
 /// Range of L2 blocks for which we have a QC persisted.
 blocks_persisted: sync::watch::Receiver<storage::BlockStoreState>,
- /// Range of L1 batches we have persisted.
- batches_persisted: sync::watch::Receiver<BatchStoreState>,
+ /// Main node client. None if this node is the main node.
+ client: Option<Box<DynClient<L2>>>,
 }

 struct PersistedBlockState(sync::watch::Sender<storage::BlockStoreState>);
@@ -70,7 +73,6 @@ struct PersistedBlockState(sync::watch::Sender<storage::BlockStoreState>);
 pub struct StoreRunner {
 pool: ConnectionPool,
 blocks_persisted: PersistedBlockState,
- batches_persisted: sync::watch::Sender<BatchStoreState>,
 block_certificates: ctx::channel::UnboundedReceiver<validator::CommitQC>,
 }
@@ -79,22 +81,15 @@ impl Store {
 ctx: &ctx::Ctx,
 pool: ConnectionPool,
 payload_queue: Option<PayloadQueue>,
+ client: Option<Box<DynClient<L2>>>,
 ) -> ctx::Result<(Store, StoreRunner)> {
 let mut conn = pool.connection(ctx).await.wrap("connection()")?;

 // Initial state of persisted blocks
- let blocks_persisted = conn
- .block_certificates_range(ctx)
- .await
- .wrap("block_certificates_range()")?;
-
- // Initial state of persisted batches
- let batches_persisted = conn.batches_range(ctx).await.wrap("batches_range()")?;
-
+ let blocks_persisted = conn.block_store_state(ctx).await.wrap("block_store_state()")?;
 drop(conn);

 let blocks_persisted = sync::watch::channel(blocks_persisted).0;
- let batches_persisted = sync::watch::channel(batches_persisted).0;
 let (block_certs_send, block_certs_recv) = ctx::channel::unbounded();

 Ok((
@@ -103,12 +98,11 @@ impl Store {
 block_certificates: block_certs_send,
 block_payloads: Arc::new(sync::Mutex::new(payload_queue)),
 blocks_persisted: blocks_persisted.subscribe(),
- batches_persisted: batches_persisted.subscribe(),
+ client,
 },
 StoreRunner {
 pool,
 blocks_persisted: PersistedBlockState(blocks_persisted),
- batches_persisted,
 block_certificates: block_certs_recv,
 },
 ))
@@ -118,6 +112,30 @@ impl Store {
 async fn conn(&self, ctx: &ctx::Ctx) -> ctx::Result<Connection<'_>> {
 self.pool.connection(ctx).await.wrap("connection")
 }
+
+ /// Number of the next block to queue.
+ pub(crate) async fn next_block(
+ &self,
+ ctx: &ctx::Ctx,
+ ) -> ctx::OrCanceled<Option<validator::BlockNumber>> {
+ Ok(sync::lock(ctx, &self.block_payloads)
+ .await?
+ .as_ref()
+ .map(|p| p.next()))
+ }
+
+ /// Queues the next block.
+ pub(crate) async fn queue_next_fetched_block(
+ &self,
+ ctx: &ctx::Ctx,
+ block: FetchedBlock,
+ ) -> ctx::Result<()> {
+ let mut payloads = sync::lock(ctx, &self.block_payloads).await?.into_async();
+ if let Some(payloads) = &mut *payloads {
+ payloads.send(block).await.context("payloads.send()")?;
+ }
+ Ok(())
+ }
 }

 impl PersistedBlockState {
@@ -126,7 +144,7 @@ impl PersistedBlockState {
 /// If `persisted.first` is moved forward, it means that blocks have been pruned.
 /// If `persisted.last` is moved forward, it means that new blocks with certificates have been
 /// persisted.
- #[tracing::instrument(skip_all, fields(first = %new.first, last = ?new.last.as_ref().map(|l| l.message.proposal.number)))]
+ #[tracing::instrument(skip_all, fields(first = %new.first, next = ?new.next()))]
 fn update(&self, new: storage::BlockStoreState) {
 self.0.send_if_modified(|p| {
 if &new == p {
@@ -140,10 +158,11 @@ impl PersistedBlockState {
 });
 }

- /// Checks if the given certificate is exactly the next one that should
- /// be persisted.
+ /// Checks if the given certificate should be eventually persisted.
+ /// The block store state is a range of blocks for which we already have
+ /// certificates, so only certs for later blocks need to be persisted.
 fn should_be_persisted(&self, cert: &validator::CommitQC) -> bool {
- self.0.borrow().next() == cert.header().number
+ self.0.borrow().next() <= cert.header().number
 }

 /// Appends the `cert` to `persisted` range.
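// The relaxed `should_be_persisted` above accepts any certificate at or past
// `next()` instead of exactly the next one, while the append path in the hunk
// that follows still advances strictly in order. A simplified sketch of the two
// rules, using plain block numbers in place of `BlockStoreState`/`CommitQC`:

// Simplified stand-in for `PersistedBlockState`.
struct Persisted {
    /// First block number that still lacks a persisted certificate.
    next: u64,
}

impl Persisted {
    // Certificates for already-covered blocks can be dropped; anything at or
    // past `next` may eventually be persisted (hence `<=`, not `==`).
    fn should_be_persisted(&self, cert_number: u64) -> bool {
        self.next <= cert_number
    }

    // Appending stays strictly ordered: only the exact next certificate
    // advances the persisted range.
    fn append(&mut self, cert_number: u64) -> bool {
        if self.next != cert_number {
            return false;
        }
        self.next += 1;
        true
    }
}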
@@ -153,7 +172,7 @@ impl PersistedBlockState { if p.next() != cert.header().number { return false; } - p.last = Some(cert); + p.last = Some(storage::Last::Final(cert)); true }); } @@ -164,7 +183,6 @@ impl StoreRunner { let StoreRunner { pool, blocks_persisted, - batches_persisted, mut block_certificates, } = self; @@ -177,13 +195,13 @@ impl StoreRunner { ) -> ctx::Result<()> { const POLL_INTERVAL: time::Duration = time::Duration::seconds(1); - let range = pool + let state = pool .connection(ctx) .await? - .block_certificates_range(ctx) + .block_store_state(ctx) .await - .wrap("block_certificates_range()")?; - blocks_persisted.update(range); + .wrap("block_store_state()")?; + blocks_persisted.update(state); ctx.sleep(POLL_INTERVAL).await?; Ok(()) @@ -196,60 +214,6 @@ impl StoreRunner { } }); - #[tracing::instrument(skip_all, fields(l1_batch = %next_batch_number))] - async fn gossip_sync_batches_iteration( - ctx: &ctx::Ctx, - pool: &ConnectionPool, - next_batch_number: &mut BatchNumber, - batches_persisted: &Sender, - ) -> ctx::Result<()> { - const POLL_INTERVAL: time::Duration = time::Duration::seconds(1); - - let mut conn = pool.connection(ctx).await?; - if let Some(last_batch_number) = conn - .get_last_batch_number(ctx) - .await - .wrap("last_batch_number()")? - { - if last_batch_number >= *next_batch_number { - let range = conn.batches_range(ctx).await.wrap("batches_range()")?; - *next_batch_number = last_batch_number.next(); - tracing::info_span!("batches_persisted_send").in_scope(|| { - batches_persisted.send_replace(range); - }); - } - } - ctx.sleep(POLL_INTERVAL).await?; - - Ok(()) - } - - // NOTE: Running this update loop will trigger the gossip of `SyncBatches` which is currently - // pointless as there is no proof and we have to ignore them. We can disable it, but bear in - // mind that any node which gossips the availability will cause pushes and pulls in the consensus. - s.spawn::<()>(async { - // Loop updating `batches_persisted` whenever a new L1 batch is available in the database. - // We have to do this because the L1 batch is produced as L2 blocks are executed, - // which can happen on a different machine or in a different process, so we can't rely on some - // DAL method updating this memory construct. However I'm not sure that `BatchStoreState` - // really has to contain the full blown last batch, or whether it could have for example - // just the number of it. We can't just use the `attester::BatchQC`, which would make it - // analogous to the `BlockStoreState`, because the `SyncBatch` mechanism is for catching - // up with L1 batches from peers _without_ the QC, based on L1 inclusion proofs instead. - // Nevertheless since the `SyncBatch` contains all transactions for all L2 blocks, - // we can try to make it less frequent by querying just the last batch number first. 
- let mut next_batch_number = { batches_persisted.borrow().next() }; - loop { - gossip_sync_batches_iteration( - ctx, - &pool, - &mut next_batch_number, - &batches_persisted, - ) - .await?; - } - }); - #[tracing::instrument(skip_all)] async fn insert_block_certificates_iteration( ctx: &ctx::Ctx, @@ -291,9 +255,7 @@ impl StoreRunner { Err(InsertCertificateError::Canceled(err)) => { return Err(ctx::Error::Canceled(err)) } - Err(InsertCertificateError::Inner(err)) => { - return Err(ctx::Error::Internal(anyhow::Error::from(err))) - } + Err(err) => Err(err).context("insert_block_certificate()")?, } } @@ -340,7 +302,7 @@ impl storage::PersistentBlockStore for Store { &self, ctx: &ctx::Ctx, number: validator::BlockNumber, - ) -> ctx::Result { + ) -> ctx::Result { Ok(self .conn(ctx) .await? @@ -349,6 +311,41 @@ impl storage::PersistentBlockStore for Store { .context("not found")?) } + async fn verify_pregenesis_block( + &self, + ctx: &ctx::Ctx, + block: &validator::PreGenesisBlock, + ) -> ctx::Result<()> { + // We simply ask the main node for the payload hash and compare it against the received + // payload. + let meta = match &self.client { + None => self + .conn(ctx) + .await? + .block_metadata(ctx, block.number) + .await? + .context("metadata not in storage")?, + Some(client) => { + let meta = ctx + .wait(client.block_metadata(L2BlockNumber( + block.number.0.try_into().context("overflow")?, + ))) + .await? + .context("block_metadata()")? + .context("metadata not available")?; + zksync_protobuf::serde::Deserialize { + deny_unknown_fields: false, + } + .proto_fmt(&meta.0) + .context("deserialize()")? + } + }; + if meta.payload_hash != block.payload.hash() { + return Err(anyhow::format_err!("payload hash mismatch").into()); + } + Ok(()) + } + /// If actions queue is set (and the block has not been stored yet), /// the block will be translated into a sequence of actions. /// The received actions should be fed @@ -357,19 +354,21 @@ impl storage::PersistentBlockStore for Store { /// `store_next_block()` call will wait synchronously for the L2 block. /// Once the L2 block is observed in storage, `store_next_block()` will store a cert for this /// L2 block. - async fn queue_next_block( - &self, - ctx: &ctx::Ctx, - block: validator::FinalBlock, - ) -> ctx::Result<()> { + async fn queue_next_block(&self, ctx: &ctx::Ctx, block: validator::Block) -> ctx::Result<()> { let mut payloads = sync::lock(ctx, &self.block_payloads).await?.into_async(); + let (p, j) = match &block { + validator::Block::Final(block) => (&block.payload, Some(&block.justification)), + validator::Block::PreGenesis(block) => (&block.payload, None), + }; if let Some(payloads) = &mut *payloads { payloads - .send(to_fetched_block(block.number(), &block.payload).context("to_fetched_block")?) + .send(to_fetched_block(block.number(), p).context("to_fetched_block")?) .await - .context("payload_queue.send()")?; + .context("payloads.send()")?; + } + if let Some(justification) = j { + self.block_certificates.send(justification.clone()); } - self.block_certificates.send(block.justification); Ok(()) } } @@ -456,43 +455,3 @@ impl PayloadManager for Store { Ok(()) } } - -#[async_trait::async_trait] -impl storage::PersistentBatchStore for Store { - /// Range of batches persisted in storage. - fn persisted(&self) -> sync::watch::Receiver { - self.batches_persisted.clone() - } - - /// Returns the batch with the given number. - async fn get_batch( - &self, - ctx: &ctx::Ctx, - number: attester::BatchNumber, - ) -> ctx::Result> { - self.conn(ctx) - .await? 
- .get_batch(ctx, number)
- .await
- .wrap("get_batch")
- }
-
- /// Queue the batch to be persisted in storage.
- ///
- /// The caller [BatchStore] ensures that this is only called when the batch is the next expected one.
- async fn queue_next_batch(
- &self,
- _ctx: &ctx::Ctx,
- _batch: attester::SyncBatch,
- ) -> ctx::Result<()> {
- // Currently the gossiping of `SyncBatch` and the `BatchStoreState` is unconditionally started by the `Network::run_stream` in consensus,
- // and as long as any node reports new batches available by updating the `PersistentBatchStore::persisted` here, the other nodes
- // will start pulling the corresponding batches, which will end up being passed to this method.
- // If we return an error here or panic, it will stop the whole consensus task tree due to the way scopes work, so instead just return immediately.
- // In the future we have to validate the proof against the L1 state root hash, which IIUC we can't do just yet.
-
- // Err(anyhow::format_err!("unimplemented: queue_next_batch should not be called until we have the stateless L1 batch story completed.").into())
-
- Ok(())
- }
-}
diff --git a/core/node/consensus/src/storage/testonly.rs b/core/node/consensus/src/storage/testonly.rs
index 65c464d98b9..2aed011d23c 100644
--- a/core/node/consensus/src/storage/testonly.rs
+++ b/core/node/consensus/src/storage/testonly.rs
@@ -7,8 +7,8 @@ use zksync_dal::CoreDal as _;
 use zksync_node_genesis::{insert_genesis_batch, mock_genesis_config, GenesisParams};
 use zksync_node_test_utils::{recover, snapshot, Snapshot};
 use zksync_types::{
- commitment::L1BatchWithMetadata, protocol_version::ProtocolSemanticVersion,
- system_contracts::get_system_smart_contracts, L1BatchNumber, L2BlockNumber, ProtocolVersionId,
+ protocol_version::ProtocolSemanticVersion, system_contracts::get_system_smart_contracts,
+ L1BatchNumber, L2BlockNumber, ProtocolVersionId,
 };

 use super::{Connection, ConnectionPool};
@@ -57,7 +57,7 @@ pub(crate) fn mock_genesis_params(protocol_version: ProtocolVersionId) -> GenesisParams {
 GenesisParams::from_genesis_config(
 cfg,
 BaseSystemContracts::load_from_disk(),
- get_system_smart_contracts(),
+ get_system_smart_contracts(false),
 )
 .unwrap()
}
@@ -102,28 +102,6 @@ impl ConnectionPool {
 Ok(())
 }

- /// Waits for the `number` L1 batch.
- pub async fn wait_for_batch(
- &self,
- ctx: &ctx::Ctx,
- number: L1BatchNumber,
- ) -> ctx::Result<L1BatchWithMetadata> {
- const POLL_INTERVAL: time::Duration = time::Duration::milliseconds(50);
- loop {
- if let Some(payload) = self
- .connection(ctx)
- .await
- .wrap("connection()")?
- .batch(ctx, number)
- .await
- .wrap("batch()")?
- {
- return Ok(payload);
- }
- ctx.sleep(POLL_INTERVAL).await?;
- }
- }
-
 /// Takes a storage snapshot at the last sealed L1 batch.
 pub(crate) async fn snapshot(&self, ctx: &ctx::Ctx) -> ctx::Result<Snapshot> {
 let mut conn = self.connection(ctx).await.wrap("connection()")?;
@@ -152,21 +130,32 @@ impl ConnectionPool {
 Self(pool)
 }

- /// Waits for `want_last` block to have certificate then fetches all L2 blocks with certificates.
- pub async fn wait_for_block_certificates(
+ /// Waits for `want_last` block then fetches all L2 blocks with certificates.
+ pub async fn wait_for_blocks(
 &self,
 ctx: &ctx::Ctx,
 want_last: validator::BlockNumber,
- ) -> ctx::Result<Vec<validator::FinalBlock>> {
- self.wait_for_block_certificate(ctx, want_last).await?;
+ ) -> ctx::Result<Vec<validator::Block>> {
+ const POLL_INTERVAL: time::Duration = time::Duration::milliseconds(100);
+ let state = loop {
+ let state = self
+ .connection(ctx)
+ .await
+ .wrap("connection()")?
+ .block_store_state(ctx)
+ .await
+ .wrap("block_store_state()")?;
+ tracing::info!("state.next() = {}", state.next());
+ if state.next() > want_last {
+ break state;
+ }
+ ctx.sleep(POLL_INTERVAL).await?;
+ };
+
+ assert_eq!(want_last.next(), state.next());
 let mut conn = self.connection(ctx).await.wrap("connection()")?;
- let range = conn
- .block_certificates_range(ctx)
- .await
- .wrap("certificates_range()")?;
- assert_eq!(want_last.next(), range.next());
- let mut blocks: Vec<validator::FinalBlock> = vec![];
- for i in range.first.0..range.next().0 {
+ let mut blocks: Vec<validator::Block> = vec![];
+ for i in state.first.0..state.next().0 {
 let i = validator::BlockNumber(i);
 let block = conn.block(ctx, i).await.context("block()")?.unwrap();
 blocks.push(block);
@@ -174,13 +163,13 @@ impl ConnectionPool {
 Ok(blocks)
 }

- /// Same as `wait_for_certificates`, but additionally verifies all the blocks against genesis.
- pub async fn wait_for_block_certificates_and_verify(
+ /// Same as `wait_for_blocks`, but additionally verifies all certificates.
+ pub async fn wait_for_blocks_and_verify_certs(
 &self,
 ctx: &ctx::Ctx,
 want_last: validator::BlockNumber,
- ) -> ctx::Result<Vec<validator::FinalBlock>> {
- let blocks = self.wait_for_block_certificates(ctx, want_last).await?;
+ ) -> ctx::Result<Vec<validator::Block>> {
+ let blocks = self.wait_for_blocks(ctx, want_last).await?;
 let cfg = self
 .connection(ctx)
 .await
@@ -190,7 +179,9 @@ impl ConnectionPool {
 .wrap("genesis()")?
 .context("genesis is missing")?;
 for block in &blocks {
- block.verify(&cfg.genesis).context(block.number())?;
+ if let validator::Block::Final(block) = block {
+ block.verify(&cfg.genesis).context(block.number())?;
+ }
 }
 Ok(blocks)
 }
@@ -228,19 +219,11 @@ impl ConnectionPool {
 let registry = registry::Registry::new(cfg.genesis.clone(), self.clone()).await;
 for i in first.0..want_last.0 {
 let i = attester::BatchNumber(i);
- let hash = conn
- .batch_hash(ctx, i)
- .await
- .wrap("batch_hash()")?
- .context("hash missing")?;
 let cert = conn
 .batch_certificate(ctx, i)
 .await
 .wrap("batch_certificate")?
 .context("cert missing")?;
- if cert.message.hash != hash {
- return Err(anyhow::format_err!("cert[{i:?}]: hash mismatch").into());
- }
 let committee = registry
 .attester_committee_for(ctx, registry_addr, i)
 .await
@@ -255,28 +238,30 @@ impl ConnectionPool {
 pub async fn prune_batches(
 &self,
 ctx: &ctx::Ctx,
- last_batch: L1BatchNumber,
+ last_batch: attester::BatchNumber,
 ) -> ctx::Result<()> {
 let mut conn = self.connection(ctx).await.context("connection()")?;
- let (_, last_block) = ctx
- .wait(
- conn.0
- .blocks_dal()
- .get_l2_block_range_of_l1_batch(last_batch),
- )
- .await?
- .context("get_l2_block_range_of_l1_batch()")?
- .context("batch not found")?;
- conn.0
- .pruning_dal()
- .soft_prune_batches_range(last_batch, last_block)
- .await
- .context("soft_prune_batches_range()")?;
- conn.0
- .pruning_dal()
- .hard_prune_batches_range(last_batch, last_block)
+ let (_, last_block) = conn
+ .get_l2_block_range_of_l1_batch(ctx, last_batch)
 .await
- .context("hard_prune_batches_range()")?;
+ .wrap("get_l2_block_range_of_l1_batch()")?
+ .context("batch not found")?;
+ let last_batch = L1BatchNumber(last_batch.0.try_into().context("overflow")?);
+ let last_block = L2BlockNumber(last_block.0.try_into().context("overflow")?);
+ ctx.wait(
+ conn.0
+ .pruning_dal()
+ .soft_prune_batches_range(last_batch, last_block),
+ )
+ .await?
+ .context("soft_prune_batches_range()")?;
+ ctx.wait(
+ conn.0
+ .pruning_dal()
+ .hard_prune_batches_range(last_batch, last_block),
+ )
+ .await?
+ .context("hard_prune_batches_range()")?; Ok(()) } } diff --git a/core/node/consensus/src/testonly.rs b/core/node/consensus/src/testonly.rs index 1996928b26e..db433665e57 100644 --- a/core/node/consensus/src/testonly.rs +++ b/core/node/consensus/src/testonly.rs @@ -16,10 +16,7 @@ use zksync_consensus_crypto::TextFmt as _; use zksync_consensus_network as network; use zksync_consensus_roles::{attester, validator, validator::testonly::Setup}; use zksync_dal::{CoreDal, DalError}; -use zksync_l1_contract_interface::i_executor::structures::StoredBatchInfo; -use zksync_metadata_calculator::{ - LazyAsyncTreeReader, MetadataCalculator, MetadataCalculatorConfig, -}; +use zksync_metadata_calculator::{MetadataCalculator, MetadataCalculatorConfig}; use zksync_node_api_server::web3::{state::InternalApiConfig, testonly::TestServerBuilder}; use zksync_node_genesis::GenesisParams; use zksync_node_sync::{ @@ -49,9 +46,8 @@ use zksync_types::{ use zksync_web3_decl::client::{Client, DynClient, L2}; use crate::{ - batch::{L1BatchCommit, L1BatchWithWitness, LastBlockCommit}, en, - storage::ConnectionPool, + storage::{ConnectionPool, Store}, }; /// Fake StateKeeper for tests. @@ -70,7 +66,6 @@ pub(super) struct StateKeeper { sync_state: SyncState, addr: sync::watch::Receiver>, pool: ConnectionPool, - tree_reader: LazyAsyncTreeReader, } #[derive(Clone)] @@ -78,6 +73,7 @@ pub(super) struct ConfigSet { net: network::Config, pub(super) config: config::ConsensusConfig, pub(super) secrets: config::ConsensusSecrets, + pub(super) enable_pregenesis: bool, } impl ConfigSet { @@ -87,11 +83,17 @@ impl ConfigSet { config: make_config(&net, None), secrets: make_secrets(&net, None), net, + enable_pregenesis: self.enable_pregenesis, } } } -pub(super) fn new_configs(rng: &mut impl Rng, setup: &Setup, seed_peers: usize) -> Vec { +pub(super) fn new_configs( + rng: &mut impl Rng, + setup: &Setup, + seed_peers: usize, + pregenesis: bool, +) -> Vec { let net_cfgs = network::testonly::new_configs(rng, setup, 0); let genesis_spec = config::GenesisSpec { chain_id: setup.genesis.chain_id.0.try_into().unwrap(), @@ -131,6 +133,7 @@ pub(super) fn new_configs(rng: &mut impl Rng, setup: &Setup, seed_peers: usize) config: make_config(&net, Some(genesis_spec.clone())), secrets: make_secrets(&net, setup.attester_keys.get(i).cloned()), net, + enable_pregenesis: pregenesis, }) .collect() } @@ -154,6 +157,7 @@ fn make_config( genesis_spec: Option, ) -> config::ConsensusConfig { config::ConsensusConfig { + port: Some(cfg.server_addr.port()), server_addr: *cfg.server_addr, public_addr: config::Host(cfg.public_addr.0.clone()), max_payload_size: usize::MAX, @@ -248,7 +252,6 @@ impl StateKeeper { let metadata_calculator = MetadataCalculator::new(config, None, pool.0.clone()) .await .context("MetadataCalculator::new()")?; - let tree_reader = metadata_calculator.tree_reader(); Ok(( Self { protocol_version, @@ -261,7 +264,6 @@ impl StateKeeper { sync_state: sync_state.clone(), addr: addr.subscribe(), pool: pool.clone(), - tree_reader, }, StateKeeperRunner { actions_queue, @@ -370,51 +372,14 @@ impl StateKeeper { } /// Batch of the `last_block`. - pub fn last_batch(&self) -> L1BatchNumber { - self.last_batch + pub fn last_batch(&self) -> attester::BatchNumber { + attester::BatchNumber(self.last_batch.0.into()) } /// Last L1 batch that has been sealed and will have /// metadata computed eventually. 
- pub fn last_sealed_batch(&self) -> L1BatchNumber { - self.last_batch - (!self.batch_sealed) as u32 - } - - /// Loads a commitment to L1 batch directly from the database. - // TODO: ideally, we should rather fake fetching it from Ethereum. - // We can use `zksync_eth_client::clients::MockEthereum` for that, - // which implements `EthInterface`. It should be enough to use - // `MockEthereum.with_call_handler()`. - pub async fn load_batch_commit( - &self, - ctx: &ctx::Ctx, - number: L1BatchNumber, - ) -> ctx::Result { - // TODO: we should mock the `eth_sender` as well. - let mut conn = self.pool.connection(ctx).await?; - let this = conn.batch(ctx, number).await?.context("missing batch")?; - let prev = conn - .batch(ctx, number - 1) - .await? - .context("missing batch")?; - Ok(L1BatchCommit { - number, - this_batch: LastBlockCommit { - info: StoredBatchInfo::from(&this).hash(), - }, - prev_batch: LastBlockCommit { - info: StoredBatchInfo::from(&prev).hash(), - }, - }) - } - - /// Loads an `L1BatchWithWitness`. - pub async fn load_batch_with_witness( - &self, - ctx: &ctx::Ctx, - n: L1BatchNumber, - ) -> ctx::Result { - L1BatchWithWitness::load(ctx, n, &self.pool, &self.tree_reader).await + pub fn last_sealed_batch(&self) -> attester::BatchNumber { + attester::BatchNumber((self.last_batch.0 - (!self.batch_sealed) as u32).into()) } /// Connects to the json RPC endpoint exposed by the state keeper. @@ -456,6 +421,40 @@ impl StateKeeper { .await } + pub async fn run_temporary_fetcher( + self, + ctx: &ctx::Ctx, + client: Box>, + ) -> ctx::Result<()> { + scope::run!(ctx, |ctx, s| async { + let payload_queue = self + .pool + .connection(ctx) + .await + .wrap("connection()")? + .new_payload_queue(ctx, self.actions_sender, self.sync_state.clone()) + .await + .wrap("new_payload_queue()")?; + let (store, runner) = Store::new( + ctx, + self.pool.clone(), + Some(payload_queue), + Some(client.clone()), + ) + .await + .wrap("Store::new()")?; + s.spawn_bg(async { Ok(runner.run(ctx).await?) }); + en::EN { + pool: self.pool.clone(), + client, + sync_state: self.sync_state.clone(), + } + .temporary_block_fetcher(ctx, &store) + .await + }) + .await + } + /// Runs consensus node for the external node. 
pub async fn run_consensus( self, @@ -474,6 +473,7 @@ impl StateKeeper { cfgs.config, cfgs.secrets, cfgs.net.build_version, + cfgs.enable_pregenesis, ) .await } @@ -571,7 +571,9 @@ impl StateKeeperRunner { self.pool.0.clone(), Some(ethabi::Address::repeat_byte(11)), 5, - ); + ) + .await + .unwrap(); let io = ExternalIO::new( self.pool.0.clone(), @@ -678,7 +680,9 @@ impl StateKeeperRunner { self.pool.0.clone(), Some(ethabi::Address::repeat_byte(11)), 5, - ); + ) + .await + .unwrap(); let tree_writes_persistence = TreeWritesPersistence::new(self.pool.0.clone()); let io = ExternalIO::new( diff --git a/core/node/consensus/src/tests/attestation.rs b/core/node/consensus/src/tests/attestation.rs index 35d849ae616..2701a986e9e 100644 --- a/core/node/consensus/src/tests/attestation.rs +++ b/core/node/consensus/src/tests/attestation.rs @@ -1,6 +1,6 @@ use anyhow::Context as _; use rand::Rng as _; -use test_casing::test_casing; +use test_casing::{test_casing, Product}; use tracing::Instrument as _; use zksync_concurrency::{ctx, error::Wrap, scope}; use zksync_consensus_roles::{ @@ -9,10 +9,10 @@ use zksync_consensus_roles::{ }; use zksync_dal::consensus_dal; use zksync_test_account::Account; -use zksync_types::{L1BatchNumber, ProtocolVersionId}; +use zksync_types::ProtocolVersionId; use zksync_web3_decl::namespaces::EnNamespaceClient as _; -use super::VERSIONS; +use super::{POLL_INTERVAL, PREGENESIS, VERSIONS}; use crate::{ mn::run_main_node, registry::{testonly, Registry}, @@ -34,13 +34,13 @@ async fn test_attestation_status_api(version: ProtocolVersionId) { s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("validator"))); // Setup nontrivial genesis. - while sk.last_sealed_batch() < L1BatchNumber(3) { + while sk.last_sealed_batch() < attester::BatchNumber(3) { sk.push_random_blocks(rng, account, 10).await; } let mut setup = SetupSpec::new(rng, 3); setup.first_block = sk.last_block(); let first_batch = sk.last_batch(); - let setup = Setup::from(setup); + let setup = Setup::from_spec(rng, setup); let mut conn = pool.connection(ctx).await.wrap("connection()")?; conn.try_update_global_config( ctx, @@ -54,7 +54,9 @@ async fn test_attestation_status_api(version: ProtocolVersionId) { .wrap("try_update_global_config()")?; // Make sure that the first_batch is actually sealed. sk.seal_batch().await; - pool.wait_for_batch(ctx, first_batch).await?; + pool.wait_for_batch_info(ctx, first_batch, POLL_INTERVAL) + .await + .wrap("wait_for_batch_info()")?; // Connect to API endpoint. let api = sk.connect(ctx).await?; @@ -77,18 +79,18 @@ async fn test_attestation_status_api(version: ProtocolVersionId) { let status = fetch_status().await?; assert_eq!( status.next_batch_to_attest, - attester::BatchNumber(first_batch.0.into()) + attester::BatchNumber(first_batch.0) ); tracing::info!("Insert a cert"); { let mut conn = pool.connection(ctx).await?; let number = status.next_batch_to_attest; - let hash = conn.batch_hash(ctx, number).await?.unwrap(); + let info = conn.batch_info(ctx, number).await?.unwrap(); let gcfg = conn.global_config(ctx).await?.unwrap(); let m = attester::Batch { number, - hash, + hash: consensus_dal::batch_hash(&info), genesis: gcfg.genesis.hash(), }; let mut sigs = attester::MultiSig::default(); @@ -124,9 +126,9 @@ async fn test_attestation_status_api(version: ProtocolVersionId) { // Test running a couple of attesters (which are also validators). // Main node is expected to collect all certificates. // External nodes are expected to just vote for the batch. 
-#[test_casing(2, VERSIONS)] +#[test_casing(4, Product((VERSIONS,PREGENESIS)))] #[tokio::test] -async fn test_multiple_attesters(version: ProtocolVersionId) { +async fn test_multiple_attesters(version: ProtocolVersionId, pregenesis: bool) { const NODES: usize = 4; zksync_concurrency::testonly::abort_on_panic(); @@ -135,7 +137,7 @@ async fn test_multiple_attesters(version: ProtocolVersionId) { let account = &mut Account::random(); let to_fund = &[account.address]; let setup = Setup::new(rng, 4); - let mut cfgs = new_configs(rng, &setup, NODES); + let mut cfgs = new_configs(rng, &setup, NODES, pregenesis); scope::run!(ctx, |ctx, s| async { let validator_pool = ConnectionPool::test(false, version).await; let (mut validator, runner) = StateKeeper::new(ctx, validator_pool.clone()).await?; @@ -235,7 +237,7 @@ async fn test_multiple_attesters(version: ProtocolVersionId) { } tracing::info!("Wait for the batches to be attested"); - let want_last = attester::BatchNumber(validator.last_sealed_batch().0.into()); + let want_last = attester::BatchNumber(validator.last_sealed_batch().0); validator_pool .wait_for_batch_certificates_and_verify(ctx, want_last, Some(registry_addr)) .await?; diff --git a/core/node/consensus/src/tests/batch.rs b/core/node/consensus/src/tests/batch.rs deleted file mode 100644 index f0cae7f2c02..00000000000 --- a/core/node/consensus/src/tests/batch.rs +++ /dev/null @@ -1,124 +0,0 @@ -use test_casing::{test_casing, Product}; -use zksync_concurrency::{ctx, scope}; -use zksync_consensus_roles::validator; -use zksync_test_account::Account; -use zksync_types::{L1BatchNumber, ProtocolVersionId}; - -use super::{FROM_SNAPSHOT, VERSIONS}; -use crate::{storage::ConnectionPool, testonly}; - -#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] -#[tokio::test] -async fn test_connection_get_batch(from_snapshot: bool, version: ProtocolVersionId) { - zksync_concurrency::testonly::abort_on_panic(); - let ctx = &ctx::test_root(&ctx::RealClock); - let rng = &mut ctx.rng(); - let pool = ConnectionPool::test(from_snapshot, version).await; - let account = &mut Account::random(); - - // Fill storage with unsigned L2 blocks and L1 batches in a way that the - // last L1 batch is guaranteed to have some L2 blocks executed in it. - scope::run!(ctx, |ctx, s| async { - // Start state keeper. - let (mut sk, runner) = testonly::StateKeeper::new(ctx, pool.clone()).await?; - s.spawn_bg(runner.run(ctx)); - - for _ in 0..3 { - for _ in 0..2 { - sk.push_random_block(rng, account).await; - } - sk.seal_batch().await; - } - sk.push_random_block(rng, account).await; - - pool.wait_for_payload(ctx, sk.last_block()).await?; - - Ok(()) - }) - .await - .unwrap(); - - // Now we can try to retrieve the batch. - scope::run!(ctx, |ctx, _s| async { - let mut conn = pool.connection(ctx).await?; - let batches = conn.batches_range(ctx).await?; - let last = batches.last.expect("last is set"); - let (min, max) = conn - .get_l2_block_range_of_l1_batch(ctx, last) - .await? - .unwrap(); - - let last_batch = conn - .get_batch(ctx, last) - .await? 
- .expect("last batch can be retrieved"); - - assert_eq!( - last_batch.payloads.len(), - (max.0 - min.0) as usize, - "all block payloads present" - ); - - let first_payload = last_batch - .payloads - .first() - .expect("last batch has payloads"); - - let want_payload = conn.payload(ctx, min).await?.expect("payload is in the DB"); - let want_payload = want_payload.encode(); - - assert_eq!( - first_payload, &want_payload, - "first payload is the right number" - ); - - anyhow::Ok(()) - }) - .await - .unwrap(); -} - -/// Tests that generated L1 batch witnesses can be verified successfully. -/// TODO: add tests for verification failures. -#[test_casing(2, VERSIONS)] -#[tokio::test] -async fn test_batch_witness(version: ProtocolVersionId) { - zksync_concurrency::testonly::abort_on_panic(); - let ctx = &ctx::test_root(&ctx::RealClock); - let rng = &mut ctx.rng(); - let account = &mut Account::random(); - let to_fund = &[account.address]; - - scope::run!(ctx, |ctx, s| async { - let pool = ConnectionPool::from_genesis(version).await; - let (mut node, runner) = testonly::StateKeeper::new(ctx, pool.clone()).await?; - s.spawn_bg(runner.run_real(ctx, to_fund)); - - tracing::info!("analyzing storage"); - { - let mut conn = pool.connection(ctx).await.unwrap(); - let mut n = validator::BlockNumber(0); - while let Some(p) = conn.payload(ctx, n).await? { - tracing::info!("block[{n}] = {p:?}"); - n = n + 1; - } - } - - // Seal a bunch of batches. - node.push_random_blocks(rng, account, 10).await; - node.seal_batch().await; - pool.wait_for_batch(ctx, node.last_sealed_batch()).await?; - // We can verify only 2nd batch onward, because - // batch witness verifies parent of the last block of the - // previous batch (and 0th batch contains only 1 block). - for n in 2..=node.last_sealed_batch().0 { - let n = L1BatchNumber(n); - let batch_with_witness = node.load_batch_with_witness(ctx, n).await?; - let commit = node.load_batch_commit(ctx, n).await?; - batch_with_witness.verify(&commit)?; - } - Ok(()) - }) - .await - .unwrap(); -} diff --git a/core/node/consensus/src/tests/mod.rs b/core/node/consensus/src/tests/mod.rs index 52abe3c810c..8da17cfba8a 100644 --- a/core/node/consensus/src/tests/mod.rs +++ b/core/node/consensus/src/tests/mod.rs @@ -2,29 +2,121 @@ use anyhow::Context as _; use rand::Rng as _; use test_casing::{test_casing, Product}; use tracing::Instrument as _; -use zksync_concurrency::{ctx, error::Wrap as _, scope}; +use zksync_concurrency::{ctx, error::Wrap as _, scope, time}; use zksync_config::configs::consensus as config; use zksync_consensus_crypto::TextFmt as _; use zksync_consensus_roles::{ node, validator, validator::testonly::{Setup, SetupSpec}, }; -use zksync_consensus_storage::BlockStore; +use zksync_consensus_storage::{BlockStore, PersistentBlockStore}; use zksync_dal::consensus_dal; use zksync_test_account::Account; use zksync_types::ProtocolVersionId; +use zksync_web3_decl::namespaces::EnNamespaceClient as _; use crate::{ + en::TEMPORARY_FETCHER_THRESHOLD, mn::run_main_node, storage::{ConnectionPool, Store}, testonly, }; mod attestation; -mod batch; const VERSIONS: [ProtocolVersionId; 2] = [ProtocolVersionId::latest(), ProtocolVersionId::next()]; const FROM_SNAPSHOT: [bool; 2] = [true, false]; +const PREGENESIS: [bool; 2] = [true, false]; +const POLL_INTERVAL: time::Duration = time::Duration::milliseconds(500); + +#[test_casing(2, VERSIONS)] +#[tokio::test] +async fn test_verify_pregenesis_block(version: ProtocolVersionId) { + zksync_concurrency::testonly::abort_on_panic(); + let ctx = 
&ctx::test_root(&ctx::AffineClock::new(10.)); + let rng = &mut ctx.rng(); + let account = &mut Account::random(); + let mut setup = SetupSpec::new(rng, 3); + setup.first_block = validator::BlockNumber(1000); + let setup = Setup::from_spec(rng, setup); + let cfg = consensus_dal::GlobalConfig { + genesis: setup.genesis.clone(), + registry_address: None, + seed_peers: [].into(), + }; + + scope::run!(ctx, |ctx, s| async { + tracing::info!("Start state keeper."); + let pool = ConnectionPool::test(/*from_snapshot=*/ false, version).await; + pool.connection(ctx) + .await + .wrap("connection()")? + .try_update_global_config(ctx, &cfg) + .await + .wrap("try_update_global_config()")?; + let (mut sk, runner) = testonly::StateKeeper::new(ctx, pool.clone()).await?; + s.spawn_bg(runner.run(ctx)); + + tracing::info!("Populate storage with a bunch of blocks."); + sk.push_random_blocks(rng, account, 5).await; + sk.seal_batch().await; + let blocks: Vec<_> = pool + .wait_for_blocks(ctx, sk.last_block()) + .await + .context("wait_for_blocks()")? + .into_iter() + .map(|b| match b { + validator::Block::PreGenesis(b) => b, + _ => panic!(), + }) + .collect(); + assert!(!blocks.is_empty()); + + tracing::info!("Create another store"); + let pool = ConnectionPool::test(/*from_snapshot=*/ false, version).await; + pool.connection(ctx) + .await + .wrap("connection()")? + .try_update_global_config(ctx, &cfg) + .await + .wrap("try_update_global_config()")?; + let (store, runner) = Store::new( + ctx, + pool.clone(), + None, + Some(sk.connect(ctx).await.unwrap()), + ) + .await + .unwrap(); + s.spawn_bg(runner.run(ctx)); + + tracing::info!("All the blocks from the main node should be valid."); + for b in &blocks { + store.verify_pregenesis_block(ctx, b).await.unwrap(); + } + tracing::info!("Malformed blocks should not be valid"); + for b in &blocks { + let mut p = consensus_dal::Payload::decode(&b.payload).unwrap(); + // Arbitrary small change. + p.timestamp = rng.gen(); + store + .verify_pregenesis_block( + ctx, + &validator::PreGenesisBlock { + number: b.number, + justification: b.justification.clone(), + payload: p.encode(), + }, + ) + .await + .unwrap_err(); + } + + Ok(()) + }) + .await + .unwrap(); +} #[test_casing(2, VERSIONS)] #[tokio::test] @@ -36,7 +128,7 @@ async fn test_validator_block_store(version: ProtocolVersionId) { let account = &mut Account::random(); // Fill storage with unsigned L2 blocks. - // Fetch a suffix of blocks that we will generate (fake) certs for. + // Fetch a suffix of blocks that we will generate certs for. let want = scope::run!(ctx, |ctx, s| async { // Start state keeper. let (mut sk, runner) = testonly::StateKeeper::new(ctx, pool.clone()).await?; @@ -44,8 +136,9 @@ async fn test_validator_block_store(version: ProtocolVersionId) { sk.push_random_blocks(rng, account, 10).await; pool.wait_for_payload(ctx, sk.last_block()).await?; let mut setup = SetupSpec::new(rng, 3); - setup.first_block = validator::BlockNumber(4); - let mut setup = Setup::from(setup); + setup.first_block = validator::BlockNumber(0); + setup.first_pregenesis_block = setup.first_block; + let mut setup = Setup::from_spec(rng, setup); let mut conn = pool.connection(ctx).await.wrap("connection()")?; conn.try_update_global_config( ctx, @@ -75,7 +168,7 @@ async fn test_validator_block_store(version: ProtocolVersionId) { // Insert blocks one by one and check the storage state. 
for (i, block) in want.iter().enumerate() { scope::run!(ctx, |ctx, s| async { - let (store, runner) = Store::new(ctx, pool.clone(), None).await.unwrap(); + let (store, runner) = Store::new(ctx, pool.clone(), None, None).await.unwrap(); s.spawn_bg(runner.run(ctx)); let (block_store, runner) = BlockStore::new(ctx, Box::new(store.clone())).await.unwrap(); @@ -85,10 +178,7 @@ async fn test_validator_block_store(version: ProtocolVersionId) { .wait_until_persisted(ctx, block.number()) .await .unwrap(); - let got = pool - .wait_for_block_certificates(ctx, block.number()) - .await - .unwrap(); + let got = pool.wait_for_blocks(ctx, block.number()).await.unwrap(); assert_eq!(want[..=i], got); Ok(()) }) @@ -100,14 +190,14 @@ async fn test_validator_block_store(version: ProtocolVersionId) { // In the current implementation, consensus certificates are created asynchronously // for the L2 blocks constructed by the StateKeeper. This means that consensus actor // is effectively just back filling the consensus certificates for the L2 blocks in storage. -#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] +#[test_casing(8, Product((FROM_SNAPSHOT,VERSIONS,PREGENESIS)))] #[tokio::test] -async fn test_validator(from_snapshot: bool, version: ProtocolVersionId) { +async fn test_validator(from_snapshot: bool, version: ProtocolVersionId, pregenesis: bool) { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); - let cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); + let cfg = testonly::new_configs(rng, &setup, 0, pregenesis)[0].clone(); let account = &mut Account::random(); scope::run!(ctx, |ctx, s| async { @@ -149,9 +239,9 @@ async fn test_validator(from_snapshot: bool, version: ProtocolVersionId) { tracing::info!("Verify all certificates"); pool - .wait_for_block_certificates_and_verify(ctx, sk.last_block()) + .wait_for_blocks_and_verify_certs(ctx, sk.last_block()) .await - .context("wait_for_block_certificates_and_verify()")?; + .context("wait_for_blocks_and_verify_certs()")?; Ok(()) }) .await @@ -164,14 +254,14 @@ async fn test_validator(from_snapshot: bool, version: ProtocolVersionId) { } // Test running a validator node and 2 full nodes recovered from different snapshots. -#[test_casing(2, VERSIONS)] +#[test_casing(4, Product((VERSIONS,PREGENESIS)))] #[tokio::test] -async fn test_nodes_from_various_snapshots(version: ProtocolVersionId) { +async fn test_nodes_from_various_snapshots(version: ProtocolVersionId, pregenesis: bool) { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); - let validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); + let validator_cfg = testonly::new_configs(rng, &setup, 0, pregenesis)[0].clone(); let account = &mut Account::random(); scope::run!(ctx, |ctx, s| async { @@ -226,15 +316,15 @@ async fn test_nodes_from_various_snapshots(version: ProtocolVersionId) { tracing::info!("produce more blocks and compare storages"); validator.push_random_blocks(rng, account, 5).await; let want = validator_pool - .wait_for_block_certificates_and_verify(ctx, validator.last_block()) + .wait_for_blocks_and_verify_certs(ctx, validator.last_block()) .await?; // node stores should be suffixes for validator store. 
for got in [ node_pool - .wait_for_block_certificates_and_verify(ctx, validator.last_block()) + .wait_for_blocks_and_verify_certs(ctx, validator.last_block()) .await?, node_pool2 - .wait_for_block_certificates_and_verify(ctx, validator.last_block()) + .wait_for_blocks_and_verify_certs(ctx, validator.last_block()) .await?, ] { assert_eq!(want[want.len() - got.len()..], got[..]); @@ -245,14 +335,14 @@ async fn test_nodes_from_various_snapshots(version: ProtocolVersionId) { .unwrap(); } -#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] +#[test_casing(8, Product((FROM_SNAPSHOT,VERSIONS,PREGENESIS)))] #[tokio::test] -async fn test_config_change(from_snapshot: bool, version: ProtocolVersionId) { +async fn test_config_change(from_snapshot: bool, version: ProtocolVersionId, pregenesis: bool) { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); - let mut validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); + let mut validator_cfg = testonly::new_configs(rng, &setup, 0, pregenesis)[0].clone(); let node_cfg = validator_cfg.new_fullnode(rng); let account = &mut Account::random(); @@ -304,12 +394,12 @@ async fn test_config_change(from_snapshot: bool, version: ProtocolVersionId) { validator.push_random_blocks(rng, account, 5).await; let want_last = validator.last_block(); let want = validator_pool - .wait_for_block_certificates_and_verify(ctx, want_last) + .wait_for_blocks_and_verify_certs(ctx, want_last) .await?; assert_eq!( want, node_pool - .wait_for_block_certificates_and_verify(ctx, want_last) + .wait_for_blocks_and_verify_certs(ctx, want_last) .await? ); Ok(()) @@ -322,16 +412,16 @@ async fn test_config_change(from_snapshot: bool, version: ProtocolVersionId) { // Test running a validator node and a couple of full nodes. // Validator is producing signed blocks and fetchers are expected to fetch // them directly or indirectly. -#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] +#[test_casing(8, Product((FROM_SNAPSHOT,VERSIONS,PREGENESIS)))] #[tokio::test] -async fn test_full_nodes(from_snapshot: bool, version: ProtocolVersionId) { +async fn test_full_nodes(from_snapshot: bool, version: ProtocolVersionId, pregenesis: bool) { const NODES: usize = 2; zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); - let validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); + let validator_cfg = testonly::new_configs(rng, &setup, 0, pregenesis)[0].clone(); let account = &mut Account::random(); // topology: @@ -391,13 +481,15 @@ async fn test_full_nodes(from_snapshot: bool, version: ProtocolVersionId) { // Note that block from before and after genesis have to be fetched. validator.push_random_blocks(rng, account, 5).await; let want_last = validator.last_block(); + tracing::info!("Waiting for the validator to produce block {want_last}."); let want = validator_pool - .wait_for_block_certificates_and_verify(ctx, want_last) + .wait_for_blocks_and_verify_certs(ctx, want_last) .await?; + tracing::info!("Waiting for the nodes to fetch block {want_last}."); for pool in &node_pools { assert_eq!( want, - pool.wait_for_block_certificates_and_verify(ctx, want_last) + pool.wait_for_blocks_and_verify_certs(ctx, want_last) .await? 
); } @@ -408,16 +500,16 @@ async fn test_full_nodes(from_snapshot: bool, version: ProtocolVersionId) { } // Test running external node (non-leader) validators. -#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] +#[test_casing(8, Product((FROM_SNAPSHOT,VERSIONS,PREGENESIS)))] #[tokio::test] -async fn test_en_validators(from_snapshot: bool, version: ProtocolVersionId) { +async fn test_en_validators(from_snapshot: bool, version: ProtocolVersionId, pregenesis: bool) { const NODES: usize = 3; zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); let setup = Setup::new(rng, NODES); - let cfgs = testonly::new_configs(rng, &setup, 1); + let cfgs = testonly::new_configs(rng, &setup, 1, pregenesis); let account = &mut Account::random(); // Run all nodes in parallel. @@ -475,12 +567,12 @@ async fn test_en_validators(from_snapshot: bool, version: ProtocolVersionId) { main_node.push_random_blocks(rng, account, 5).await; let want_last = main_node.last_block(); let want = main_node_pool - .wait_for_block_certificates_and_verify(ctx, want_last) + .wait_for_blocks_and_verify_certs(ctx, want_last) .await?; for pool in &ext_node_pools { assert_eq!( want, - pool.wait_for_block_certificates_and_verify(ctx, want_last) + pool.wait_for_blocks_and_verify_certs(ctx, want_last) .await? ); } @@ -491,14 +583,18 @@ async fn test_en_validators(from_snapshot: bool, version: ProtocolVersionId) { } // Test fetcher back filling missing certs. -#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] +#[test_casing(8, Product((FROM_SNAPSHOT,VERSIONS,PREGENESIS)))] #[tokio::test] -async fn test_p2p_fetcher_backfill_certs(from_snapshot: bool, version: ProtocolVersionId) { +async fn test_p2p_fetcher_backfill_certs( + from_snapshot: bool, + version: ProtocolVersionId, + pregenesis: bool, +) { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); - let validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); + let validator_cfg = testonly::new_configs(rng, &setup, 0, pregenesis)[0].clone(); let node_cfg = validator_cfg.new_fullnode(rng); let account = &mut Account::random(); @@ -555,10 +651,10 @@ async fn test_p2p_fetcher_backfill_certs(from_snapshot: bool, version: ProtocolV s.spawn_bg(node.run_consensus(ctx, client.clone(), node_cfg)); validator.push_random_blocks(rng, account, 3).await; let want = validator_pool - .wait_for_block_certificates_and_verify(ctx, validator.last_block()) + .wait_for_blocks_and_verify_certs(ctx, validator.last_block()) .await?; let got = node_pool - .wait_for_block_certificates_and_verify(ctx, validator.last_block()) + .wait_for_blocks_and_verify_certs(ctx, validator.last_block()) .await?; assert_eq!(want, got); Ok(()) @@ -571,14 +667,144 @@ async fn test_p2p_fetcher_backfill_certs(from_snapshot: bool, version: ProtocolV .unwrap(); } -#[test_casing(2, VERSIONS)] +// Test temporary fetcher fetching blocks if a lot of certs are missing. +#[test_casing(8, Product((FROM_SNAPSHOT,VERSIONS,PREGENESIS)))] +#[tokio::test] +async fn test_temporary_fetcher(from_snapshot: bool, version: ProtocolVersionId, pregenesis: bool) { + zksync_concurrency::testonly::abort_on_panic(); + let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); + let rng = &mut ctx.rng(); + // We force certs to be missing on EN by having 1 of the validators permanently offline. + // This way no blocks will be finalized at all, so no one will have certs. 
+ let setup = Setup::new(rng, 2); + let validator_cfg = testonly::new_configs(rng, &setup, 0, pregenesis)[0].clone(); + let node_cfg = validator_cfg.new_fullnode(rng); + let account = &mut Account::random(); + + scope::run!(ctx, |ctx, s| async { + tracing::info!("Spawn validator."); + let validator_pool = ConnectionPool::test(from_snapshot, version).await; + let (mut validator, runner) = + testonly::StateKeeper::new(ctx, validator_pool.clone()).await?; + s.spawn_bg(runner.run(ctx)); + s.spawn_bg(run_main_node( + ctx, + validator_cfg.config.clone(), + validator_cfg.secrets.clone(), + validator_pool.clone(), + )); + // API server needs at least 1 L1 batch to start. + validator.seal_batch().await; + let client = validator.connect(ctx).await?; + + // Wait for the consensus to be initialized. + while ctx.wait(client.consensus_global_config()).await??.is_none() { + ctx.sleep(time::Duration::milliseconds(100)).await?; + } + + let node_pool = ConnectionPool::test(from_snapshot, version).await; + + tracing::info!("Run centralized fetcher, so that there is a lot of certs missing."); + scope::run!(ctx, |ctx, s| async { + let (node, runner) = testonly::StateKeeper::new(ctx, node_pool.clone()).await?; + s.spawn_bg(runner.run(ctx)); + s.spawn_bg(node.run_fetcher(ctx, client.clone())); + validator + .push_random_blocks(rng, account, TEMPORARY_FETCHER_THRESHOLD as usize + 1) + .await; + node_pool + .wait_for_payload(ctx, validator.last_block()) + .await?; + Ok(()) + }) + .await + .unwrap(); + + tracing::info!( + "Run p2p fetcher. Blocks should be fetched by the temporary fetcher anyway." + ); + scope::run!(ctx, |ctx, s| async { + let (node, runner) = testonly::StateKeeper::new(ctx, node_pool.clone()).await?; + s.spawn_bg(runner.run(ctx)); + s.spawn_bg(node.run_consensus(ctx, client.clone(), node_cfg.clone())); + validator.push_random_blocks(rng, account, 5).await; + node_pool + .wait_for_payload(ctx, validator.last_block()) + .await?; + Ok(()) + }) + .await + .unwrap(); + Ok(()) + }) + .await + .unwrap(); +} + +// Test that temporary fetcher terminates once enough blocks have certs. +#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] #[tokio::test] -async fn test_with_pruning(version: ProtocolVersionId) { +async fn test_temporary_fetcher_termination(from_snapshot: bool, version: ProtocolVersionId) { + zksync_concurrency::testonly::abort_on_panic(); + let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); + let rng = &mut ctx.rng(); + let setup = Setup::new(rng, 1); + let pregenesis = true; + let validator_cfg = testonly::new_configs(rng, &setup, 0, pregenesis)[0].clone(); + let node_cfg = validator_cfg.new_fullnode(rng); + let account = &mut Account::random(); + + scope::run!(ctx, |ctx, s| async { + tracing::info!("Spawn validator."); + let validator_pool = ConnectionPool::test(from_snapshot, version).await; + let (mut validator, runner) = + testonly::StateKeeper::new(ctx, validator_pool.clone()).await?; + s.spawn_bg(runner.run(ctx)); + s.spawn_bg(run_main_node( + ctx, + validator_cfg.config.clone(), + validator_cfg.secrets.clone(), + validator_pool.clone(), + )); + // API server needs at least 1 L1 batch to start. + validator.seal_batch().await; + let client = validator.connect(ctx).await?; + + let node_pool = ConnectionPool::test(from_snapshot, version).await; + + // Run the EN so the consensus is initialized on EN and wait for it to sync. 
+ scope::run!(ctx, |ctx, s| async { + let (node, runner) = testonly::StateKeeper::new(ctx, node_pool.clone()).await?; + s.spawn_bg(runner.run(ctx)); + s.spawn_bg(node.run_consensus(ctx, client.clone(), node_cfg.clone())); + validator.push_random_blocks(rng, account, 5).await; + node_pool + .wait_for_payload(ctx, validator.last_block()) + .await?; + Ok(()) + }) + .await + .unwrap(); + + // Run the temporary fetcher. It should terminate immediately, since EN is synced. + let (node, runner) = testonly::StateKeeper::new(ctx, node_pool.clone()).await?; + s.spawn_bg(runner.run(ctx)); + node.run_temporary_fetcher(ctx, client).await?; + + Ok(()) + }) + .await + .unwrap(); +} + +#[test_casing(4, Product((VERSIONS,PREGENESIS)))] +#[tokio::test] +async fn test_with_pruning(version: ProtocolVersionId, pregenesis: bool) { zksync_concurrency::testonly::abort_on_panic(); let ctx = &ctx::test_root(&ctx::RealClock); let rng = &mut ctx.rng(); let setup = Setup::new(rng, 1); - let validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); + let validator_cfg = testonly::new_configs(rng, &setup, 0, pregenesis)[0].clone(); let node_cfg = validator_cfg.new_fullnode(rng); let account = &mut Account::random(); @@ -642,27 +868,28 @@ async fn test_with_pruning(version: ProtocolVersionId) { validator.push_random_blocks(rng, account, 5).await; validator.seal_batch().await; validator_pool - .wait_for_batch(ctx, validator.last_sealed_batch()) - .await?; + .wait_for_batch_info(ctx, validator.last_sealed_batch(), POLL_INTERVAL) + .await + .wrap("wait_for_batch_info()")?; // The main node is not supposed to be pruned. In particular `ConsensusDal::attestation_status` // does not look for where the last prune happened at, and thus if we prune the block genesis // points at, we might never be able to start the Executor. 
tracing::info!("Wait until the external node has all the batches we want to prune"); node_pool - .wait_for_batch(ctx, to_prune.next()) + .wait_for_batch_info(ctx, to_prune.next(), POLL_INTERVAL) .await - .context("wait_for_batch()")?; + .wrap("wait_for_batch_info()")?; tracing::info!("Prune some blocks and sync more"); node_pool .prune_batches(ctx, to_prune) .await - .context("prune_batches")?; + .wrap("prune_batches")?; validator.push_random_blocks(rng, account, 5).await; node_pool - .wait_for_block_certificates(ctx, validator.last_block()) + .wait_for_blocks(ctx, validator.last_block()) .await - .context("wait_for_block_certificates()")?; + .wrap("wait_for_blocks()")?; Ok(()) }) .await diff --git a/core/node/consensus/src/vm.rs b/core/node/consensus/src/vm.rs index 149e6b3ccb0..46b84c34061 100644 --- a/core/node/consensus/src/vm.rs +++ b/core/node/consensus/src/vm.rs @@ -1,11 +1,15 @@ +use std::sync::Arc; + use anyhow::Context as _; use tokio::runtime::Handle; -use zksync_concurrency::{ctx, error::Wrap as _}; +use zksync_concurrency::{ctx, error::Wrap as _, scope}; use zksync_consensus_roles::attester; use zksync_state::PostgresStorage; use zksync_system_constants::DEFAULT_L2_TX_GAS_PER_PUBDATA_BYTE; use zksync_types::{ethabi, fee::Fee, l2::L2Tx, AccountTreeId, L2ChainId, Nonce, U256}; -use zksync_vm_executor::oneshot::{CallOrExecute, MainOneshotExecutor, OneshotEnvParameters}; +use zksync_vm_executor::oneshot::{ + CallOrExecute, MainOneshotExecutor, MultiVMBaseSystemContracts, OneshotEnvParameters, +}; use zksync_vm_interface::{ executor::OneshotExecutor, ExecutionResult, OneshotTracingParams, TxExecutionArgs, }; @@ -23,16 +27,17 @@ pub(crate) struct VM { impl VM { /// Constructs a new `VM` instance. pub async fn new(pool: ConnectionPool) -> Self { + let base_system_contracts = + scope::wait_blocking(MultiVMBaseSystemContracts::load_eth_call_blocking).await; Self { pool, // L2 chain ID and fee account don't seem to matter for calls, hence the use of default values. - options: OneshotEnvParameters::for_execution( + options: OneshotEnvParameters::new( + Arc::new(base_system_contracts), L2ChainId::default(), AccountTreeId::default(), u32::MAX, - ) - .await - .expect("OneshotExecutorOptions"), + ), executor: MainOneshotExecutor::new(usize::MAX), } } diff --git a/core/node/consistency_checker/src/lib.rs b/core/node/consistency_checker/src/lib.rs index d2d84669978..d87fcf935b0 100644 --- a/core/node/consistency_checker/src/lib.rs +++ b/core/node/consistency_checker/src/lib.rs @@ -22,7 +22,7 @@ use zksync_types::{ commitment::{L1BatchCommitmentMode, L1BatchWithMetadata}, ethabi, ethabi::{ParamType, Token}, - pubdata_da::PubdataDA, + pubdata_da::PubdataSendingMode, Address, L1BatchNumber, ProtocolVersionId, H256, U256, }; @@ -233,8 +233,8 @@ impl LocalL1BatchCommitData { let da = detect_da(protocol_version, reference) .context("cannot detect DA source from reference commitment token")?; - // For `PubdataDA::Calldata`, it's required that the pubdata fits into a single blob. - if matches!(da, PubdataDA::Calldata) { + // For `PubdataSendingMode::Calldata`, it's required that the pubdata fits into a single blob. 
+ if matches!(da, PubdataSendingMode::Calldata) { let pubdata_len = self .l1_batch .header @@ -268,7 +268,7 @@ impl LocalL1BatchCommitData { pub fn detect_da( protocol_version: ProtocolVersionId, reference: &Token, -) -> Result<PubdataDA, ethabi::Error> { +) -> Result<PubdataSendingMode, ethabi::Error> { /// These are used by the L1 Contracts to indicate what DA layer is used for pubdata const PUBDATA_SOURCE_CALLDATA: u8 = 0; const PUBDATA_SOURCE_BLOBS: u8 = 1; @@ -279,7 +279,7 @@ pub fn detect_da( } if protocol_version.is_pre_1_4_2() { - return Ok(PubdataDA::Calldata); + return Ok(PubdataSendingMode::Calldata); } let reference = match reference { @@ -301,9 +301,9 @@ pub fn detect_da( ))), }; match last_reference_token.first() { - Some(&byte) if byte == PUBDATA_SOURCE_CALLDATA => Ok(PubdataDA::Calldata), - Some(&byte) if byte == PUBDATA_SOURCE_BLOBS => Ok(PubdataDA::Blobs), - Some(&byte) if byte == PUBDATA_SOURCE_CUSTOM => Ok(PubdataDA::Custom), + Some(&byte) if byte == PUBDATA_SOURCE_CALLDATA => Ok(PubdataSendingMode::Calldata), + Some(&byte) if byte == PUBDATA_SOURCE_BLOBS => Ok(PubdataSendingMode::Blobs), + Some(&byte) if byte == PUBDATA_SOURCE_CUSTOM => Ok(PubdataSendingMode::Custom), Some(&byte) => Err(parse_error(format!( "unexpected first byte of the last reference token; expected one of [{PUBDATA_SOURCE_CALLDATA}, {PUBDATA_SOURCE_BLOBS}], \ got {byte}" diff --git a/core/node/consistency_checker/src/tests/mod.rs index 296780513e0..b1c78b481a8 100644 --- a/core/node/consistency_checker/src/tests/mod.rs +++ b/core/node/consistency_checker/src/tests/mod.rs @@ -64,7 +64,7 @@ pub(crate) fn build_commit_tx_input_data( let tokens = CommitBatches { last_committed_l1_batch: &batches[0], l1_batches: batches, - pubdata_da: PubdataDA::Calldata, + pubdata_da: PubdataSendingMode::Calldata, mode, } .into_tokens(); @@ -168,7 +168,7 @@ fn build_commit_tx_input_data_is_correct(commitment_mode: L1BatchCommitmentMode) .unwrap(); assert_eq!( commit_data, - CommitBatchInfo::new(commitment_mode, batch, PubdataDA::Calldata).into_token(), + CommitBatchInfo::new(commitment_mode, batch, PubdataSendingMode::Calldata).into_token(), ); } } diff --git a/core/node/da_clients/Cargo.toml index 60b65067f48..fa2f15920bd 100644 --- a/core/node/da_clients/Cargo.toml +++ b/core/node/da_clients/Cargo.toml @@ -37,3 +37,6 @@ blake2b_simd.workspace = true jsonrpsee = { workspace = true, features = ["ws-client"] } parity-scale-codec = { workspace = true, features = ["derive"] } subxt-signer = { workspace = true, features = ["sr25519", "native"] } +reqwest = { workspace = true } +bytes = { workspace = true } +backon.workspace = true
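Note on the `detect_da` rename above: the consistency checker maps the first byte of the last reference token onto the (now renamed) `PubdataSendingMode` enum. A minimal standalone sketch of that byte-to-variant mapping follows; the enum is redeclared here purely for illustration (the real one lives in `zksync_types::pubdata_da`), and `PUBDATA_SOURCE_CUSTOM = 2` is an assumption following the 0/1 sequence visible in the hunk:

```rust
/// Illustration only: mirrors the match in `detect_da`.
#[derive(Debug, PartialEq, Eq)]
enum PubdataSendingMode {
    Calldata,
    Blobs,
    Custom,
}

fn pubdata_source_from_byte(byte: u8) -> Result<PubdataSendingMode, String> {
    // One-byte source tags written by the L1 contracts.
    const PUBDATA_SOURCE_CALLDATA: u8 = 0;
    const PUBDATA_SOURCE_BLOBS: u8 = 1;
    const PUBDATA_SOURCE_CUSTOM: u8 = 2; // assumed value, not shown in the hunk

    match byte {
        PUBDATA_SOURCE_CALLDATA => Ok(PubdataSendingMode::Calldata),
        PUBDATA_SOURCE_BLOBS => Ok(PubdataSendingMode::Blobs),
        PUBDATA_SOURCE_CUSTOM => Ok(PubdataSendingMode::Custom),
        byte => Err(format!("unexpected pubdata source byte: {byte}")),
    }
}
```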
diff --git a/core/node/da_clients/src/avail/client.rs index 7718691bf18..46d652d5713 100644 --- a/core/node/da_clients/src/avail/client.rs +++ b/core/node/da_clients/src/avail/client.rs @@ -1,34 +1,133 @@ -use std::{fmt::Debug, sync::Arc}; +use std::{fmt::Debug, sync::Arc, time::Duration}; +use anyhow::anyhow; use async_trait::async_trait; use jsonrpsee::ws_client::WsClientBuilder; +use serde::{Deserialize, Serialize}; use subxt_signer::ExposeSecret; -use zksync_config::configs::da_client::avail::{AvailConfig, AvailSecrets}; +use zksync_config::configs::da_client::avail::{AvailClientConfig, AvailConfig, AvailSecrets}; use zksync_da_client::{ types::{DAError, DispatchResponse, InclusionData}, DataAvailabilityClient, }; +use zksync_types::{ ethabi::{self, Token}, web3::contract::Tokenize, H256, U256, }; + +use crate::avail::sdk::{GasRelayClient, RawAvailClient}; -use crate::avail::sdk::RawAvailClient; +#[derive(Debug, Clone)] +enum AvailClientMode { + Default(Box<RawAvailClient>), + GasRelay(GasRelayClient), +} /// An implementation of the `DataAvailabilityClient` trait that interacts with the Avail network. #[derive(Debug, Clone)] pub struct AvailClient { config: AvailConfig, - sdk_client: Arc<RawAvailClient>, + sdk_client: Arc<AvailClientMode>, + api_client: Arc<reqwest::Client>, // bridge API reqwest client +} + +#[derive(Deserialize, Serialize, Debug, Clone)] +#[serde(rename_all = "camelCase")] +pub struct BridgeAPIResponse { + blob_root: Option<H256>, + bridge_root: Option<H256>, + data_root_index: Option<U256>, + data_root_proof: Option<Vec<H256>>, + leaf: Option<H256>, + leaf_index: Option<U256>, + leaf_proof: Option<Vec<H256>>, + range_hash: Option<H256>, + error: Option<String>, +} + +#[derive(Deserialize, Serialize, Debug)] +#[serde(rename_all = "camelCase")] +struct MerkleProofInput { + // proof of inclusion for the data root + data_root_proof: Vec<H256>, + // proof of inclusion of leaf within blob/bridge root + leaf_proof: Vec<H256>, + // abi.encodePacked(startBlock, endBlock) of header range commitment on vectorx + range_hash: H256, + // index of the data root in the commitment tree + data_root_index: U256, + // blob root to check proof against, or reconstruct the data root + blob_root: H256, + // bridge root to check proof against, or reconstruct the data root + bridge_root: H256, + // leaf being proven + leaf: H256, + // index of the leaf in the blob/bridge root tree + leaf_index: U256, +} + +impl Tokenize for MerkleProofInput { + fn into_tokens(self) -> Vec<Token> { + vec![Token::Tuple(vec![ + Token::Array( + self.data_root_proof + .iter() + .map(|x| Token::FixedBytes(x.as_bytes().to_vec())) + .collect(), + ), + Token::Array( + self.leaf_proof + .iter() + .map(|x| Token::FixedBytes(x.as_bytes().to_vec())) + .collect(), + ), + Token::FixedBytes(self.range_hash.as_bytes().to_vec()), + Token::Uint(self.data_root_index), + Token::FixedBytes(self.blob_root.as_bytes().to_vec()), + Token::FixedBytes(self.bridge_root.as_bytes().to_vec()), + Token::FixedBytes(self.leaf.as_bytes().to_vec()), + Token::Uint(self.leaf_index), + ])] + } } impl AvailClient { pub async fn new(config: AvailConfig, secrets: AvailSecrets) -> anyhow::Result<Self> { - let seed_phrase = secrets - .seed_phrase - .ok_or_else(|| anyhow::anyhow!("seed phrase"))?; - let sdk_client = RawAvailClient::new(config.app_id, seed_phrase.0.expose_secret()).await?; - - Ok(Self { - config, - sdk_client: Arc::new(sdk_client), - }) + let api_client = Arc::new(reqwest::Client::new()); + match config.config.clone() { + AvailClientConfig::GasRelay(conf) => { + let gas_relay_api_key = secrets + .gas_relay_api_key + .ok_or_else(|| anyhow::anyhow!("Gas relay API key is missing"))?; + let gas_relay_client = GasRelayClient::new( + &conf.gas_relay_api_url, + gas_relay_api_key.0.expose_secret(), + conf.max_retries, + Arc::clone(&api_client), + ) + .await?; + Ok(Self { + config, + sdk_client: Arc::new(AvailClientMode::GasRelay(gas_relay_client)), + api_client, + }) + } + AvailClientConfig::FullClient(conf) => { + let seed_phrase = secrets + .seed_phrase + .ok_or_else(|| anyhow::anyhow!("Seed phrase is missing"))?; + // these unwraps are safe because we validate in protobuf config + let sdk_client = + RawAvailClient::new(conf.app_id, seed_phrase.0.expose_secret()).await?; + + Ok(Self { + config, + sdk_client: Arc::new(AvailClientMode::Default(Box::new(sdk_client))), + api_client, + }) + } + } } } @@ -39,37 +138,83 @@ impl DataAvailabilityClient for AvailClient { _: u32, // batch_number data: Vec<u8>, )
-> anyhow::Result<DispatchResponse, DAError> { - let client = WsClientBuilder::default() - .build(self.config.api_node_url.as_str()) - .await - .map_err(to_non_retriable_da_error)?; + match self.sdk_client.as_ref() { + AvailClientMode::Default(client) => { + let default_config = match &self.config.config { + AvailClientConfig::FullClient(conf) => conf, + _ => unreachable!(), // validated in protobuf config + }; + let ws_client = WsClientBuilder::default() + .build(default_config.api_node_url.clone().as_str()) + .await + .map_err(to_non_retriable_da_error)?; - let extrinsic = self - .sdk_client - .build_extrinsic(&client, data) - .await - .map_err(to_non_retriable_da_error)?; + let extrinsic = client + .build_extrinsic(&ws_client, data) + .await + .map_err(to_non_retriable_da_error)?; - let block_hash = self - .sdk_client - .submit_extrinsic(&client, extrinsic.as_str()) - .await - .map_err(to_non_retriable_da_error)?; - let tx_id = self - .sdk_client - .get_tx_id(&client, block_hash.as_str(), extrinsic.as_str()) - .await - .map_err(to_non_retriable_da_error)?; - - Ok(DispatchResponse::from(format!("{}:{}", block_hash, tx_id))) + let block_hash = client + .submit_extrinsic(&ws_client, extrinsic.as_str()) + .await + .map_err(to_non_retriable_da_error)?; + let tx_id = client + .get_tx_id(&ws_client, block_hash.as_str(), extrinsic.as_str()) + .await + .map_err(to_non_retriable_da_error)?; + Ok(DispatchResponse::from(format!("{}:{}", block_hash, tx_id))) + } + AvailClientMode::GasRelay(client) => { + let (block_hash, extrinsic_index) = client + .post_data(data) + .await + .map_err(to_retriable_da_error)?; + Ok(DispatchResponse { + blob_id: format!("{:x}:{}", block_hash, extrinsic_index), + }) + } + } } async fn get_inclusion_data( &self, - _blob_id: &str, + blob_id: &str, ) -> anyhow::Result<Option<InclusionData>, DAError> { - // TODO: implement inclusion data retrieval - Ok(Some(InclusionData { data: vec![] })) + let (block_hash, tx_idx) = blob_id.split_once(':').ok_or_else(|| DAError { + error: anyhow!("Invalid blob ID format"), + is_retriable: false, + })?; + let url = format!( + "{}/eth/proof/{}?index={}", + self.config.bridge_api_url, block_hash, tx_idx + ); + + let response = self + .api_client + .get(&url) + .timeout(Duration::from_secs(self.config.timeout as u64)) + .send() + .await + .map_err(to_retriable_da_error)?; + + let bridge_api_data = response + .json::<BridgeAPIResponse>() + .await + .map_err(to_retriable_da_error)?; + + let attestation_data: MerkleProofInput = MerkleProofInput { + data_root_proof: bridge_api_data.data_root_proof.unwrap(), + leaf_proof: bridge_api_data.leaf_proof.unwrap(), + range_hash: bridge_api_data.range_hash.unwrap(), + data_root_index: bridge_api_data.data_root_index.unwrap(), + blob_root: bridge_api_data.blob_root.unwrap(), + bridge_root: bridge_api_data.bridge_root.unwrap(), + leaf: bridge_api_data.leaf.unwrap(), + leaf_index: bridge_api_data.leaf_index.unwrap(), + }; + Ok(Some(InclusionData { + data: ethabi::encode(&attestation_data.into_tokens()), + })) } fn clone_boxed(&self) -> Box<dyn DataAvailabilityClient> { @@ -87,3 +232,10 @@ pub fn to_non_retriable_da_error(error: impl Into<anyhow::Error>) -> DAError { is_retriable: false, } } + +pub fn to_retriable_da_error(error: impl Into<anyhow::Error>) -> DAError { + DAError { + error: error.into(), + is_retriable: true, + } +}
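The blob ID produced by `dispatch_blob` above doubles as the lookup key for `get_inclusion_data`: it is `"{block_hash}:{tx_index}"`, split back apart to build the bridge API query. A small sketch of that round trip, with a placeholder bridge host:

```rust
/// Builds the inclusion-proof URL queried above, following the
/// `{bridge_api_url}/eth/proof/{block_hash}?index={tx_idx}` shape in the diff.
fn bridge_proof_url(bridge_api_url: &str, blob_id: &str) -> Option<String> {
    // Reject malformed IDs the same way the client does (missing ':').
    let (block_hash, tx_idx) = blob_id.split_once(':')?;
    Some(format!("{bridge_api_url}/eth/proof/{block_hash}?index={tx_idx}"))
}

fn main() {
    // Hypothetical values for illustration.
    let url = bridge_proof_url("https://bridge.example", "0xabc123:7").unwrap();
    assert_eq!(url, "https://bridge.example/eth/proof/0xabc123?index=7");
}
```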
diff --git a/core/node/da_clients/src/avail/sdk.rs index 002422109d0..f693280ba4a 100644 --- a/core/node/da_clients/src/avail/sdk.rs +++ b/core/node/da_clients/src/avail/sdk.rs @@ -1,18 +1,22 @@ //! Minimal reimplementation of the Avail SDK client required for the DA client implementation. //! This is considered to be a temporary solution until a mature SDK is available on crates.io -use std::fmt::Debug; +use std::{fmt::Debug, sync::Arc, time}; +use backon::{ConstantBuilder, Retryable}; +use bytes::Bytes; use jsonrpsee::{ core::client::{Client, ClientT, Subscription, SubscriptionClientT}, rpc_params, }; use parity_scale_codec::{Compact, Decode, Encode}; use scale_encode::EncodeAsFields; +use serde::{Deserialize, Serialize}; use subxt_signer::{ bip39::Mnemonic, sr25519::{Keypair, Signature}, }; +use zksync_types::H256; use crate::avail::client::to_non_retriable_da_error; @@ -287,7 +291,7 @@ impl RawAvailClient { let status = sub.next().await.transpose()?; if status.is_some() && status.as_ref().unwrap().is_object() { - if let Some(block_hash) = status.unwrap().get("inBlock") { + if let Some(block_hash) = status.unwrap().get("finalized") { break block_hash .as_str() .ok_or_else(|| anyhow::anyhow!("Invalid block hash"))? @@ -369,3 +373,95 @@ fn ss58hash(data: &[u8]) -> Vec<u8> { ctx.update(data); ctx.finalize().to_vec() } + +/// An implementation of the `DataAvailabilityClient` trait that interacts with the Avail network. +#[derive(Debug, Clone)] +pub(crate) struct GasRelayClient { + api_url: String, + api_key: String, + max_retries: usize, + api_client: Arc<reqwest::Client>, +} + +#[derive(Deserialize, Serialize, Debug, Clone)] +pub struct GasRelayAPISubmissionResponse { + submission_id: String, +} + +#[derive(Deserialize, Serialize, Debug, Clone)] +pub struct GasRelayAPIStatusResponse { + submission: GasRelayAPISubmission, +} + +#[derive(Deserialize, Serialize, Debug, Clone)] +pub struct GasRelayAPISubmission { + block_hash: Option<H256>, + extrinsic_index: Option<u64>, +} + +impl GasRelayClient { + const DEFAULT_INCLUSION_DELAY: time::Duration = time::Duration::from_secs(60); + const RETRY_DELAY: time::Duration = time::Duration::from_secs(5); + pub(crate) async fn new( + api_url: &str, + api_key: &str, + max_retries: usize, + api_client: Arc<reqwest::Client>, + ) -> anyhow::Result<Self> { + Ok(Self { + api_url: api_url.to_owned(), + api_key: api_key.to_owned(), + max_retries, + api_client, + }) + } + + pub(crate) async fn post_data(&self, data: Vec<u8>) -> anyhow::Result<(H256, u64)> { + let submit_url = format!("{}/user/submit_raw_data?token=ethereum", &self.api_url); + // send the data to the gas relay + let submit_response = self + .api_client + .post(&submit_url) + .body(Bytes::from(data)) + .header("Content-Type", "text/plain") + .header("Authorization", &self.api_key) + .send() + .await?; + + let submit_response = submit_response + .json::<GasRelayAPISubmissionResponse>() + .await?; + + let status_url = format!( + "{}/user/get_submission_info?submission_id={}", + self.api_url, submit_response.submission_id + ); + + tokio::time::sleep(Self::DEFAULT_INCLUSION_DELAY).await; + let status_response = (|| async { + self.api_client + .get(&status_url) + .header("Authorization", &self.api_key) + .send() + .await + }) + .retry( + &ConstantBuilder::default() + .with_delay(Self::RETRY_DELAY) + .with_max_times(self.max_retries), + ) + .await?; + + let status_response = status_response.json::<GasRelayAPIStatusResponse>().await?; + let (block_hash, extrinsic_index) = ( + status_response.submission.block_hash.ok_or_else(|| { + anyhow::anyhow!("Block hash not found in the response from the gas relay") + })?, + status_response.submission.extrinsic_index.ok_or_else(|| { + anyhow::anyhow!("Extrinsic index not found in the response from the gas relay") + })?, + ); + + Ok((block_hash, extrinsic_index)) + } +}
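The `GasRelayClient` above leans on `backon` for its status polling: an async closure is retried with a constant delay up to `max_retries` times. A minimal sketch of that wiring under the same crate versions the diff pins (URL and error handling are placeholders; only the retry pattern mirrors the diff):

```rust
use std::time::Duration;

use backon::{ConstantBuilder, Retryable};

/// Polls `url` until the request succeeds or the retry budget is exhausted,
/// mirroring the constant-backoff pattern used by `GasRelayClient`.
async fn poll_with_constant_backoff(
    client: &reqwest::Client,
    url: &str,
    max_retries: usize,
) -> reqwest::Result<reqwest::Response> {
    (|| async { client.get(url).send().await })
        .retry(
            &ConstantBuilder::default()
                .with_delay(Duration::from_secs(5))
                .with_max_times(max_retries),
        )
        .await
}
```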
diff --git a/core/node/db_pruner/src/tests.rs index bb05e08e411..99fbada423d 100644 --- a/core/node/db_pruner/src/tests.rs +++ b/core/node/db_pruner/src/tests.rs @@ -114,7 +114,6 @@ async fn insert_l2_blocks( l1_tx_count: 0, l2_tx_count: 0, fee_account_address: Address::repeat_byte(1), - pubdata_params: Default::default(), base_fee_per_gas: 0, gas_per_pubdata_limit: 0, batch_fee_input: Default::default(), @@ -123,6 +122,7 @@ virtual_blocks: 0, gas_limit: 0, logs_bloom: Default::default(), + pubdata_params: Default::default(), }; conn.blocks_dal() diff --git a/core/node/eth_sender/src/aggregated_operations.rs index 2dfaf594265..5271d42d3b7 100644 --- a/core/node/eth_sender/src/aggregated_operations.rs +++ b/core/node/eth_sender/src/aggregated_operations.rs @@ -3,13 +3,17 @@ use std::ops; use zksync_l1_contract_interface::i_executor::methods::{ExecuteBatches, ProveBatches}; use zksync_types::{ aggregated_operations::AggregatedActionType, commitment::L1BatchWithMetadata, - pubdata_da::PubdataDA, L1BatchNumber, ProtocolVersionId, + pubdata_da::PubdataSendingMode, L1BatchNumber, ProtocolVersionId, }; #[allow(clippy::large_enum_variant)] #[derive(Debug, Clone)] pub enum AggregatedOperation { - Commit(L1BatchWithMetadata, Vec<L1BatchWithMetadata>, PubdataDA), + Commit( + L1BatchWithMetadata, + Vec<L1BatchWithMetadata>, + PubdataSendingMode, + ), PublishProofOnchain(ProveBatches), Execute(ExecuteBatches), } diff --git a/core/node/eth_sender/src/aggregator.rs index 4d8cd47734c..e4f84948c6e 100644 --- a/core/node/eth_sender/src/aggregator.rs +++ b/core/node/eth_sender/src/aggregator.rs @@ -14,7 +14,7 @@ use zksync_types::{ helpers::unix_timestamp_ms, l1::L1Tx, protocol_version::{L1VerifierConfig, ProtocolSemanticVersion}, - pubdata_da::PubdataDA, + pubdata_da::PubdataSendingMode, L1BatchNumber, ProtocolVersionId, }; @@ -39,7 +39,7 @@ pub struct Aggregator { /// means no wait is needed: nonces will still provide the correct ordering of /// transactions.
operate_4844_mode: bool, - pubdata_da: PubdataDA, + pubdata_da: PubdataSendingMode, commitment_mode: L1BatchCommitmentMode, priority_merkle_tree: MiniMerkleTree<L1Tx>, } @@ -52,7 +52,7 @@ impl Aggregator { commitment_mode: L1BatchCommitmentMode, connection: &mut Connection<'_, Core>, ) -> anyhow::Result<Self> { - let pubdata_da = config.pubdata_sending_mode.into(); + let pubdata_da = config.pubdata_sending_mode; let priority_tree_start_index = config.priority_tree_start_index.unwrap_or(0); let priority_op_hashes = connection @@ -543,7 +543,7 @@ impl Aggregator { } } - pub fn pubdata_da(&self) -> PubdataDA { + pub fn pubdata_da(&self) -> PubdataSendingMode { self.pubdata_da } diff --git a/core/node/eth_sender/src/eth_fees_oracle.rs index 2c87848dcc3..ebd1568edb6 100644 --- a/core/node/eth_sender/src/eth_fees_oracle.rs +++ b/core/node/eth_sender/src/eth_fees_oracle.rs @@ -23,7 +23,7 @@ pub(crate) trait EthFeesOracle: 'static + Sync + Send + fmt::Debug { fn calculate_fees( &self, previous_sent_tx: &Option<TxHistory>, - time_in_mempool: u32, + time_in_mempool_in_l1_blocks: u32, operator_type: OperatorType, ) -> Result<EthFees, EthSenderError>; } @@ -32,6 +32,7 @@ pub(crate) trait EthFeesOracle: 'static + Sync + Send + fmt::Debug { pub(crate) struct GasAdjusterFeesOracle { pub gas_adjuster: Arc<dyn TxParamsProvider>, pub max_acceptable_priority_fee_in_gwei: u64, + pub time_in_mempool_in_l1_blocks_cap: u32, } impl GasAdjusterFeesOracle { @@ -80,11 +81,16 @@ impl GasAdjusterFeesOracle { fn calculate_fees_no_blob_sidecar( &self, previous_sent_tx: &Option<TxHistory>, - time_in_mempool: u32, + time_in_mempool_in_l1_blocks: u32, ) -> Result<EthFees, EthSenderError> { - // cap it at 6h to not allow nearly infinite values when a tx is stuck for a long time - let capped_time_in_mempool = min(time_in_mempool, 1800); - let mut base_fee_per_gas = self.gas_adjuster.get_base_fee(capped_time_in_mempool); + // we cap it to not allow nearly infinite values when a tx is stuck for a long time + let capped_time_in_mempool_in_l1_blocks = min( + time_in_mempool_in_l1_blocks, + self.time_in_mempool_in_l1_blocks_cap, + ); + let mut base_fee_per_gas = self + .gas_adjuster + .get_base_fee(capped_time_in_mempool_in_l1_blocks); self.assert_fee_is_not_zero(base_fee_per_gas, "base"); if let Some(previous_sent_tx) = previous_sent_tx { self.verify_base_fee_not_too_low_on_resend( @@ -162,14 +168,14 @@ impl EthFeesOracle for GasAdjusterFeesOracle { fn calculate_fees( &self, previous_sent_tx: &Option<TxHistory>, - time_in_mempool: u32, + time_in_mempool_in_l1_blocks: u32, operator_type: OperatorType, ) -> Result<EthFees, EthSenderError> { let has_blob_sidecar = operator_type == OperatorType::Blob; if has_blob_sidecar { self.calculate_fees_with_blob_sidecar(previous_sent_tx) } else { - self.calculate_fees_no_blob_sidecar(previous_sent_tx, time_in_mempool) + self.calculate_fees_no_blob_sidecar(previous_sent_tx, time_in_mempool_in_l1_blocks) } } }
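The fees-oracle change above replaces the hardcoded 1800-block (~6 h) cap with the configurable `time_in_mempool_in_l1_blocks_cap`. The clamping itself is one line; the sketch below pairs it with a made-up escalation formula to show why the cap matters (only the `min` mirrors the diff):

```rust
/// Clamps the mempool age fed into the gas-price formula so a long-stuck tx
/// cannot escalate fees without bound.
fn capped_mempool_age(time_in_mempool_in_l1_blocks: u32, cap: u32) -> u32 {
    time_in_mempool_in_l1_blocks.min(cap)
}

/// Hypothetical escalation for illustration: +10% of the base fee for every
/// 100 (capped) L1 blocks the tx has waited. The real formula lives in the
/// gas adjuster and is not shown in this diff.
fn escalated_base_fee(base_fee: u64, age_in_l1_blocks: u32, cap: u32) -> u64 {
    let age = capped_mempool_age(age_in_l1_blocks, cap) as u64;
    base_fee + base_fee / 10 * (age / 100)
}
```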
diff --git a/core/node/eth_sender/src/eth_tx_aggregator.rs index 1c618a53795..ccdc93440ac 100644 --- a/core/node/eth_sender/src/eth_tx_aggregator.rs +++ b/core/node/eth_sender/src/eth_tx_aggregator.rs @@ -19,7 +19,7 @@ use zksync_types::{ ethabi::{Function, Token}, l2_to_l1_log::UserL2ToL1Log, protocol_version::{L1VerifierConfig, PACKED_SEMVER_MINOR_MASK}, - pubdata_da::PubdataDA, + pubdata_da::PubdataSendingMode, settlement::SettlementMode, web3::{contract::Error as Web3ContractError, BlockNumber}, Address, L2ChainId, ProtocolVersionId, SLChainId, H256, U256, }; @@ -144,19 +144,19 @@ impl EthTxAggregator { } pub(super) async fn get_multicall_data(&mut self) -> Result<MulticallData, EthSenderError> { - let calldata = self.generate_calldata_for_multicall(); + let (calldata, evm_emulator_hash_requested) = self.generate_calldata_for_multicall(); let args = CallFunctionArgs::new(&self.functions.aggregate3.name, calldata).for_contract( self.l1_multicall3_address, &self.functions.multicall_contract, ); let aggregate3_result: Token = args.call((*self.eth_client).as_ref()).await?; - self.parse_multicall_data(aggregate3_result) + self.parse_multicall_data(aggregate3_result, evm_emulator_hash_requested) } // Multicall's aggregate function accepts 1 argument - arrays of different contract calls. // The role of the method below is to tokenize input for multicall, which is actually a vector of tokens. // Each token describes a specific contract call. - pub(super) fn generate_calldata_for_multicall(&self) -> Vec<Token> { + pub(super) fn generate_calldata_for_multicall(&self) -> (Vec<Token>, bool) { const ALLOW_FAILURE: bool = false; // First zksync contract call @@ -215,14 +215,31 @@ impl EthTxAggregator { calldata: get_protocol_version_input, }; - // Convert structs into tokens and return vector with them - vec![ + let mut token_vec = vec![ get_bootloader_hash_call.into_token(), get_default_aa_hash_call.into_token(), get_verifier_params_call.into_token(), get_verifier_call.into_token(), get_protocol_version_call.into_token(), - ] + ]; + + let mut evm_emulator_hash_requested = false; + let get_l2_evm_emulator_hash_input = self + .functions + .get_evm_emulator_bytecode_hash + .as_ref() + .and_then(|f| f.encode_input(&[]).ok()); + if let Some(input) = get_l2_evm_emulator_hash_input { + let call = Multicall3Call { + target: self.state_transition_chain_contract, + allow_failure: ALLOW_FAILURE, + calldata: input, + }; + token_vec.insert(2, call.into_token()); + evm_emulator_hash_requested = true; + } + + (token_vec, evm_emulator_hash_requested) } // The role of the method below is to de-tokenize multicall call's result, which is actually a token. @@ -230,6 +247,7 @@ impl EthTxAggregator { pub(super) fn parse_multicall_data( &self, token: Token, + evm_emulator_hash_requested: bool, ) -> Result<MulticallData, EthSenderError> { let parse_error = |tokens: &[Token]| { Err(EthSenderError::Parse(Web3ContractError::InvalidOutputType( @@ -238,8 +256,9 @@ impl EthTxAggregator { }; if let Token::Array(call_results) = token { - // 5 calls are aggregated in multicall - if call_results.len() != 5 { + let number_of_calls = if evm_emulator_hash_requested { 6 } else { 5 }; + // 5 or 6 calls are aggregated in multicall + if call_results.len() != number_of_calls { return parse_error(&call_results); } let mut call_results_iterator = call_results.into_iter(); @@ -268,12 +287,31 @@ impl EthTxAggregator { ))); } let default_aa = H256::from_slice(&multicall3_default_aa); + + let evm_emulator = if evm_emulator_hash_requested { + let multicall3_evm_emulator = + Multicall3Result::from_token(call_results_iterator.next().unwrap())? + .return_data; + if multicall3_evm_emulator.len() != 32 { + return Err(EthSenderError::Parse(Web3ContractError::InvalidOutputType( + format!( + "multicall3 EVM emulator hash data is not of the len of 32: {:?}", + multicall3_evm_emulator + ), + ))); + } + Some(H256::from_slice(&multicall3_evm_emulator)) + } else { + None + }; + let base_system_contracts_hashes = BaseSystemContractsHashes { bootloader, default_aa, + evm_emulator, }; - call_results_iterator.next().unwrap(); + call_results_iterator.next().unwrap(); // FIXME: why is this value requested? let multicall3_verifier_address = Multicall3Result::from_token(call_results_iterator.next().unwrap())?.return_data; @@ -446,11 +484,12 @@ impl EthTxAggregator { &self.functions.post_gateway_commit }; - let l1_batch_for_sidecar = if PubdataDA::Blobs == self.aggregator.pubdata_da() { - Some(l1_batches[0].clone()) - } else { - None - }; + let l1_batch_for_sidecar = + if PubdataSendingMode::Blobs == self.aggregator.pubdata_da() { + Some(l1_batches[0].clone()) + } else { + None + }; Self::encode_commit_data(encoding_fn, &commit_data, l1_batch_for_sidecar) }
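The aggregator change above makes the EVM-emulator bytecode-hash getter optional: when the hyperchain ABI exposes it, the extra call is spliced in at index 2 and the parser expects six results instead of five. A sketch of the layout contract between builder and parser (getter names are taken from the diff):

```rust
/// Returns the multicall layout the aggregator builds. The optional EVM
/// emulator getter is inserted at index 2, so the response parser must walk
/// the results in exactly this order.
fn multicall_layout(evm_emulator_hash_requested: bool) -> Vec<&'static str> {
    let mut calls = vec![
        "getL2BootloaderBytecodeHash",
        "getL2DefaultAccountBytecodeHash",
        "getVerifierParams",
        "getVerifier",
        "getProtocolVersion",
    ];
    if evm_emulator_hash_requested {
        calls.insert(2, "getL2EvmSimulatorBytecodeHash");
    }
    // Matches the 5-vs-6 length check in `parse_multicall_data`.
    assert_eq!(calls.len(), if evm_emulator_hash_requested { 6 } else { 5 });
    calls
}
```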
diff --git a/core/node/eth_sender/src/eth_tx_manager.rs index 450443a652d..1aa233114a0 100644 --- a/core/node/eth_sender/src/eth_tx_manager.rs +++ b/core/node/eth_sender/src/eth_tx_manager.rs @@ -48,6 +48,7 @@ impl EthTxManager { let fees_oracle = GasAdjusterFeesOracle { gas_adjuster, max_acceptable_priority_fee_in_gwei: config.max_acceptable_priority_fee_in_gwei, + time_in_mempool_in_l1_blocks_cap: config.time_in_mempool_in_l1_blocks_cap, }; let l1_interface = Box::new(RealL1Interface { ethereum_gateway, @@ -111,7 +112,7 @@ impl EthTxManager { &mut self, storage: &mut Connection<'_, Core>, tx: &EthTx, - time_in_mempool: u32, + time_in_mempool_in_l1_blocks: u32, current_block: L1BlockNumber, ) -> Result<H256, EthSenderError> { let previous_sent_tx = storage @@ -127,7 +128,7 @@ pubdata_price: _, } = self.fees_oracle.calculate_fees( &previous_sent_tx, - time_in_mempool, + time_in_mempool_in_l1_blocks, self.operator_type(tx), )?; @@ -602,13 +603,18 @@ .await? { // New gas price depends on the time this tx spent in mempool. - let time_in_mempool = l1_block_numbers.latest.0 - sent_at_block; + let time_in_mempool_in_l1_blocks = l1_block_numbers.latest.0 - sent_at_block; // We don't want to return early in case resend does not succeed - // the error is logged anyway, but early returns will prevent // sending new operations.
let _ = self - .send_eth_tx(storage, &tx, time_in_mempool, l1_block_numbers.latest) + .send_eth_tx( + storage, + &tx, + time_in_mempool_in_l1_blocks, + l1_block_numbers.latest, + ) .await?; } Ok(()) diff --git a/core/node/eth_sender/src/publish_criterion.rs b/core/node/eth_sender/src/publish_criterion.rs index 52d861ce0af..30f0820b148 100644 --- a/core/node/eth_sender/src/publish_criterion.rs +++ b/core/node/eth_sender/src/publish_criterion.rs @@ -8,7 +8,7 @@ use zksync_types::{ aggregated_operations::AggregatedActionType, commitment::{L1BatchCommitmentMode, L1BatchWithMetadata}, ethabi, - pubdata_da::PubdataDA, + pubdata_da::PubdataSendingMode, L1BatchNumber, }; @@ -202,7 +202,7 @@ impl L1BatchPublishCriterion for GasCriterion { pub struct DataSizeCriterion { pub op: AggregatedActionType, pub data_limit: usize, - pub pubdata_da: PubdataDA, + pub pubdata_da: PubdataSendingMode, pub commitment_mode: L1BatchCommitmentMode, } diff --git a/core/node/eth_sender/src/tester.rs b/core/node/eth_sender/src/tester.rs index 3a62bdc8dbf..797db40919b 100644 --- a/core/node/eth_sender/src/tester.rs +++ b/core/node/eth_sender/src/tester.rs @@ -1,7 +1,7 @@ use std::sync::Arc; use zksync_config::{ - configs::eth_sender::{ProofSendingMode, PubdataSendingMode, SenderConfig}, + configs::eth_sender::{ProofSendingMode, SenderConfig}, ContractsConfig, EthConfig, GasAdjusterConfig, }; use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; @@ -12,7 +12,7 @@ use zksync_node_test_utils::{create_l1_batch, l1_batch_metadata_to_commitment_ar use zksync_object_store::MockObjectStore; use zksync_types::{ aggregated_operations::AggregatedActionType, block::L1BatchHeader, - commitment::L1BatchCommitmentMode, eth_sender::EthTx, pubdata_da::PubdataDA, + commitment::L1BatchCommitmentMode, eth_sender::EthTx, pubdata_da::PubdataSendingMode, settlement::SettlementMode, Address, L1BatchNumber, ProtocolVersion, H256, }; @@ -23,6 +23,8 @@ use crate::{ Aggregator, EthTxAggregator, EthTxManager, }; +pub(super) const STATE_TRANSITION_CONTRACT_ADDRESS: Address = Address::repeat_byte(0xa0); + // Alias to conveniently call static methods of `ETHSender`. 
type MockEthTxManager = EthTxManager; @@ -172,7 +174,7 @@ impl EthSenderTester { .with_non_ordering_confirmation(non_ordering_confirmations) .with_call_handler(move |call, _| { assert_eq!(call.to, Some(contracts_config.l1_multicall3_addr)); - crate::tests::mock_multicall_response() + crate::tests::mock_multicall_response(call) }) .build(); gateway.advance_block_number(Self::WAIT_CONFIRMATIONS); @@ -192,7 +194,7 @@ impl EthSenderTester { .with_non_ordering_confirmation(non_ordering_confirmations) .with_call_handler(move |call, _| { assert_eq!(call.to, Some(contracts_config.l1_multicall3_addr)); - crate::tests::mock_multicall_response() + crate::tests::mock_multicall_response(call) }) .build(); l2_gateway.advance_block_number(Self::WAIT_CONFIRMATIONS); @@ -212,7 +214,7 @@ impl EthSenderTester { .with_non_ordering_confirmation(non_ordering_confirmations) .with_call_handler(move |call, _| { assert_eq!(call.to, Some(contracts_config.l1_multicall3_addr)); - crate::tests::mock_multicall_response() + crate::tests::mock_multicall_response(call) }) .build(); gateway_blobs.advance_block_number(Self::WAIT_CONFIRMATIONS); @@ -267,7 +269,7 @@ impl EthSenderTester { // ZKsync contract address Address::random(), contracts_config.l1_multicall3_addr, - Address::random(), + STATE_TRANSITION_CONTRACT_ADDRESS, Default::default(), custom_commit_sender_addr, SettlementMode::SettlesToL1, @@ -494,9 +496,9 @@ impl EthSenderTester { pub async fn save_commit_tx(&mut self, l1_batch_number: L1BatchNumber) -> EthTx { assert_eq!(l1_batch_number, self.next_l1_batch_number_to_commit); let pubdata_mode = if self.pubdata_sending_mode == PubdataSendingMode::Blobs { - PubdataDA::Blobs + PubdataSendingMode::Blobs } else { - PubdataDA::Calldata + PubdataSendingMode::Calldata }; let operation = AggregatedOperation::Commit( l1_batch_with_metadata( diff --git a/core/node/eth_sender/src/tests.rs b/core/node/eth_sender/src/tests.rs index db49564093f..aab6d2e43d7 100644 --- a/core/node/eth_sender/src/tests.rs +++ b/core/node/eth_sender/src/tests.rs @@ -1,7 +1,9 @@ use assert_matches::assert_matches; use test_casing::{test_casing, Product}; use zksync_dal::{ConnectionPool, Core, CoreDal}; -use zksync_l1_contract_interface::i_executor::methods::ExecuteBatches; +use zksync_l1_contract_interface::{ + i_executor::methods::ExecuteBatches, multicall3::Multicall3Call, Tokenizable, +}; use zksync_node_test_utils::create_l1_batch; use zksync_types::{ aggregated_operations::AggregatedActionType, @@ -9,16 +11,19 @@ use zksync_types::{ commitment::{ L1BatchCommitmentMode, L1BatchMetaParameters, L1BatchMetadata, L1BatchWithMetadata, }, + ethabi, ethabi::Token, helpers::unix_timestamp_ms, + web3, web3::contract::Error, - ProtocolVersionId, H256, + Address, ProtocolVersionId, H256, }; use crate::{ abstract_l1_interface::OperatorType, aggregated_operations::AggregatedOperation, - tester::{EthSenderTester, TestL1Batch}, + tester::{EthSenderTester, TestL1Batch, STATE_TRANSITION_CONTRACT_ADDRESS}, + zksync_functions::ZkSyncFunctions, EthSenderError, }; @@ -38,21 +43,59 @@ const COMMITMENT_MODES: [L1BatchCommitmentMode; 2] = [ L1BatchCommitmentMode::Validium, ]; -pub(crate) fn mock_multicall_response() -> Token { - Token::Array(vec![ - Token::Tuple(vec![Token::Bool(true), Token::Bytes(vec![1u8; 32])]), - Token::Tuple(vec![Token::Bool(true), Token::Bytes(vec![2u8; 32])]), - Token::Tuple(vec![Token::Bool(true), Token::Bytes(vec![3u8; 96])]), - Token::Tuple(vec![Token::Bool(true), Token::Bytes(vec![4u8; 32])]), - Token::Tuple(vec![ - Token::Bool(true), - 
Token::Bytes( +pub(crate) fn mock_multicall_response(call: &web3::CallRequest) -> Token { + let functions = ZkSyncFunctions::default(); + let evm_emulator_getter_signature = functions + .get_evm_emulator_bytecode_hash + .as_ref() + .map(ethabi::Function::short_signature); + let bootloader_signature = functions.get_l2_bootloader_bytecode_hash.short_signature(); + let default_aa_signature = functions + .get_l2_default_account_bytecode_hash + .short_signature(); + let evm_emulator_getter_signature = evm_emulator_getter_signature.as_ref().map(|sig| &sig[..]); + + let calldata = &call.data.as_ref().expect("no calldata").0; + assert_eq!(calldata[..4], functions.aggregate3.short_signature()); + let mut tokens = functions + .aggregate3 + .decode_input(&calldata[4..]) + .expect("invalid multicall"); + assert_eq!(tokens.len(), 1); + let Token::Array(tokens) = tokens.pop().unwrap() else { + panic!("Unexpected input: {tokens:?}"); + }; + + let calls = tokens.into_iter().map(Multicall3Call::from_token); + let response = calls.map(|call| { + let call = call.unwrap(); + assert_eq!(call.target, STATE_TRANSITION_CONTRACT_ADDRESS); + let output = match &call.calldata[..4] { + selector if selector == bootloader_signature => { + vec![1u8; 32] + } + selector if selector == default_aa_signature => { + vec![2u8; 32] + } + selector if Some(selector) == evm_emulator_getter_signature => { + vec![3u8; 32] + } + selector if selector == functions.get_verifier_params.short_signature() => { + vec![4u8; 96] + } + selector if selector == functions.get_verifier.short_signature() => { + vec![5u8; 32] + } + selector if selector == functions.get_protocol_version.short_signature() => { H256::from_low_u64_be(ProtocolVersionId::default() as u64) .0 - .to_vec(), - ), - ]), - ]) + .to_vec() + } + _ => panic!("unexpected call: {call:?}"), + }; + Token::Tuple(vec![Token::Bool(true), Token::Bytes(output)]) + }); + Token::Array(response.collect()) } pub(crate) fn l1_batch_with_metadata(header: L1BatchHeader) -> L1BatchWithMetadata { @@ -75,6 +118,7 @@ pub(crate) fn default_l1_batch_metadata() -> L1BatchMetadata { zkporter_is_available: false, bootloader_code_hash: H256::default(), default_aa_code_hash: H256::default(), + evm_emulator_code_hash: None, protocol_version: Some(ProtocolVersionId::default()), }, aux_data_hash: H256::default(), @@ -660,22 +704,71 @@ async fn skipped_l1_batch_in_the_middle( Ok(()) } -#[test_casing(2, COMMITMENT_MODES)] +#[test_casing(2, [false, true])] #[test_log::test(tokio::test)] -async fn test_parse_multicall_data(commitment_mode: L1BatchCommitmentMode) { +async fn parsing_multicall_data(with_evm_emulator: bool) { let tester = EthSenderTester::new( ConnectionPool::<Core>::test_pool().await, vec![100; 100], false, true, - commitment_mode, + L1BatchCommitmentMode::Rollup, ) .await; - assert!(tester + let mut mock_response = vec![ + Token::Tuple(vec![Token::Bool(true), Token::Bytes(vec![1u8; 32])]), + Token::Tuple(vec![Token::Bool(true), Token::Bytes(vec![2u8; 32])]), + Token::Tuple(vec![Token::Bool(true), Token::Bytes(vec![4u8; 96])]), + Token::Tuple(vec![Token::Bool(true), Token::Bytes(vec![5u8; 32])]), + Token::Tuple(vec![ + Token::Bool(true), + Token::Bytes( + H256::from_low_u64_be(ProtocolVersionId::latest() as u64) + .0 + .to_vec(), + ), + ]), + ]; + if with_evm_emulator { + mock_response.insert( + 2, + Token::Tuple(vec![Token::Bool(true), Token::Bytes(vec![3u8; 32])]), + ); + } + let mock_response = Token::Array(mock_response); + + let parsed = tester .aggregator
.parse_multicall_data(mock_multicall_response()) - .is_ok()); + .parse_multicall_data(mock_response, with_evm_emulator) + .unwrap(); + assert_eq!( + parsed.base_system_contracts_hashes.bootloader, + H256::repeat_byte(1) + ); + assert_eq!( + parsed.base_system_contracts_hashes.default_aa, + H256::repeat_byte(2) + ); + let expected_evm_emulator_hash = with_evm_emulator.then(|| H256::repeat_byte(3)); + assert_eq!( + parsed.base_system_contracts_hashes.evm_emulator, + expected_evm_emulator_hash + ); + assert_eq!(parsed.verifier_address, Address::repeat_byte(5)); + assert_eq!(parsed.protocol_version_id, ProtocolVersionId::latest()); +} + +#[test_log::test(tokio::test)] +async fn parsing_multicall_data_errors() { + let tester = EthSenderTester::new( + ConnectionPool::<Core>::test_pool().await, + vec![100; 100], + false, + true, + L1BatchCommitmentMode::Rollup, + ) + .await; let original_wrong_form_data = vec![ // should contain 5 tuples @@ -726,7 +819,7 @@ async fn test_parse_multicall_data(commitment_mode: L1BatchCommitmentMode) { assert_matches!( tester .aggregator - .parse_multicall_data(wrong_data_instance.clone()), + .parse_multicall_data(wrong_data_instance.clone(), true), Err(EthSenderError::Parse(Error::InvalidOutputType(_))) ); } @@ -743,6 +836,17 @@ async fn get_multicall_data(commitment_mode: L1BatchCommitmentMode) { commitment_mode, ) .await; - let multicall_data = tester.aggregator.get_multicall_data().await; - assert!(multicall_data.is_ok()); + + let data = tester.aggregator.get_multicall_data().await.unwrap(); + assert_eq!( + data.base_system_contracts_hashes.bootloader, + H256::repeat_byte(1) + ); + assert_eq!( + data.base_system_contracts_hashes.default_aa, + H256::repeat_byte(2) + ); + assert_eq!(data.base_system_contracts_hashes.evm_emulator, None); + assert_eq!(data.verifier_address, Address::repeat_byte(5)); + assert_eq!(data.protocol_version_id, ProtocolVersionId::latest()); } diff --git a/core/node/eth_sender/src/zksync_functions.rs b/core/node/eth_sender/src/zksync_functions.rs index 05c9805a4fc..f3e4998ef37 100644 --- a/core/node/eth_sender/src/zksync_functions.rs +++ b/core/node/eth_sender/src/zksync_functions.rs @@ -17,6 +17,7 @@ pub(super) struct ZkSyncFunctions { pub(super) get_l2_bootloader_bytecode_hash: Function, pub(super) get_l2_default_account_bytecode_hash: Function, pub(super) get_verifier: Function, + pub(super) get_evm_emulator_bytecode_hash: Option<Function>, pub(super) get_verifier_params: Function, pub(super) get_protocol_version: Function, @@ -37,6 +38,14 @@ fn get_function(contract: &Contract, name: &str) -> Function { .unwrap_or_else(|| panic!("{} function entry not found", name)) } +fn get_optional_function(contract: &Contract, name: &str) -> Option<Function> { + contract + .functions + .get(name) + .cloned() + .map(|mut functions| functions.pop().unwrap()) +} + impl Default for ZkSyncFunctions { fn default() -> Self { let zksync_contract = hyperchain_contract(); @@ -55,6 +64,8 @@ impl Default for ZkSyncFunctions { get_function(&zksync_contract, "getL2BootloaderBytecodeHash"); let get_l2_default_account_bytecode_hash = get_function(&zksync_contract, "getL2DefaultAccountBytecodeHash"); + let get_evm_emulator_bytecode_hash = + get_optional_function(&zksync_contract, "getL2EvmSimulatorBytecodeHash"); let get_verifier = get_function(&zksync_contract, "getVerifier"); let get_verifier_params = get_function(&zksync_contract, "getVerifierParams"); let get_protocol_version = get_function(&zksync_contract, "getProtocolVersion"); @@ -70,6 +81,7 @@ 
post_gateway_execute, get_l2_bootloader_bytecode_hash, get_l2_default_account_bytecode_hash, + get_evm_emulator_bytecode_hash, get_verifier, get_verifier_params, get_protocol_version, diff --git a/core/node/eth_watch/src/client.rs b/core/node/eth_watch/src/client.rs index 54376bae82e..ac5fc86c6e9 100644 --- a/core/node/eth_watch/src/client.rs +++ b/core/node/eth_watch/src/client.rs @@ -46,7 +46,8 @@ pub trait EthClient: 'static + fmt::Debug + Send + Sync { pub const RETRY_LIMIT: usize = 5; const TOO_MANY_RESULTS_INFURA: &str = "query returned more than"; const TOO_MANY_RESULTS_ALCHEMY: &str = "response size exceeded"; -const TOO_MANY_RESULTS_RETH: &str = "query exceeds max block range"; +const TOO_MANY_RESULTS_RETH: &str = "length limit exceeded"; +const TOO_BIG_RANGE_RETH: &str = "query exceeds max block range"; const TOO_MANY_RESULTS_CHAINSTACK: &str = "range limit exceeded"; /// Implementation of [`EthClient`] based on HTTP JSON-RPC (encapsulated via [`EthInterface`]). @@ -149,6 +150,7 @@ impl EthHttpQueryClient { if err_message.contains(TOO_MANY_RESULTS_INFURA) || err_message.contains(TOO_MANY_RESULTS_ALCHEMY) || err_message.contains(TOO_MANY_RESULTS_RETH) + || err_message.contains(TOO_BIG_RANGE_RETH) || err_message.contains(TOO_MANY_RESULTS_CHAINSTACK) { // get the numeric block ids diff --git a/core/node/eth_watch/src/tests.rs b/core/node/eth_watch/src/tests.rs index 6020b132ddb..1dc72dca3c2 100644 --- a/core/node/eth_watch/src/tests.rs +++ b/core/node/eth_watch/src/tests.rs @@ -245,8 +245,11 @@ fn build_l1_tx(serial_id: u64, eth_block: u64) -> L1Tx { received_timestamp_ms: 0, }; // Convert to abi::Transaction and back, so that canonical_tx_hash is computed. - let tx = - Transaction::try_from(abi::Transaction::try_from(Transaction::from(tx)).unwrap()).unwrap(); + let tx = Transaction::from_abi( + abi::Transaction::try_from(Transaction::from(tx)).unwrap(), + false, + ) + .unwrap(); tx.try_into().unwrap() } @@ -272,10 +275,13 @@ fn build_upgrade_tx(id: ProtocolVersionId, eth_block: u64) -> ProtocolUpgradeTx received_timestamp_ms: 0, }; // Convert to abi::Transaction and back, so that canonical_tx_hash is computed. 
- Transaction::try_from(abi::Transaction::try_from(Transaction::from(tx)).unwrap()) - .unwrap() - .try_into() - .unwrap() + Transaction::from_abi( + abi::Transaction::try_from(Transaction::from(tx)).unwrap(), + false, + ) + .unwrap() + .try_into() + .unwrap() } async fn create_test_watcher( diff --git a/core/node/fee_model/Cargo.toml b/core/node/fee_model/Cargo.toml index 8760b97d9db..a84a7c5c217 100644 --- a/core/node/fee_model/Cargo.toml +++ b/core/node/fee_model/Cargo.toml @@ -16,9 +16,7 @@ zksync_types.workspace = true zksync_dal.workspace = true zksync_config.workspace = true zksync_eth_client.workspace = true -zksync_utils.workspace = true zksync_web3_decl.workspace = true -bigdecimal.workspace = true tokio = { workspace = true, features = ["time"] } anyhow.workspace = true diff --git a/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs b/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs index e43de3e34bf..6fce46f7722 100644 --- a/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs +++ b/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs @@ -6,9 +6,12 @@ use std::{ }; use tokio::sync::watch; -use zksync_config::{configs::eth_sender::PubdataSendingMode, GasAdjusterConfig}; +use zksync_config::GasAdjusterConfig; use zksync_eth_client::EthFeeInterface; -use zksync_types::{commitment::L1BatchCommitmentMode, L1_GAS_PER_PUBDATA_BYTE, U256}; +use zksync_types::{ + commitment::L1BatchCommitmentMode, pubdata_da::PubdataSendingMode, L1_GAS_PER_PUBDATA_BYTE, + U256, +}; use zksync_web3_decl::client::{DynClient, L1, L2}; use self::metrics::METRICS; @@ -317,14 +320,14 @@ impl TxParamsProvider for GasAdjuster { // smooth out base_fee increases in general. // In other words, in order to pay less fees, we are ready to wait longer. // But the longer we wait, the more we are ready to pay. - fn get_base_fee(&self, time_in_mempool: u32) -> u64 { + fn get_base_fee(&self, time_in_mempool_in_l1_blocks: u32) -> u64 { let a = self.config.pricing_formula_parameter_a; let b = self.config.pricing_formula_parameter_b; // Currently we use an exponential formula. 
// The alternative is a linear one: - // `let scale_factor = a + b * time_in_mempool as f64;` - let scale_factor = a * b.powf(time_in_mempool as f64); + // `let scale_factor = a + b * time_in_mempool_in_l1_blocks as f64;` + let scale_factor = a * b.powf(time_in_mempool_in_l1_blocks as f64); let median = self.base_fee_statistics.median(); METRICS.median_base_fee_per_gas.set(median); let new_fee = median as f64 * scale_factor; diff --git a/core/node/fee_model/src/l1_gas_price/gas_adjuster/tests.rs b/core/node/fee_model/src/l1_gas_price/gas_adjuster/tests.rs index 47023203de0..ab649e2d7c9 100644 --- a/core/node/fee_model/src/l1_gas_price/gas_adjuster/tests.rs +++ b/core/node/fee_model/src/l1_gas_price/gas_adjuster/tests.rs @@ -1,9 +1,11 @@ use std::{collections::VecDeque, sync::RwLockReadGuard}; use test_casing::test_casing; -use zksync_config::{configs::eth_sender::PubdataSendingMode, GasAdjusterConfig}; +use zksync_config::GasAdjusterConfig; use zksync_eth_client::{clients::MockSettlementLayer, BaseFees}; -use zksync_types::{commitment::L1BatchCommitmentMode, settlement::SettlementMode}; +use zksync_types::{ + commitment::L1BatchCommitmentMode, pubdata_da::PubdataSendingMode, settlement::SettlementMode, +}; use zksync_web3_decl::client::L2; use super::{GasAdjuster, GasStatistics, GasStatisticsInner}; diff --git a/core/node/fee_model/src/l1_gas_price/mod.rs b/core/node/fee_model/src/l1_gas_price/mod.rs index 2a5d63089ca..e23bccf27ee 100644 --- a/core/node/fee_model/src/l1_gas_price/mod.rs +++ b/core/node/fee_model/src/l1_gas_price/mod.rs @@ -16,7 +16,7 @@ mod main_node_fetcher; /// This trait, as a bound, should only be used in components that actually sign and send transactions. pub trait TxParamsProvider: fmt::Debug + 'static + Send + Sync { /// Returns the recommended `max_fee_per_gas` value (EIP1559). - fn get_base_fee(&self, time_in_mempool: u32) -> u64; + fn get_base_fee(&self, time_in_mempool_in_l1_blocks: u32) -> u64; /// Returns the recommended `max_priority_fee_per_gas` value (EIP1559). fn get_priority_fee(&self) -> u64; diff --git a/core/node/fee_model/src/lib.rs b/core/node/fee_model/src/lib.rs index fe4f6a27ce2..380a279cccc 100644 --- a/core/node/fee_model/src/lib.rs +++ b/core/node/fee_model/src/lib.rs @@ -3,14 +3,9 @@ use std::{fmt, fmt::Debug, sync::Arc}; use anyhow::Context as _; use async_trait::async_trait; use zksync_dal::{ConnectionPool, Core, CoreDal}; -use zksync_types::{ - fee_model::{ - BaseTokenConversionRatio, BatchFeeInput, FeeModelConfig, FeeModelConfigV2, FeeParams, - FeeParamsV1, FeeParamsV2, L1PeggedBatchFeeModelInput, PubdataIndependentBatchFeeModelInput, - }, - U256, +use zksync_types::fee_model::{ + BaseTokenConversionRatio, BatchFeeInput, FeeModelConfig, FeeParams, FeeParamsV1, FeeParamsV2, }; -use zksync_utils::ceil_div_u256; use crate::l1_gas_price::GasAdjuster; @@ -34,13 +29,7 @@ pub trait BatchFeeModelInputProvider: fmt::Debug + 'static + Send + Sync { l1_pubdata_price_scale_factor: f64, ) -> anyhow::Result<BatchFeeInput> { let params = self.get_fee_model_params(); - Ok( - <dyn BatchFeeModelInputProvider>::default_batch_fee_input_scaled( - params, - l1_gas_price_scale_factor, - l1_pubdata_price_scale_factor, - ), - ) + Ok(params.scale(l1_gas_price_scale_factor, l1_pubdata_price_scale_factor)) } /// Returns the fee model parameters using the denomination of the base token used (WEI for ETH). 
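// The renamed `time_in_mempool_in_l1_blocks` parameter above feeds an exponential
// repricing curve. A self-contained sketch of that formula (the parameter values here
// are hypothetical, not the configured `pricing_formula_parameter_a`/`_b` defaults):
fn scale_factor(a: f64, b: f64, time_in_mempool_in_l1_blocks: u32) -> f64 {
    // With `b > 1`, every additional L1 block a transaction waits in the mempool
    // multiplies the fee estimate by `b`; `a` is a constant initial markup.
    a * b.powf(time_in_mempool_in_l1_blocks as f64)
}

fn main() {
    let (a, b) = (1.1, 1.05); // hypothetical parameters
    for blocks in [0_u32, 10, 50] {
        // The longer we are prepared to wait, the more we are prepared to pay.
        println!("{blocks} L1 blocks in mempool -> x{:.3}", scale_factor(a, b, blocks));
    }
}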
@@ -48,27 +37,6 @@ pub trait BatchFeeModelInputProvider: fmt::Debug + 'static + Send + Sync { } impl dyn BatchFeeModelInputProvider { - /// Provides the default implementation of `get_batch_fee_input_scaled()` given [`FeeParams`]. - pub fn default_batch_fee_input_scaled( - params: FeeParams, - l1_gas_price_scale_factor: f64, - l1_pubdata_price_scale_factor: f64, - ) -> BatchFeeInput { - match params { - FeeParams::V1(params) => BatchFeeInput::L1Pegged(compute_batch_fee_model_input_v1( - params, - l1_gas_price_scale_factor, - )), - FeeParams::V2(params) => BatchFeeInput::PubdataIndependent( - clip_batch_fee_model_input_v2(compute_batch_fee_model_input_v2( - params, - l1_gas_price_scale_factor, - l1_pubdata_price_scale_factor, - )), - ), - } - } - /// Returns the batch fee input as-is, i.e. without any scaling for the L1 gas and pubdata prices. pub async fn get_batch_fee_input(&self) -> anyhow::Result<BatchFeeInput> { self.get_batch_fee_input_scaled(1.0, 1.0).await @@ -168,122 +136,6 @@ impl BatchFeeModelInputProvider for ApiFeeInputProvider { } } -/// Calculates the batch fee input based on the main node parameters. -/// This function uses the `V1` fee model, i.e. where the pubdata price does not include the proving costs. -fn compute_batch_fee_model_input_v1( - params: FeeParamsV1, - l1_gas_price_scale_factor: f64, -) -> L1PeggedBatchFeeModelInput { - let l1_gas_price = (params.l1_gas_price as f64 * l1_gas_price_scale_factor) as u64; - - L1PeggedBatchFeeModelInput { - l1_gas_price, - fair_l2_gas_price: params.config.minimal_l2_gas_price, - } -} - -/// Calculates the batch fee input based on the main node parameters. -/// This function uses the `V2` fee model, i.e. where the pubdata price does not include the proving costs. -fn compute_batch_fee_model_input_v2( - params: FeeParamsV2, - l1_gas_price_scale_factor: f64, - l1_pubdata_price_scale_factor: f64, -) -> PubdataIndependentBatchFeeModelInput { - let config = params.config(); - let l1_gas_price = params.l1_gas_price(); - let l1_pubdata_price = params.l1_pubdata_price(); - - let FeeModelConfigV2 { - minimal_l2_gas_price, - compute_overhead_part, - pubdata_overhead_part, - batch_overhead_l1_gas, - max_gas_per_batch, - max_pubdata_per_batch, - } = config; - - // Firstly, we scale the gas price and pubdata price in case it is needed. - let l1_gas_price = (l1_gas_price as f64 * l1_gas_price_scale_factor) as u64; - let l1_pubdata_price = (l1_pubdata_price as f64 * l1_pubdata_price_scale_factor) as u64; - - // While the final results of the calculations are not expected to have any overflows, the intermediate computations - // might, so we use U256 for them. - let l1_batch_overhead_wei = U256::from(l1_gas_price) * U256::from(batch_overhead_l1_gas); - - let fair_l2_gas_price = { - // Firstly, we calculate which part of the overall overhead each unit of L2 gas should cover. - let l1_batch_overhead_per_gas = - ceil_div_u256(l1_batch_overhead_wei, U256::from(max_gas_per_batch)); - - // Then, we multiply by the `compute_overhead_part` to get the overhead for the computation for each gas. - // Also, this means that if we almost never close batches because of compute, the `compute_overhead_part` should be zero and so - // it is possible that the computation costs include for no overhead. - let gas_overhead_wei = - (l1_batch_overhead_per_gas.as_u64() as f64 * compute_overhead_part) as u64; - - // We sum up the minimal L2 gas price (i.e. the raw prover/compute cost of a single L2 gas) and the overhead for batch being closed. 
- minimal_l2_gas_price + gas_overhead_wei - }; - - let fair_pubdata_price = { - // Firstly, we calculate which part of the overall overhead each pubdata byte should cover. - let l1_batch_overhead_per_pubdata = - ceil_div_u256(l1_batch_overhead_wei, U256::from(max_pubdata_per_batch)); - - // Then, we multiply by the `pubdata_overhead_part` to get the overhead for each pubdata byte. - // Also, this means that if we almost never close batches because of pubdata, the `pubdata_overhead_part` should be zero and so - // it is possible that the pubdata costs include no overhead. - let pubdata_overhead_wei = - (l1_batch_overhead_per_pubdata.as_u64() as f64 * pubdata_overhead_part) as u64; - - // We sum up the raw L1 pubdata price (i.e. the expected price of publishing a single pubdata byte) and the overhead for batch being closed. - l1_pubdata_price + pubdata_overhead_wei - }; - - PubdataIndependentBatchFeeModelInput { - l1_gas_price, - fair_l2_gas_price, - fair_pubdata_price, - } -} - -/// Bootloader places limitations on fair_l2_gas_price and fair_pubdata_price. -/// (MAX_ALLOWED_FAIR_L2_GAS_PRICE and MAX_ALLOWED_FAIR_PUBDATA_PRICE in bootloader code respectively) -/// Server needs to clip this prices in order to allow chain continues operation at a loss. The alternative -/// would be to stop accepting the transactions until the conditions improve. -/// TODO (PE-153): to be removed when bootloader limitation is removed -fn clip_batch_fee_model_input_v2( - fee_model: PubdataIndependentBatchFeeModelInput, -) -> PubdataIndependentBatchFeeModelInput { - /// MAX_ALLOWED_FAIR_L2_GAS_PRICE - const MAXIMUM_L2_GAS_PRICE: u64 = 10_000_000_000_000; - /// MAX_ALLOWED_FAIR_PUBDATA_PRICE - const MAXIMUM_PUBDATA_PRICE: u64 = 1_000_000_000_000_000; - PubdataIndependentBatchFeeModelInput { - l1_gas_price: fee_model.l1_gas_price, - fair_l2_gas_price: if fee_model.fair_l2_gas_price < MAXIMUM_L2_GAS_PRICE { - fee_model.fair_l2_gas_price - } else { - tracing::warn!( - "Fair l2 gas price {} exceeds maximum. Limitting to {}", - fee_model.fair_l2_gas_price, - MAXIMUM_L2_GAS_PRICE - ); - MAXIMUM_L2_GAS_PRICE - }, - fair_pubdata_price: if fee_model.fair_pubdata_price < MAXIMUM_PUBDATA_PRICE { - fee_model.fair_pubdata_price - } else { - tracing::warn!( - "Fair pubdata price {} exceeds maximum. Limitting to {}", - fee_model.fair_pubdata_price, - MAXIMUM_PUBDATA_PRICE - ); - MAXIMUM_PUBDATA_PRICE - }, - } -} - /// Mock [`BatchFeeModelInputProvider`] implementation that returns a constant value. /// Intended to be used in tests only. #[derive(Debug)] @@ -307,308 +159,17 @@ mod tests { use std::num::NonZeroU64; use l1_gas_price::GasAdjusterClient; - use zksync_config::{configs::eth_sender::PubdataSendingMode, GasAdjusterConfig}; + use zksync_config::GasAdjusterConfig; use zksync_eth_client::{clients::MockSettlementLayer, BaseFees}; - use zksync_types::{commitment::L1BatchCommitmentMode, fee_model::BaseTokenConversionRatio}; + use zksync_types::{ + commitment::L1BatchCommitmentMode, + fee_model::{BaseTokenConversionRatio, FeeModelConfigV2}, + pubdata_da::PubdataSendingMode, + U256, + }; use super::*; - // To test that overflow never happens, we'll use giant L1 gas price, i.e. - // almost realistic very large value of 100k gwei. Since it is so large, we'll also - // use it for the L1 pubdata price. - const GWEI: u64 = 1_000_000_000; - const GIANT_L1_GAS_PRICE: u64 = 100_000 * GWEI; - - // As a small L2 gas price we'll use the value of 1 wei. 
- const SMALL_L1_GAS_PRICE: u64 = 1; - - #[test] - fn test_compute_batch_fee_model_input_v2_giant_numbers() { - let config = FeeModelConfigV2 { - minimal_l2_gas_price: GIANT_L1_GAS_PRICE, - // We generally don't expect those values to be larger than 1. Still, in theory the operator - // may need to set higher values in extreme cases. - compute_overhead_part: 5.0, - pubdata_overhead_part: 5.0, - // The batch overhead would likely never grow beyond that - batch_overhead_l1_gas: 1_000_000, - // Let's imagine that for some reason the limit is relatively small - max_gas_per_batch: 50_000_000, - // The pubdata will likely never go below that - max_pubdata_per_batch: 100_000, - }; - - let params = FeeParamsV2::new( - config, - GIANT_L1_GAS_PRICE, - GIANT_L1_GAS_PRICE, - BaseTokenConversionRatio::default(), - ); - - // We'll use scale factor of 3.0 - let input = compute_batch_fee_model_input_v2(params, 3.0, 3.0); - - assert_eq!(input.l1_gas_price, GIANT_L1_GAS_PRICE * 3); - assert_eq!(input.fair_l2_gas_price, 130_000_000_000_000); - assert_eq!(input.fair_pubdata_price, 15_300_000_000_000_000); - } - - #[test] - fn test_compute_batch_fee_model_input_v2_small_numbers() { - // Here we assume that the operator wants to make the lives of users as cheap as possible. - let config = FeeModelConfigV2 { - minimal_l2_gas_price: SMALL_L1_GAS_PRICE, - compute_overhead_part: 0.0, - pubdata_overhead_part: 0.0, - batch_overhead_l1_gas: 0, - max_gas_per_batch: 50_000_000, - max_pubdata_per_batch: 100_000, - }; - - let params = FeeParamsV2::new( - config, - SMALL_L1_GAS_PRICE, - SMALL_L1_GAS_PRICE, - BaseTokenConversionRatio::default(), - ); - - let input = - clip_batch_fee_model_input_v2(compute_batch_fee_model_input_v2(params, 1.0, 1.0)); - - assert_eq!(input.l1_gas_price, SMALL_L1_GAS_PRICE); - assert_eq!(input.fair_l2_gas_price, SMALL_L1_GAS_PRICE); - assert_eq!(input.fair_pubdata_price, SMALL_L1_GAS_PRICE); - } - - #[test] - fn test_compute_batch_fee_model_input_v2_only_pubdata_overhead() { - // Here we use sensible config, but when only pubdata is used to close the batch - let config = FeeModelConfigV2 { - minimal_l2_gas_price: 100_000_000_000, - compute_overhead_part: 0.0, - pubdata_overhead_part: 1.0, - batch_overhead_l1_gas: 700_000, - max_gas_per_batch: 500_000_000, - max_pubdata_per_batch: 100_000, - }; - - let params = FeeParamsV2::new( - config, - GIANT_L1_GAS_PRICE, - GIANT_L1_GAS_PRICE, - BaseTokenConversionRatio::default(), - ); - - let input = - clip_batch_fee_model_input_v2(compute_batch_fee_model_input_v2(params, 1.0, 1.0)); - assert_eq!(input.l1_gas_price, GIANT_L1_GAS_PRICE); - // The fair L2 gas price is identical to the minimal one. - assert_eq!(input.fair_l2_gas_price, 100_000_000_000); - // The fair pubdata price is the minimal one plus the overhead. 
- assert_eq!(input.fair_pubdata_price, 800_000_000_000_000); - } - - #[test] - fn test_compute_baxtch_fee_model_input_v2_only_compute_overhead() { - // Here we use sensible config, but when only compute is used to close the batch - let config = FeeModelConfigV2 { - minimal_l2_gas_price: 100_000_000_000, - compute_overhead_part: 1.0, - pubdata_overhead_part: 0.0, - batch_overhead_l1_gas: 700_000, - max_gas_per_batch: 500_000_000, - max_pubdata_per_batch: 100_000, - }; - - let params = FeeParamsV2::new( - config, - GIANT_L1_GAS_PRICE, - GIANT_L1_GAS_PRICE, - BaseTokenConversionRatio::default(), - ); - - let input = compute_batch_fee_model_input_v2(params, 1.0, 1.0); - assert_eq!(input.l1_gas_price, GIANT_L1_GAS_PRICE); - // The fair L2 gas price is identical to the minimal one, plus the overhead - assert_eq!(input.fair_l2_gas_price, 240_000_000_000); - // The fair pubdata price is equal to the original one. - assert_eq!(input.fair_pubdata_price, GIANT_L1_GAS_PRICE); - } - - #[test] - fn test_compute_batch_fee_model_input_v2_param_tweaking() { - // In this test we generally checking that each param behaves as expected - let base_config = FeeModelConfigV2 { - minimal_l2_gas_price: 100_000_000_000, - compute_overhead_part: 0.5, - pubdata_overhead_part: 0.5, - batch_overhead_l1_gas: 700_000, - max_gas_per_batch: 500_000_000, - max_pubdata_per_batch: 100_000, - }; - - let base_params = FeeParamsV2::new( - base_config, - 1_000_000_000, - 1_000_000_000, - BaseTokenConversionRatio::default(), - ); - - let base_input = compute_batch_fee_model_input_v2(base_params, 1.0, 1.0); - - let base_input_larger_l1_gas_price = compute_batch_fee_model_input_v2( - FeeParamsV2::new( - base_config, - 2_000_000_000, // double the L1 gas price - 1_000_000_000, - BaseTokenConversionRatio::default(), - ), - 1.0, - 1.0, - ); - let base_input_scaled_l1_gas_price = - compute_batch_fee_model_input_v2(base_params, 2.0, 1.0); - assert_eq!( - base_input_larger_l1_gas_price, base_input_scaled_l1_gas_price, - "Scaling has the correct effect for the L1 gas price" - ); - assert!( - base_input.fair_l2_gas_price < base_input_larger_l1_gas_price.fair_l2_gas_price, - "L1 gas price increase raises L2 gas price" - ); - assert!( - base_input.fair_pubdata_price < base_input_larger_l1_gas_price.fair_pubdata_price, - "L1 gas price increase raises pubdata price" - ); - - let base_input_larger_pubdata_price = compute_batch_fee_model_input_v2( - FeeParamsV2::new( - base_config, - 1_000_000_000, - 2_000_000_000, // double the L1 pubdata price - BaseTokenConversionRatio::default(), - ), - 1.0, - 1.0, - ); - let base_input_scaled_pubdata_price = - compute_batch_fee_model_input_v2(base_params, 1.0, 2.0); - assert_eq!( - base_input_larger_pubdata_price, base_input_scaled_pubdata_price, - "Scaling has the correct effect for the pubdata price" - ); - assert_eq!( - base_input.fair_l2_gas_price, base_input_larger_pubdata_price.fair_l2_gas_price, - "L1 pubdata increase has no effect on L2 gas price" - ); - assert!( - base_input.fair_pubdata_price < base_input_larger_pubdata_price.fair_pubdata_price, - "Pubdata price increase raises pubdata price" - ); - - let base_input_larger_max_gas = compute_batch_fee_model_input_v2( - FeeParamsV2::new( - FeeModelConfigV2 { - max_gas_per_batch: base_config.max_gas_per_batch * 2, - ..base_config - }, - base_params.l1_gas_price(), - base_params.l1_pubdata_price(), - BaseTokenConversionRatio::default(), - ), - 1.0, - 1.0, - ); - assert!( - base_input.fair_l2_gas_price > base_input_larger_max_gas.fair_l2_gas_price, - 
"Max gas increase lowers L2 gas price" - ); - assert_eq!( - base_input.fair_pubdata_price, base_input_larger_max_gas.fair_pubdata_price, - "Max gas increase has no effect on pubdata price" - ); - - let base_input_larger_max_pubdata = compute_batch_fee_model_input_v2( - FeeParamsV2::new( - FeeModelConfigV2 { - max_pubdata_per_batch: base_config.max_pubdata_per_batch * 2, - ..base_config - }, - base_params.l1_gas_price(), - base_params.l1_pubdata_price(), - BaseTokenConversionRatio::default(), - ), - 1.0, - 1.0, - ); - assert_eq!( - base_input.fair_l2_gas_price, base_input_larger_max_pubdata.fair_l2_gas_price, - "Max pubdata increase has no effect on L2 gas price" - ); - assert!( - base_input.fair_pubdata_price > base_input_larger_max_pubdata.fair_pubdata_price, - "Max pubdata increase lowers pubdata price" - ); - } - - #[test] - fn test_compute_batch_fee_model_input_v2_gas_price_over_limit_due_to_l1_gas() { - // In this test we check the gas price limit works as expected - let config = FeeModelConfigV2 { - minimal_l2_gas_price: 100 * GWEI, - compute_overhead_part: 0.5, - pubdata_overhead_part: 0.5, - batch_overhead_l1_gas: 700_000, - max_gas_per_batch: 500_000_000, - max_pubdata_per_batch: 100_000, - }; - - let l1_gas_price = 1_000_000_000 * GWEI; - let params = FeeParamsV2::new( - config, - l1_gas_price, - GIANT_L1_GAS_PRICE, - BaseTokenConversionRatio::default(), - ); - - let input = - clip_batch_fee_model_input_v2(compute_batch_fee_model_input_v2(params, 1.0, 1.0)); - assert_eq!(input.l1_gas_price, l1_gas_price); - // The fair L2 gas price is identical to the maximum - assert_eq!(input.fair_l2_gas_price, 10_000 * GWEI); - assert_eq!(input.fair_pubdata_price, 1_000_000 * GWEI); - } - - #[test] - fn test_compute_batch_fee_model_input_v2_gas_price_over_limit_due_to_conversion_rate() { - // In this test we check the gas price limit works as expected - let config = FeeModelConfigV2 { - minimal_l2_gas_price: GWEI, - compute_overhead_part: 0.5, - pubdata_overhead_part: 0.5, - batch_overhead_l1_gas: 700_000, - max_gas_per_batch: 500_000_000, - max_pubdata_per_batch: 100_000, - }; - - let params = FeeParamsV2::new( - config, - GWEI, - 2 * GWEI, - BaseTokenConversionRatio { - numerator: NonZeroU64::new(3_000_000).unwrap(), - denominator: NonZeroU64::new(1).unwrap(), - }, - ); - - let input = - clip_batch_fee_model_input_v2(compute_batch_fee_model_input_v2(params, 1.0, 1.0)); - assert_eq!(input.l1_gas_price, 3_000_000 * GWEI); - // The fair L2 gas price is identical to the maximum - assert_eq!(input.fair_l2_gas_price, 10_000 * GWEI); - assert_eq!(input.fair_pubdata_price, 1_000_000 * GWEI); - } - #[derive(Debug, Clone)] struct DummyTokenRatioProvider { ratio: BaseTokenConversionRatio, diff --git a/core/node/genesis/src/lib.rs b/core/node/genesis/src/lib.rs index 708297b08aa..e549ed5eba1 100644 --- a/core/node/genesis/src/lib.rs +++ b/core/node/genesis/src/lib.rs @@ -115,6 +115,7 @@ impl GenesisParams { default_aa: config .default_aa_hash .ok_or(GenesisError::MalformedConfig("default_aa_hash"))?, + evm_emulator: config.evm_emulator_hash, }; if base_system_contracts_hashes != base_system_contracts.hashes() { return Err(GenesisError::BaseSystemContractsHashes(Box::new( @@ -135,15 +136,18 @@ impl GenesisParams { } pub fn load_genesis_params(config: GenesisConfig) -> Result { - let base_system_contracts = BaseSystemContracts::load_from_disk(); - let system_contracts = get_system_smart_contracts(); + let mut base_system_contracts = BaseSystemContracts::load_from_disk(); + if 
config.evm_emulator_hash.is_some() { + base_system_contracts = base_system_contracts.with_latest_evm_emulator(); + } + let system_contracts = get_system_smart_contracts(config.evm_emulator_hash.is_some()); Self::from_genesis_config(config, base_system_contracts, system_contracts) } pub fn mock() -> Self { Self { base_system_contracts: BaseSystemContracts::load_from_disk(), - system_contracts: get_system_smart_contracts(), + system_contracts: get_system_smart_contracts(false), config: mock_genesis_config(), } } @@ -183,6 +187,7 @@ pub fn mock_genesis_config() -> GenesisConfig { genesis_commitment: Some(H256::default()), bootloader_hash: Some(base_system_contracts_hashes.bootloader), default_aa_hash: Some(base_system_contracts_hashes.default_aa), + evm_emulator_hash: base_system_contracts_hashes.evm_emulator, l1_chain_id: L1ChainId(9), sl_chain_id: None, l2_chain_id: L2ChainId::default(), @@ -246,6 +251,7 @@ pub async fn insert_genesis_batch( .config .default_aa_hash .ok_or(GenesisError::MalformedConfig("default_aa_hash"))?, + evm_emulator: genesis_params.config.evm_emulator_hash, }; let commitment_input = CommitmentInput::for_genesis_batch( genesis_root_hash, @@ -404,6 +410,7 @@ pub async fn create_genesis_l1_batch( base_system_contracts.hashes(), protocol_version.minor, ); + let batch_fee_input = BatchFeeInput::pubdata_independent(0, 0, 0); let genesis_l2_block_header = L2BlockHeader { number: L2BlockNumber(0), @@ -413,14 +420,14 @@ pub async fn create_genesis_l1_batch( l2_tx_count: 0, fee_account_address: Default::default(), base_fee_per_gas: 0, - pubdata_params: Default::default(), gas_per_pubdata_limit: get_max_gas_per_pubdata_byte(protocol_version.minor.into()), - batch_fee_input: BatchFeeInput::l1_pegged(0, 0), + batch_fee_input, base_system_contracts_hashes: base_system_contracts.hashes(), protocol_version: Some(protocol_version.minor), virtual_blocks: 0, gas_limit: 0, logs_bloom: Bloom::zero(), + pubdata_params: Default::default(), }; let mut transaction = storage.start_transaction().await?; @@ -431,7 +438,11 @@ pub async fn create_genesis_l1_batch( .await?; transaction .blocks_dal() - .insert_l1_batch( + .insert_l1_batch(genesis_l1_batch_header.to_unsealed_header(batch_fee_input)) + .await?; + transaction + .blocks_dal() + .mark_l1_batch_as_sealed( &genesis_l1_batch_header, &[], BlockGasCount::default(), diff --git a/core/node/genesis/src/utils.rs b/core/node/genesis/src/utils.rs index b3dc34dd8da..62be43a0fe7 100644 --- a/core/node/genesis/src/utils.rs +++ b/core/node/genesis/src/utils.rs @@ -130,7 +130,8 @@ pub(super) async fn insert_base_system_contracts_to_factory_deps( contracts: &BaseSystemContracts, ) -> Result<(), GenesisError> { let factory_deps = [&contracts.bootloader, &contracts.default_aa] - .iter() + .into_iter() + .chain(contracts.evm_emulator.as_ref()) .map(|c| (c.hash, be_words_to_bytes(&c.code))) .collect(); diff --git a/core/node/metadata_calculator/src/api_server/metrics.rs b/core/node/metadata_calculator/src/api_server/metrics.rs index d185861d07c..92f948e0970 100644 --- a/core/node/metadata_calculator/src/api_server/metrics.rs +++ b/core/node/metadata_calculator/src/api_server/metrics.rs @@ -9,6 +9,8 @@ use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Histogram, Metrics pub(super) enum MerkleTreeApiMethod { Info, GetProofs, + GetNodes, + GetStaleKeys, } /// Metrics for Merkle tree API. 
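// The two metric labels added above track the `/debug/nodes` and `/debug/stale-keys`
// routes wired into the tree API in `api_server/mod.rs` below. A minimal sketch of
// exercising them over HTTP, mirroring the request bodies used in the API tests further
// down; the address is hypothetical, and the snippet assumes the `reqwest` (with its
// `json` feature), `serde_json` and `anyhow` crates:
async fn query_tree_debug_endpoints(client: &reqwest::Client) -> anyhow::Result<()> {
    // Node keys are hex-encoded versioned keys; `"0:"` denotes the root at version 0.
    let nodes: serde_json::Value = client
        .post("http://127.0.0.1:3072/debug/nodes")
        .json(&serde_json::json!({ "keys": ["0:", "0:0"] }))
        .send()
        .await?
        .error_for_status()?
        .json()
        .await?;
    println!("raw nodes: {nodes:#}");

    // Stale keys are reported per L1 batch; at least the overwritten root is expected.
    let stale_keys: serde_json::Value = client
        .post("http://127.0.0.1:3072/debug/stale-keys")
        .json(&serde_json::json!({ "l1_batch_number": 1 }))
        .send()
        .await?
        .error_for_status()?
        .json()
        .await?;
    println!("stale keys: {stale_keys:#}");
    Ok(())
}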
diff --git a/core/node/metadata_calculator/src/api_server/mod.rs b/core/node/metadata_calculator/src/api_server/mod.rs index 6f46e8aeea8..4612d859a3d 100644 --- a/core/node/metadata_calculator/src/api_server/mod.rs +++ b/core/node/metadata_calculator/src/api_server/mod.rs @@ -1,6 +1,6 @@ //! Primitive Merkle tree API used internally to fetch proofs. -use std::{fmt, future::Future, net::SocketAddr, pin::Pin}; +use std::{collections::HashMap, fmt, future::Future, net::SocketAddr, pin::Pin}; use anyhow::Context as _; use async_trait::async_trait; @@ -10,12 +10,16 @@ use axum::{ response::{IntoResponse, Response}, routing, Json, Router, }; -use serde::{Deserialize, Serialize}; +use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; use tokio::sync::watch; use zksync_crypto_primitives::hasher::blake2::Blake2Hasher; use zksync_health_check::{CheckHealth, Health, HealthStatus}; -use zksync_merkle_tree::NoVersionError; -use zksync_types::{L1BatchNumber, H256, U256}; +use zksync_merkle_tree::{ + unstable::{NodeKey, RawNode}, + NoVersionError, ValueHash, +}; +use zksync_types::{web3, L1BatchNumber, H256, U256}; +use zksync_utils::u256_to_h256; use self::metrics::{MerkleTreeApiMethod, API_METRICS}; use crate::{AsyncTreeReader, LazyAsyncTreeReader, MerkleTreeInfo}; @@ -77,6 +81,117 @@ impl TreeEntryWithProof { } } +#[derive(Debug, PartialEq, Eq, Hash)] +struct HexNodeKey(NodeKey); + +impl Serialize for HexNodeKey { + fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> { + serializer.serialize_str(&self.0.to_string()) + } +} + +impl<'de> Deserialize<'de> for HexNodeKey { + fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> { + struct HexNodeKeyVisitor; + + impl de::Visitor<'_> for HexNodeKeyVisitor { + type Value = HexNodeKey; + + fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter.write_str("hex-encoded versioned key like `123:c0ffee`") + } + + fn visit_str<E: de::Error>(self, v: &str) -> Result<Self::Value, E> { + v.parse().map(HexNodeKey).map_err(de::Error::custom) + } + } + + deserializer.deserialize_str(HexNodeKeyVisitor) + } +} + +#[derive(Debug, Serialize)] +struct ApiLeafNode { + full_key: H256, + value_hash: H256, + leaf_index: u64, +} + +#[derive(Debug, Serialize)] +struct ApiChildRef { + hash: ValueHash, + version: u64, + is_leaf: bool, +} + +#[derive(Debug, Serialize)] +#[serde(transparent)] +struct ApiInternalNode(HashMap<char, ApiChildRef>); + +#[derive(Debug, Serialize)] +struct ApiRawNode { + raw: web3::Bytes, + #[serde(skip_serializing_if = "Option::is_none")] + leaf: Option<ApiLeafNode>, + #[serde(skip_serializing_if = "Option::is_none")] + internal: Option<ApiInternalNode>, +} + +impl From<RawNode> for ApiRawNode { + fn from(node: RawNode) -> Self { + Self { + raw: web3::Bytes(node.raw), + leaf: node.leaf.map(|leaf| ApiLeafNode { + full_key: u256_to_h256(leaf.full_key), + value_hash: leaf.value_hash, + leaf_index: leaf.leaf_index, + }), + internal: node.internal.map(|internal| { + ApiInternalNode( + internal + .children() + .map(|(nibble, child_ref)| { + let nibble = if nibble < 10 { + b'0' + nibble + } else { + b'a' + nibble - 10 + }; + ( + char::from(nibble), + ApiChildRef { + hash: child_ref.hash, + version: child_ref.version, + is_leaf: child_ref.is_leaf, + }, + ) + }) + .collect(), + ) + }), + } + } +} + +#[derive(Debug, Deserialize)] +struct TreeNodesRequest { + keys: Vec<HexNodeKey>, +} + +#[derive(Debug, Serialize)] +struct TreeNodesResponse { + nodes: HashMap<HexNodeKey, ApiRawNode>, +} + +#[derive(Debug, Deserialize)] +struct StaleKeysRequest { + l1_batch_number: L1BatchNumber, +} + +#[derive(Debug, Serialize)] +struct StaleKeysResponse { + stale_keys: Vec<HexNodeKey>, +} +
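// The DTOs above (de)serialize tree node keys as `"<version>:<hex nibbles>"` strings
// via `HexNodeKey`. A quick round-trip sketch (assumes `serde_json`; the `"0:"` form
// for the root at version 0 matches the keys posted in the API tests below):
fn hex_node_key_roundtrip() -> serde_json::Result<()> {
    let key: HexNodeKey = serde_json::from_str(r#""0:""#)?;
    // Serialization goes through `NodeKey`'s `Display` impl, so the string round-trips.
    assert_eq!(serde_json::to_string(&key)?, r#""0:""#);
    Ok(())
}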
/// Server-side tree API error. #[derive(Debug)] enum TreeApiServerError { @@ -343,6 +458,35 @@ impl AsyncTreeReader { Ok(Json(response)) } + async fn get_nodes_handler( + State(this): State<Self>, + Json(request): Json<TreeNodesRequest>, + ) -> Json<TreeNodesResponse> { + let latency = API_METRICS.latency[&MerkleTreeApiMethod::GetNodes].start(); + let keys: Vec<_> = request.keys.iter().map(|key| key.0).collect(); + let nodes = this.clone().raw_nodes(keys).await; + let nodes = request + .keys + .into_iter() + .zip(nodes) + .filter_map(|(key, node)| Some((key, node?.into()))) + .collect(); + let response = TreeNodesResponse { nodes }; + latency.observe(); + Json(response) + } + + async fn get_stale_keys_handler( + State(this): State<Self>, + Json(request): Json<StaleKeysRequest>, + ) -> Json<StaleKeysResponse> { + let latency = API_METRICS.latency[&MerkleTreeApiMethod::GetStaleKeys].start(); + let stale_keys = this.clone().raw_stale_keys(request.l1_batch_number).await; + let stale_keys = stale_keys.into_iter().map(HexNodeKey).collect(); + latency.observe(); + Json(StaleKeysResponse { stale_keys }) + } + async fn create_api_server( self, bind_address: &SocketAddr, @@ -353,6 +497,11 @@ impl AsyncTreeReader { let app = Router::new() .route("/", routing::get(Self::info_handler)) .route("/proofs", routing::post(Self::get_proofs_handler)) + .route("/debug/nodes", routing::post(Self::get_nodes_handler)) + .route( + "/debug/stale-keys", + routing::post(Self::get_stale_keys_handler), + ) .with_state(self); let listener = tokio::net::TcpListener::bind(bind_address) @@ -369,8 +518,8 @@ impl AsyncTreeReader { } tracing::info!("Stop signal received, Merkle tree API server is shutting down"); }) - .await - .context("Merkle tree API server failed")?; + .await + .context("Merkle tree API server failed")?; tracing::info!("Merkle tree API server shut down"); Ok(()) diff --git a/core/node/metadata_calculator/src/api_server/tests.rs b/core/node/metadata_calculator/src/api_server/tests.rs index 42a3152e6b5..d5e8f328294 100644 --- a/core/node/metadata_calculator/src/api_server/tests.rs +++ b/core/node/metadata_calculator/src/api_server/tests.rs @@ -72,11 +72,69 @@ async fn merkle_tree_api() { assert_eq!(err.version_count, 6); assert_eq!(err.missing_version, 10); + let raw_nodes_response = api_client + .inner + .post(format!("http://{local_addr}/debug/nodes")) + .json(&serde_json::json!({ "keys": ["0:", "0:0"] })) + .send() + .await + .unwrap() + .error_for_status() + .unwrap(); + let raw_nodes_response: serde_json::Value = raw_nodes_response.json().await.unwrap(); + assert_raw_nodes_response(&raw_nodes_response); + + let raw_stale_keys_response = api_client + .inner + .post(format!("http://{local_addr}/debug/stale-keys")) + .json(&serde_json::json!({ "l1_batch_number": 1 })) + .send() + .await + .unwrap() + .error_for_status() + .unwrap(); + let raw_stale_keys_response: serde_json::Value = raw_stale_keys_response.json().await.unwrap(); + assert_raw_stale_keys_response(&raw_stale_keys_response); + // Stop the calculator and the tree API server. 
stop_sender.send_replace(true); api_server_task.await.unwrap().unwrap(); } +fn assert_raw_nodes_response(response: &serde_json::Value) { + let response = response.as_object().expect("not an object"); + let response = response["nodes"].as_object().expect("not an object"); + let root = response["0:"].as_object().expect("not an object"); + assert!( + root.len() == 2 && root.contains_key("internal") && root.contains_key("raw"), + "{root:#?}" + ); + let root = root["internal"].as_object().expect("not an object"); + for key in root.keys() { + assert_eq!(key.len(), 1, "{key}"); + let key = key.as_bytes()[0]; + assert_matches!(key, b'0'..=b'9' | b'a'..=b'f'); + } + + if let Some(value) = response.get("0:0") { + let node = value.as_object().expect("not an object"); + assert!( + node.len() == 2 && node.contains_key("internal") && node.contains_key("raw"), + "{node:#?}" + ); + } +} + +fn assert_raw_stale_keys_response(response: &serde_json::Value) { + let response = response.as_object().expect("not an object"); + let stale_keys = response["stale_keys"].as_array().expect("not an array"); + assert!(!stale_keys.is_empty()); // At least the root is always obsoleted + for stale_key in stale_keys { + let stale_key = stale_key.as_str().expect("not a string"); + stale_key.parse::<NodeKey>().unwrap(); + } +} + #[tokio::test] async fn api_client_connection_error() { // Use an address that will definitely fail on a timeout. diff --git a/core/node/metadata_calculator/src/helpers.rs b/core/node/metadata_calculator/src/helpers.rs index b6989afb179..3f370afaf77 100644 --- a/core/node/metadata_calculator/src/helpers.rs +++ b/core/node/metadata_calculator/src/helpers.rs @@ -22,6 +22,7 @@ use zksync_health_check::{CheckHealth, Health, HealthStatus, ReactiveHealthCheck use zksync_merkle_tree::{ domain::{TreeMetadata, ZkSyncTree, ZkSyncTreeReader}, recovery::{MerkleTreeRecovery, PersistenceThreadHandle}, + unstable::{NodeKey, RawNode}, Database, Key, MerkleTreeColumnFamily, NoVersionError, RocksDBWrapper, TreeEntry, TreeEntryWithProof, TreeInstruction, }; @@ -35,7 +36,7 @@ use zksync_types::{ use super::{ metrics::{LoadChangesStage, TreeUpdateStage, METRICS}, pruning::PruningHandles, - MetadataCalculatorConfig, MetadataCalculatorRecoveryConfig, + MerkleTreeReaderConfig, MetadataCalculatorConfig, MetadataCalculatorRecoveryConfig, }; /// General information about the Merkle tree. @@ -176,6 +177,40 @@ fn create_db_sync(config: &MetadataCalculatorConfig) -> anyhow::Result<RocksDBWrapper> +async fn create_readonly_db(config: MerkleTreeReaderConfig) -> anyhow::Result<RocksDBWrapper> { + tokio::task::spawn_blocking(move || { + let MerkleTreeReaderConfig { + db_path, + max_open_files, + multi_get_chunk_size, + block_cache_capacity, + include_indices_and_filters_in_block_cache, + } = config; + + tracing::info!( + "Initializing Merkle tree database at `{db_path}` (max open files: {max_open_files:?}) with {multi_get_chunk_size} multi-get chunk size, \ + {block_cache_capacity}B block cache (indices & filters included: {include_indices_and_filters_in_block_cache:?})" + ); + let mut db = RocksDB::with_options( + db_path.as_ref(), + RocksDBOptions { + block_cache_capacity: Some(block_cache_capacity), + include_indices_and_filters_in_block_cache, + max_open_files, + ..RocksDBOptions::default() + } + )?; + if cfg!(test) { + db = db.with_sync_writes(); + } + Ok(RocksDBWrapper::from(db)) + }) + .await + .context("panicked creating Merkle tree RocksDB")? +} + /// Wrapper around the "main" tree implementation used by [`MetadataCalculator`]. /// /// Async methods provided by this wrapper are not cancel-safe! 
This is probably not an issue; @@ -307,6 +342,13 @@ pub struct AsyncTreeReader { } impl AsyncTreeReader { + pub(super) fn new(db: RocksDBWrapper, mode: MerkleTreeMode) -> anyhow::Result<Self> { + Ok(Self { + inner: ZkSyncTreeReader::new(db)?, + mode, + }) + } + fn downgrade(&self) -> WeakAsyncTreeReader { WeakAsyncTreeReader { db: self.inner.db().clone().into_inner().downgrade(), @@ -366,6 +408,18 @@ impl AsyncTreeReader { .await .unwrap() } + + pub(crate) async fn raw_nodes(self, keys: Vec<NodeKey>) -> Vec<Option<RawNode>> { + tokio::task::spawn_blocking(move || self.inner.raw_nodes(&keys)) + .await + .unwrap() + } + + pub(crate) async fn raw_stale_keys(self, l1_batch_number: L1BatchNumber) -> Vec<NodeKey> { + tokio::task::spawn_blocking(move || self.inner.raw_stale_keys(l1_batch_number)) + .await + .unwrap() + } } /// Version of async tree reader that holds a weak reference to RocksDB. Used in [`MerkleTreeHealthCheck`]. diff --git a/core/node/metadata_calculator/src/lib.rs b/core/node/metadata_calculator/src/lib.rs index 451090694b2..5c64330a0e7 100644 --- a/core/node/metadata_calculator/src/lib.rs +++ b/core/node/metadata_calculator/src/lib.rs @@ -27,6 +27,7 @@ pub use self::{ helpers::{AsyncTreeReader, LazyAsyncTreeReader, MerkleTreeInfo}, pruning::MerkleTreePruningTask, }; +use crate::helpers::create_readonly_db; pub mod api_server; mod helpers; @@ -264,3 +265,55 @@ impl MetadataCalculator { .await } } + +/// Configuration of [`TreeReaderTask`]. +#[derive(Debug, Clone)] +pub struct MerkleTreeReaderConfig { + /// Filesystem path to the RocksDB instance that stores the tree. + pub db_path: String, + /// Maximum number of files concurrently opened by RocksDB. Useful to fit into OS limits; can be used + /// as a rudimentary way to control RAM usage of the tree. + pub max_open_files: Option<NonZeroU32>, + /// Chunk size for multi-get operations. Can speed up loading data for the Merkle tree in some environments, + /// but the effects vary wildly depending on the setup (e.g., the filesystem used). + pub multi_get_chunk_size: usize, + /// Capacity of RocksDB block cache in bytes. Reasonable values range from ~100 MiB to several GB. + pub block_cache_capacity: usize, + /// If specified, RocksDB indices and Bloom filters will be managed by the block cache, rather than + /// being loaded entirely into RAM on the RocksDB initialization. The block cache capacity should be increased + /// correspondingly; otherwise, RocksDB performance can significantly degrade. + pub include_indices_and_filters_in_block_cache: bool, +} + +/// Alternative to [`MetadataCalculator`] that provides readonly access to the Merkle tree. +#[derive(Debug)] +pub struct TreeReaderTask { + config: MerkleTreeReaderConfig, + tree_reader: watch::Sender<Option<AsyncTreeReader>>, +} + +impl TreeReaderTask { + /// Creates a new task with the provided configuration. + pub fn new(config: MerkleTreeReaderConfig) -> Self { + Self { + config, + tree_reader: watch::channel(None).0, + } + } + + /// Returns a reference to the tree reader. + pub fn tree_reader(&self) -> LazyAsyncTreeReader { + LazyAsyncTreeReader(self.tree_reader.subscribe()) + } + + /// Runs this task. The task exits on error, or when the tree reader is successfully initialized. + pub async fn run(self, mut stop_receiver: watch::Receiver<bool>) -> anyhow::Result<()> { + let db = tokio::select! 
{ + db_result = create_readonly_db(self.config) => db_result?, + _ = stop_receiver.changed() => return Ok(()), + }; + let reader = AsyncTreeReader::new(db, MerkleTreeMode::Lightweight)?; + self.tree_reader.send_replace(Some(reader)); + Ok(()) + } +} diff --git a/core/node/metadata_calculator/src/updater.rs b/core/node/metadata_calculator/src/updater.rs index e2acf62dea8..17fd5d900ea 100644 --- a/core/node/metadata_calculator/src/updater.rs +++ b/core/node/metadata_calculator/src/updater.rs @@ -152,10 +152,6 @@ impl TreeUpdater { // right away without having to implement dedicated code. if let Some(object_key) = &object_key { - storage - .tee_verifier_input_producer_dal() - .create_tee_verifier_input_producer_job(l1_batch_number) - .await?; // Save the proof generation details to Postgres storage .proof_generation_dal() diff --git a/core/node/node_framework/Cargo.toml b/core/node/node_framework/Cargo.toml index 7491277c128..ae9f7498929 100644 --- a/core/node/node_framework/Cargo.toml +++ b/core/node/node_framework/Cargo.toml @@ -45,7 +45,6 @@ zksync_node_sync.workspace = true zksync_node_api_server.workspace = true zksync_node_consensus.workspace = true zksync_contract_verification_server.workspace = true -zksync_tee_verifier_input_producer.workspace = true zksync_queued_job_processor.workspace = true zksync_reorg_detector.workspace = true zksync_vm_runner.workspace = true diff --git a/core/node/node_framework/src/implementations/layers/base_token/coingecko_client.rs b/core/node/node_framework/src/implementations/layers/base_token/coingecko_client.rs deleted file mode 100644 index 14ab568c2f3..00000000000 --- a/core/node/node_framework/src/implementations/layers/base_token/coingecko_client.rs +++ /dev/null @@ -1,55 +0,0 @@ -use std::sync::Arc; - -use zksync_config::configs::ExternalPriceApiClientConfig; -use zksync_external_price_api::coingecko_api::CoinGeckoPriceAPIClient; - -use crate::{ - implementations::resources::price_api_client::PriceAPIClientResource, - wiring_layer::{WiringError, WiringLayer}, - IntoContext, -}; - -/// Wiring layer for `CoingeckoApiClient` -/// -/// Responsible for inserting a resource with a client to get base token prices from CoinGecko to be -/// used by the `BaseTokenRatioPersister`. -#[derive(Debug)] -pub struct CoingeckoClientLayer { - config: ExternalPriceApiClientConfig, -} - -impl CoingeckoClientLayer { - /// Identifier of used client type. - /// Can be used to choose the layer for the client based on configuration variables. 
- pub const CLIENT_NAME: &'static str = "coingecko"; -} - -#[derive(Debug, IntoContext)] -#[context(crate = crate)] -pub struct Output { - pub price_api_client: PriceAPIClientResource, -} - -impl CoingeckoClientLayer { - pub fn new(config: ExternalPriceApiClientConfig) -> Self { - Self { config } - } -} - -#[async_trait::async_trait] -impl WiringLayer for CoingeckoClientLayer { - type Input = (); - type Output = Output; - - fn layer_name(&self) -> &'static str { - "coingecko_api_client" - } - - async fn wire(self, _input: Self::Input) -> Result<Self::Output, WiringError> { - let cg_client = Arc::new(CoinGeckoPriceAPIClient::new(self.config)); - - Ok(Output { - price_api_client: cg_client.into(), - }) - } -} diff --git a/core/node/node_framework/src/implementations/layers/base_token/forced_price_client.rs b/core/node/node_framework/src/implementations/layers/base_token/forced_price_client.rs deleted file mode 100644 index 67785dc26ed..00000000000 --- a/core/node/node_framework/src/implementations/layers/base_token/forced_price_client.rs +++ /dev/null @@ -1,52 +0,0 @@ -use std::sync::Arc; - -use zksync_config::configs::ExternalPriceApiClientConfig; -use zksync_external_price_api::forced_price_client::ForcedPriceClient; - -use crate::{ - implementations::resources::price_api_client::PriceAPIClientResource, - wiring_layer::{WiringError, WiringLayer}, - IntoContext, -}; - -/// Wiring layer for `ForcedPriceClient` -/// -/// Inserts a resource with a forced configured price to be used by the `BaseTokenRatioPersister`. -#[derive(Debug)] -pub struct ForcedPriceClientLayer { - config: ExternalPriceApiClientConfig, -} - -impl ForcedPriceClientLayer { - pub fn new(config: ExternalPriceApiClientConfig) -> Self { - Self { config } - } - - /// Identifier of used client type. - /// Can be used to choose the layer for the client based on configuration variables. 
- pub const CLIENT_NAME: &'static str = "forced"; -} - -#[derive(Debug, IntoContext)] -#[context(crate = crate)] -pub struct Output { - pub price_api_client: PriceAPIClientResource, -} - -#[async_trait::async_trait] -impl WiringLayer for ForcedPriceClientLayer { - type Input = (); - type Output = Output; - - fn layer_name(&self) -> &'static str { - "forced_price_client" - } - - async fn wire(self, _input: Self::Input) -> Result<Self::Output, WiringError> { - let forced_client = Arc::new(ForcedPriceClient::new(self.config)); - - Ok(Output { - price_api_client: forced_client.into(), - }) - } -} diff --git a/core/node/node_framework/src/implementations/layers/base_token/mod.rs b/core/node/node_framework/src/implementations/layers/base_token/mod.rs index 5b58527a3d8..7a63b573d78 100644 --- a/core/node/node_framework/src/implementations/layers/base_token/mod.rs +++ b/core/node/node_framework/src/implementations/layers/base_token/mod.rs @@ -1,5 +1,92 @@ +use std::{str::FromStr, sync::Arc}; + +use zksync_config::configs::ExternalPriceApiClientConfig; +use zksync_external_price_api::{ + cmc_api::CmcPriceApiClient, coingecko_api::CoinGeckoPriceAPIClient, + forced_price_client::ForcedPriceClient, NoOpPriceAPIClient, +}; + +use crate::{ + implementations::resources::price_api_client::PriceAPIClientResource, IntoContext, WiringError, + WiringLayer, +}; + pub mod base_token_ratio_persister; pub mod base_token_ratio_provider; -pub mod coingecko_client; -pub mod forced_price_client; -pub mod no_op_external_price_api_client; + +#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq, PartialOrd, Ord, Default)] +enum ExternalPriceApiKind { + #[default] + NoOp, + Forced, + CoinGecko, + CoinMarketCap, +} + +#[derive(Debug, thiserror::Error)] +#[error("Unknown external price API client source: \"{0}\"")] +pub struct UnknownExternalPriceApiClientSourceError(String); + +impl FromStr for ExternalPriceApiKind { + type Err = UnknownExternalPriceApiClientSourceError; + + fn from_str(s: &str) -> Result<Self, Self::Err> { + Ok(match &s.to_lowercase()[..] 
{ + "no-op" | "noop" => Self::NoOp, + "forced" => Self::Forced, + "coingecko" => Self::CoinGecko, + "coinmarketcap" => Self::CoinMarketCap, + _ => return Err(UnknownExternalPriceApiClientSourceError(s.to_owned())), + }) + } +} + +impl ExternalPriceApiKind { + fn instantiate(&self, config: ExternalPriceApiClientConfig) -> PriceAPIClientResource { + PriceAPIClientResource(match self { + Self::NoOp => Arc::new(NoOpPriceAPIClient {}), + Self::Forced => Arc::new(ForcedPriceClient::new(config)), + Self::CoinGecko => Arc::new(CoinGeckoPriceAPIClient::new(config)), + Self::CoinMarketCap => Arc::new(CmcPriceApiClient::new(config)), + }) + } +} + +#[derive(Debug)] +pub struct ExternalPriceApiLayer { + kind: ExternalPriceApiKind, + config: ExternalPriceApiClientConfig, +} + +impl TryFrom<ExternalPriceApiClientConfig> for ExternalPriceApiLayer { + type Error = UnknownExternalPriceApiClientSourceError; + + fn try_from(config: ExternalPriceApiClientConfig) -> Result<Self, Self::Error> { + Ok(Self { + kind: config.source.parse()?, + config, + }) + } +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct Output { + pub price_api_client: PriceAPIClientResource, +} + +#[async_trait::async_trait] +impl WiringLayer for ExternalPriceApiLayer { + type Input = (); + type Output = Output; + + fn layer_name(&self) -> &'static str { + "external_price_api" + } + + async fn wire(self, _input: Self::Input) -> Result<Self::Output, WiringError> { + Ok(Output { + price_api_client: self.kind.instantiate(self.config), + }) + } +} diff --git a/core/node/node_framework/src/implementations/layers/base_token/no_op_external_price_api_client.rs b/core/node/node_framework/src/implementations/layers/base_token/no_op_external_price_api_client.rs deleted file mode 100644 index 2bf5eda798f..00000000000 --- a/core/node/node_framework/src/implementations/layers/base_token/no_op_external_price_api_client.rs +++ /dev/null @@ -1,45 +0,0 @@ -use std::sync::Arc; - -use zksync_external_price_api::NoOpPriceAPIClient; - -use crate::{ - implementations::resources::price_api_client::PriceAPIClientResource, - wiring_layer::{WiringError, WiringLayer}, - IntoContext, -}; - -/// Wiring layer for `NoOpExternalPriceApiClient` -/// -/// Inserts a resource with a no-op client to get base token prices to be used by the `BaseTokenRatioPersister`. -#[derive(Debug)] -pub struct NoOpExternalPriceApiClientLayer; - -impl NoOpExternalPriceApiClientLayer { - /// Identifier of used client type. - /// Can be used to choose the layer for the client based on configuration variables. 
- pub const CLIENT_NAME: &'static str = "no-op"; -} - -#[derive(Debug, IntoContext)] -#[context(crate = crate)] -pub struct Output { - pub price_api_client: PriceAPIClientResource, -} - -#[async_trait::async_trait] -impl WiringLayer for NoOpExternalPriceApiClientLayer { - type Input = (); - type Output = Output; - - fn layer_name(&self) -> &'static str { - "no_op_external_price_api_client" - } - - async fn wire(self, _input: Self::Input) -> Result<Self::Output, WiringError> { - let no_op_client = Arc::new(NoOpPriceAPIClient {}); - - Ok(Output { - price_api_client: no_op_client.into(), - }) - } -} diff --git a/core/node/node_framework/src/implementations/layers/gas_adjuster.rs b/core/node/node_framework/src/implementations/layers/gas_adjuster.rs index 229700289a7..241c4d829be 100644 --- a/core/node/node_framework/src/implementations/layers/gas_adjuster.rs +++ b/core/node/node_framework/src/implementations/layers/gas_adjuster.rs @@ -1,8 +1,9 @@ use std::sync::Arc; use anyhow::Context; -use zksync_config::{configs::eth_sender::PubdataSendingMode, GasAdjusterConfig, GenesisConfig}; +use zksync_config::{GasAdjusterConfig, GenesisConfig}; use zksync_node_fee_model::l1_gas_price::GasAdjuster; +use zksync_types::pubdata_da::PubdataSendingMode; use crate::{ implementations::resources::{ diff --git a/core/node/node_framework/src/implementations/layers/l1_gas.rs b/core/node/node_framework/src/implementations/layers/l1_gas.rs index 35c4bc3fc20..28f81bb4543 100644 --- a/core/node/node_framework/src/implementations/layers/l1_gas.rs +++ b/core/node/node_framework/src/implementations/layers/l1_gas.rs @@ -1,8 +1,8 @@ use std::sync::Arc; -use zksync_config::configs::chain::StateKeeperConfig; +use zksync_config::configs::chain::{FeeModelVersion, StateKeeperConfig}; use zksync_node_fee_model::{ApiFeeInputProvider, MainNodeFeeInputProvider}; -use zksync_types::fee_model::FeeModelConfig; +use zksync_types::fee_model::{FeeModelConfig, FeeModelConfigV1, FeeModelConfigV2}; use crate::{ implementations::resources::{ @@ -20,7 +20,7 @@ use crate::{ /// Adds several resources that depend on L1 gas price. 
#[derive(Debug)] pub struct L1GasLayer { - state_keeper_config: StateKeeperConfig, + fee_model_config: FeeModelConfig, } #[derive(Debug, FromContext)] @@ -42,9 +42,25 @@ pub struct Output { } impl L1GasLayer { - pub fn new(state_keeper_config: StateKeeperConfig) -> Self { + pub fn new(state_keeper_config: &StateKeeperConfig) -> Self { Self { - state_keeper_config, + fee_model_config: Self::map_config(state_keeper_config), + } + } + + fn map_config(state_keeper_config: &StateKeeperConfig) -> FeeModelConfig { + match state_keeper_config.fee_model_version { + FeeModelVersion::V1 => FeeModelConfig::V1(FeeModelConfigV1 { + minimal_l2_gas_price: state_keeper_config.minimal_l2_gas_price, + }), + FeeModelVersion::V2 => FeeModelConfig::V2(FeeModelConfigV2 { + minimal_l2_gas_price: state_keeper_config.minimal_l2_gas_price, + compute_overhead_part: state_keeper_config.compute_overhead_part, + pubdata_overhead_part: state_keeper_config.pubdata_overhead_part, + batch_overhead_l1_gas: state_keeper_config.batch_overhead_l1_gas, + max_gas_per_batch: state_keeper_config.max_gas_per_batch, + max_pubdata_per_batch: state_keeper_config.max_pubdata_per_batch, + }), } } } @@ -64,7 +80,7 @@ impl WiringLayer for L1GasLayer { let main_fee_input_provider = Arc::new(MainNodeFeeInputProvider::new( input.gas_adjuster.0.clone(), ratio_provider.0, - FeeModelConfig::from_state_keeper_config(&self.state_keeper_config), + self.fee_model_config, )); let replica_pool = input.replica_pool.get().await?; diff --git a/core/node/node_framework/src/implementations/layers/metadata_calculator.rs b/core/node/node_framework/src/implementations/layers/metadata_calculator.rs index 827ec69d942..4092ee6dcd5 100644 --- a/core/node/node_framework/src/implementations/layers/metadata_calculator.rs +++ b/core/node/node_framework/src/implementations/layers/metadata_calculator.rs @@ -7,7 +7,8 @@ use std::{ use anyhow::Context as _; use zksync_config::configs::{api::MerkleTreeApiConfig, database::MerkleTreeMode}; use zksync_metadata_calculator::{ - LazyAsyncTreeReader, MerkleTreePruningTask, MetadataCalculator, MetadataCalculatorConfig, + LazyAsyncTreeReader, MerkleTreePruningTask, MerkleTreeReaderConfig, MetadataCalculator, + MetadataCalculatorConfig, TreeReaderTask, }; use zksync_storage::RocksDB; @@ -19,7 +20,7 @@ use crate::{ web3_api::TreeApiClientResource, }, service::{ShutdownHook, StopReceiver}, - task::{Task, TaskId}, + task::{Task, TaskId, TaskKind}, wiring_layer::{WiringError, WiringLayer}, FromContext, IntoContext, }; @@ -205,3 +206,65 @@ impl Task for MerkleTreePruningTask { (*self).run(stop_receiver.0).await } } + +/// Mutually exclusive with [`MetadataCalculatorLayer`]. 
+#[derive(Debug)] +pub struct TreeApiServerLayer { + config: MerkleTreeReaderConfig, + api_config: MerkleTreeApiConfig, +} + +impl TreeApiServerLayer { + pub fn new(config: MerkleTreeReaderConfig, api_config: MerkleTreeApiConfig) -> Self { + Self { config, api_config } + } +} + +#[derive(Debug, IntoContext)] +#[context(crate = crate)] +pub struct TreeApiServerOutput { + tree_api_client: TreeApiClientResource, + #[context(task)] + tree_reader_task: TreeReaderTask, + #[context(task)] + tree_api_task: TreeApiTask, +} + +#[async_trait::async_trait] +impl WiringLayer for TreeApiServerLayer { + type Input = (); + type Output = TreeApiServerOutput; + + fn layer_name(&self) -> &'static str { + "tree_api_server" + } + + async fn wire(self, (): Self::Input) -> Result { + let tree_reader_task = TreeReaderTask::new(self.config); + let bind_addr = (Ipv4Addr::UNSPECIFIED, self.api_config.port).into(); + let tree_api_task = TreeApiTask { + bind_addr, + tree_reader: tree_reader_task.tree_reader(), + }; + Ok(TreeApiServerOutput { + tree_api_client: TreeApiClientResource(Arc::new(tree_reader_task.tree_reader())), + tree_api_task, + tree_reader_task, + }) + } +} + +#[async_trait::async_trait] +impl Task for TreeReaderTask { + fn kind(&self) -> TaskKind { + TaskKind::OneshotTask + } + + fn id(&self) -> TaskId { + "merkle_tree_reader_task".into() + } + + async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { + (*self).run(stop_receiver.0).await + } +} diff --git a/core/node/node_framework/src/implementations/layers/mod.rs b/core/node/node_framework/src/implementations/layers/mod.rs index 75828da1902..11a62c9333b 100644 --- a/core/node/node_framework/src/implementations/layers/mod.rs +++ b/core/node/node_framework/src/implementations/layers/mod.rs @@ -33,7 +33,6 @@ pub mod reorg_detector; pub mod sigint; pub mod state_keeper; pub mod sync_state_updater; -pub mod tee_verifier_input_producer; pub mod tree_data_fetcher; pub mod validate_chain_ids; pub mod vm_runner; diff --git a/core/node/node_framework/src/implementations/layers/proof_data_handler.rs b/core/node/node_framework/src/implementations/layers/proof_data_handler.rs index b53ff73c1a0..3e1269caa4e 100644 --- a/core/node/node_framework/src/implementations/layers/proof_data_handler.rs +++ b/core/node/node_framework/src/implementations/layers/proof_data_handler.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use zksync_config::configs::ProofDataHandlerConfig; use zksync_dal::{ConnectionPool, Core}; use zksync_object_store::ObjectStore; -use zksync_types::commitment::L1BatchCommitmentMode; +use zksync_types::{commitment::L1BatchCommitmentMode, L2ChainId}; use crate::{ implementations::resources::{ @@ -21,6 +21,7 @@ use crate::{ pub struct ProofDataHandlerLayer { proof_data_handler_config: ProofDataHandlerConfig, commitment_mode: L1BatchCommitmentMode, + l2_chain_id: L2ChainId, } #[derive(Debug, FromContext)] @@ -41,10 +42,12 @@ impl ProofDataHandlerLayer { pub fn new( proof_data_handler_config: ProofDataHandlerConfig, commitment_mode: L1BatchCommitmentMode, + l2_chain_id: L2ChainId, ) -> Self { Self { proof_data_handler_config, commitment_mode, + l2_chain_id, } } } @@ -67,6 +70,7 @@ impl WiringLayer for ProofDataHandlerLayer { blob_store, main_pool, commitment_mode: self.commitment_mode, + l2_chain_id: self.l2_chain_id, }; Ok(Output { task }) @@ -79,6 +83,7 @@ pub struct ProofDataHandlerTask { blob_store: Arc, main_pool: ConnectionPool, commitment_mode: L1BatchCommitmentMode, + l2_chain_id: L2ChainId, } #[async_trait::async_trait] @@ -93,6 +98,7 
@@ impl Task for ProofDataHandlerTask { self.blob_store, self.main_pool, self.commitment_mode, + self.l2_chain_id, stop_receiver.0, ) .await diff --git a/core/node/node_framework/src/implementations/layers/state_keeper/external_io.rs b/core/node/node_framework/src/implementations/layers/state_keeper/external_io.rs index 31b76550767..2c23f5aa9a1 100644 --- a/core/node/node_framework/src/implementations/layers/state_keeper/external_io.rs +++ b/core/node/node_framework/src/implementations/layers/state_keeper/external_io.rs @@ -8,6 +8,7 @@ use zksync_types::L2ChainId; use crate::{ implementations::resources::{ action_queue::ActionQueueSenderResource, + healthcheck::AppHealthCheckResource, main_node_client::MainNodeClientResource, pools::{MasterPool, PoolResource}, state_keeper::{ConditionalSealerResource, StateKeeperIOResource}, @@ -26,6 +27,7 @@ pub struct ExternalIOLayer { #[derive(Debug, FromContext)] #[context(crate = crate)] pub struct Input { + pub app_health: AppHealthCheckResource, pub pool: PoolResource, pub main_node_client: MainNodeClientResource, } @@ -57,6 +59,10 @@ impl WiringLayer for ExternalIOLayer { async fn wire(self, input: Self::Input) -> Result { // Create `SyncState` resource. let sync_state = SyncState::default(); + let app_health = &input.app_health.0; + app_health + .insert_custom_component(Arc::new(sync_state.clone())) + .map_err(WiringError::internal)?; // Create `ActionQueueSender` resource. let (action_queue_sender, action_queue) = ActionQueue::new(); diff --git a/core/node/node_framework/src/implementations/layers/state_keeper/mempool_io.rs b/core/node/node_framework/src/implementations/layers/state_keeper/mempool_io.rs index 191b4a699b9..77992f34c7f 100644 --- a/core/node/node_framework/src/implementations/layers/state_keeper/mempool_io.rs +++ b/core/node/node_framework/src/implementations/layers/state_keeper/mempool_io.rs @@ -1,13 +1,10 @@ use anyhow::Context as _; -use zksync_config::{ - configs::{ - chain::{MempoolConfig, StateKeeperConfig}, - wallets, - }, - ContractsConfig, GenesisConfig, +use zksync_config::configs::{ + chain::{MempoolConfig, StateKeeperConfig}, + wallets, }; use zksync_state_keeper::{MempoolFetcher, MempoolGuard, MempoolIO, SequencerSealer}; -use zksync_types::L2ChainId; +use zksync_types::{commitment::L1BatchCommitmentMode, Address, L2ChainId}; use crate::{ implementations::resources::{ @@ -41,9 +38,9 @@ pub struct MempoolIOLayer { zksync_network_id: L2ChainId, state_keeper_config: StateKeeperConfig, mempool_config: MempoolConfig, - contracts_config: ContractsConfig, - genesis_config: GenesisConfig, wallets: wallets::StateKeeper, + l2_da_validator_addr: Option
<Address>
, + l1_batch_commit_data_generator_mode: L1BatchCommitmentMode, } #[derive(Debug, FromContext)] @@ -67,17 +64,17 @@ impl MempoolIOLayer { zksync_network_id: L2ChainId, state_keeper_config: StateKeeperConfig, mempool_config: MempoolConfig, - contracts_config: ContractsConfig, - genesis_config: GenesisConfig, wallets: wallets::StateKeeper, + l2_da_validator_addr: Option
<Address>
, + l1_batch_commit_data_generator_mode: L1BatchCommitmentMode, ) -> Self { Self { zksync_network_id, state_keeper_config, mempool_config, - contracts_config, - genesis_config, wallets, + l2_da_validator_addr, + l1_batch_commit_data_generator_mode, } } @@ -136,10 +133,10 @@ impl WiringLayer for MempoolIOLayer { mempool_db_pool, &self.state_keeper_config, self.wallets.fee_account.address(), - self.contracts_config.l2_da_validator_addr, - self.genesis_config.l1_batch_commit_data_generator_mode, self.mempool_config.delay_interval(), self.zksync_network_id, + self.l2_da_validator_addr, + self.l1_batch_commit_data_generator_mode, )?; // Create sealer. diff --git a/core/node/node_framework/src/implementations/layers/state_keeper/output_handler.rs b/core/node/node_framework/src/implementations/layers/state_keeper/output_handler.rs index 7a0de619364..1a07591c1cd 100644 --- a/core/node/node_framework/src/implementations/layers/state_keeper/output_handler.rs +++ b/core/node/node_framework/src/implementations/layers/state_keeper/output_handler.rs @@ -1,12 +1,10 @@ use anyhow::Context as _; -use zksync_dal::{Core, CoreDal}; -use zksync_db_connection::connection_pool::ConnectionPool; use zksync_node_framework_derive::FromContext; use zksync_state_keeper::{ io::seal_logic::l2_block_seal_subtasks::L2BlockSealProcess, L2BlockSealerTask, OutputHandler, StateKeeperPersistence, TreeWritesPersistence, }; -use zksync_types::{Address, ProtocolVersionId}; +use zksync_types::Address; use crate::{ implementations::resources::{ @@ -89,38 +87,6 @@ impl OutputHandlerLayer { self.protective_reads_persistence_enabled = protective_reads_persistence_enabled; self } - - async fn validate_l2_legacy_shared_bridge_addr( - &self, - pool: &ConnectionPool, - ) -> Result<(), WiringError> { - let mut connection = pool.connection().await.context("Get DB connection")?; - - if let Some(l2_block) = connection - .blocks_dal() - .get_earliest_l2_block_number() - .await - .context("failed to load earliest l2 block number")? - { - let header = connection - .blocks_dal() - .get_l2_block_header(l2_block) - .await - .context("failed to load L2 block header")? 
- .context("missing L2 block header")?; - let protocol_version = header - .protocol_version - .unwrap_or_else(ProtocolVersionId::last_potentially_undefined); - - if protocol_version.is_pre_gateway() && self.l2_legacy_shared_bridge_addr.is_none() { - return Err(WiringError::Configuration( - "Missing `l2_legacy_shared_bridge_addr` for chain that was initialized before gateway upgrade".to_string() - )); - } - } - - Ok(()) - } } #[async_trait::async_trait] @@ -140,14 +106,13 @@ impl WiringLayer for OutputHandlerLayer { .get_custom(L2BlockSealProcess::subtasks_len()) .await .context("Get master pool")?; - self.validate_l2_legacy_shared_bridge_addr(&persistence_pool) - .await?; let (mut persistence, l2_block_sealer) = StateKeeperPersistence::new( persistence_pool.clone(), self.l2_legacy_shared_bridge_addr, self.l2_block_seal_queue_capacity, - ); + ) + .await?; if self.pre_insert_txs { persistence = persistence.with_tx_insertion(); } diff --git a/core/node/node_framework/src/implementations/layers/sync_state_updater.rs b/core/node/node_framework/src/implementations/layers/sync_state_updater.rs index 1f86b43f7a5..dd2652dfddb 100644 --- a/core/node/node_framework/src/implementations/layers/sync_state_updater.rs +++ b/core/node/node_framework/src/implementations/layers/sync_state_updater.rs @@ -1,9 +1,12 @@ +use std::sync::Arc; + use zksync_dal::{ConnectionPool, Core}; use zksync_node_sync::SyncState; use zksync_web3_decl::client::{DynClient, L2}; use crate::{ implementations::resources::{ + healthcheck::AppHealthCheckResource, main_node_client::MainNodeClientResource, pools::{MasterPool, PoolResource}, sync_state::SyncStateResource, @@ -24,6 +27,7 @@ pub struct SyncStateUpdaterLayer; pub struct Input { /// Fetched to check whether the `SyncState` was already provided by another layer. pub sync_state: Option, + pub app_health: AppHealthCheckResource, pub master_pool: PoolResource, pub main_node_client: MainNodeClientResource, } @@ -62,6 +66,10 @@ impl WiringLayer for SyncStateUpdaterLayer { let MainNodeClientResource(main_node_client) = input.main_node_client; let sync_state = SyncState::default(); + let app_health = &input.app_health.0; + app_health + .insert_custom_component(Arc::new(sync_state.clone())) + .map_err(WiringError::internal)?; Ok(Output { sync_state: Some(sync_state.clone().into()), diff --git a/core/node/node_framework/src/implementations/layers/tee_verifier_input_producer.rs b/core/node/node_framework/src/implementations/layers/tee_verifier_input_producer.rs deleted file mode 100644 index 68789082a22..00000000000 --- a/core/node/node_framework/src/implementations/layers/tee_verifier_input_producer.rs +++ /dev/null @@ -1,69 +0,0 @@ -use zksync_queued_job_processor::JobProcessor; -use zksync_tee_verifier_input_producer::TeeVerifierInputProducer; -use zksync_types::L2ChainId; - -use crate::{ - implementations::resources::{ - object_store::ObjectStoreResource, - pools::{MasterPool, PoolResource}, - }, - service::StopReceiver, - task::{Task, TaskId}, - wiring_layer::{WiringError, WiringLayer}, - FromContext, IntoContext, -}; - -/// Wiring layer for [`TeeVerifierInputProducer`]. 
-#[derive(Debug)] -pub struct TeeVerifierInputProducerLayer { - l2_chain_id: L2ChainId, -} - -impl TeeVerifierInputProducerLayer { - pub fn new(l2_chain_id: L2ChainId) -> Self { - Self { l2_chain_id } - } -} - -#[derive(Debug, FromContext)] -#[context(crate = crate)] -pub struct Input { - pub master_pool: PoolResource, - pub object_store: ObjectStoreResource, -} - -#[derive(Debug, IntoContext)] -#[context(crate = crate)] -pub struct Output { - #[context(task)] - pub task: TeeVerifierInputProducer, -} - -#[async_trait::async_trait] -impl WiringLayer for TeeVerifierInputProducerLayer { - type Input = Input; - type Output = Output; - - fn layer_name(&self) -> &'static str { - "tee_verifier_input_producer_layer" - } - - async fn wire(self, input: Self::Input) -> Result { - let pool = input.master_pool.get().await?; - let ObjectStoreResource(object_store) = input.object_store; - let task = TeeVerifierInputProducer::new(pool, object_store, self.l2_chain_id).await?; - - Ok(Output { task }) - } -} - -#[async_trait::async_trait] -impl Task for TeeVerifierInputProducer { - fn id(&self) -> TaskId { - "tee_verifier_input_producer".into() - } - - async fn run(self: Box, stop_receiver: StopReceiver) -> anyhow::Result<()> { - (*self).run(stop_receiver.0, None).await - } -} diff --git a/core/node/node_framework/src/implementations/layers/web3_api/server/bridge_addresses.rs b/core/node/node_framework/src/implementations/layers/web3_api/server/bridge_addresses.rs new file mode 100644 index 00000000000..4ba8098c839 --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/web3_api/server/bridge_addresses.rs @@ -0,0 +1,48 @@ +use std::time::Duration; + +use zksync_node_api_server::web3::state::BridgeAddressesHandle; +use zksync_web3_decl::{ + client::{DynClient, L2}, + namespaces::ZksNamespaceClient, +}; + +use crate::{StopReceiver, Task, TaskId}; + +#[derive(Debug)] +pub struct BridgeAddressesUpdaterTask { + pub bridge_address_updater: BridgeAddressesHandle, + pub main_node_client: Box>, + pub update_interval: Option, +} + +#[async_trait::async_trait] +impl Task for BridgeAddressesUpdaterTask { + fn id(&self) -> TaskId { + "bridge_addresses_updater_task".into() + } + + async fn run(self: Box, mut stop_receiver: StopReceiver) -> anyhow::Result<()> { + const DEFAULT_INTERVAL: Duration = Duration::from_secs(30); + + let update_interval = self.update_interval.unwrap_or(DEFAULT_INTERVAL); + while !*stop_receiver.0.borrow_and_update() { + match self.main_node_client.get_bridge_contracts().await { + Ok(bridge_addresses) => { + self.bridge_address_updater.update(bridge_addresses).await; + } + Err(err) => { + tracing::error!("Failed to query `get_bridge_contracts`, error: {err:?}"); + } + } + + if tokio::time::timeout(update_interval, stop_receiver.0.changed()) + .await + .is_ok() + { + break; + } + } + + Ok(()) + } +} diff --git a/core/node/node_framework/src/implementations/layers/web3_api/server.rs b/core/node/node_framework/src/implementations/layers/web3_api/server/mod.rs similarity index 81% rename from core/node/node_framework/src/implementations/layers/web3_api/server.rs rename to core/node/node_framework/src/implementations/layers/web3_api/server/mod.rs index 0a39ae747c7..390d321647c 100644 --- a/core/node/node_framework/src/implementations/layers/web3_api/server.rs +++ b/core/node/node_framework/src/implementations/layers/web3_api/server/mod.rs @@ -3,15 +3,24 @@ use std::{num::NonZeroU32, time::Duration}; use tokio::{sync::oneshot, task::JoinHandle}; use 
zksync_circuit_breaker::replication_lag::ReplicationLagChecker; use zksync_config::configs::api::MaxResponseSize; -use zksync_node_api_server::web3::{state::InternalApiConfig, ApiBuilder, ApiServer, Namespace}; +use zksync_node_api_server::web3::{ + state::{BridgeAddressesHandle, InternalApiConfig, SealedL2BlockNumber}, + ApiBuilder, ApiServer, Namespace, +}; use crate::{ - implementations::resources::{ - circuit_breakers::CircuitBreakersResource, - healthcheck::AppHealthCheckResource, - pools::{PoolResource, ReplicaPool}, - sync_state::SyncStateResource, - web3_api::{MempoolCacheResource, TreeApiClientResource, TxSenderResource}, + implementations::{ + layers::web3_api::server::{ + bridge_addresses::BridgeAddressesUpdaterTask, sealed_l2_block::SealedL2BlockUpdaterTask, + }, + resources::{ + circuit_breakers::CircuitBreakersResource, + healthcheck::AppHealthCheckResource, + main_node_client::MainNodeClientResource, + pools::{PoolResource, ReplicaPool}, + sync_state::SyncStateResource, + web3_api::{MempoolCacheResource, TreeApiClientResource, TxSenderResource}, + }, }, service::StopReceiver, task::{Task, TaskId}, @@ -19,6 +28,9 @@ use crate::{ FromContext, IntoContext, }; +mod bridge_addresses; +mod sealed_l2_block; + /// Set of optional variables that can be altered to modify the behavior of API builder. #[derive(Debug, Default)] pub struct Web3ServerOptionalConfig { @@ -33,6 +45,8 @@ pub struct Web3ServerOptionalConfig { pub replication_lag_limit: Option, // Used by the external node. pub pruning_info_refresh_interval: Option, + // Used by the external node. + pub bridge_addresses_refresh_interval: Option, pub polling_interval: Option, } @@ -61,6 +75,10 @@ impl Web3ServerOptionalConfig { if let Some(polling_interval) = self.polling_interval { api_builder = api_builder.with_polling_interval(polling_interval); } + if let Some(pruning_info_refresh_interval) = self.pruning_info_refresh_interval { + api_builder = + api_builder.with_pruning_info_refresh_interval(pruning_info_refresh_interval); + } api_builder = api_builder.with_extended_tracing(self.with_extended_tracing); api_builder } @@ -109,6 +127,7 @@ pub struct Input { pub circuit_breakers: CircuitBreakersResource, #[context(default)] pub app_health: AppHealthCheckResource, + pub main_node_client: Option, } #[derive(Debug, IntoContext)] @@ -118,6 +137,10 @@ pub struct Output { pub web3_api_task: Web3ApiTask, #[context(task)] pub garbage_collector_task: ApiTaskGarbageCollector, + #[context(task)] + pub sealed_l2_block_updater_task: SealedL2BlockUpdaterTask, + #[context(task)] + pub bridge_addresses_updater_task: Option, } impl Web3ServerLayer { @@ -163,20 +186,39 @@ impl WiringLayer for Web3ServerLayer { async fn wire(self, input: Self::Input) -> Result { // Get required resources. 
let replica_resource_pool = input.replica_pool; - let updaters_pool = replica_resource_pool.get_custom(2).await?; + let updaters_pool = replica_resource_pool.get_custom(1).await?; let replica_pool = replica_resource_pool.get().await?; let TxSenderResource(tx_sender) = input.tx_sender; let MempoolCacheResource(mempool_cache) = input.mempool_cache; let sync_state = input.sync_state.map(|state| state.0); let tree_api_client = input.tree_api_client.map(|client| client.0); + let sealed_l2_block_handle = SealedL2BlockNumber::default(); + let bridge_addresses_handle = + BridgeAddressesHandle::new(self.internal_api_config.bridge_addresses.clone()); + + let sealed_l2_block_updater_task = SealedL2BlockUpdaterTask { + number_updater: sealed_l2_block_handle.clone(), + pool: updaters_pool, + }; + // Bridge addresses updater task must be started for ENs and only for ENs. + let bridge_addresses_updater_task = + input + .main_node_client + .map(|main_node_client| BridgeAddressesUpdaterTask { + bridge_address_updater: bridge_addresses_handle.clone(), + main_node_client: main_node_client.0, + update_interval: self.optional_config.bridge_addresses_refresh_interval, + }); + // Build server. let mut api_builder = ApiBuilder::jsonrpsee_backend(self.internal_api_config, replica_pool.clone()) - .with_updaters_pool(updaters_pool) .with_tx_sender(tx_sender) .with_mempool_cache(mempool_cache) - .with_extended_tracing(self.optional_config.with_extended_tracing); + .with_extended_tracing(self.optional_config.with_extended_tracing) + .with_sealed_l2_block_handle(sealed_l2_block_handle) + .with_bridge_addresses_handle(bridge_addresses_handle); if let Some(client) = tree_api_client { api_builder = api_builder.with_tree_api(client); } @@ -191,14 +233,9 @@ impl WiringLayer for Web3ServerLayer { if let Some(sync_state) = sync_state { api_builder = api_builder.with_sync_state(sync_state); } - if let Some(pruning_info_refresh_interval) = - self.optional_config.pruning_info_refresh_interval - { - api_builder = - api_builder.with_pruning_info_refresh_interval(pruning_info_refresh_interval); - } let replication_lag_limit = self.optional_config.replication_lag_limit; api_builder = self.optional_config.apply(api_builder); + let server = api_builder.build()?; // Insert healthcheck. 
@@ -230,6 +267,8 @@ impl WiringLayer for Web3ServerLayer { Ok(Output { web3_api_task, garbage_collector_task, + sealed_l2_block_updater_task, + bridge_addresses_updater_task, }) } } diff --git a/core/node/node_framework/src/implementations/layers/web3_api/server/sealed_l2_block.rs b/core/node/node_framework/src/implementations/layers/web3_api/server/sealed_l2_block.rs new file mode 100644 index 00000000000..02552e212cd --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/web3_api/server/sealed_l2_block.rs @@ -0,0 +1,50 @@ +use std::time::Duration; + +use zksync_dal::{Core, CoreDal}; +use zksync_db_connection::connection_pool::ConnectionPool; +use zksync_node_api_server::web3::state::SealedL2BlockNumber; + +use crate::{StopReceiver, Task, TaskId}; + +#[derive(Debug)] +pub struct SealedL2BlockUpdaterTask { + pub number_updater: SealedL2BlockNumber, + pub pool: ConnectionPool, +} + +#[async_trait::async_trait] +impl Task for SealedL2BlockUpdaterTask { + fn id(&self) -> TaskId { + "api_sealed_l2_block_updater_task".into() + } + + async fn run(self: Box, mut stop_receiver: StopReceiver) -> anyhow::Result<()> { + // Chosen to be significantly smaller than the interval between L2 blocks, but larger than + // the latency of getting the latest sealed L2 block number from Postgres. If the API server + // processes enough requests, information about the latest sealed L2 block will be updated + // by reporting block difference metrics, so the actual update lag would be much smaller than this value. + const UPDATE_INTERVAL: Duration = Duration::from_millis(25); + + while !*stop_receiver.0.borrow_and_update() { + let mut connection = self.pool.connection_tagged("api").await.unwrap(); + let Some(last_sealed_l2_block) = + connection.blocks_dal().get_sealed_l2_block_number().await? + else { + tokio::time::sleep(UPDATE_INTERVAL).await; + continue; + }; + drop(connection); + + self.number_updater.update(last_sealed_l2_block); + + if tokio::time::timeout(UPDATE_INTERVAL, stop_receiver.0.changed()) + .await + .is_ok() + { + break; + } + } + + Ok(()) + } +} diff --git a/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs b/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs index a09938055fa..ba1a69e23bb 100644 --- a/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs +++ b/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs @@ -32,6 +32,7 @@ pub struct PostgresStorageCachesConfig { pub factory_deps_cache_size: u64, pub initial_writes_cache_size: u64, pub latest_values_cache_size: u64, + pub latest_values_max_block_lag: u32, } /// Wiring layer for the `TxSender`. 
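As a hedged sketch of how the new `latest_values_max_block_lag` field travels from `PostgresStorageCachesConfig` into the values-cache update task: only the struct fields and the two methods visible in this file's hunks are assumed, the numbers are illustrative rather than defaults, and `replica_pool` stands in for the replica pool resource obtained during wiring.

    // Sketch, not part of the diff; mirrors the wiring in `TxSenderLayer::wire` below.
    let config = PostgresStorageCachesConfig {
        factory_deps_cache_size: 128 << 20,   // illustrative capacity, in bytes
        initial_writes_cache_size: 32 << 20,  // illustrative capacity, in bytes
        latest_values_cache_size: 128 << 20,  // 0 disables the values cache entirely
        latest_values_max_block_lag: 20,      // illustrative; bounds how far the values cache may lag
    };
    let mut storage_caches = PostgresStorageCaches::new(
        config.factory_deps_cache_size,
        config.initial_writes_cache_size,
    );
    // New in this PR: the values-cache update task also receives the max block lag.
    let update_task = storage_caches.configure_storage_values_cache(
        config.latest_values_cache_size,
        config.latest_values_max_block_lag,
        replica_pool.clone(),
    );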
@@ -133,10 +134,13 @@ impl WiringLayer for TxSenderLayer { PostgresStorageCaches::new(factory_deps_capacity, initial_writes_capacity); let postgres_storage_caches_task = if values_capacity > 0 { - Some( - storage_caches - .configure_storage_values_cache(values_capacity, replica_pool.clone()), - ) + let update_task = storage_caches.configure_storage_values_cache( + values_capacity, + self.postgres_storage_caches_config + .latest_values_max_block_lag, + replica_pool.clone(), + ); + Some(update_task) } else { None }; diff --git a/core/node/node_framework/src/service/error.rs b/core/node/node_framework/src/service/error.rs index 890cc6b7d4b..66a1c13e873 100644 --- a/core/node/node_framework/src/service/error.rs +++ b/core/node/node_framework/src/service/error.rs @@ -1,20 +1,41 @@ +use std::fmt; + use crate::{task::TaskId, wiring_layer::WiringError}; /// An error that can occur during the task lifecycle. #[derive(Debug, thiserror::Error)] pub enum TaskError { - #[error("Task {0} failed: {1}")] + #[error("Task {0} failed: {1:#}")] TaskFailed(TaskId, anyhow::Error), #[error("Task {0} panicked: {1}")] TaskPanicked(TaskId, String), #[error("Shutdown for task {0} timed out")] TaskShutdownTimedOut(TaskId), - #[error("Shutdown hook {0} failed: {1}")] + #[error("Shutdown hook {0} failed: {1:#}")] ShutdownHookFailed(TaskId, anyhow::Error), #[error("Shutdown hook {0} timed out")] ShutdownHookTimedOut(TaskId), } +/// Wrapper of a list of errors with a reasonable formatting. +pub struct TaskErrors(pub Vec); + +impl From> for TaskErrors { + fn from(errs: Vec) -> Self { + Self(errs) + } +} + +impl fmt::Debug for TaskErrors { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + self.0 + .iter() + .map(|err| format!("{err:#}")) + .collect::>() + .fmt(f) + } +} + /// An error that can occur during the service lifecycle. 
#[derive(Debug, thiserror::Error)] pub enum ZkStackServiceError { @@ -25,5 +46,5 @@ pub enum ZkStackServiceError { #[error("One or more wiring layers failed to initialize: {0:?}")] Wiring(Vec<(String, WiringError)>), #[error("One or more tasks failed: {0:?}")] - Task(Vec), + Task(TaskErrors), } diff --git a/core/node/node_framework/src/service/mod.rs b/core/node/node_framework/src/service/mod.rs index b6d42009354..00e50f7dc3b 100644 --- a/core/node/node_framework/src/service/mod.rs +++ b/core/node/node_framework/src/service/mod.rs @@ -171,7 +171,7 @@ impl ZkStackService { if self.errors.is_empty() { Ok(()) } else { - Err(ZkStackServiceError::Task(self.errors)) + Err(ZkStackServiceError::Task(self.errors.into())) } } diff --git a/core/node/node_sync/Cargo.toml b/core/node/node_sync/Cargo.toml index 27b07fec621..b10cdca8a82 100644 --- a/core/node/node_sync/Cargo.toml +++ b/core/node/node_sync/Cargo.toml @@ -44,3 +44,4 @@ zksync_node_test_utils.workspace = true assert_matches.workspace = true once_cell.workspace = true test-casing.workspace = true +backon.workspace = true diff --git a/core/node/node_sync/src/external_io.rs b/core/node/node_sync/src/external_io.rs index 6075ff048bf..1be7e00543f 100644 --- a/core/node/node_sync/src/external_io.rs +++ b/core/node/node_sync/src/external_io.rs @@ -15,6 +15,7 @@ use zksync_state_keeper::{ updates::UpdatesManager, }; use zksync_types::{ + block::UnsealedL1BatchHeader, protocol_upgrade::ProtocolUpgradeTx, protocol_version::{ProtocolSemanticVersion, VersionPatch}, L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersionId, Transaction, H256, @@ -103,6 +104,63 @@ impl ExternalIO { } }) } + + async fn ensure_protocol_version_is_saved( + &self, + protocol_version: ProtocolVersionId, + ) -> anyhow::Result<()> { + let base_system_contract_hashes = self + .pool + .connection_tagged("sync_layer") + .await? + .protocol_versions_dal() + .get_base_system_contract_hashes_by_version_id(protocol_version as u16) + .await?; + if base_system_contract_hashes.is_some() { + return Ok(()); + } + tracing::info!("Fetching protocol version {protocol_version:?} from the main node"); + + let protocol_version = self + .main_node_client + .fetch_protocol_version(protocol_version) + .await + .context("failed to fetch protocol version from the main node")? + .context("protocol version is missing on the main node")?; + let minor = protocol_version + .minor_version() + .context("Missing minor protocol version")?; + let bootloader_code_hash = protocol_version + .bootloader_code_hash() + .context("Missing bootloader code hash")?; + let default_account_code_hash = protocol_version + .default_account_code_hash() + .context("Missing default account code hash")?; + let evm_emulator_code_hash = protocol_version.evm_emulator_code_hash(); + let l2_system_upgrade_tx_hash = protocol_version.l2_system_upgrade_tx_hash(); + self.pool + .connection_tagged("sync_layer") + .await? 
+ .protocol_versions_dal() + .save_protocol_version( + ProtocolSemanticVersion { + minor: minor + .try_into() + .context("cannot convert protocol version")?, + patch: VersionPatch(0), + }, + protocol_version.timestamp, + Default::default(), // verification keys are unused for EN + BaseSystemContractsHashes { + bootloader: bootloader_code_hash, + default_aa: default_account_code_hash, + evm_emulator: evm_emulator_code_hash, + }, + l2_system_upgrade_tx_hash, + ) + .await?; + Ok(()) + } } impl IoSealCriteria for ExternalIO { @@ -154,6 +212,14 @@ impl StateKeeperIO for ExternalIO { ) })?; let Some(mut pending_l2_block_header) = pending_l2_block_header else { + tracing::info!( + l1_batch_number = %cursor.l1_batch, + "No pending L2 blocks found; pruning unsealed batch if exists as we need at least one L2 block to initialize" + ); + storage + .blocks_dal() + .delete_unsealed_l1_batch(cursor.l1_batch - 1) + .await?; return Ok((cursor, None)); }; @@ -185,7 +251,7 @@ impl StateKeeperIO for ExternalIO { pending_l2_block_header.set_protocol_version(protocol_version); } - let (system_env, l1_batch_env) = self + let (system_env, l1_batch_env, pubdata_params) = self .l1_batch_params_provider .load_l1_batch_params( &mut storage, @@ -200,7 +266,15 @@ impl StateKeeperIO for ExternalIO { cursor.l1_batch ) })?; - let data = load_pending_batch(&mut storage, system_env, l1_batch_env) + storage + .blocks_dal() + .ensure_unsealed_l1_batch_exists( + l1_batch_env + .clone() + .into_unsealed_header(Some(system_env.version)), + ) + .await?; + let data = load_pending_batch(&mut storage, system_env, l1_batch_env, pubdata_params) .await .with_context(|| { format!( @@ -236,6 +310,21 @@ impl StateKeeperIO for ExternalIO { "L2 block number mismatch: expected {}, got {first_l2_block_number}", cursor.next_l2_block ); + + self.ensure_protocol_version_is_saved(params.protocol_version) + .await?; + self.pool + .connection_tagged("sync_layer") + .await? + .blocks_dal() + .insert_l1_batch(UnsealedL1BatchHeader { + number: cursor.l1_batch, + timestamp: params.first_l2_block.timestamp, + protocol_version: Some(params.protocol_version), + fee_address: params.operator_address, + fee_input: params.fee_input, + }) + .await?; return Ok(Some(params)); } other => { @@ -321,63 +410,36 @@ impl StateKeeperIO for ExternalIO { .connection_tagged("sync_layer") .await? .protocol_versions_dal() - .load_base_system_contracts_by_version_id(protocol_version as u16) - .await - .context("failed loading base system contracts")?; - - if let Some(contracts) = base_system_contracts { - return Ok(contracts); - } - tracing::info!("Fetching protocol version {protocol_version:?} from the main node"); - - let protocol_version = self - .main_node_client - .fetch_protocol_version(protocol_version) - .await - .context("failed to fetch protocol version from the main node")? - .context("protocol version is missing on the main node")?; - let minor = protocol_version - .minor_version() - .context("Missing minor protocol version")?; - let bootloader_code_hash = protocol_version - .bootloader_code_hash() - .context("Missing bootloader code hash")?; - let default_account_code_hash = protocol_version - .default_account_code_hash() - .context("Missing default account code hash")?; - let l2_system_upgrade_tx_hash = protocol_version.l2_system_upgrade_tx_hash(); - self.pool - .connection_tagged("sync_layer") + .get_base_system_contract_hashes_by_version_id(protocol_version as u16) .await? 
- .protocol_versions_dal() - .save_protocol_version( - ProtocolSemanticVersion { - minor: minor - .try_into() - .context("cannot convert protocol version")?, - patch: VersionPatch(0), - }, - protocol_version.timestamp, - Default::default(), // verification keys are unused for EN - BaseSystemContractsHashes { - bootloader: bootloader_code_hash, - default_aa: default_account_code_hash, - }, - l2_system_upgrade_tx_hash, - ) - .await?; + .with_context(|| { + format!("Cannot load base system contracts' hashes for {protocol_version:?}. They should already be present") + })?; let bootloader = self - .get_base_system_contract(bootloader_code_hash, cursor.next_l2_block) + .get_base_system_contract(base_system_contracts.bootloader, cursor.next_l2_block) .await .with_context(|| format!("cannot fetch bootloader code for {protocol_version:?}"))?; let default_aa = self - .get_base_system_contract(default_account_code_hash, cursor.next_l2_block) + .get_base_system_contract(base_system_contracts.default_aa, cursor.next_l2_block) .await .with_context(|| format!("cannot fetch default AA code for {protocol_version:?}"))?; + let evm_emulator = if let Some(hash) = base_system_contracts.evm_emulator { + Some( + self.get_base_system_contract(hash, cursor.next_l2_block) + .await + .with_context(|| { + format!("cannot fetch EVM emulator code for {protocol_version:?}") + })?, + ) + } else { + None + }; + Ok(BaseSystemContracts { bootloader, default_aa, + evm_emulator, }) } @@ -414,3 +476,98 @@ impl StateKeeperIO for ExternalIO { Ok(hash) } } + +#[cfg(test)] +mod tests { + use std::time::Duration; + + use zksync_dal::{ConnectionPool, CoreDal}; + use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; + use zksync_state_keeper::{io::L1BatchParams, L2BlockParams, StateKeeperIO}; + use zksync_types::{ + api, fee_model::BatchFeeInput, L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersionId, + H256, + }; + + use crate::{sync_action::SyncAction, testonly::MockMainNodeClient, ActionQueue, ExternalIO}; + + #[tokio::test] + async fn insert_batch_with_protocol_version() { + // Whenever ExternalIO inserts an unsealed batch into DB it should populate it with protocol + // version and make sure that it is present in the DB (i.e. fetch it from main node if not). 
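// Flow under test, inferred from the hunks above rather than stated by the PR:
// wait_for_new_batch_params() calls ensure_protocol_version_is_saved(), which falls back to
// fetch_protocol_version() on the main node if the version is absent locally, and then persists
// the open batch via blocks_dal().insert_l1_batch(UnsealedL1BatchHeader { .. }). The assertions
// below read both records back from Postgres.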
+ let pool = ConnectionPool::test_pool().await; + let mut conn = pool.connection().await.unwrap(); + insert_genesis_batch(&mut conn, &GenesisParams::mock()) + .await + .unwrap(); + let (actions_sender, action_queue) = ActionQueue::new(); + let mut client = MockMainNodeClient::default(); + let next_protocol_version = api::ProtocolVersion { + minor_version: Some(ProtocolVersionId::next() as u16), + timestamp: 1, + bootloader_code_hash: Some(H256::repeat_byte(1)), + default_account_code_hash: Some(H256::repeat_byte(1)), + evm_emulator_code_hash: Some(H256::repeat_byte(1)), + ..api::ProtocolVersion::default() + }; + client.insert_protocol_version(next_protocol_version.clone()); + let mut external_io = ExternalIO::new( + pool.clone(), + action_queue, + Box::new(client), + L2ChainId::default(), + ) + .unwrap(); + + let (cursor, _) = external_io.initialize().await.unwrap(); + let params = L1BatchParams { + protocol_version: ProtocolVersionId::next(), + validation_computational_gas_limit: u32::MAX, + operator_address: Default::default(), + fee_input: BatchFeeInput::pubdata_independent(2, 3, 4), + first_l2_block: L2BlockParams { + timestamp: 1, + virtual_blocks: 1, + }, + pubdata_params: Default::default(), + }; + actions_sender + .push_action_unchecked(SyncAction::OpenBatch { + params: params.clone(), + number: L1BatchNumber(1), + first_l2_block_number: L2BlockNumber(1), + }) + .await + .unwrap(); + let fetched_params = external_io + .wait_for_new_batch_params(&cursor, Duration::from_secs(10)) + .await + .unwrap() + .unwrap(); + assert_eq!(fetched_params, params); + + // Verify that the next protocol version is in DB + let fetched_protocol_version = conn + .protocol_versions_dal() + .get_protocol_version_with_latest_patch(ProtocolVersionId::next()) + .await + .unwrap() + .unwrap(); + assert_eq!( + fetched_protocol_version.version.minor as u16, + next_protocol_version.minor_version.unwrap() + ); + + // Verify that the unsealed batch has protocol version + let unsealed_batch = conn + .blocks_dal() + .get_unsealed_l1_batch() + .await + .unwrap() + .unwrap(); + assert_eq!( + unsealed_batch.protocol_version, + Some(fetched_protocol_version.version.minor) + ); + } +} diff --git a/core/node/node_sync/src/fetcher.rs b/core/node/node_sync/src/fetcher.rs index 08e3d426243..9c76d1d93ca 100644 --- a/core/node/node_sync/src/fetcher.rs +++ b/core/node/node_sync/src/fetcher.rs @@ -1,3 +1,4 @@ +use anyhow::Context; use zksync_dal::{Connection, Core, CoreDal}; use zksync_shared_metrics::{TxStage, APP_METRICS}; use zksync_state_keeper::io::{common::IoCursor, L1BatchParams, L2BlockParams}; @@ -78,6 +79,14 @@ impl TryFrom for FetchedBlock { )); } + let pubdata_params = if block.protocol_version.is_pre_gateway() { + block.pubdata_params.unwrap_or_default() + } else { + block + .pubdata_params + .context("Missing `pubdata_params` for post-gateway payload")? 
+ }; + Ok(Self { number: block.number, l1_batch_number: block.l1_batch_number, @@ -94,7 +103,7 @@ impl TryFrom for FetchedBlock { .into_iter() .map(FetchedTransaction::new) .collect(), - pubdata_params: block.pubdata_params.unwrap_or_default(), + pubdata_params, }) } } diff --git a/core/node/node_sync/src/genesis.rs b/core/node/node_sync/src/genesis.rs index ccc26b417e9..c5d4869175d 100644 --- a/core/node/node_sync/src/genesis.rs +++ b/core/node/node_sync/src/genesis.rs @@ -38,6 +38,7 @@ async fn create_genesis_params( let base_system_contracts_hashes = BaseSystemContractsHashes { bootloader: config.bootloader_hash.context("Genesis is not finished")?, default_aa: config.default_aa_hash.context("Genesis is not finished")?, + evm_emulator: config.evm_emulator_hash, }; if zksync_chain_id != config.l2_chain_id { @@ -47,10 +48,11 @@ async fn create_genesis_params( // Load the list of addresses that are known to contain system contracts at any point in time. // Not every of these addresses is guaranteed to be present in the genesis state, but we'll iterate through // them and try to fetch the contract bytecode for each of them. - let system_contract_addresses: Vec<_> = get_system_smart_contracts() - .into_iter() - .map(|contract| *contract.account_id.address()) - .collect(); + let system_contract_addresses: Vec<_> = + get_system_smart_contracts(config.evm_emulator_hash.is_some()) + .into_iter() + .map(|contract| *contract.account_id.address()) + .collect(); // These have to be *initial* base contract hashes of main node // (those that were used during genesis), not necessarily the current ones. @@ -103,6 +105,18 @@ async fn fetch_base_system_contracts( .fetch_system_contract_by_hash(contract_hashes.default_aa) .await? .context("default AA bytecode is missing on main node")?; + let evm_emulator = if let Some(hash) = contract_hashes.evm_emulator { + let bytes = client + .fetch_system_contract_by_hash(hash) + .await? + .context("EVM emulator bytecode is missing on main node")?; + Some(SystemContractCode { + code: zksync_utils::bytes_to_be_words(bytes), + hash, + }) + } else { + None + }; Ok(BaseSystemContracts { bootloader: SystemContractCode { code: zksync_utils::bytes_to_be_words(bootloader_bytecode), @@ -112,5 +126,6 @@ async fn fetch_base_system_contracts( code: zksync_utils::bytes_to_be_words(default_aa_bytecode), hash: contract_hashes.default_aa, }, + evm_emulator, }) } diff --git a/core/node/node_sync/src/sync_action.rs b/core/node/node_sync/src/sync_action.rs index 4505dbb93ab..897abfafb2a 100644 --- a/core/node/node_sync/src/sync_action.rs +++ b/core/node/node_sync/src/sync_action.rs @@ -33,6 +33,18 @@ impl ActionQueueSender { Ok(()) } + /// Pushes a single action into the queue without checking validity of the sequence. + /// + /// Useful to simulate situations where only a part of the sequence was executed on the node. + #[cfg(test)] + pub async fn push_action_unchecked(&self, action: SyncAction) -> anyhow::Result<()> { + self.0 + .send(action) + .await + .map_err(|_| anyhow::anyhow!("node action processor stopped"))?; + Ok(()) + } + /// Checks whether the action sequence is valid. /// Returned error is meant to be used as a panic message, since an invalid sequence represents an unrecoverable /// error. This function itself does not panic for the ease of testing. 
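To make the intended use of the new test-only helper concrete, a hedged usage sketch: it assumes the `open_l1_batch` test helper used by the tests later in this diff, and must run under `#[cfg(test)]` since `push_action_unchecked` is only compiled there.

    // Sketch only; mirrors how the tests below drive the queue.
    let (sender, _queue) = ActionQueue::new();
    // Checked path: the action sequence is validated before being enqueued.
    sender
        .push_actions(vec![open_l1_batch(1, 1, 1), SyncAction::SealL2Block])
        .await
        .unwrap();
    // Unchecked path: skips validation to simulate a partially executed sequence,
    // e.g. a batch that was opened but never received any blocks before a restart.
    sender
        .push_action_unchecked(open_l1_batch(2, 2, 3))
        .await
        .unwrap();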
diff --git a/core/node/node_sync/src/sync_state.rs b/core/node/node_sync/src/sync_state.rs index e061ff7da01..f8a2fe00ec0 100644 --- a/core/node/node_sync/src/sync_state.rs +++ b/core/node/node_sync/src/sync_state.rs @@ -173,6 +173,7 @@ impl CheckHealth for SyncState { Health::from(&*self.0.borrow()) } } + impl SyncStateInner { fn is_synced(&self) -> (bool, Option) { if let (Some(main_node_block), Some(local_block)) = (self.main_node_block, self.local_block) diff --git a/core/node/node_sync/src/tests.rs b/core/node/node_sync/src/tests.rs index 8582bbe9374..172a00e8c14 100644 --- a/core/node/node_sync/src/tests.rs +++ b/core/node/node_sync/src/tests.rs @@ -2,6 +2,7 @@ use std::{iter, sync::Arc, time::Duration}; +use backon::{ConstantBuilder, Retryable}; use test_casing::test_casing; use tokio::{sync::watch, task::JoinHandle}; use zksync_contracts::BaseSystemContractsHashes; @@ -18,7 +19,7 @@ use zksync_state_keeper::{ }; use zksync_types::{ api, - block::L2BlockHasher, + block::{L2BlockHasher, UnsealedL1BatchHeader}, fee_model::{BatchFeeInput, PubdataIndependentBatchFeeModelInput}, snapshots::SnapshotRecoveryStatus, Address, L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersionId, Transaction, H256, @@ -62,12 +63,12 @@ impl MockMainNodeClient { l2_fair_gas_price: 3, fair_pubdata_price: Some(24), base_system_contracts_hashes: BaseSystemContractsHashes::default(), - pubdata_params: Default::default(), operator_address: Address::repeat_byte(2), transactions: Some(vec![]), virtual_blocks: Some(0), hash: Some(snapshot.l2_block_hash), protocol_version: ProtocolVersionId::latest(), + pubdata_params: Default::default(), }; Self { @@ -107,7 +108,9 @@ impl StateKeeperHandles { let sync_state = SyncState::default(); let (persistence, l2_block_sealer) = - StateKeeperPersistence::new(pool.clone(), Some(Address::repeat_byte(1)), 5); + StateKeeperPersistence::new(pool.clone(), Some(Address::repeat_byte(1)), 5) + .await + .unwrap(); let tree_writes_persistence = TreeWritesPersistence::new(pool.clone()); let output_handler = OutputHandler::new(Box::new(persistence.with_tx_insertion())) .with_handler(Box::new(tree_writes_persistence)) @@ -306,6 +309,7 @@ async fn external_io_works_without_local_protocol_version(snapshot_recovery: boo timestamp: snapshot.l2_block_timestamp + 1, bootloader_code_hash: Some(H256::repeat_byte(1)), default_account_code_hash: Some(H256::repeat_byte(1)), + evm_emulator_code_hash: Some(H256::repeat_byte(1)), ..api::ProtocolVersion::default() }; client.insert_protocol_version(next_protocol_version.clone()); @@ -347,6 +351,13 @@ async fn external_io_works_without_local_protocol_version(snapshot_recovery: boo next_protocol_version.default_account_code_hash.unwrap() ); + assert_eq!( + persisted_protocol_version + .base_system_contracts_hashes + .evm_emulator, + next_protocol_version.evm_emulator_code_hash + ); + let l2_block = storage .blocks_dal() .get_l2_block_header(snapshot.l2_block_number + 1) @@ -646,3 +657,101 @@ async fn external_io_with_multiple_l1_batches() { assert_eq!(fictive_l2_block.timestamp, 2); assert_eq!(fictive_l2_block.l2_tx_count, 0); } + +async fn wait_for_batch_to_be_open( + pool: &ConnectionPool, + number: L1BatchNumber, +) -> anyhow::Result { + (|| async { + let mut storage = pool.connection().await.unwrap(); + let unsealed_batch = storage.blocks_dal().get_unsealed_l1_batch().await?; + + if let Some(unsealed_batch) = unsealed_batch { + if unsealed_batch.number == number { + Ok(unsealed_batch) + } else { + Err(anyhow::anyhow!("L1 batch #{number} is not open 
yet")) + } + } else { + Err(anyhow::anyhow!("No unsealed L1 batch found yet")) + } + }) + .retry( + &ConstantBuilder::default() + .with_delay(Duration::from_millis(200)) + .with_max_times(20), + ) + .await +} + +#[tokio::test] +async fn external_io_empty_unsealed_batch() { + let pool = ConnectionPool::::test_pool().await; + let mut storage = pool.connection().await.unwrap(); + ensure_genesis(&mut storage).await; + drop(storage); + + let open_batch_one = open_l1_batch(1, 1, 1); + let tx = create_l2_transaction(10, 100); + let tx_hash = tx.hash(); + let tx = FetchedTransaction::new(tx.into()); + let open_batch_two = open_l1_batch(2, 2, 3); + let fictive_l2_block = SyncAction::L2Block { + params: L2BlockParams { + timestamp: 2, + virtual_blocks: 0, + }, + number: L2BlockNumber(2), + }; + let actions1 = vec![open_batch_one, tx.into(), SyncAction::SealL2Block]; + let actions2 = vec![fictive_l2_block, SyncAction::SealBatch]; + + let (actions_sender, action_queue) = ActionQueue::new(); + let client = MockMainNodeClient::default(); + let state_keeper = + StateKeeperHandles::new(pool.clone(), client, action_queue, &[&[tx_hash]]).await; + actions_sender.push_actions(actions1).await.unwrap(); + actions_sender.push_actions(actions2).await.unwrap(); + // Unchecked insert of batch #2 to simulate restart in the middle of processing an action sequence + // In other words batch #2 is inserted completely empty with no blocks/txs present in it + actions_sender + .push_action_unchecked(open_batch_two.clone()) + .await + .unwrap(); + // Wait until the L2 block is sealed. + state_keeper.wait_for_local_block(L2BlockNumber(2)).await; + + // Wait until L1 batch #2 is opened and persisted. + let unsealed_batch = wait_for_batch_to_be_open(&pool, L1BatchNumber(2)) + .await + .unwrap(); + assert_eq!(unsealed_batch.number, L1BatchNumber(2)); + assert_eq!(unsealed_batch.timestamp, 2); + + // Prepare the rest of batch #2 + let tx = create_l2_transaction(20, 200); + let tx_hash = tx.hash(); + let tx = FetchedTransaction::new(tx.into()); + let fictive_l2_block = SyncAction::L2Block { + params: L2BlockParams { + timestamp: 4, + virtual_blocks: 0, + }, + number: L2BlockNumber(4), + }; + let actions1 = vec![open_batch_two, tx.into(), SyncAction::SealL2Block]; + let actions2 = vec![fictive_l2_block, SyncAction::SealBatch]; + + // Restart state keeper + let (actions_sender, action_queue) = ActionQueue::new(); + let client = MockMainNodeClient::default(); + let state_keeper = + StateKeeperHandles::new(pool.clone(), client, action_queue, &[&[tx_hash]]).await; + actions_sender.push_actions(actions1).await.unwrap(); + actions_sender.push_actions(actions2).await.unwrap(); + + let hash_task = tokio::spawn(mock_l1_batch_hash_computation(pool.clone(), 1)); + // Wait until the block #4 is sealed. 
+ state_keeper.wait_for_local_block(L2BlockNumber(4)).await; + hash_task.await.unwrap(); +} diff --git a/core/node/proof_data_handler/Cargo.toml b/core/node/proof_data_handler/Cargo.toml index 82063b23fdb..e2ddc972a2f 100644 --- a/core/node/proof_data_handler/Cargo.toml +++ b/core/node/proof_data_handler/Cargo.toml @@ -17,9 +17,12 @@ zksync_dal.workspace = true zksync_object_store.workspace = true zksync_prover_interface.workspace = true zksync_types.workspace = true +zksync_vm_executor.workspace = true +zksync_utils.workspace = true anyhow.workspace = true axum.workspace = true tokio.workspace = true +tower-http = { workspace = true, features = ["compression-zstd", "decompression-zstd"] } tracing.workspace = true [dev-dependencies] diff --git a/core/node/proof_data_handler/src/errors.rs b/core/node/proof_data_handler/src/errors.rs index 15ef393294a..7d0e33ea0a3 100644 --- a/core/node/proof_data_handler/src/errors.rs +++ b/core/node/proof_data_handler/src/errors.rs @@ -6,6 +6,7 @@ use zksync_dal::DalError; use zksync_object_store::ObjectStoreError; pub(crate) enum RequestProcessorError { + GeneralError(String), ObjectStore(ObjectStoreError), Dal(DalError), } @@ -19,24 +20,26 @@ impl From for RequestProcessorError { impl IntoResponse for RequestProcessorError { fn into_response(self) -> Response { let (status_code, message) = match self { - RequestProcessorError::ObjectStore(err) => { + Self::GeneralError(err) => { + tracing::error!("Error: {:?}", err); + ( + StatusCode::INTERNAL_SERVER_ERROR, + "An internal error occurred".to_owned(), + ) + } + Self::ObjectStore(err) => { tracing::error!("GCS error: {:?}", err); ( StatusCode::BAD_GATEWAY, "Failed fetching/saving from GCS".to_owned(), ) } - RequestProcessorError::Dal(err) => { + Self::Dal(err) => { tracing::error!("Sqlx error: {:?}", err); - match err.inner() { - zksync_dal::SqlxError::RowNotFound => { - (StatusCode::NOT_FOUND, "Non existing L1 batch".to_owned()) - } - _ => ( - StatusCode::BAD_GATEWAY, - "Failed fetching/saving from db".to_owned(), - ), - } + ( + StatusCode::BAD_GATEWAY, + "Failed fetching/saving from db".to_owned(), + ) } }; (status_code, message).into_response() diff --git a/core/node/proof_data_handler/src/lib.rs b/core/node/proof_data_handler/src/lib.rs index 618a786ea65..e014fca15d7 100644 --- a/core/node/proof_data_handler/src/lib.rs +++ b/core/node/proof_data_handler/src/lib.rs @@ -1,7 +1,7 @@ use std::{net::SocketAddr, sync::Arc}; use anyhow::Context as _; -use axum::{extract::Path, routing::post, Json, Router}; +use axum::{extract::Path, http::StatusCode, response::IntoResponse, routing::post, Json, Router}; use request_processor::RequestProcessor; use tee_request_processor::TeeRequestProcessor; use tokio::sync::watch; @@ -12,7 +12,7 @@ use zksync_prover_interface::api::{ ProofGenerationDataRequest, RegisterTeeAttestationRequest, SubmitProofRequest, SubmitTeeProofRequest, TeeProofGenerationDataRequest, }; -use zksync_types::commitment::L1BatchCommitmentMode; +use zksync_types::{commitment::L1BatchCommitmentMode, L2ChainId}; #[cfg(test)] mod tests; @@ -27,11 +27,18 @@ pub async fn run_server( blob_store: Arc, connection_pool: ConnectionPool, commitment_mode: L1BatchCommitmentMode, + l2_chain_id: L2ChainId, mut stop_receiver: watch::Receiver, ) -> anyhow::Result<()> { let bind_address = SocketAddr::from(([0, 0, 0, 0], config.http_port)); - tracing::debug!("Starting proof data handler server on {bind_address}"); - let app = create_proof_processing_router(blob_store, connection_pool, config, commitment_mode); + 
tracing::info!("Starting proof data handler server on {bind_address}"); + let app = create_proof_processing_router( + blob_store, + connection_pool, + config, + commitment_mode, + l2_chain_id, + ); let listener = tokio::net::TcpListener::bind(bind_address) .await @@ -54,6 +61,7 @@ fn create_proof_processing_router( connection_pool: ConnectionPool, config: ProofDataHandlerConfig, commitment_mode: L1BatchCommitmentMode, + l2_chain_id: L2ChainId, ) -> Router { let get_proof_gen_processor = RequestProcessor::new( blob_store.clone(), @@ -86,9 +94,9 @@ fn create_proof_processing_router( ), ); - if config.tee_support { + if config.tee_config.tee_support { let get_tee_proof_gen_processor = - TeeRequestProcessor::new(blob_store, connection_pool, config.clone()); + TeeRequestProcessor::new(blob_store, connection_pool, config.clone(), l2_chain_id); let submit_tee_proof_processor = get_tee_proof_gen_processor.clone(); let register_tee_attestation_processor = get_tee_proof_gen_processor.clone(); @@ -96,9 +104,15 @@ fn create_proof_processing_router( "/tee/proof_inputs", post( move |payload: Json| async move { - get_tee_proof_gen_processor + let result = get_tee_proof_gen_processor .get_proof_generation_data(payload) - .await + .await; + + match result { + Ok(Some(data)) => (StatusCode::OK, data).into_response(), + Ok(None) => { StatusCode::NO_CONTENT.into_response()}, + Err(e) => e.into_response(), + } }, ), ) @@ -125,4 +139,6 @@ fn create_proof_processing_router( } router + .layer(tower_http::compression::CompressionLayer::new()) + .layer(tower_http::decompression::RequestDecompressionLayer::new().zstd(true)) } diff --git a/core/node/proof_data_handler/src/request_processor.rs b/core/node/proof_data_handler/src/request_processor.rs index eea3925bdd7..89304724a7c 100644 --- a/core/node/proof_data_handler/src/request_processor.rs +++ b/core/node/proof_data_handler/src/request_processor.rs @@ -244,11 +244,10 @@ impl RequestProcessor { || bootloader_heap_initial_content != bootloader_heap_initial_content_from_prover { - let server_values = format!("events_queue_state = {events_queue_state}, bootloader_heap_initial_content = {bootloader_heap_initial_content}"); - let prover_values = format!("events_queue_state = {events_queue_state_from_prover}, bootloader_heap_initial_content = {bootloader_heap_initial_content_from_prover}"); panic!( - "Auxilary output doesn't match, server values: {} prover values: {}", - server_values, prover_values + "Auxilary output doesn't match\n\ + server values: events_queue_state = {events_queue_state}, bootloader_heap_initial_content = {bootloader_heap_initial_content}\n\ + prover values: events_queue_state = {events_queue_state_from_prover}, bootloader_heap_initial_content = {bootloader_heap_initial_content_from_prover}", ); } @@ -261,8 +260,9 @@ impl RequestProcessor { .system_logs .iter() .find_map(|log| { - (log.0.key == H256::from_low_u64_be(STATE_DIFF_HASH_KEY_PRE_GATEWAY)) - .then_some(log.0.value) + (log.0.key + == H256::from_low_u64_be(STATE_DIFF_HASH_KEY_PRE_GATEWAY as u64)) + .then_some(log.0.value) }) .expect("Failed to get state_diff_hash from system logs") } else { diff --git a/core/node/proof_data_handler/src/tee_request_processor.rs b/core/node/proof_data_handler/src/tee_request_processor.rs index 4ae1a5026f1..8e06d0c26bc 100644 --- a/core/node/proof_data_handler/src/tee_request_processor.rs +++ b/core/node/proof_data_handler/src/tee_request_processor.rs @@ -4,11 +4,17 @@ use axum::{extract::Path, Json}; use zksync_config::configs::ProofDataHandlerConfig; use 
zksync_dal::{ConnectionPool, Core, CoreDal}; use zksync_object_store::{ObjectStore, ObjectStoreError}; -use zksync_prover_interface::api::{ - RegisterTeeAttestationRequest, RegisterTeeAttestationResponse, SubmitProofResponse, - SubmitTeeProofRequest, TeeProofGenerationDataRequest, TeeProofGenerationDataResponse, +use zksync_prover_interface::{ + api::{ + RegisterTeeAttestationRequest, RegisterTeeAttestationResponse, SubmitProofResponse, + SubmitTeeProofRequest, TeeProofGenerationDataRequest, TeeProofGenerationDataResponse, + }, + inputs::{ + TeeVerifierInput, V1TeeVerifierInput, VMRunWitnessInputData, WitnessInputMerklePaths, + }, }; -use zksync_types::{tee_types::TeeType, L1BatchNumber}; +use zksync_types::{tee_types::TeeType, L1BatchNumber, L2ChainId}; +use zksync_vm_executor::storage::L1BatchParamsProvider; use crate::errors::RequestProcessorError; @@ -17,6 +23,7 @@ pub(crate) struct TeeRequestProcessor { blob_store: Arc<dyn ObjectStore>, pool: ConnectionPool<Core>, config: ProofDataHandlerConfig, + l2_chain_id: L2ChainId, } impl TeeRequestProcessor { @@ -24,45 +31,52 @@ impl TeeRequestProcessor { blob_store: Arc<dyn ObjectStore>, pool: ConnectionPool<Core>, config: ProofDataHandlerConfig, + l2_chain_id: L2ChainId, ) -> Self { Self { blob_store, pool, config, + l2_chain_id, } } pub(crate) async fn get_proof_generation_data( &self, request: Json<TeeProofGenerationDataRequest>, - ) -> Result<Json<TeeProofGenerationDataResponse>, RequestProcessorError> { + ) -> Result<Option<Json<TeeProofGenerationDataResponse>>, RequestProcessorError> { tracing::info!("Received request for proof generation data: {:?}", request); - let mut min_batch_number: Option<L1BatchNumber> = None; + let mut min_batch_number = self.config.tee_config.first_tee_processed_batch; let mut missing_range: Option<(L1BatchNumber, L1BatchNumber)> = None; let result = loop { - let l1_batch_number = match self + let Some(l1_batch_number) = self .lock_batch_for_proving(request.tee_type, min_batch_number) .await?
- { - Some(number) => number, - None => break Ok(Json(TeeProofGenerationDataResponse(None))), + else { + // No job available + return Ok(None); }; - match self.blob_store.get(l1_batch_number).await { - Ok(input) => break Ok(Json(TeeProofGenerationDataResponse(Some(Box::new(input))))), - Err(ObjectStoreError::KeyNotFound(_)) => { + match self + .tee_verifier_input_for_existing_batch(l1_batch_number) + .await + { + Ok(input) => { + break Ok(Some(Json(TeeProofGenerationDataResponse(Box::new(input))))); + } + Err(RequestProcessorError::ObjectStore(ObjectStoreError::KeyNotFound(_))) => { missing_range = match missing_range { Some((start, _)) => Some((start, l1_batch_number)), None => Some((l1_batch_number, l1_batch_number)), }; self.unlock_batch(l1_batch_number, request.tee_type).await?; - min_batch_number = Some(min_batch_number.unwrap_or(l1_batch_number) + 1); + min_batch_number = l1_batch_number + 1; } Err(err) => { self.unlock_batch(l1_batch_number, request.tee_type).await?; - break Err(RequestProcessorError::ObjectStore(err)); + break Err(err); } } }; @@ -78,14 +92,74 @@ impl TeeRequestProcessor { result } + #[tracing::instrument(skip(self))] + async fn tee_verifier_input_for_existing_batch( + &self, + l1_batch_number: L1BatchNumber, + ) -> Result { + let vm_run_data: VMRunWitnessInputData = self + .blob_store + .get(l1_batch_number) + .await + .map_err(RequestProcessorError::ObjectStore)?; + + let merkle_paths: WitnessInputMerklePaths = self + .blob_store + .get(l1_batch_number) + .await + .map_err(RequestProcessorError::ObjectStore)?; + + let mut connection = self + .pool + .connection_tagged("tee_request_processor") + .await + .map_err(RequestProcessorError::Dal)?; + + let l2_blocks_execution_data = connection + .transactions_dal() + .get_l2_blocks_to_execute_for_l1_batch(l1_batch_number) + .await + .map_err(RequestProcessorError::Dal)?; + + let l1_batch_params_provider = L1BatchParamsProvider::new(&mut connection) + .await + .map_err(|err| RequestProcessorError::GeneralError(err.to_string()))?; + + // In the state keeper, this value is used to reject execution. + // All batches have already been executed by State Keeper. + // This means we don't want to reject any execution, therefore we're using MAX as an allow all. + let validation_computational_gas_limit = u32::MAX; + + let (system_env, l1_batch_env, pubdata_params) = l1_batch_params_provider + .load_l1_batch_env( + &mut connection, + l1_batch_number, + validation_computational_gas_limit, + self.l2_chain_id, + ) + .await + .map_err(|err| RequestProcessorError::GeneralError(err.to_string()))? + .ok_or(RequestProcessorError::GeneralError( + "system_env, l1_batch_env missing".into(), + ))?; + + Ok(TeeVerifierInput::new(V1TeeVerifierInput { + vm_run_data, + merkle_paths, + l2_blocks_execution_data, + l1_batch_env, + system_env, + pubdata_params, + })) + } + async fn lock_batch_for_proving( &self, tee_type: TeeType, - min_batch_number: Option, + min_batch_number: L1BatchNumber, ) -> Result, RequestProcessorError> { - let result = self - .pool - .connection() + self.pool + .connection_tagged("tee_request_processor") .await? 
.tee_proof_generation_dal() .lock_batch_for_proving( @@ -93,8 +167,8 @@ impl TeeRequestProcessor { self.config.proof_generation_timeout(), min_batch_number, ) - .await?; - Ok(result) + .await + .map_err(RequestProcessorError::Dal) } async fn unlock_batch( &self, @@ -103,7 +177,7 @@ impl TeeRequestProcessor { tee_type: TeeType, ) -> Result<(), RequestProcessorError> { self.pool - .connection() + .connection_tagged("tee_request_processor") .await? .tee_proof_generation_dal() .unlock_batch(l1_batch_number, tee_type) @@ -117,7 +191,7 @@ impl TeeRequestProcessor { Json(proof): Json<SubmitTeeProofRequest>, ) -> Result<Json<SubmitProofResponse>, RequestProcessorError> { let l1_batch_number = L1BatchNumber(l1_batch_number); - let mut connection = self.pool.connection().await?; + let mut connection = self.pool.connection_tagged("tee_request_processor").await?; let mut dal = connection.tee_proof_generation_dal(); tracing::info!( @@ -143,7 +217,7 @@ impl TeeRequestProcessor { ) -> Result<Json<RegisterTeeAttestationResponse>, RequestProcessorError> { tracing::info!("Received attestation: {:?}", payload); - let mut connection = self.pool.connection().await?; + let mut connection = self.pool.connection_tagged("tee_request_processor").await?; let mut dal = connection.tee_proof_generation_dal(); dal.save_attestation(&payload.pubkey, &payload.attestation) diff --git a/core/node/proof_data_handler/src/tests.rs b/core/node/proof_data_handler/src/tests.rs index 4319fce6216..63ea087a81c 100644 --- a/core/node/proof_data_handler/src/tests.rs +++ b/core/node/proof_data_handler/src/tests.rs @@ -1,5 +1,3 @@ -use std::time::Instant; - use axum::{ body::Body, http::{self, Method, Request, StatusCode}, @@ -8,128 +6,67 @@ use axum::{ }; use serde_json::json; use tower::ServiceExt; -use zksync_basic_types::U256; -use zksync_config::configs::ProofDataHandlerConfig; -use zksync_contracts::{BaseSystemContracts, SystemContractCode}; +use zksync_basic_types::L2ChainId; +use zksync_config::configs::{ProofDataHandlerConfig, TeeConfig}; use zksync_dal::{ConnectionPool, CoreDal}; -use zksync_multivm::interface::{L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode}; use zksync_object_store::MockObjectStore; -use zksync_prover_interface::{ - api::SubmitTeeProofRequest, - inputs::{TeeVerifierInput, V1TeeVerifierInput, WitnessInputMerklePaths}, -}; -use zksync_types::{commitment::L1BatchCommitmentMode, tee_types::TeeType, L1BatchNumber, H256}; +use zksync_prover_interface::api::SubmitTeeProofRequest; +use zksync_types::{commitment::L1BatchCommitmentMode, tee_types::TeeType, L1BatchNumber}; use crate::create_proof_processing_router; -// Test the /tee/proof_inputs endpoint by: -// 1. Mocking an object store with a single batch blob containing TEE verifier input -// 2. Populating the SQL db with relevant information about the status of the TEE verifier input and -// TEE proof generation -// 3.
Sending a request to the /tee/proof_inputs endpoint and asserting that the response -// matches the file from the object store #[tokio::test] async fn request_tee_proof_inputs() { - // prepare a sample mocked TEE verifier input - - let batch_number = L1BatchNumber::from(1); - let tvi = V1TeeVerifierInput::new( - WitnessInputMerklePaths::new(0), - vec![], - L1BatchEnv { - previous_batch_hash: Some(H256([1; 32])), - number: batch_number, - timestamp: 0, - fee_input: Default::default(), - fee_account: Default::default(), - enforced_base_fee: None, - first_l2_block: L2BlockEnv { - number: 0, - timestamp: 0, - prev_block_hash: H256([1; 32]), - max_virtual_blocks_to_create: 0, - }, - }, - SystemEnv { - zk_porter_available: false, - version: Default::default(), - base_system_smart_contracts: BaseSystemContracts { - bootloader: SystemContractCode { - code: vec![U256([1; 4])], - hash: H256([1; 32]), - }, - default_aa: SystemContractCode { - code: vec![U256([1; 4])], - hash: H256([1; 32]), - }, - }, - bootloader_gas_limit: 0, - execution_mode: TxExecutionMode::VerifyExecute, - default_validation_computational_gas_limit: 0, - chain_id: Default::default(), - pubdata_params: Default::default(), - }, - vec![(H256([1; 32]), vec![0, 1, 2, 3, 4])], - ); - let tvi = TeeVerifierInput::V1(tvi); - - // populate mocked object store with a single batch blob - - let blob_store = MockObjectStore::arc(); - let object_path = blob_store.put(batch_number, &tvi).await.unwrap(); - - // get connection to the SQL db and mock the status of the TEE proof generation - let db_conn_pool = ConnectionPool::test_pool().await; - mock_tee_batch_status(db_conn_pool.clone(), batch_number, &object_path).await; - - // test the /tee/proof_inputs endpoint; it should return the batch from the object store let app = create_proof_processing_router( - blob_store, - db_conn_pool, + MockObjectStore::arc(), + db_conn_pool.clone(), ProofDataHandlerConfig { http_port: 1337, proof_generation_timeout_in_secs: 10, - tee_support: true, + tee_config: TeeConfig { + tee_support: true, + first_tee_processed_batch: L1BatchNumber(0), + }, }, L1BatchCommitmentMode::Rollup, + L2ChainId::default(), ); - let req_body = Body::from(serde_json::to_vec(&json!({ "tee_type": "sgx" })).unwrap()); - let response = app - .oneshot( - Request::builder() - .method(Method::POST) - .uri("/tee/proof_inputs") - .header(http::header::CONTENT_TYPE, "application/json") - .body(req_body) - .unwrap(), - ) - .await - .unwrap(); - - assert_eq!(response.status(), StatusCode::OK); - - let body = axum::body::to_bytes(response.into_body(), usize::MAX) - .await - .unwrap(); - let json: serde_json::Value = serde_json::from_slice(&body).unwrap(); - let deserialized: TeeVerifierInput = serde_json::from_value(json).unwrap(); - - assert_eq!(tvi, deserialized); + let test_cases = vec![ + (json!({ "tee_type": "sgx" }), StatusCode::NO_CONTENT), + ( + json!({ "tee_type": "Sgx" }), + StatusCode::UNPROCESSABLE_ENTITY, + ), + ]; + + for (body, expected_status) in test_cases { + let req_body = Body::from(serde_json::to_vec(&body).unwrap()); + let response = app + .clone() + .oneshot( + Request::builder() + .method(Method::POST) + .uri("/tee/proof_inputs") + .header(http::header::CONTENT_TYPE, "application/json") + .body(req_body) + .unwrap(), + ) + .await + .unwrap(); + + assert_eq!(response.status(), expected_status); + } } // Test /tee/submit_proofs endpoint using a mocked TEE proof and verify response and db state #[tokio::test] async fn submit_tee_proof() { - let blob_store = 
MockObjectStore::arc(); - let db_conn_pool = ConnectionPool::test_pool().await; - let object_path = "mocked_object_path"; let batch_number = L1BatchNumber::from(1); + let db_conn_pool = ConnectionPool::test_pool().await; - mock_tee_batch_status(db_conn_pool.clone(), batch_number, object_path).await; - - // send a request to the /tee/submit_proofs endpoint, using a mocked TEE proof + mock_tee_batch_status(db_conn_pool.clone(), batch_number).await; let tee_proof_request_str = r#"{ "signature": "0001020304", @@ -141,14 +78,18 @@ async fn submit_tee_proof() { serde_json::from_str::(tee_proof_request_str).unwrap(); let uri = format!("/tee/submit_proofs/{}", batch_number.0); let app = create_proof_processing_router( - blob_store, + MockObjectStore::arc(), db_conn_pool.clone(), ProofDataHandlerConfig { http_port: 1337, proof_generation_timeout_in_secs: 10, - tee_support: true, + tee_config: TeeConfig { + tee_support: true, + first_tee_processed_batch: L1BatchNumber(0), + }, }, L1BatchCommitmentMode::Rollup, + L2ChainId::default(), ); // this should fail because we haven't saved the attestation for the pubkey yet @@ -207,32 +148,15 @@ async fn submit_tee_proof() { async fn mock_tee_batch_status( db_conn_pool: ConnectionPool, batch_number: L1BatchNumber, - object_path: &str, ) { let mut proof_db_conn = db_conn_pool.connection().await.unwrap(); let mut proof_dal = proof_db_conn.tee_proof_generation_dal(); - let mut input_db_conn = db_conn_pool.connection().await.unwrap(); - let mut input_producer_dal = input_db_conn.tee_verifier_input_producer_dal(); // there should not be any batches awaiting proof in the db yet let oldest_batch_number = proof_dal.get_oldest_unpicked_batch().await.unwrap(); assert!(oldest_batch_number.is_none()); - // mock SQL table with relevant information about the status of the TEE verifier input - - input_producer_dal - .create_tee_verifier_input_producer_job(batch_number) - .await - .expect("Failed to create tee_verifier_input_producer_job"); - - // pretend that the TEE verifier input blob file was fetched successfully - - input_producer_dal - .mark_job_as_successful(batch_number, Instant::now(), object_path) - .await - .expect("Failed to mark tee_verifier_input_producer_job job as successful"); - // mock SQL table with relevant information about the status of TEE proof generation proof_dal diff --git a/core/node/shared_metrics/src/lib.rs b/core/node/shared_metrics/src/lib.rs index e0a7fa74ef4..2c41ec9293a 100644 --- a/core/node/shared_metrics/src/lib.rs +++ b/core/node/shared_metrics/src/lib.rs @@ -29,7 +29,6 @@ pub enum InitStage { EthTxAggregator, EthTxManager, Tree, - TeeVerifierInputProducer, Consensus, DADispatcher, } @@ -45,7 +44,6 @@ impl fmt::Display for InitStage { Self::EthTxAggregator => formatter.write_str("eth_tx_aggregator"), Self::EthTxManager => formatter.write_str("eth_tx_manager"), Self::Tree => formatter.write_str("tree"), - Self::TeeVerifierInputProducer => formatter.write_str("tee_verifier_input_producer"), Self::Consensus => formatter.write_str("consensus"), Self::DADispatcher => formatter.write_str("da_dispatcher"), } diff --git a/core/node/state_keeper/src/executor/mod.rs b/core/node/state_keeper/src/executor/mod.rs index 2fa5c3b9c12..903dae2f1ca 100644 --- a/core/node/state_keeper/src/executor/mod.rs +++ b/core/node/state_keeper/src/executor/mod.rs @@ -40,7 +40,7 @@ impl TxExecutionResult { _ => Self::Success { tx_metrics: Box::new(ExecutionMetricsForCriteria::new(Some(tx), &res.tx_result)), gas_remaining: res.tx_result.statistics.gas_remaining, - 
tx_result: res.tx_result, + tx_result: res.tx_result.clone(), compressed_bytecodes: res.compressed_bytecodes, call_tracer_result: res.call_traces, }, diff --git a/core/node/state_keeper/src/executor/tests/tester.rs b/core/node/state_keeper/src/executor/tests/tester.rs index a7fac40236c..cc7945dfa86 100644 --- a/core/node/state_keeper/src/executor/tests/tester.rs +++ b/core/node/state_keeper/src/executor/tests/tester.rs @@ -26,7 +26,7 @@ use zksync_state::{OwnedStorage, ReadStorageFactory, RocksdbStorageOptions}; use zksync_test_account::{Account, DeployContractsTx, TxType}; use zksync_types::{ block::L2BlockHasher, - commitment::{L1BatchCommitmentMode, PubdataParams}, + commitment::PubdataParams, ethabi::Token, get_code_key, get_known_code_key, protocol_version::ProtocolSemanticVersion, @@ -126,10 +126,9 @@ impl Tester { &mut self, storage_type: StorageType, ) -> Box> { - let (l1_batch_env, system_env) = self.default_batch_params(); + let (l1_batch_env, system_env, pubdata_params) = self.default_batch_params(); match storage_type { StorageType::AsyncRocksdbCache => { - let (l1_batch_env, system_env) = self.default_batch_params(); let (state_keeper_storage, task) = AsyncRocksdbCache::new( self.pool(), self.state_keeper_db_path(), @@ -144,6 +143,7 @@ impl Tester { Arc::new(state_keeper_storage), l1_batch_env, system_env, + pubdata_params, ) .await } @@ -155,12 +155,18 @@ impl Tester { )), l1_batch_env, system_env, + pubdata_params, ) .await } StorageType::Postgres => { - self.create_batch_executor_inner(Arc::new(self.pool()), l1_batch_env, system_env) - .await + self.create_batch_executor_inner( + Arc::new(self.pool()), + l1_batch_env, + system_env, + pubdata_params, + ) + .await } } } @@ -170,6 +176,7 @@ impl Tester { storage_factory: Arc, l1_batch_env: L1BatchEnv, system_env: SystemEnv, + pubdata_params: PubdataParams, ) -> Box> { let (_stop_sender, stop_receiver) = watch::channel(false); let storage = storage_factory @@ -180,11 +187,11 @@ impl Tester { if self.config.trace_calls { let mut executor = MainBatchExecutorFactory::::new(false); executor.set_fast_vm_mode(self.config.fast_vm_mode); - executor.init_batch(storage, l1_batch_env, system_env) + executor.init_batch(storage, l1_batch_env, system_env, pubdata_params) } else { let mut executor = MainBatchExecutorFactory::<()>::new(false); executor.set_fast_vm_mode(self.config.fast_vm_mode); - executor.init_batch(storage, l1_batch_env, system_env) + executor.init_batch(storage, l1_batch_env, system_env, pubdata_params) } } @@ -234,7 +241,7 @@ impl Tester { snapshot: &SnapshotRecoveryStatus, ) -> Box> { let current_timestamp = snapshot.l2_block_timestamp + 1; - let (mut l1_batch_env, system_env) = + let (mut l1_batch_env, system_env, pubdata_params) = self.batch_params(snapshot.l1_batch_number + 1, current_timestamp); l1_batch_env.previous_batch_hash = Some(snapshot.l1_batch_root_hash); l1_batch_env.first_l2_block = L2BlockEnv { @@ -244,11 +251,11 @@ impl Tester { max_virtual_blocks_to_create: 1, }; - self.create_batch_executor_inner(storage_factory, l1_batch_env, system_env) + self.create_batch_executor_inner(storage_factory, l1_batch_env, system_env, pubdata_params) .await } - pub(super) fn default_batch_params(&self) -> (L1BatchEnv, SystemEnv) { + pub(super) fn default_batch_params(&self) -> (L1BatchEnv, SystemEnv, PubdataParams) { // Not really important for the batch executor - it operates over a single batch. 
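// --- Editorial example (not part of the diff) ---------------------------------------------
// A minimal sketch of how the re-threaded `PubdataParams` flows into the batch executor after
// this change: `init_batch` takes it as an explicit fourth argument instead of reading the
// now-removed `system_env.pubdata_params`. The wrapper function and its boxed return type are
// illustrative assumptions; the tuple mirrors the new `default_batch_params` return type.
fn start_default_batch(
    mut factory: MainBatchExecutorFactory<()>,
    storage: OwnedStorage,
    (l1_batch_env, system_env, pubdata_params): (L1BatchEnv, SystemEnv, PubdataParams),
) -> Box<dyn BatchExecutor<OwnedStorage>> {
    factory.init_batch(storage, l1_batch_env, system_env, pubdata_params)
}
// -------------------------------------------------------------------------------------------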
self.batch_params(L1BatchNumber(1), 100) } @@ -258,20 +265,16 @@ impl Tester { &self, l1_batch_number: L1BatchNumber, timestamp: u64, - ) -> (L1BatchEnv, SystemEnv) { + ) -> (L1BatchEnv, SystemEnv, PubdataParams) { let mut system_params = default_system_env(); if let Some(vm_gas_limit) = self.config.vm_gas_limit { system_params.bootloader_gas_limit = vm_gas_limit; } system_params.default_validation_computational_gas_limit = self.config.validation_computational_gas_limit; - system_params.pubdata_params = PubdataParams { - l2_da_validator_address: get_da_contract_address(), - pubdata_type: L1BatchCommitmentMode::Rollup, - }; let mut batch_params = default_l1_batch_env(l1_batch_number.0, timestamp, self.fee_account); batch_params.previous_batch_hash = Some(H256::zero()); // Not important in this context. - (batch_params, system_params) + (batch_params, system_params, PubdataParams::default()) } /// Performs the genesis in the storage. @@ -285,7 +288,7 @@ impl Tester { patch: 0.into(), }, &BASE_SYSTEM_CONTRACTS, - &get_system_smart_contracts(), + &get_system_smart_contracts(false), Default::default(), ) .await diff --git a/core/node/state_keeper/src/io/common/mod.rs b/core/node/state_keeper/src/io/common/mod.rs index 6bd881414a2..867ffa7fb37 100644 --- a/core/node/state_keeper/src/io/common/mod.rs +++ b/core/node/state_keeper/src/io/common/mod.rs @@ -3,7 +3,7 @@ use std::time::Duration; use anyhow::Context; use zksync_dal::{Connection, Core, CoreDal}; use zksync_multivm::interface::{L1BatchEnv, SystemEnv}; -use zksync_types::{L1BatchNumber, L2BlockNumber, H256}; +use zksync_types::{commitment::PubdataParams, L1BatchNumber, L2BlockNumber, H256}; use super::PendingBatchData; @@ -85,6 +85,7 @@ pub async fn load_pending_batch( storage: &mut Connection<'_, Core>, system_env: SystemEnv, l1_batch_env: L1BatchEnv, + pubdata_params: PubdataParams, ) -> anyhow::Result { let pending_l2_blocks = storage .transactions_dal() @@ -104,6 +105,7 @@ pub async fn load_pending_batch( Ok(PendingBatchData { l1_batch_env, system_env, + pubdata_params, pending_l2_blocks, }) } diff --git a/core/node/state_keeper/src/io/common/tests.rs b/core/node/state_keeper/src/io/common/tests.rs index b2a24acb495..ec9f906b1cd 100644 --- a/core/node/state_keeper/src/io/common/tests.rs +++ b/core/node/state_keeper/src/io/common/tests.rs @@ -318,7 +318,7 @@ async fn loading_pending_batch_with_genesis() { .await; let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); - let (system_env, l1_batch_env) = provider + let (system_env, l1_batch_env, pubdata_params) = provider .load_l1_batch_env( &mut storage, L1BatchNumber(1), @@ -331,7 +331,7 @@ async fn loading_pending_batch_with_genesis() { assert_eq!(l1_batch_env.first_l2_block.number, 1); - let pending_batch = load_pending_batch(&mut storage, system_env, l1_batch_env) + let pending_batch = load_pending_batch(&mut storage, system_env, l1_batch_env, pubdata_params) .await .unwrap(); @@ -396,7 +396,7 @@ async fn loading_pending_batch_after_snapshot_recovery() { .await; let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); - let (system_env, l1_batch_env) = provider + let (system_env, l1_batch_env, pubdata_params) = provider .load_l1_batch_env( &mut storage, snapshot_recovery.l1_batch_number + 1, @@ -406,7 +406,7 @@ async fn loading_pending_batch_after_snapshot_recovery() { .await .unwrap() .expect("no L1 batch"); - let pending_batch = load_pending_batch(&mut storage, system_env, l1_batch_env) + let pending_batch = load_pending_batch(&mut storage, 
system_env, l1_batch_env, pubdata_params) .await .unwrap(); diff --git a/core/node/state_keeper/src/io/mempool.rs b/core/node/state_keeper/src/io/mempool.rs index 58e0d56be2d..dfddd36aba7 100644 --- a/core/node/state_keeper/src/io/mempool.rs +++ b/core/node/state_keeper/src/io/mempool.rs @@ -14,6 +14,7 @@ use zksync_mempool::L2TxFilter; use zksync_multivm::{interface::Halt, utils::derive_base_fee_and_gas_per_pubdata}; use zksync_node_fee_model::BatchFeeModelInputProvider; use zksync_types::{ + block::UnsealedL1BatchHeader, commitment::{L1BatchCommitmentMode, PubdataParams}, protocol_upgrade::ProtocolUpgradeTx, utils::display_timestamp, @@ -51,14 +52,14 @@ pub struct MempoolIO { filter: L2TxFilter, l1_batch_params_provider: L1BatchParamsProvider, fee_account: Address, - l2_da_validator_address: Option
<Address>, - pubdata_type: L1BatchCommitmentMode, validation_computational_gas_limit: u32, max_allowed_tx_gas_limit: U256, delay_interval: Duration, // Used to keep track of gas prices to set accepted price per pubdata byte in blocks. batch_fee_input_provider: Arc<dyn BatchFeeModelInputProvider>, chain_id: L2ChainId, + l2_da_validator_address: Option<Address>
, + pubdata_type: L1BatchCommitmentMode, } impl IoSealCriteria for MempoolIO { @@ -101,7 +102,7 @@ impl StateKeeperIO for MempoolIO { L2BlockSealProcess::clear_pending_l2_block(&mut storage, cursor.next_l2_block - 1).await?; - let Some((system_env, l1_batch_env)) = self + let Some((system_env, l1_batch_env, pubdata_params)) = self .l1_batch_params_provider .load_l1_batch_env( &mut storage, @@ -113,38 +114,39 @@ impl StateKeeperIO for MempoolIO { else { return Ok((cursor, None)); }; - let pending_batch_data = load_pending_batch(&mut storage, system_env, l1_batch_env) - .await - .with_context(|| { - format!( - "failed loading data for re-execution for pending L1 batch #{}", - cursor.l1_batch - ) - })?; + let pending_batch_data = + load_pending_batch(&mut storage, system_env, l1_batch_env, pubdata_params) + .await + .with_context(|| { + format!( + "failed loading data for re-execution for pending L1 batch #{}", + cursor.l1_batch + ) + })?; - let PendingBatchData { - l1_batch_env, - system_env, - pending_l2_blocks, - } = pending_batch_data; // Initialize the filter for the transactions that come after the pending batch. // We use values from the pending block to match the filter with one used before the restart. - let (base_fee, gas_per_pubdata) = - derive_base_fee_and_gas_per_pubdata(l1_batch_env.fee_input, system_env.version.into()); + let (base_fee, gas_per_pubdata) = derive_base_fee_and_gas_per_pubdata( + pending_batch_data.l1_batch_env.fee_input, + pending_batch_data.system_env.version.into(), + ); self.filter = L2TxFilter { - fee_input: l1_batch_env.fee_input, + fee_input: pending_batch_data.l1_batch_env.fee_input, fee_per_gas: base_fee, gas_per_pubdata: gas_per_pubdata as u32, }; - Ok(( - cursor, - Some(PendingBatchData { - l1_batch_env, - system_env, - pending_l2_blocks, - }), - )) + storage + .blocks_dal() + .ensure_unsealed_l1_batch_exists( + pending_batch_data + .l1_batch_env + .clone() + .into_unsealed_header(Some(pending_batch_data.system_env.version)), + ) + .await?; + + Ok((cursor, Some(pending_batch_data))) } async fn wait_for_new_batch_params( @@ -152,6 +154,32 @@ impl StateKeeperIO for MempoolIO { cursor: &IoCursor, max_wait: Duration, ) -> anyhow::Result> { + // Check if there is an existing unsealed batch + if let Some(unsealed_storage_batch) = self + .pool + .connection_tagged("state_keeper") + .await? + .blocks_dal() + .get_unsealed_l1_batch() + .await? + { + let protocol_version = unsealed_storage_batch + .protocol_version + .context("unsealed batch is missing protocol version")?; + return Ok(Some(L1BatchParams { + protocol_version, + validation_computational_gas_limit: self.validation_computational_gas_limit, + operator_address: unsealed_storage_batch.fee_address, + fee_input: unsealed_storage_batch.fee_input, + first_l2_block: L2BlockParams { + timestamp: unsealed_storage_batch.timestamp, + // This value is effectively ignored by the protocol. + virtual_blocks: 1, + }, + pubdata_params: self.pubdata_params(protocol_version)?, + })); + } + let deadline = Instant::now() + max_wait; // Block until at least one transaction in the mempool can match the filter (or timeout happens). 
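// --- Editorial example (not part of the diff) ---------------------------------------------
// The restart path added above, condensed: an `UnsealedL1BatchHeader` persisted by a previous
// run is mapped back onto `L1BatchParams`, so the state keeper reopens the same batch instead
// of deriving fresh params from the mempool. Assumes `anyhow::Context` is in scope;
// `pubdata_params_for` is a hypothetical stand-in for the `MempoolIO::pubdata_params` helper.
fn params_from_unsealed_batch(
    batch: UnsealedL1BatchHeader,
    validation_computational_gas_limit: u32,
) -> anyhow::Result<L1BatchParams> {
    let protocol_version = batch
        .protocol_version
        .context("unsealed batch is missing protocol version")?;
    Ok(L1BatchParams {
        protocol_version,
        validation_computational_gas_limit,
        operator_address: batch.fee_address,
        fee_input: batch.fee_input,
        first_l2_block: L2BlockParams {
            timestamp: batch.timestamp,
            // Effectively ignored by the protocol.
            virtual_blocks: 1,
        },
        pubdata_params: pubdata_params_for(protocol_version)?,
    })
}
// -------------------------------------------------------------------------------------------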
@@ -195,17 +223,18 @@ impl StateKeeperIO for MempoolIO { continue; } - let pubdata_params = match ( - protocol_version.is_pre_gateway(), - self.l2_da_validator_address, - ) { - (true, _) => PubdataParams::default(), - (false, Some(l2_da_validator_address)) => PubdataParams { - l2_da_validator_address, - pubdata_type: self.pubdata_type, - }, - (false, None) => anyhow::bail!("L2 DA validator address not found"), - }; + self.pool + .connection_tagged("state_keeper") + .await? + .blocks_dal() + .insert_l1_batch(UnsealedL1BatchHeader { + number: cursor.l1_batch, + timestamp, + protocol_version: Some(protocol_version), + fee_address: self.fee_account, + fee_input: self.filter.fee_input, + }) + .await?; return Ok(Some(L1BatchParams { protocol_version, @@ -217,7 +246,7 @@ impl StateKeeperIO for MempoolIO { // This value is effectively ignored by the protocol. virtual_blocks: 1, }, - pubdata_params, + pubdata_params: self.pubdata_params(protocol_version)?, })); } Ok(None) @@ -432,10 +461,10 @@ impl MempoolIO { pool: ConnectionPool, config: &StateKeeperConfig, fee_account: Address, - l2_da_validator_address: Option
<Address>, - pubdata_type: L1BatchCommitmentMode, delay_interval: Duration, chain_id: L2ChainId, + l2_da_validator_address: Option<Address>
, + pubdata_type: L1BatchCommitmentMode, ) -> anyhow::Result { Ok(Self { mempool, @@ -446,15 +475,31 @@ impl MempoolIO { // ^ Will be initialized properly on the first newly opened batch l1_batch_params_provider: L1BatchParamsProvider::uninitialized(), fee_account, - l2_da_validator_address, - pubdata_type, validation_computational_gas_limit: config.validation_computational_gas_limit, max_allowed_tx_gas_limit: config.max_allowed_l2_tx_gas_limit.into(), delay_interval, batch_fee_input_provider, chain_id, + l2_da_validator_address, + pubdata_type, }) } + + fn pubdata_params(&self, protocol_version: ProtocolVersionId) -> anyhow::Result { + let pubdata_params = match ( + protocol_version.is_pre_gateway(), + self.l2_da_validator_address, + ) { + (true, _) => PubdataParams::default(), + (false, Some(l2_da_validator_address)) => PubdataParams { + l2_da_validator_address, + pubdata_type: self.pubdata_type, + }, + (false, None) => anyhow::bail!("L2 DA validator address not found"), + }; + + Ok(pubdata_params) + } } /// Getters required for testing the MempoolIO. diff --git a/core/node/state_keeper/src/io/mod.rs b/core/node/state_keeper/src/io/mod.rs index a7fd49637f2..e2461e72d7b 100644 --- a/core/node/state_keeper/src/io/mod.rs +++ b/core/node/state_keeper/src/io/mod.rs @@ -39,11 +39,12 @@ pub struct PendingBatchData { /// (e.g. timestamp) are the same, so transaction would have the same result after re-execution. pub(crate) l1_batch_env: L1BatchEnv, pub(crate) system_env: SystemEnv, + pub(crate) pubdata_params: PubdataParams, /// List of L2 blocks and corresponding transactions that were executed within batch. pub(crate) pending_l2_blocks: Vec, } -#[derive(Debug, Copy, Clone, Default)] +#[derive(Debug, Copy, Clone, Default, PartialEq)] pub struct L2BlockParams { /// The timestamp of the L2 block. pub timestamp: u64, @@ -59,7 +60,7 @@ pub struct L2BlockParams { } /// Parameters for a new L1 batch returned by [`StateKeeperIO::wait_for_new_batch_params()`]. -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] pub struct L1BatchParams { /// Protocol version for the new L1 batch. pub protocol_version: ProtocolVersionId, @@ -71,7 +72,7 @@ pub struct L1BatchParams { pub fee_input: BatchFeeInput, /// Parameters of the first L2 block in the batch. pub first_l2_block: L2BlockParams, - /// Params related to how the pubdata should be processed by the bootloader in the batch + /// Params related to how the pubdata should be processed by the bootloader in the batch. 
pub pubdata_params: PubdataParams, } @@ -82,8 +83,8 @@ impl L1BatchParams { contracts: BaseSystemContracts, cursor: &IoCursor, previous_batch_hash: H256, - ) -> (SystemEnv, L1BatchEnv) { - l1_batch_params( + ) -> (SystemEnv, L1BatchEnv, PubdataParams) { + let (system_env, l1_batch_env) = l1_batch_params( cursor.l1_batch, self.operator_address, self.first_l2_block.timestamp, @@ -96,8 +97,9 @@ impl L1BatchParams { self.protocol_version, self.first_l2_block.virtual_blocks, chain_id, - self.pubdata_params, - ) + ); + + (system_env, l1_batch_env, self.pubdata_params) } } diff --git a/core/node/state_keeper/src/io/persistence.rs b/core/node/state_keeper/src/io/persistence.rs index dc854c9e58f..06f1972a02a 100644 --- a/core/node/state_keeper/src/io/persistence.rs +++ b/core/node/state_keeper/src/io/persistence.rs @@ -7,7 +7,7 @@ use async_trait::async_trait; use tokio::sync::{mpsc, oneshot}; use zksync_dal::{ConnectionPool, Core, CoreDal}; use zksync_shared_metrics::{BlockStage, APP_METRICS}; -use zksync_types::{writes::TreeWrite, Address}; +use zksync_types::{writes::TreeWrite, Address, ProtocolVersionId}; use zksync_utils::u256_to_h256; use crate::{ @@ -41,13 +41,45 @@ pub struct StateKeeperPersistence { impl StateKeeperPersistence { const SHUTDOWN_MSG: &'static str = "L2 block sealer unexpectedly shut down"; + async fn validate_l2_legacy_shared_bridge_addr( + pool: &ConnectionPool, + l2_legacy_shared_bridge_addr: Option
, + ) -> anyhow::Result<()> { + let mut connection = pool.connection_tagged("state_keeper").await?; + + if let Some(l2_block) = connection + .blocks_dal() + .get_earliest_l2_block_number() + .await + .context("failed to load earliest l2 block number")? + { + let header = connection + .blocks_dal() + .get_l2_block_header(l2_block) + .await + .context("failed to load L2 block header")? + .context("missing L2 block header")?; + let protocol_version = header + .protocol_version + .unwrap_or_else(ProtocolVersionId::last_potentially_undefined); + + if protocol_version.is_pre_gateway() && l2_legacy_shared_bridge_addr.is_none() { + anyhow::bail!("Missing `l2_legacy_shared_bridge_addr` for chain that was initialized before gateway upgrade"); + } + } + + Ok(()) + } + /// Creates a sealer that will use the provided Postgres connection and will have the specified /// `command_capacity` for unprocessed sealing commands. - pub fn new( + pub async fn new( pool: ConnectionPool, l2_legacy_shared_bridge_addr: Option
, mut command_capacity: usize, - ) -> (Self, L2BlockSealerTask) { + ) -> anyhow::Result<(Self, L2BlockSealerTask)> { + Self::validate_l2_legacy_shared_bridge_addr(&pool, l2_legacy_shared_bridge_addr).await?; + let is_sync = command_capacity == 0; command_capacity = command_capacity.max(1); @@ -67,7 +99,7 @@ impl StateKeeperPersistence { latest_completion_receiver: None, is_sync, }; - (this, sealer) + Ok((this, sealer)) } pub fn with_tx_insertion(mut self) -> Self { @@ -347,7 +379,7 @@ impl StateKeeperOutputHandler for TreeWritesPersistence { #[cfg(test)] mod tests { - use std::collections::HashSet; + use std::collections::{HashMap, HashSet}; use assert_matches::assert_matches; use futures::FutureExt; @@ -396,11 +428,13 @@ mod tests { pool.clone(), Some(Address::default()), l2_block_sealer_capacity, - ); + ) + .await + .unwrap(); let mut output_handler = OutputHandler::new(Box::new(persistence)) .with_handler(Box::new(TreeWritesPersistence::new(pool.clone()))); tokio::spawn(l2_block_sealer.run()); - execute_mock_batch(&mut output_handler).await; + execute_mock_batch(&mut output_handler, &pool).await; // Check that L2 block #1 and L1 batch #1 are persisted. let mut storage = pool.connection().await.unwrap(); @@ -449,9 +483,20 @@ mod tests { assert_eq!(actual_index, expected_index); } - async fn execute_mock_batch(output_handler: &mut OutputHandler) -> H256 { + async fn execute_mock_batch( + output_handler: &mut OutputHandler, + pool: &ConnectionPool, + ) -> H256 { let l1_batch_env = default_l1_batch_env(1, 1, Address::random()); - let mut updates = UpdatesManager::new(&l1_batch_env, &default_system_env()); + let mut updates = + UpdatesManager::new(&l1_batch_env, &default_system_env(), Default::default()); + pool.connection() + .await + .unwrap() + .blocks_dal() + .insert_l1_batch(l1_batch_env.into_unsealed_header(None)) + .await + .unwrap(); let tx = create_transaction(10, 100); let tx_hash = tx.hash(); @@ -465,6 +510,7 @@ mod tests { tx, tx_result, vec![], + HashMap::new(), BlockGasCount::default(), VmExecutionMetrics::default(), vec![], @@ -530,12 +576,14 @@ mod tests { drop(storage); let (mut persistence, l2_block_sealer) = - StateKeeperPersistence::new(pool.clone(), Some(Address::default()), 1); + StateKeeperPersistence::new(pool.clone(), Some(Address::default()), 1) + .await + .unwrap(); persistence = persistence.with_tx_insertion().without_protective_reads(); let mut output_handler = OutputHandler::new(Box::new(persistence)); tokio::spawn(l2_block_sealer.run()); - let tx_hash = execute_mock_batch(&mut output_handler).await; + let tx_hash = execute_mock_batch(&mut output_handler, &pool).await; // Check that the transaction is persisted. let mut storage = pool.connection().await.unwrap(); @@ -569,7 +617,9 @@ mod tests { async fn l2_block_sealer_handle_blocking() { let pool = ConnectionPool::constrained_test_pool(1).await; let (mut persistence, mut sealer) = - StateKeeperPersistence::new(pool, Some(Address::default()), 1); + StateKeeperPersistence::new(pool, Some(Address::default()), 1) + .await + .unwrap(); // The first command should be successfully submitted immediately. 
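// --- Editorial example (not part of the diff) ---------------------------------------------
// `StateKeeperPersistence::new` is now async and fallible: it validates that pre-gateway
// chains configure `l2_legacy_shared_bridge_addr` before any sealing starts. A sketch of the
// updated construction pattern; pool, address, and capacity values are placeholders.
async fn build_persistence(pool: ConnectionPool<Core>) -> anyhow::Result<StateKeeperPersistence> {
    let (persistence, l2_block_sealer) =
        StateKeeperPersistence::new(pool, Some(Address::default()), 5).await?;
    // The sealer loop is spawned separately, exactly as in the updated tests.
    tokio::spawn(l2_block_sealer.run());
    Ok(persistence)
}
// -------------------------------------------------------------------------------------------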
let mut updates_manager = create_updates_manager(); @@ -620,7 +670,9 @@ mod tests { async fn l2_block_sealer_handle_parallel_processing() { let pool = ConnectionPool::constrained_test_pool(1).await; let (mut persistence, mut sealer) = - StateKeeperPersistence::new(pool, Some(Address::default()), 5); + StateKeeperPersistence::new(pool, Some(Address::default()), 5) + .await + .unwrap(); // 5 L2 block sealing commands can be submitted without blocking. let mut updates_manager = create_updates_manager(); diff --git a/core/node/state_keeper/src/io/seal_logic/l2_block_seal_subtasks.rs b/core/node/state_keeper/src/io/seal_logic/l2_block_seal_subtasks.rs index 9a57da5e18d..4fc58bce5c9 100644 --- a/core/node/state_keeper/src/io/seal_logic/l2_block_seal_subtasks.rs +++ b/core/node/state_keeper/src/io/seal_logic/l2_block_seal_subtasks.rs @@ -333,12 +333,11 @@ impl L2BlockSealSubtask for InsertTokensSubtask { connection: &mut Connection<'_, Core>, ) -> anyhow::Result<()> { let is_fictive = command.is_l2_block_fictive(); + let progress = L2_BLOCK_METRICS.start(L2BlockSealStage::ExtractAddedTokens, is_fictive); let token_deployer_address = command .l2_legacy_shared_bridge_addr .unwrap_or(L2_NATIVE_TOKEN_VAULT_ADDRESS); - let progress = L2_BLOCK_METRICS.start(L2BlockSealStage::ExtractAddedTokens, is_fictive); let added_tokens = extract_added_tokens(token_deployer_address, &command.l2_block.events); - progress.observe(added_tokens.len()); let progress = L2_BLOCK_METRICS.start(L2BlockSealStage::InsertTokens, is_fictive); @@ -550,7 +549,6 @@ mod tests { virtual_blocks: Default::default(), protocol_version: ProtocolVersionId::latest(), }, - pubdata_params: PubdataParams::default(), first_tx_index: 0, fee_account_address: Default::default(), fee_input: Default::default(), @@ -559,6 +557,7 @@ mod tests { protocol_version: Some(ProtocolVersionId::latest()), l2_legacy_shared_bridge_addr: Default::default(), pre_insert_txs: false, + pubdata_params: PubdataParams::default(), }; // Run. 
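// --- Editorial example (not part of the diff) ---------------------------------------------
// The token-deployer fallback used by `InsertTokensSubtask` above, isolated: chains that still
// have a legacy shared bridge extract added tokens from it, while newer chains fall back to
// the native token vault address.
fn token_deployer_address(l2_legacy_shared_bridge_addr: Option<Address>) -> Address {
    l2_legacy_shared_bridge_addr.unwrap_or(L2_NATIVE_TOKEN_VAULT_ADDRESS)
}
// -------------------------------------------------------------------------------------------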
@@ -614,7 +613,6 @@ mod tests { l2_tx_count: 1, fee_account_address: l2_block_seal_command.fee_account_address, base_fee_per_gas: l2_block_seal_command.base_fee_per_gas, - pubdata_params: l2_block_seal_command.pubdata_params, batch_fee_input: l2_block_seal_command.fee_input, base_system_contracts_hashes: l2_block_seal_command.base_system_contracts_hashes, protocol_version: l2_block_seal_command.protocol_version, @@ -622,6 +620,7 @@ mod tests { virtual_blocks: l2_block_seal_command.l2_block.virtual_blocks, gas_limit: get_max_batch_gas_limit(VmVersion::latest()), logs_bloom: Default::default(), + pubdata_params: l2_block_seal_command.pubdata_params, }; connection .protocol_versions_dal() diff --git a/core/node/state_keeper/src/io/seal_logic/mod.rs b/core/node/state_keeper/src/io/seal_logic/mod.rs index dce0ae090de..7f05bda7a6f 100644 --- a/core/node/state_keeper/src/io/seal_logic/mod.rs +++ b/core/node/state_keeper/src/io/seal_logic/mod.rs @@ -132,6 +132,7 @@ impl UpdatesManager { protocol_version: Some(self.protocol_version()), system_logs: finished_batch.final_execution_state.system_logs.clone(), pubdata_input: finished_batch.pubdata_input.clone(), + fee_address: self.fee_account_address, }; let final_bootloader_memory = finished_batch @@ -141,7 +142,7 @@ impl UpdatesManager { transaction .blocks_dal() - .insert_l1_batch( + .mark_l1_batch_as_sealed( &l1_batch, &final_bootloader_memory, self.pending_l1_gas_count(), @@ -382,7 +383,6 @@ impl L2BlockSealCommand { l1_tx_count: l1_tx_count as u16, l2_tx_count: l2_tx_count as u16, fee_account_address: self.fee_account_address, - pubdata_params: self.pubdata_params, base_fee_per_gas: self.base_fee_per_gas, batch_fee_input: self.fee_input, base_system_contracts_hashes: self.base_system_contracts_hashes, @@ -391,6 +391,7 @@ impl L2BlockSealCommand { virtual_blocks: self.l2_block.virtual_blocks, gas_limit: get_max_batch_gas_limit(definite_vm_version), logs_bloom, + pubdata_params: self.pubdata_params, }; let mut connection = strategy.connection().await?; diff --git a/core/node/state_keeper/src/io/tests/mod.rs b/core/node/state_keeper/src/io/tests/mod.rs index 4cf4a7b5a70..ece5b67767f 100644 --- a/core/node/state_keeper/src/io/tests/mod.rs +++ b/core/node/state_keeper/src/io/tests/mod.rs @@ -1,4 +1,4 @@ -use std::time::Duration; +use std::{collections::HashMap, time::Duration}; use test_casing::test_casing; use zksync_contracts::BaseSystemContractsHashes; @@ -249,6 +249,7 @@ async fn processing_storage_logs_when_sealing_l2_block() { BlockGasCount::default(), VmExecutionMetrics::default(), vec![], + HashMap::new(), vec![], ); @@ -267,6 +268,7 @@ async fn processing_storage_logs_when_sealing_l2_block() { BlockGasCount::default(), VmExecutionMetrics::default(), vec![], + HashMap::new(), vec![], ); @@ -282,11 +284,11 @@ async fn processing_storage_logs_when_sealing_l2_block() { fair_pubdata_price: 100, }), base_fee_per_gas: 10, - pubdata_params: Default::default(), base_system_contracts_hashes: BaseSystemContractsHashes::default(), protocol_version: Some(ProtocolVersionId::latest()), - l2_legacy_shared_bridge_addr: Default::default(), + l2_legacy_shared_bridge_addr: Some(Address::default()), pre_insert_txs: false, + pubdata_params: Default::default(), }; connection_pool .connection() @@ -357,6 +359,7 @@ async fn processing_events_when_sealing_l2_block() { BlockGasCount::default(), VmExecutionMetrics::default(), vec![], + HashMap::new(), vec![], ); } @@ -371,12 +374,12 @@ async fn processing_events_when_sealing_l2_block() { fair_l2_gas_price: 100, 
fair_pubdata_price: 100, }), - pubdata_params: Default::default(), base_fee_per_gas: 10, base_system_contracts_hashes: BaseSystemContractsHashes::default(), protocol_version: Some(ProtocolVersionId::latest()), - l2_legacy_shared_bridge_addr: Default::default(), + l2_legacy_shared_bridge_addr: Some(Address::default()), pre_insert_txs: false, + pubdata_params: Default::default(), }; pool.connection() .await @@ -446,26 +449,29 @@ async fn l2_block_processing_after_snapshot_recovery(commitment_mode: L1BatchCom .await .unwrap() .expect("no batch params generated"); - let (system_env, l1_batch_env) = l1_batch_params.into_env( + let (system_env, l1_batch_env, pubdata_params) = l1_batch_params.into_env( L2ChainId::default(), BASE_SYSTEM_CONTRACTS.clone(), &cursor, previous_batch_hash, ); - let mut updates = UpdatesManager::new(&l1_batch_env, &system_env); + let mut updates = UpdatesManager::new(&l1_batch_env, &system_env, pubdata_params); let tx_hash = tx.hash(); updates.extend_from_executed_transaction( tx.into(), create_execution_result([]), vec![], + HashMap::new(), BlockGasCount::default(), VmExecutionMetrics::default(), vec![], ); let (mut persistence, l2_block_sealer) = - StateKeeperPersistence::new(connection_pool.clone(), Some(Address::default()), 0); + StateKeeperPersistence::new(connection_pool.clone(), Some(Address::default()), 0) + .await + .unwrap(); tokio::spawn(l2_block_sealer.run()); persistence.handle_l2_block(&updates).await.unwrap(); @@ -554,3 +560,87 @@ async fn different_timestamp_for_l2_blocks_in_same_batch(commitment_mode: L1Batc .expect("no new L2 block params"); assert!(l2_block_params.timestamp > current_timestamp); } + +#[test_casing(2, COMMITMENT_MODES)] +#[tokio::test] +async fn continue_unsealed_batch_on_restart(commitment_mode: L1BatchCommitmentMode) { + let connection_pool = ConnectionPool::::test_pool().await; + let tester = Tester::new(commitment_mode); + tester.genesis(&connection_pool).await; + let mut storage = connection_pool.connection().await.unwrap(); + + let (mut mempool, mut mempool_guard) = + tester.create_test_mempool_io(connection_pool.clone()).await; + let (cursor, _) = mempool.initialize().await.unwrap(); + + // Insert a transaction into the mempool in order to open a new batch. 
+ let tx_filter = l2_tx_filter( + &tester.create_batch_fee_input_provider().await, + ProtocolVersionId::latest().into(), + ) + .await + .unwrap(); + let tx = tester.insert_tx( + &mut mempool_guard, + tx_filter.fee_per_gas, + tx_filter.gas_per_pubdata, + ); + storage + .transactions_dal() + .insert_transaction_l2(&tx, TransactionExecutionMetrics::default()) + .await + .unwrap(); + + let old_l1_batch_params = mempool + .wait_for_new_batch_params(&cursor, Duration::from_secs(10)) + .await + .unwrap() + .expect("no batch params generated"); + + // Restart + drop((mempool, mempool_guard, cursor)); + let (mut mempool, _) = tester.create_test_mempool_io(connection_pool.clone()).await; + let (cursor, _) = mempool.initialize().await.unwrap(); + + let new_l1_batch_params = mempool + .wait_for_new_batch_params(&cursor, Duration::from_secs(10)) + .await + .unwrap() + .expect("no batch params generated"); + + assert_eq!(old_l1_batch_params, new_l1_batch_params); +} + +#[test_casing(2, COMMITMENT_MODES)] +#[tokio::test] +async fn insert_unsealed_batch_on_init(commitment_mode: L1BatchCommitmentMode) { + let connection_pool = ConnectionPool::::test_pool().await; + let mut tester = Tester::new(commitment_mode); + tester.genesis(&connection_pool).await; + let fee_input = BatchFeeInput::pubdata_independent(55, 555, 5555); + let tx_result = tester + .insert_l2_block(&connection_pool, 1, 5, fee_input) + .await; + tester + .insert_sealed_batch(&connection_pool, 1, &[tx_result]) + .await; + // Pre-insert L2 block without its unsealed L1 batch counterpart + tester.set_timestamp(2); + tester + .insert_l2_block(&connection_pool, 2, 5, fee_input) + .await; + + let (mut mempool, _) = tester.create_test_mempool_io(connection_pool.clone()).await; + // Initialization is supposed to recognize that the current L1 batch is not present in the DB and + // insert it itself. 
+ let (cursor, _) = mempool.initialize().await.unwrap(); + + // Make sure we are able to fetch the newly inserted batch's params + let l1_batch_params = mempool + .wait_for_new_batch_params(&cursor, Duration::from_secs(10)) + .await + .unwrap() + .expect("no batch params generated"); + assert_eq!(l1_batch_params.fee_input, fee_input); + assert_eq!(l1_batch_params.first_l2_block.timestamp, 2); +} diff --git a/core/node/state_keeper/src/io/tests/tester.rs b/core/node/state_keeper/src/io/tests/tester.rs index 6b7fc260b66..daedbebc75e 100644 --- a/core/node/state_keeper/src/io/tests/tester.rs +++ b/core/node/state_keeper/src/io/tests/tester.rs @@ -4,7 +4,7 @@ use std::{slice, sync::Arc, time::Duration}; use zksync_base_token_adjuster::NoOpRatioProvider; use zksync_config::{ - configs::{chain::StateKeeperConfig, eth_sender::PubdataSendingMode, wallets::Wallets}, + configs::{chain::StateKeeperConfig, wallets::Wallets}, GasAdjusterConfig, }; use zksync_contracts::BaseSystemContracts; @@ -25,9 +25,10 @@ use zksync_node_test_utils::{ use zksync_types::{ block::L2BlockHeader, commitment::L1BatchCommitmentMode, - fee_model::{BatchFeeInput, FeeModelConfig, FeeModelConfigV1}, + fee_model::{BatchFeeInput, FeeModelConfig, FeeModelConfigV2}, l2::L2Tx, protocol_version::{L1VerifierConfig, ProtocolSemanticVersion}, + pubdata_da::PubdataSendingMode, system_contracts::get_system_smart_contracts, L2BlockNumber, L2ChainId, PriorityOpId, ProtocolVersionId, H256, }; @@ -97,8 +98,13 @@ impl Tester { MainNodeFeeInputProvider::new( gas_adjuster, Arc::new(NoOpRatioProvider::default()), - FeeModelConfig::V1(FeeModelConfigV1 { + FeeModelConfig::V2(FeeModelConfigV2 { minimal_l2_gas_price: self.minimal_l2_gas_price(), + compute_overhead_part: 1.0, + pubdata_overhead_part: 1.0, + batch_overhead_l1_gas: 10, + max_gas_per_batch: 500_000_000_000, + max_pubdata_per_batch: 100_000_000_000, }), ) } @@ -116,8 +122,13 @@ impl Tester { let batch_fee_input_provider = MainNodeFeeInputProvider::new( gas_adjuster, Arc::new(NoOpRatioProvider::default()), - FeeModelConfig::V1(FeeModelConfigV1 { + FeeModelConfig::V2(FeeModelConfigV2 { minimal_l2_gas_price: self.minimal_l2_gas_price(), + compute_overhead_part: 1.0, + pubdata_overhead_part: 1.0, + batch_overhead_l1_gas: 10, + max_gas_per_batch: 500_000_000_000, + max_pubdata_per_batch: 100_000_000_000, }), ); @@ -134,10 +145,10 @@ impl Tester { pool, &config, wallets.state_keeper.unwrap().fee_account.address(), - Some(Default::default()), - Default::default(), Duration::from_secs(1), L2ChainId::from(270), + Some(Default::default()), + Default::default(), ) .unwrap(); @@ -158,7 +169,7 @@ impl Tester { patch: 0.into(), }, &self.base_system_contracts, - &get_system_smart_contracts(), + &get_system_smart_contracts(false), L1VerifierConfig::default(), ) .await diff --git a/core/node/state_keeper/src/keeper.rs b/core/node/state_keeper/src/keeper.rs index b40b8f304ff..49a05cc93e3 100644 --- a/core/node/state_keeper/src/keeper.rs +++ b/core/node/state_keeper/src/keeper.rs @@ -17,8 +17,9 @@ use zksync_multivm::{ use zksync_shared_metrics::{TxStage, APP_METRICS}; use zksync_state::{OwnedStorage, ReadStorageFactory}; use zksync_types::{ - block::L2BlockExecutionData, l2::TransactionType, protocol_upgrade::ProtocolUpgradeTx, - protocol_version::ProtocolVersionId, utils::display_timestamp, L1BatchNumber, Transaction, + block::L2BlockExecutionData, commitment::PubdataParams, l2::TransactionType, + protocol_upgrade::ProtocolUpgradeTx, protocol_version::ProtocolVersionId, + utils::display_timestamp, 
L1BatchNumber, Transaction, }; use crate::{ @@ -116,6 +117,7 @@ impl ZkSyncStateKeeper { let PendingBatchData { mut l1_batch_env, mut system_env, + mut pubdata_params, pending_l2_blocks, } = match pending_batch_params { Some(params) => { @@ -132,7 +134,7 @@ impl ZkSyncStateKeeper { } None => { tracing::info!("There is no open pending batch, starting a new empty batch"); - let (system_env, l1_batch_env) = self + let (system_env, l1_batch_env, pubdata_params) = self .wait_for_new_batch_env(&cursor) .await .map_err(|e| e.context("wait_for_new_batch_params()"))?; @@ -140,18 +142,19 @@ impl ZkSyncStateKeeper { l1_batch_env, pending_l2_blocks: Vec::new(), system_env, + pubdata_params, } } }; let protocol_version = system_env.version; - let mut updates_manager = UpdatesManager::new(&l1_batch_env, &system_env); + let mut updates_manager = UpdatesManager::new(&l1_batch_env, &system_env, pubdata_params); let mut protocol_upgrade_tx: Option = self .load_protocol_upgrade_tx(&pending_l2_blocks, protocol_version, l1_batch_env.number) .await?; let mut batch_executor = self - .create_batch_executor(l1_batch_env.clone(), system_env.clone()) + .create_batch_executor(l1_batch_env.clone(), system_env.clone(), pubdata_params) .await?; self.restore_state( &mut *batch_executor, @@ -201,10 +204,11 @@ impl ZkSyncStateKeeper { // Start the new batch. next_cursor.l1_batch += 1; - (system_env, l1_batch_env) = self.wait_for_new_batch_env(&next_cursor).await?; - updates_manager = UpdatesManager::new(&l1_batch_env, &system_env); + (system_env, l1_batch_env, pubdata_params) = + self.wait_for_new_batch_env(&next_cursor).await?; + updates_manager = UpdatesManager::new(&l1_batch_env, &system_env, pubdata_params); batch_executor = self - .create_batch_executor(l1_batch_env.clone(), system_env.clone()) + .create_batch_executor(l1_batch_env.clone(), system_env.clone(), pubdata_params) .await?; let version_changed = system_env.version != sealed_batch_protocol_version; @@ -221,6 +225,7 @@ impl ZkSyncStateKeeper { &mut self, l1_batch_env: L1BatchEnv, system_env: SystemEnv, + pubdata_params: PubdataParams, ) -> Result>, Error> { let storage = self .storage_factory @@ -230,7 +235,7 @@ impl ZkSyncStateKeeper { .ok_or(Error::Canceled)?; Ok(self .batch_executor - .init_batch(storage, l1_batch_env, system_env)) + .init_batch(storage, l1_batch_env, system_env, pubdata_params)) } /// This function is meant to be called only once during the state-keeper initialization. @@ -327,7 +332,7 @@ impl ZkSyncStateKeeper { async fn wait_for_new_batch_env( &mut self, cursor: &IoCursor, - ) -> Result<(SystemEnv, L1BatchEnv), Error> { + ) -> Result<(SystemEnv, L1BatchEnv, PubdataParams), Error> { // `io.wait_for_new_batch_params(..)` is not cancel-safe; once we get new batch params, we must hold onto them // until we get the rest of parameters from I/O or receive a stop signal. 
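// --- Editorial example (not part of the diff) ---------------------------------------------
// The cancel-safety pattern described in the comment above, reduced to plain tokio:
// `fetch_params` stands in for the non-cancel-safe `wait_for_new_batch_params` call and is
// awaited to completion, while only the follow-up I/O is raced against the stop signal.
async fn with_cancel_safety(
    fetch_params: impl std::future::Future<Output = u64>,
    follow_up: impl std::future::Future<Output = u64>,
    mut stop: tokio::sync::watch::Receiver<bool>,
) -> Option<u64> {
    // Never race this future: cancelling it halfway would lose the batch params.
    let params = fetch_params.await;
    tokio::select! {
        rest = follow_up => Some(params + rest),
        // Cancelling here is fine; `params` is already held.
        _ = stop.changed() => None,
    }
}
// -------------------------------------------------------------------------------------------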
let params = self.wait_for_new_batch_params(cursor).await?; @@ -498,8 +503,9 @@ impl ZkSyncStateKeeper { updates_manager.extend_from_executed_transaction( tx, - *tx_result, + *tx_result.clone(), compressed_bytecodes, + tx_result.new_known_factory_deps.unwrap_or_default(), tx_l1_gas_this_tx, tx_execution_metrics, call_tracer_result, @@ -624,8 +630,9 @@ impl ZkSyncStateKeeper { } = *tx_metrics; updates_manager.extend_from_executed_transaction( tx, - *tx_result, + *tx_result.clone(), compressed_bytecodes, + tx_result.new_known_factory_deps.unwrap_or_default(), tx_l1_gas_this_tx, tx_execution_metrics, call_tracer_result, @@ -685,6 +692,7 @@ impl ZkSyncStateKeeper { tx_result, tx_metrics, compressed_bytecodes, + call_tracer_result, .. } = exec_result else { @@ -704,11 +712,12 @@ impl ZkSyncStateKeeper { } = *tx_metrics; updates_manager.extend_from_executed_transaction( tx, - *tx_result, + *tx_result.clone(), compressed_bytecodes, + tx_result.new_known_factory_deps.unwrap_or_default(), tx_l1_gas_this_tx, tx_execution_metrics, - vec![], + call_tracer_result, ); Ok(()) } diff --git a/core/node/state_keeper/src/mempool_actor.rs b/core/node/state_keeper/src/mempool_actor.rs index dbe1e4cb977..a17f2670cbb 100644 --- a/core/node/state_keeper/src/mempool_actor.rs +++ b/core/node/state_keeper/src/mempool_actor.rs @@ -89,20 +89,35 @@ impl MempoolFetcher { .await .context("failed getting pending protocol version")?; - let l2_tx_filter = l2_tx_filter( - self.batch_fee_input_provider.as_ref(), - protocol_version.into(), - ) - .await - .context("failed creating L2 transaction filter")?; + let (fee_per_gas, gas_per_pubdata) = if let Some(unsealed_batch) = storage + .blocks_dal() + .get_unsealed_l1_batch() + .await + .context("failed getting unsealed batch")? + { + let (fee_per_gas, gas_per_pubdata) = derive_base_fee_and_gas_per_pubdata( + unsealed_batch.fee_input, + protocol_version.into(), + ); + (fee_per_gas, gas_per_pubdata as u32) + } else { + let filter = l2_tx_filter( + self.batch_fee_input_provider.as_ref(), + protocol_version.into(), + ) + .await + .context("failed creating L2 transaction filter")?; + + (filter.fee_per_gas, filter.gas_per_pubdata) + }; let transactions = storage .transactions_dal() .sync_mempool( &mempool_info.stashed_accounts, &mempool_info.purged_accounts, - l2_tx_filter.gas_per_pubdata, - l2_tx_filter.fee_per_gas, + gas_per_pubdata, + fee_per_gas, self.sync_batch_size, ) .await diff --git a/core/node/state_keeper/src/seal_criteria/mod.rs b/core/node/state_keeper/src/seal_criteria/mod.rs index e3fe849e802..962cc807318 100644 --- a/core/node/state_keeper/src/seal_criteria/mod.rs +++ b/core/node/state_keeper/src/seal_criteria/mod.rs @@ -277,6 +277,8 @@ impl L2BlockMaxPayloadSizeSealer { #[cfg(test)] mod tests { + use std::collections::HashMap; + use zksync_utils::time::seconds_since_epoch; use super::*; @@ -287,6 +289,7 @@ mod tests { tx, create_execution_result([]), vec![], + HashMap::new(), BlockGasCount::default(), VmExecutionMetrics::default(), vec![], diff --git a/core/node/state_keeper/src/testonly/mod.rs b/core/node/state_keeper/src/testonly/mod.rs index efa8d738ad0..7023463df0e 100644 --- a/core/node/state_keeper/src/testonly/mod.rs +++ b/core/node/state_keeper/src/testonly/mod.rs @@ -16,9 +16,10 @@ use zksync_multivm::interface::{ use zksync_state::OwnedStorage; use zksync_test_account::Account; use zksync_types::{ - fee::Fee, get_code_key, get_known_code_key, utils::storage_key_for_standard_token_balance, - AccountTreeId, Address, Execute, L1BatchNumber, L2BlockNumber, 
PriorityOpId, StorageLog, - Transaction, H256, L2_BASE_TOKEN_ADDRESS, SYSTEM_CONTEXT_MINIMAL_BASE_FEE, U256, + commitment::PubdataParams, fee::Fee, get_code_key, get_known_code_key, + utils::storage_key_for_standard_token_balance, AccountTreeId, Address, Execute, L1BatchNumber, + L2BlockNumber, PriorityOpId, StorageLog, Transaction, H256, L2_BASE_TOKEN_ADDRESS, + SYSTEM_CONTEXT_MINIMAL_BASE_FEE, U256, }; use zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; @@ -35,6 +36,7 @@ pub(crate) fn successful_exec() -> BatchTransactionExecutionResult { logs: Default::default(), statistics: Default::default(), refunds: Default::default(), + new_known_factory_deps: None, }), compressed_bytecodes: vec![], call_traces: vec![], @@ -51,6 +53,7 @@ impl BatchExecutorFactory for MockBatchExecutor { _storage: OwnedStorage, _l1_batch_env: L1BatchEnv, _system_env: SystemEnv, + _pubdata_params: PubdataParams, ) -> Box> { Box::new(Self) } diff --git a/core/node/state_keeper/src/testonly/test_batch_executor.rs b/core/node/state_keeper/src/testonly/test_batch_executor.rs index 4a58e9e0a95..45787b18f3c 100644 --- a/core/node/state_keeper/src/testonly/test_batch_executor.rs +++ b/core/node/state_keeper/src/testonly/test_batch_executor.rs @@ -27,8 +27,9 @@ use zksync_multivm::{ use zksync_node_test_utils::create_l2_transaction; use zksync_state::{interface::StorageView, OwnedStorage, ReadStorageFactory}; use zksync_types::{ - fee_model::BatchFeeInput, l2_to_l1_log::UserL2ToL1Log, protocol_upgrade::ProtocolUpgradeTx, - Address, L1BatchNumber, L2BlockNumber, L2ChainId, ProtocolVersionId, Transaction, H256, + commitment::PubdataParams, fee_model::BatchFeeInput, l2_to_l1_log::UserL2ToL1Log, + protocol_upgrade::ProtocolUpgradeTx, Address, L1BatchNumber, L2BlockNumber, L2ChainId, + ProtocolVersionId, Transaction, H256, }; use crate::{ @@ -264,6 +265,7 @@ pub(crate) fn successful_exec_with_log() -> BatchTransactionExecutionResult { }, statistics: Default::default(), refunds: Default::default(), + new_known_factory_deps: None, }), compressed_bytecodes: vec![], call_traces: vec![], @@ -278,6 +280,7 @@ pub(crate) fn rejected_exec(reason: Halt) -> BatchTransactionExecutionResult { logs: Default::default(), statistics: Default::default(), refunds: Default::default(), + new_known_factory_deps: None, }), compressed_bytecodes: vec![], call_traces: vec![], @@ -421,6 +424,7 @@ impl BatchExecutorFactory for TestBatchExecutorBuilder { _storage: OwnedStorage, _l1_batch_env: L1BatchEnv, _system_env: SystemEnv, + _pubdata_params: PubdataParams, ) -> Box> { let executor = TestBatchExecutor::new(self.txs.pop_front().unwrap(), self.rollback_set.clone()); diff --git a/core/node/state_keeper/src/tests/mod.rs b/core/node/state_keeper/src/tests/mod.rs index 4a5a099d977..a1973aaed11 100644 --- a/core/node/state_keeper/src/tests/mod.rs +++ b/core/node/state_keeper/src/tests/mod.rs @@ -58,8 +58,8 @@ pub(crate) fn pending_batch_data(pending_l2_blocks: Vec) - execution_mode: TxExecutionMode::VerifyExecute, default_validation_computational_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, chain_id: L2ChainId::from(270), - pubdata_params: Default::default(), }, + pubdata_params: Default::default(), pending_l2_blocks, } } @@ -73,7 +73,6 @@ pub(super) fn default_system_env() -> SystemEnv { execution_mode: TxExecutionMode::VerifyExecute, default_validation_computational_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, chain_id: L2ChainId::from(270), - pubdata_params: Default::default(), } } @@ -104,7 +103,7 @@ pub(super) fn default_l1_batch_env( pub(super) fn 
create_updates_manager() -> UpdatesManager { let l1_batch_env = default_l1_batch_env(1, 1, Address::default()); - UpdatesManager::new(&l1_batch_env, &default_system_env()) + UpdatesManager::new(&l1_batch_env, &default_system_env(), Default::default()) } pub(super) fn create_transaction(fee_per_gas: u64, gas_per_pubdata: u64) -> Transaction { @@ -140,6 +139,7 @@ pub(super) fn create_execution_result( circuit_statistic: Default::default(), }, refunds: Refunds::default(), + new_known_factory_deps: None, } } diff --git a/core/node/state_keeper/src/updates/l1_batch_updates.rs b/core/node/state_keeper/src/updates/l1_batch_updates.rs index aa2e22cac48..2979ebbd8c2 100644 --- a/core/node/state_keeper/src/updates/l1_batch_updates.rs +++ b/core/node/state_keeper/src/updates/l1_batch_updates.rs @@ -49,6 +49,8 @@ impl L1BatchUpdates { #[cfg(test)] mod tests { + use std::collections::HashMap; + use zksync_multivm::vm_latest::TransactionVmExt; use zksync_types::{L2BlockNumber, ProtocolVersionId, H256}; @@ -76,6 +78,7 @@ mod tests { BlockGasCount::default(), VmExecutionMetrics::default(), vec![], + HashMap::new(), vec![], ); diff --git a/core/node/state_keeper/src/updates/l2_block_updates.rs b/core/node/state_keeper/src/updates/l2_block_updates.rs index d8673088dc3..27995b384ab 100644 --- a/core/node/state_keeper/src/updates/l2_block_updates.rs +++ b/core/node/state_keeper/src/updates/l2_block_updates.rs @@ -1,17 +1,14 @@ use std::collections::HashMap; -use once_cell::sync::Lazy; use zksync_multivm::{ interface::{ Call, CompressedBytecodeInfo, ExecutionResult, L2BlockEnv, TransactionExecutionResult, TxExecutionStatus, VmEvent, VmExecutionMetrics, VmExecutionResultAndLogs, }, - vm_latest::TransactionVmExt, + vm_latest::{utils::extract_bytecodes_marked_as_known, TransactionVmExt}, }; -use zksync_system_constants::KNOWN_CODES_STORAGE_ADDRESS; use zksync_types::{ block::{BlockGasCount, L2BlockHasher}, - ethabi, l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, L2BlockNumber, ProtocolVersionId, StorageLogWithPreviousValue, Transaction, H256, }; @@ -19,27 +16,6 @@ use zksync_utils::bytecode::hash_bytecode; use crate::metrics::KEEPER_METRICS; -/// Extracts all bytecodes marked as known on the system contracts. -fn extract_bytecodes_marked_as_known(all_generated_events: &[VmEvent]) -> Vec { - static PUBLISHED_BYTECODE_SIGNATURE: Lazy = Lazy::new(|| { - ethabi::long_signature( - "MarkedAsKnown", - &[ethabi::ParamType::FixedBytes(32), ethabi::ParamType::Bool], - ) - }); - - all_generated_events - .iter() - .filter(|event| { - // Filter events from the deployer contract that match the expected signature. 
- event.address == KNOWN_CODES_STORAGE_ADDRESS - && event.indexed_topics.len() == 3 - && event.indexed_topics[0] == *PUBLISHED_BYTECODE_SIGNATURE - }) - .map(|event| event.indexed_topics[1]) - .collect() -} - #[derive(Debug, Clone, PartialEq)] pub struct L2BlockUpdates { pub executed_transactions: Vec, @@ -104,6 +80,7 @@ impl L2BlockUpdates { self.block_execution_metrics += execution_metrics; } + #[allow(clippy::too_many_arguments)] pub(crate) fn extend_from_executed_transaction( &mut self, tx: Transaction, @@ -111,6 +88,7 @@ impl L2BlockUpdates { tx_l1_gas_this_tx: BlockGasCount, execution_metrics: VmExecutionMetrics, compressed_bytecodes: Vec, + new_known_factory_deps: HashMap>, call_traces: Vec, ) { let saved_factory_deps = @@ -145,12 +123,15 @@ impl L2BlockUpdates { // Get transaction factory deps let factory_deps = &tx.execute.factory_deps; - let tx_factory_deps: HashMap<_, _> = factory_deps + let mut tx_factory_deps: HashMap<_, _> = factory_deps .iter() - .map(|bytecode| (hash_bytecode(bytecode), bytecode)) + .map(|bytecode| (hash_bytecode(bytecode), bytecode.clone())) .collect(); + // Ensure that *dynamic* factory deps (ones that may be created when executing EVM contracts) + // are added into the lookup map as well. + tx_factory_deps.extend(new_known_factory_deps); - // Save all bytecodes that were marked as known on the bootloader + // Save all bytecodes that were marked as known in the bootloader let known_bytecodes = saved_factory_deps.into_iter().map(|bytecode_hash| { let bytecode = tx_factory_deps.get(&bytecode_hash).unwrap_or_else(|| { panic!( @@ -230,6 +211,7 @@ mod tests { BlockGasCount::default(), VmExecutionMetrics::default(), vec![], + HashMap::new(), vec![], ); diff --git a/core/node/state_keeper/src/updates/mod.rs b/core/node/state_keeper/src/updates/mod.rs index 9d12d86de6f..b1bd35c921c 100644 --- a/core/node/state_keeper/src/updates/mod.rs +++ b/core/node/state_keeper/src/updates/mod.rs @@ -1,3 +1,5 @@ +use std::collections::HashMap; + use zksync_contracts::BaseSystemContractsHashes; use zksync_multivm::{ interface::{ @@ -8,7 +10,7 @@ use zksync_multivm::{ }; use zksync_types::{ block::BlockGasCount, commitment::PubdataParams, fee_model::BatchFeeInput, Address, - L1BatchNumber, L2BlockNumber, ProtocolVersionId, Transaction, + L1BatchNumber, L2BlockNumber, ProtocolVersionId, Transaction, H256, }; pub(crate) use self::{l1_batch_updates::L1BatchUpdates, l2_block_updates::L2BlockUpdates}; @@ -30,20 +32,24 @@ pub mod l2_block_updates; #[derive(Debug)] pub struct UpdatesManager { batch_timestamp: u64, - fee_account_address: Address, + pub fee_account_address: Address, batch_fee_input: BatchFeeInput, base_fee_per_gas: u64, base_system_contract_hashes: BaseSystemContractsHashes, - pubdata_params: PubdataParams, protocol_version: ProtocolVersionId, storage_view_cache: Option, pub l1_batch: L1BatchUpdates, pub l2_block: L2BlockUpdates, pub storage_writes_deduplicator: StorageWritesDeduplicator, + pubdata_params: PubdataParams, } impl UpdatesManager { - pub fn new(l1_batch_env: &L1BatchEnv, system_env: &SystemEnv) -> Self { + pub fn new( + l1_batch_env: &L1BatchEnv, + system_env: &SystemEnv, + pubdata_params: PubdataParams, + ) -> Self { let protocol_version = system_env.version; Self { batch_timestamp: l1_batch_env.timestamp, @@ -52,7 +58,6 @@ impl UpdatesManager { base_fee_per_gas: get_batch_base_fee(l1_batch_env, protocol_version.into()), protocol_version, base_system_contract_hashes: system_env.base_system_smart_contracts.hashes(), - pubdata_params: 
system_env.pubdata_params, l1_batch: L1BatchUpdates::new(l1_batch_env.number), l2_block: L2BlockUpdates::new( l1_batch_env.first_l2_block.timestamp, @@ -63,6 +68,7 @@ ), storage_writes_deduplicator: StorageWritesDeduplicator::new(), storage_view_cache: None, + pubdata_params, } } @@ -95,11 +101,11 @@ fee_account_address: self.fee_account_address, fee_input: self.batch_fee_input, base_fee_per_gas: self.base_fee_per_gas, - pubdata_params: self.pubdata_params, base_system_contracts_hashes: self.base_system_contract_hashes, protocol_version: Some(self.protocol_version), l2_legacy_shared_bridge_addr, pre_insert_txs, + pubdata_params: self.pubdata_params, } } @@ -107,11 +113,13 @@ self.protocol_version } + #[allow(clippy::too_many_arguments)] pub fn extend_from_executed_transaction( &mut self, tx: Transaction, tx_execution_result: VmExecutionResultAndLogs, compressed_bytecodes: Vec<CompressedBytecodeInfo>, + new_known_factory_deps: HashMap<H256, Vec<u8>>, tx_l1_gas_this_tx: BlockGasCount, execution_metrics: VmExecutionMetrics, call_traces: Vec<Call>, @@ -127,6 +135,7 @@ tx_l1_gas_this_tx, execution_metrics, compressed_bytecodes, + new_known_factory_deps, call_traces, ); latency.observe(); @@ -210,11 +219,11 @@ pub struct L2BlockSealCommand { pub base_system_contracts_hashes: BaseSystemContractsHashes, pub protocol_version: Option<ProtocolVersionId>, pub l2_legacy_shared_bridge_addr: Option<Address>
, - pub pubdata_params: PubdataParams, /// Whether transactions should be pre-inserted to DB. /// Should be set to `true` for EN's IO as EN doesn't store transactions in DB /// before they are included into L2 blocks. pub pre_insert_txs: bool, + pub pubdata_params: PubdataParams, } #[cfg(test)] @@ -237,6 +246,7 @@ mod tests { tx, create_execution_result([]), vec![], + HashMap::new(), new_block_gas_count(), VmExecutionMetrics::default(), vec![], diff --git a/core/node/tee_verifier_input_producer/Cargo.toml b/core/node/tee_verifier_input_producer/Cargo.toml deleted file mode 100644 index 7a5a4de5d0c..00000000000 --- a/core/node/tee_verifier_input_producer/Cargo.toml +++ /dev/null @@ -1,27 +0,0 @@ -[package] -name = "zksync_tee_verifier_input_producer" -description = "ZKsync TEE verifier input producer" -version.workspace = true -edition.workspace = true -authors.workspace = true -homepage.workspace = true -repository.workspace = true -license.workspace = true -keywords.workspace = true -categories.workspace = true - -[dependencies] -zksync_dal.workspace = true -zksync_object_store.workspace = true -zksync_prover_interface.workspace = true -zksync_queued_job_processor.workspace = true -zksync_tee_verifier.workspace = true -zksync_types.workspace = true -zksync_utils.workspace = true -zksync_vm_executor.workspace = true -vise.workspace = true - -anyhow.workspace = true -async-trait.workspace = true -tracing.workspace = true -tokio = { workspace = true, features = ["time"] } diff --git a/core/node/tee_verifier_input_producer/README.md b/core/node/tee_verifier_input_producer/README.md deleted file mode 100644 index 75a2029985c..00000000000 --- a/core/node/tee_verifier_input_producer/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# `zksync_tee_verifier_input_producer` - -Component responsible for producing inputs for verification of execution in TEE. diff --git a/core/node/tee_verifier_input_producer/src/lib.rs b/core/node/tee_verifier_input_producer/src/lib.rs deleted file mode 100644 index 8a99aa07ae5..00000000000 --- a/core/node/tee_verifier_input_producer/src/lib.rs +++ /dev/null @@ -1,261 +0,0 @@ -//! Produces input for a TEE Verifier -//! -//! Extract all data needed to re-execute and verify an L1Batch without accessing -//! the DB and/or the object store. -//! -//! For testing purposes, the L1 batch is re-executed immediately for now. -//! Eventually, this component will only extract the inputs and send them to another -//! machine over a "to be defined" channel, e.g., save them to an object store. - -use std::{sync::Arc, time::Instant}; - -use anyhow::Context; -use async_trait::async_trait; -use tokio::task::JoinHandle; -use zksync_dal::{tee_verifier_input_producer_dal::JOB_MAX_ATTEMPT, ConnectionPool, Core, CoreDal}; -use zksync_object_store::ObjectStore; -use zksync_prover_interface::inputs::{ - TeeVerifierInput, V1TeeVerifierInput, WitnessInputMerklePaths, -}; -use zksync_queued_job_processor::JobProcessor; -use zksync_tee_verifier::Verify; -use zksync_types::{tee_types::TeeType, L1BatchNumber, L2ChainId}; -use zksync_utils::u256_to_h256; -use zksync_vm_executor::storage::L1BatchParamsProvider; - -use self::metrics::METRICS; - -mod metrics; - -/// Component that extracts all data (from DB) necessary to run a TEE Verifier. 
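-/// Holds the DB connection pool, the L2 chain ID, and the object store used to fetch job inputs and upload results.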
-#[derive(Debug)] -pub struct TeeVerifierInputProducer { - connection_pool: ConnectionPool<Core>, - l2_chain_id: L2ChainId, - object_store: Arc<dyn ObjectStore>, -} - -impl TeeVerifierInputProducer { - pub async fn new( - connection_pool: ConnectionPool<Core>, - object_store: Arc<dyn ObjectStore>, - l2_chain_id: L2ChainId, - ) -> anyhow::Result<Self> { - Ok(TeeVerifierInputProducer { - connection_pool, - object_store, - l2_chain_id, - }) - } - - async fn process_job_impl( - l1_batch_number: L1BatchNumber, - started_at: Instant, - connection_pool: ConnectionPool<Core>, - object_store: Arc<dyn ObjectStore>, - l2_chain_id: L2ChainId, - ) -> anyhow::Result<TeeVerifierInput> { - let prepare_basic_circuits_job: WitnessInputMerklePaths = object_store - .get(l1_batch_number) - .await - .context("failed to get PrepareBasicCircuitsJob from object store")?; - - let mut connection = connection_pool - .connection() - .await - .context("failed to get connection for TeeVerifierInputProducer")?; - - let l2_blocks_execution_data = connection - .transactions_dal() - .get_l2_blocks_to_execute_for_l1_batch(l1_batch_number) - .await?; - - let l1_batch_header = connection - .blocks_dal() - .get_l1_batch_header(l1_batch_number) - .await - .with_context(|| format!("header is missing for L1 batch #{l1_batch_number}"))? - .unwrap(); - - let l1_batch_params_provider = L1BatchParamsProvider::new(&mut connection) - .await - .context("failed initializing L1 batch params provider")?; - - // In the state keeper, this value is used to reject execution. - // All batches have already been executed by State Keeper. - // This means we don't want to reject any execution, therefore we're using MAX as an allow all. - let validation_computational_gas_limit = u32::MAX; - - let (system_env, l1_batch_env) = l1_batch_params_provider - .load_l1_batch_env( - &mut connection, - l1_batch_number, - validation_computational_gas_limit, - l2_chain_id, - ) - .await? - .with_context(|| format!("expected L1 batch #{l1_batch_number} to be sealed"))?; - - let used_contract_hashes = l1_batch_header - .used_contract_hashes - .into_iter() - .map(u256_to_h256) - .collect(); - - // `get_factory_deps()` returns the bytecode in chunks of `Vec<[u8; 32]>`, - // but `fn store_factory_dep(&mut self, hash: H256, bytecode: Vec<u8>)` in `InMemoryStorage` wants flat byte vecs.
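- // The nested `into_flattened` helper below concatenates those fixed-size chunks into a single flat `Vec<u8>`.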
- pub fn into_flattened<T: Clone, const N: usize>(data: Vec<[T; N]>) -> Vec<T> { - let mut new = Vec::new(); - for slice in data.iter() { - new.extend_from_slice(slice); - } - new - } - - let used_contracts = connection - .factory_deps_dal() - .get_factory_deps(&used_contract_hashes) - .await - .into_iter() - .map(|(hash, bytes)| (u256_to_h256(hash), into_flattened(bytes))) - .collect(); - - tracing::info!("Started execution of l1_batch: {l1_batch_number:?}"); - - let tee_verifier_input = V1TeeVerifierInput::new( - prepare_basic_circuits_job, - l2_blocks_execution_data, - l1_batch_env, - system_env, - used_contracts, - ); - - // TODO (SEC-263): remove these 2 lines after successful testnet runs - tee_verifier_input.clone().verify()?; - tracing::info!("Looks like we verified {l1_batch_number} correctly"); - - tracing::info!("Finished execution of l1_batch: {l1_batch_number:?}"); - - METRICS.process_batch_time.observe(started_at.elapsed()); - tracing::debug!( - "TeeVerifierInputProducer took {:?} for L1BatchNumber {}", - started_at.elapsed(), - l1_batch_number.0 - ); - - Ok(TeeVerifierInput::new(tee_verifier_input)) - } -} - -#[async_trait] -impl JobProcessor for TeeVerifierInputProducer { - type Job = L1BatchNumber; - type JobId = L1BatchNumber; - type JobArtifacts = TeeVerifierInput; - const SERVICE_NAME: &'static str = "tee_verifier_input_producer"; - - async fn get_next_job(&self) -> anyhow::Result<Option<(Self::JobId, Self::Job)>> { - let mut connection = self.connection_pool.connection().await?; - let l1_batch_to_process = connection - .tee_verifier_input_producer_dal() - .get_next_tee_verifier_input_producer_job() - .await - .context("failed to get next basic witness input producer job")?; - Ok(l1_batch_to_process.map(|number| (number, number))) - } - - async fn save_failure(&self, job_id: Self::JobId, started_at: Instant, error: String) { - let attempts = self - .connection_pool - .connection() - .await - .unwrap() - .tee_verifier_input_producer_dal() - .mark_job_as_failed(job_id, started_at, error) - .await - .expect("errored whilst marking job as failed"); - if let Some(tries) = attempts { - tracing::warn!("Failed to process job: {job_id:?}, after {tries} tries."); - } else { - tracing::warn!("L1 Batch {job_id:?} was processed successfully by another worker."); - } - } - - async fn process_job( - &self, - _job_id: &Self::JobId, - job: Self::Job, - started_at: Instant, - ) -> JoinHandle<anyhow::Result<Self::JobArtifacts>> { - let l2_chain_id = self.l2_chain_id; - let connection_pool = self.connection_pool.clone(); - let object_store = self.object_store.clone(); - tokio::task::spawn(async move { - Self::process_job_impl( - job, - started_at, - connection_pool.clone(), - object_store, - l2_chain_id, - ) - .await - }) - } - - async fn save_result( - &self, - job_id: Self::JobId, - started_at: Instant, - artifacts: Self::JobArtifacts, - ) -> anyhow::Result<()> { - let observer: vise::LatencyObserver = METRICS.upload_input_time.start(); - let object_path = self - .object_store - .put(job_id, &artifacts) - .await - .context("failed to upload artifacts for TeeVerifierInputProducer")?; - observer.observe(); - let mut connection = self - .connection_pool - .connection() - .await - .context("failed to acquire DB connection for TeeVerifierInputProducer")?; - let mut transaction = connection - .start_transaction() - .await - .context("failed to acquire DB transaction for TeeVerifierInputProducer")?; - transaction - .tee_verifier_input_producer_dal() - .mark_job_as_successful(job_id, started_at, &object_path) - .await - .context("failed to mark job as successful for
TeeVerifierInputProducer")?; - transaction - .tee_proof_generation_dal() - .insert_tee_proof_generation_job(job_id, TeeType::Sgx) - .await?; - transaction - .commit() - .await - .context("failed to commit DB transaction for TeeVerifierInputProducer")?; - METRICS.block_number_processed.set(job_id.0 as u64); - Ok(()) - } - - fn max_attempts(&self) -> u32 { - JOB_MAX_ATTEMPT as u32 - } - - async fn get_job_attempts(&self, job_id: &L1BatchNumber) -> anyhow::Result { - let mut connection = self - .connection_pool - .connection() - .await - .context("failed to acquire DB connection for TeeVerifierInputProducer")?; - connection - .tee_verifier_input_producer_dal() - .get_tee_verifier_input_producer_job_attempts(*job_id) - .await - .map(|attempts| attempts.unwrap_or(0)) - .context("failed to get job attempts for TeeVerifierInputProducer") - } -} diff --git a/core/node/tee_verifier_input_producer/src/metrics.rs b/core/node/tee_verifier_input_producer/src/metrics.rs deleted file mode 100644 index 362804d338e..00000000000 --- a/core/node/tee_verifier_input_producer/src/metrics.rs +++ /dev/null @@ -1,18 +0,0 @@ -//! Metrics - -use std::time::Duration; - -use vise::{Buckets, Gauge, Histogram, Metrics, Unit}; - -#[derive(Debug, Metrics)] -#[metrics(prefix = "tee_verifier_input_producer")] -pub(crate) struct TeeVerifierInputProducerMetrics { - #[metrics(buckets = Buckets::LATENCIES, unit = Unit::Seconds)] - pub process_batch_time: Histogram, - #[metrics(buckets = Buckets::LATENCIES, unit = Unit::Seconds)] - pub upload_input_time: Histogram, - pub block_number_processed: Gauge, -} - -#[vise::register] -pub(super) static METRICS: vise::Global = vise::Global::new(); diff --git a/core/node/test_utils/src/lib.rs b/core/node/test_utils/src/lib.rs index 3caadaaf573..86ce3aadd9a 100644 --- a/core/node/test_utils/src/lib.rs +++ b/core/node/test_utils/src/lib.rs @@ -39,13 +39,13 @@ pub fn create_l2_block(number: u32) -> L2BlockHeader { base_fee_per_gas: 100, batch_fee_input: BatchFeeInput::l1_pegged(100, 100), fee_account_address: Address::zero(), - pubdata_params: Default::default(), gas_per_pubdata_limit: get_max_gas_per_pubdata_byte(ProtocolVersionId::latest().into()), base_system_contracts_hashes: BaseSystemContractsHashes::default(), protocol_version: Some(ProtocolVersionId::latest()), virtual_blocks: 1, gas_limit: 0, logs_bloom: Default::default(), + pubdata_params: Default::default(), } } @@ -57,6 +57,7 @@ pub fn create_l1_batch(number: u32) -> L1BatchHeader { BaseSystemContractsHashes { bootloader: H256::repeat_byte(1), default_aa: H256::repeat_byte(42), + evm_emulator: None, }, ProtocolVersionId::latest(), ); @@ -89,6 +90,7 @@ pub fn create_l1_batch_metadata(number: u32) -> L1BatchMetadata { zkporter_is_available: ZKPORTER_IS_AVAILABLE, bootloader_code_hash: BaseSystemContractsHashes::default().bootloader, default_aa_code_hash: BaseSystemContractsHashes::default().default_aa, + evm_emulator_code_hash: BaseSystemContractsHashes::default().evm_emulator, protocol_version: Some(ProtocolVersionId::latest()), }, aux_data_hash: H256::zero(), @@ -115,13 +117,10 @@ pub fn l1_batch_metadata_to_commitment_artifacts( commitment: metadata.commitment, }, l2_l1_merkle_root: metadata.l2_l1_merkle_root, - local_root: metadata.local_root.unwrap(), - aggregation_root: metadata.aggregation_root.unwrap(), compressed_state_diffs: Some(metadata.state_diffs_compressed.clone()), compressed_initial_writes: metadata.initial_writes_compressed.clone(), compressed_repeated_writes: metadata.repeated_writes_compressed.clone(), 
zkporter_is_available: ZKPORTER_IS_AVAILABLE, - state_diff_hash: metadata.state_diff_hash.unwrap(), aux_commitments: match ( metadata.bootloader_initial_content_commitment, metadata.events_queue_commitment, @@ -134,6 +133,9 @@ pub fn l1_batch_metadata_to_commitment_artifacts( } _ => None, }, + local_root: metadata.local_root.unwrap(), + aggregation_root: metadata.aggregation_root.unwrap(), + state_diff_hash: metadata.state_diff_hash.unwrap(), } } @@ -214,18 +216,19 @@ impl Snapshot { gas_per_pubdata_limit: get_max_gas_per_pubdata_byte( genesis_params.minor_protocol_version().into(), ), - pubdata_params: Default::default(), base_system_contracts_hashes: contracts.hashes(), protocol_version: Some(genesis_params.minor_protocol_version()), virtual_blocks: 1, gas_limit: 0, logs_bloom: Default::default(), + pubdata_params: Default::default(), }; Snapshot { l1_batch, l2_block, factory_deps: [&contracts.bootloader, &contracts.default_aa] .into_iter() + .chain(contracts.evm_emulator.as_ref()) .map(|c| (c.hash, zksync_utils::be_words_to_bytes(&c.code))) .collect(), storage_logs, diff --git a/core/node/vm_runner/src/impls/bwip.rs b/core/node/vm_runner/src/impls/bwip.rs index 6c2933635b4..a2cf126f549 100644 --- a/core/node/vm_runner/src/impls/bwip.rs +++ b/core/node/vm_runner/src/impls/bwip.rs @@ -209,6 +209,7 @@ async fn get_updates_manager_witness_input_data( ) -> anyhow::Result<VMRunWitnessInputData> { let initial_heap_content = output.batch.final_bootloader_memory.clone().unwrap(); // might be just empty let default_aa = system_env.base_system_smart_contracts.hashes().default_aa; + let evm_emulator = system_env.base_system_smart_contracts.hashes().evm_emulator; let bootloader = system_env.base_system_smart_contracts.hashes().bootloader; let bootloader_code_bytes = connection .factory_deps_dal() @@ -240,6 +241,22 @@ async fn get_updates_manager_witness_input_data( used_bytecodes.insert(account_code_hash, account_bytecode); } + let evm_emulator_code_hash = if let Some(evm_emulator) = evm_emulator { + let evm_emulator_code_hash = h256_to_u256(evm_emulator); + if used_contract_hashes.contains(&evm_emulator_code_hash) { + let evm_emulator_bytecode = connection + .factory_deps_dal() + .get_sealed_factory_dep(evm_emulator) + .await?
+ .ok_or_else(|| anyhow!("EVM emulator bytecode should exist"))?; + let evm_emulator_bytecode = bytes_to_chunks(&evm_emulator_bytecode); + used_bytecodes.insert(evm_emulator_code_hash, evm_emulator_bytecode); + } + Some(evm_emulator_code_hash) + } else { + None + }; + let storage_refunds = output.batch.final_execution_state.storage_refunds.clone(); let pubdata_costs = output.batch.final_execution_state.pubdata_costs.clone(); let witness_block_state = WitnessStorageState { @@ -254,6 +271,7 @@ async fn get_updates_manager_witness_input_data( protocol_version: system_env.version, bootloader_code, default_account_code_hash: account_code_hash, + evm_emulator_code_hash, storage_refunds, pubdata_costs, witness_block_state, diff --git a/core/node/vm_runner/src/process.rs b/core/node/vm_runner/src/process.rs index 4f7ac1f9728..dbd218c8dc5 100644 --- a/core/node/vm_runner/src/process.rs +++ b/core/node/vm_runner/src/process.rs @@ -82,6 +82,7 @@ impl VmRunner { storage, batch_data.l1_batch_env.clone(), batch_data.system_env.clone(), + batch_data.pubdata_params, ); let mut output_handler = self .output_handler_factory diff --git a/core/node/vm_runner/src/storage.rs b/core/node/vm_runner/src/storage.rs index 2285455ba24..9ab4ed87b9f 100644 --- a/core/node/vm_runner/src/storage.rs +++ b/core/node/vm_runner/src/storage.rs @@ -13,7 +13,9 @@ use zksync_state::{ AsyncCatchupTask, BatchDiff, OwnedStorage, RocksdbCell, RocksdbStorage, RocksdbStorageBuilder, RocksdbWithMemory, }; -use zksync_types::{block::L2BlockExecutionData, L1BatchNumber, L2ChainId}; +use zksync_types::{ + block::L2BlockExecutionData, commitment::PubdataParams, L1BatchNumber, L2ChainId, +}; use zksync_vm_executor::storage::L1BatchParamsProvider; use zksync_vm_interface::{L1BatchEnv, SystemEnv}; @@ -106,6 +108,8 @@ pub struct BatchExecuteData { pub l1_batch_env: L1BatchEnv, /// Execution process parameters. pub system_env: SystemEnv, + /// Pubdata building parameters. + pub pubdata_params: PubdataParams, /// List of L2 blocks and corresponding transactions that were executed within batch. 
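/// They are replayed in order when the VM runner re-executes the batch.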
pub l2_blocks: Vec<L2BlockExecutionData>, } @@ -394,7 +398,7 @@ pub(crate) async fn load_batch_execute_data( l1_batch_params_provider: &L1BatchParamsProvider, chain_id: L2ChainId, ) -> anyhow::Result<Option<BatchExecuteData>> { - let Some((system_env, l1_batch_env)) = l1_batch_params_provider + let Some((system_env, l1_batch_env, pubdata_params)) = l1_batch_params_provider .load_l1_batch_env( conn, l1_batch_number, @@ -415,6 +419,7 @@ pub(crate) async fn load_batch_execute_data( Ok(Some(BatchExecuteData { l1_batch_env, system_env, + pubdata_params, l2_blocks, })) } diff --git a/core/node/vm_runner/src/tests/mod.rs b/core/node/vm_runner/src/tests/mod.rs index cc96353e5c3..6eba504deec 100644 --- a/core/node/vm_runner/src/tests/mod.rs +++ b/core/node/vm_runner/src/tests/mod.rs @@ -323,6 +323,7 @@ async fn store_l1_batches( .iter() .map(|contract| hash_bytecode(&contract.bytecode)) .chain([genesis_params.base_system_contracts().hashes().default_aa]) + .chain(genesis_params.base_system_contracts().hashes().evm_emulator) .map(h256_to_u256) .collect(); diff --git a/core/node/vm_runner/src/tests/output_handler.rs b/core/node/vm_runner/src/tests/output_handler.rs index 131089d0f79..f57814ea449 100644 --- a/core/node/vm_runner/src/tests/output_handler.rs +++ b/core/node/vm_runner/src/tests/output_handler.rs @@ -66,12 +66,12 @@ impl OutputHandlerTester { code: vec![], hash: Default::default(), }, + evm_emulator: None, }, bootloader_gas_limit: 0, execution_mode: TxExecutionMode::VerifyExecute, default_validation_computational_gas_limit: 0, chain_id: Default::default(), - pubdata_params: Default::default(), }; let mut output_handler = self diff --git a/core/tests/recovery-test/src/index.ts b/core/tests/recovery-test/src/index.ts index 462404af606..8567be6d6d3 100644 --- a/core/tests/recovery-test/src/index.ts +++ b/core/tests/recovery-test/src/index.ts @@ -84,9 +84,9 @@ export async function getExternalNodeHealth(url: string) { } } -export async function dropNodeData(env: { [key: string]: string }, useZkSupervisor?: boolean, chain?: string) { - if (useZkSupervisor) { - let cmd = 'zk_inception external-node init'; +export async function dropNodeData(env: { [key: string]: string }, useZkStack?: boolean, chain?: string) { + if (useZkStack) { + let cmd = 'zkstack external-node init'; cmd += chain ? ` --chain ${chain}` : ''; await executeNodeCommand(env, cmd); } else { @@ -176,7 +176,7 @@ export class NodeProcess { logsFile: FileHandle | string, pathToHome: string, components: NodeComponents = NodeComponents.STANDARD, - useZkInception?: boolean, + useZkStack?: boolean, chain?: string ) { const logs = typeof logsFile === 'string' ? await fs.open(logsFile, 'a') : logsFile; @@ -186,7 +186,7 @@ export class NodeProcess { stdio: ['ignore', logs.fd, logs.fd], cwd: pathToHome, env, - useZkInception, + useZkStack, chain }); diff --git a/core/tests/recovery-test/src/utils.ts b/core/tests/recovery-test/src/utils.ts index 98c6b6d4405..c60f5603f17 100644 --- a/core/tests/recovery-test/src/utils.ts +++ b/core/tests/recovery-test/src/utils.ts @@ -48,19 +48,19 @@ export function runExternalNodeInBackground({ stdio, cwd, env, - useZkInception, + useZkStack, chain }: { components?: string[]; stdio: any; cwd?: Parameters<typeof _spawn>[0]['cwd']; env?: Parameters<typeof _spawn>[0]['env']; - useZkInception?: boolean; + useZkStack?: boolean; chain?: string; }): ChildProcessWithoutNullStreams { let command = ''; - if (useZkInception) { - command = 'zk_inception external-node run'; + if (useZkStack) { + command = 'zkstack external-node run'; command += chain ?
` --chain ${chain}` : ''; } else { command = 'zk external-node --'; diff --git a/core/tests/recovery-test/tests/snapshot-recovery.test.ts b/core/tests/recovery-test/tests/snapshot-recovery.test.ts index cadf146c522..eca0da78d78 100644 --- a/core/tests/recovery-test/tests/snapshot-recovery.test.ts +++ b/core/tests/recovery-test/tests/snapshot-recovery.test.ts @@ -458,10 +458,10 @@ async function decompressGzip(filePath: string): Promise<Buffer> { }); } -async function createSnapshot(zkSupervisor: boolean) { +async function createSnapshot(useZkStack: boolean) { let command = ''; - if (zkSupervisor) { - command = `zk_supervisor snapshot create`; + if (useZkStack) { + command = `zkstack dev snapshot create`; command += ` --chain ${fileConfig.chain}`; } else { command = `zk run snapshots-creator`; } diff --git a/core/tests/revert-test/tests/utils.ts b/core/tests/revert-test/tests/utils.ts index ea8a45b97c3..fe5cb40799a 100644 --- a/core/tests/revert-test/tests/utils.ts +++ b/core/tests/revert-test/tests/utils.ts @@ -51,19 +51,19 @@ export function runServerInBackground({ stdio, cwd, env, - useZkInception, + useZkStack, chain }: { components?: string[]; stdio: any; cwd?: Parameters<typeof _spawn>[0]['cwd']; env?: Parameters<typeof _spawn>[0]['env']; - useZkInception?: boolean; + useZkStack?: boolean; chain?: string; }): ChildProcessWithoutNullStreams { let command = ''; - if (useZkInception) { - command = 'zk_inception server'; + if (useZkStack) { + command = 'zkstack server'; if (chain) { command += ` --chain ${chain}`; } @@ -78,19 +78,19 @@ export function runExternalNodeInBackground({ stdio, cwd, env, - useZkInception, + useZkStack, chain }: { components?: string[]; stdio: any; cwd?: Parameters<typeof _spawn>[0]['cwd']; env?: Parameters<typeof _spawn>[0]['env']; - useZkInception?: boolean; + useZkStack?: boolean; chain?: string; }): ChildProcessWithoutNullStreams { let command = ''; - if (useZkInception) { - command = 'zk_inception external-node run'; + if (useZkStack) { + command = 'zkstack external-node run'; command += chain ? ` --chain ${chain}` : ''; } else { command = 'zk external-node'; @@ -334,7 +334,7 @@ export class NodeSpawner { stdio: ['ignore', logs, logs], cwd: pathToHome, env: env, - useZkInception: fileConfig.loadFromFile, + useZkStack: fileConfig.loadFromFile, chain: fileConfig.chain }); @@ -362,7 +362,7 @@ export class NodeSpawner { stdio: ['ignore', logs, logs], cwd: pathToHome, env, - useZkInception: fileConfig.loadFromFile, + useZkStack: fileConfig.loadFromFile, chain: fileConfig.chain }); diff --git a/core/tests/test_account/src/lib.rs b/core/tests/test_account/src/lib.rs index d0c97abab72..cfb539c0e0f 100644 --- a/core/tests/test_account/src/lib.rs +++ b/core/tests/test_account/src/lib.rs @@ -2,7 +2,7 @@ use ethabi::Token; use zksync_contracts::{ deployer_contract, load_contract, test_contracts::LoadnextContractExecutionParams, }; -use zksync_eth_signer::{EthereumSigner, PrivateKeySigner, TransactionParameters}; +use zksync_eth_signer::{PrivateKeySigner, TransactionParameters}; use zksync_system_constants::{ CONTRACT_DEPLOYER_ADDRESS, DEFAULT_L2_TX_GAS_PER_PUBDATA_BYTE, REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, @@ -54,6 +54,12 @@ impl Account { Self::new(K256PrivateKey::random_using(rng)) } + /// Creates an account deterministically from the provided seed.
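+ /// The private key is derived as `H256::from_low_u64_be(seed + 1)`; the `+ 1` keeps the key nonzero (an all-zero secp256k1 key is invalid).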
+ pub fn from_seed(seed: u32) -> Self { + let private_key_bytes = H256::from_low_u64_be(u64::from(seed) + 1); + Self::new(K256PrivateKey::from_bytes(private_key_bytes).unwrap()) + } + pub fn get_l2_tx_for_execute(&mut self, execute: Execute, fee: Option<Fee>) -> Transaction { let tx = self.get_l2_tx_for_execute_with_nonce(execute, fee, self.nonce); self.nonce += 1; @@ -154,7 +160,7 @@ impl Account { let max_fee_per_gas = U256::from(0u32); let gas_limit = U256::from(20_000_000); let factory_deps = execute.factory_deps; - abi::Transaction::L1 { + let tx = abi::Transaction::L1 { tx: abi::L2CanonicalTransaction { tx_type: PRIORITY_OPERATION_L2_TX_TYPE.into(), from: address_to_u256(&self.address), @@ -186,9 +192,8 @@ impl Account { .into(), factory_deps, eth_block: 0, - } - .try_into() - .unwrap() + }; + Transaction::from_abi(tx, false).unwrap() } pub fn get_test_contract_transaction( @@ -255,8 +260,8 @@ impl Account { PrivateKeySigner::new(self.private_key.clone()) } - pub async fn sign_legacy_tx(&self, tx: TransactionParameters) -> Vec<u8> { + pub fn sign_legacy_tx(&self, tx: TransactionParameters) -> Vec<u8> { let pk_signer = self.get_pk_signer(); - pk_signer.sign_transaction(tx).await.unwrap() + pk_signer.sign_transaction(tx) } } diff --git a/core/tests/ts-integration/src/utils.ts b/core/tests/ts-integration/src/utils.ts index 128d0be57d0..bb6fa93757e 100644 --- a/core/tests/ts-integration/src/utils.ts +++ b/core/tests/ts-integration/src/utils.ts @@ -20,21 +20,21 @@ export function runServerInBackground({ stdio, cwd, env, - useZkInception, + useZkStack, chain }: { components?: string[]; stdio: any; cwd?: ProcessEnvOptions['cwd']; env?: ProcessEnvOptions['env']; - useZkInception?: boolean; + useZkStack?: boolean; newL1GasPrice?: string; newPubdataPrice?: string; chain?: string; }): ChildProcessWithoutNullStreams { let command = ''; - if (useZkInception) { - command = 'zk_inception server'; + if (useZkStack) { + command = 'zkstack server'; if (chain) { command += ` --chain ${chain}`; } @@ -167,7 +167,7 @@ export class NodeSpawner { stdio: ['ignore', logs, logs], cwd: pathToHome, env: env, - useZkInception: fileConfig.loadFromFile, + useZkStack: fileConfig.loadFromFile, chain: fileConfig.chain }); diff --git a/core/tests/ts-integration/tests/api/web3.test.ts b/core/tests/ts-integration/tests/api/web3.test.ts index 00b856cea12..9db4ed211e9 100644 --- a/core/tests/ts-integration/tests/api/web3.test.ts +++ b/core/tests/ts-integration/tests/api/web3.test.ts @@ -189,7 +189,8 @@ describe.skip('web3 API compatibility tests', () => { ['eth_getCompilers', [], []], ['eth_hashrate', [], '0x0'], ['eth_mining', [], false], - ['eth_getUncleCountByBlockNumber', ['0x0'], '0x0'] + ['eth_getUncleCountByBlockNumber', ['0x0'], '0x0'], + ['eth_maxPriorityFeePerGas', [], '0x0'] ])('Should test bogus web3 methods (%s)', async (method: string, input: string[], output: string) => { await expect(alice.provider.send(method, input)).resolves.toEqual(output); }); @@ -271,7 +272,8 @@ describe.skip('web3 API compatibility tests', () => { const eip1559ApiReceipt = await alice.provider.getTransaction(eip1559Tx.hash); expect(eip1559ApiReceipt.maxFeePerGas).toEqual(eip1559Tx.maxFeePerGas!); - expect(eip1559ApiReceipt.maxPriorityFeePerGas).toEqual(eip1559Tx.maxPriorityFeePerGas!); + // `ethers` will use value provided by `eth_maxPriorityFeePerGas`, and we return 0 there.
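+ // (`eth_maxPriorityFeePerGas` is asserted to return `0x0` in the bogus-method table above.)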
+ expect(eip1559ApiReceipt.maxPriorityFeePerGas).toEqual(0n); }); test('Should test getFilterChanges for pending transactions', async () => { diff --git a/core/tests/ts-integration/tests/base-token.test.ts b/core/tests/ts-integration/tests/base-token.test.ts index 7f7974205dc..5abae0b89d3 100644 --- a/core/tests/ts-integration/tests/base-token.test.ts +++ b/core/tests/ts-integration/tests/base-token.test.ts @@ -9,7 +9,7 @@ import * as zksync from 'zksync-ethers'; import * as ethers from 'ethers'; import { scaledGasPrice, waitForBlockToBeFinalizedOnL1 } from '../src/helpers'; -const SECONDS = 1000; +const SECONDS = 2000; jest.setTimeout(100 * SECONDS); describe('base ERC20 contract checks', () => { diff --git a/core/tests/ts-integration/tests/contracts.test.ts b/core/tests/ts-integration/tests/contracts.test.ts index b6b9672750b..6cfb85fa027 100644 --- a/core/tests/ts-integration/tests/contracts.test.ts +++ b/core/tests/ts-integration/tests/contracts.test.ts @@ -16,7 +16,7 @@ import * as elliptic from 'elliptic'; import { RetryProvider } from '../src/retry-provider'; const SECONDS = 1000; -jest.setTimeout(300 * SECONDS); +jest.setTimeout(400 * SECONDS); // TODO: Leave only important ones. const contracts = { diff --git a/core/tests/ts-integration/tests/fees.test.ts b/core/tests/ts-integration/tests/fees.test.ts index 92bbfff1965..c9862c58507 100644 --- a/core/tests/ts-integration/tests/fees.test.ts +++ b/core/tests/ts-integration/tests/fees.test.ts @@ -280,7 +280,6 @@ testFees('Test fees', function () { }); afterAll(async () => { - await testMaster.deinitialize(); await mainNode.killAndWaitForShutdown(); // Returning the pubdata price to the default one @@ -289,6 +288,7 @@ testFees('Test fees', function () { deleteInternalEnforcedL1GasPrice(pathToHome, fileConfig); deleteInternalEnforcedPubdataPrice(pathToHome, fileConfig); mainNode = await mainNodeSpawner.spawnMainNode(); + await testMaster.deinitialize(); __ZKSYNC_TEST_CONTEXT_OWNER__.setL2NodePid(mainNode.proc.pid!); }); }); diff --git a/core/tests/upgrade-test/tests/upgrade.test.ts b/core/tests/upgrade-test/tests/upgrade.test.ts index 2e223b9d744..4065480b121 100644 --- a/core/tests/upgrade-test/tests/upgrade.test.ts +++ b/core/tests/upgrade-test/tests/upgrade.test.ts @@ -64,9 +64,21 @@ describe('Upgrade test', function () { complexUpgraderAddress = '0x000000000000000000000000000000000000800f'; if (fileConfig.loadFromFile) { - const generalConfig = loadConfig({ pathToHome, chain: fileConfig.chain, config: 'general.yaml' }); - const contractsConfig = loadConfig({ pathToHome, chain: fileConfig.chain, config: 'contracts.yaml' }); - const secretsConfig = loadConfig({ pathToHome, chain: fileConfig.chain, config: 'secrets.yaml' }); + const generalConfig = loadConfig({ + pathToHome, + chain: fileConfig.chain, + config: 'general.yaml' + }); + const contractsConfig = loadConfig({ + pathToHome, + chain: fileConfig.chain, + config: 'contracts.yaml' + }); + const secretsConfig = loadConfig({ + pathToHome, + chain: fileConfig.chain, + config: 'secrets.yaml' + }); ethProviderAddress = secretsConfig.l1.l1_rpc_url; web3JsonRpc = generalConfig.api.web3_json_rpc.http_url; @@ -89,7 +101,11 @@ describe('Upgrade test', function () { alice = tester.emptyWallet(); if (fileConfig.loadFromFile) { - const chainWalletConfig = loadConfig({ pathToHome, chain: fileConfig.chain, config: 'wallets.yaml' }); + const chainWalletConfig = loadConfig({ + pathToHome, + chain: fileConfig.chain, + config: 'wallets.yaml' + }); adminGovWallet = new 
ethers.Wallet(chainWalletConfig.governor.private_key, alice._providerL1()); @@ -144,7 +160,7 @@ describe('Upgrade test', function () { components: serverComponents, stdio: ['ignore', logs, logs], cwd: pathToHome, - useZkInception: fileConfig.loadFromFile, + useZkStack: fileConfig.loadFromFile, chain: fileConfig.chain }); // Server may need some time to recompile if it's a cold run, so wait for it. @@ -220,8 +236,15 @@ describe('Upgrade test', function () { }); step('Send l1 tx for saving new bootloader', async () => { - const path = `${pathToHome}/contracts/system-contracts/bootloader/build/artifacts/playground_batch.yul.zbin`; - const bootloaderCode = ethers.hexlify(fs.readFileSync(path)); + const path = `${pathToHome}/contracts/system-contracts/zkout/playground_batch.yul/contracts-preprocessed/bootloader/playground_batch.yul.json`; + let bootloaderCode; + if (fs.existsSync(path)) { + bootloaderCode = '0x'.concat(require(path).bytecode.object); + } else { + const legacyPath = `${pathToHome}/contracts/system-contracts/bootloader/build/artifacts/playground_batch.yul.zbin`; + bootloaderCode = ethers.hexlify(fs.readFileSync(legacyPath)); + } + bootloaderHash = ethers.hexlify(zksync.utils.hashBytecode(bootloaderCode)); const txHandle = await tester.syncWallet.requestExecute({ contractAddress: ethers.ZeroAddress, @@ -354,7 +377,7 @@ describe('Upgrade test', function () { components: serverComponents, stdio: ['ignore', logs, logs], cwd: pathToHome, - useZkInception: fileConfig.loadFromFile, + useZkStack: fileConfig.loadFromFile, chain: fileConfig.chain }); await utils.sleep(10); diff --git a/core/tests/upgrade-test/tests/utils.ts b/core/tests/upgrade-test/tests/utils.ts index 7a7829caf86..2972f8411f5 100644 --- a/core/tests/upgrade-test/tests/utils.ts +++ b/core/tests/upgrade-test/tests/utils.ts @@ -7,19 +7,19 @@ export function runServerInBackground({ components, stdio, cwd, - useZkInception, + useZkStack, chain }: { components?: string[]; stdio: any; cwd?: Parameters<typeof _spawn>[0]['cwd']; - useZkInception?: boolean; + useZkStack?: boolean; chain?: string; }) { let command = ''; - if (useZkInception) { - command = 'zk_inception server'; + if (useZkStack) { + command = 'zkstack server'; command += chain ?
` --chain ${chain}` : ''; } else { command = 'cd $ZKSYNC_HOME && cargo run --bin zksync_server --release --'; @@ -71,8 +71,8 @@ export interface Contracts { stateTransitonManager: any; } -export function initContracts(pathToHome: string, zkToolbox: boolean): Contracts { - if (zkToolbox) { +export function initContracts(pathToHome: string, zkStack: boolean): Contracts { + if (zkStack) { const CONTRACTS_FOLDER = `${pathToHome}/contracts`; return { l1DefaultUpgradeAbi: new ethers.Interface( @@ -88,10 +88,10 @@ export function initContracts(pathToHome: string, zkStack: boolean): Contracts require(`${CONTRACTS_FOLDER}/l1-contracts/out/ChainAdmin.sol/ChainAdmin.json`).abi ), l2ForceDeployUpgraderAbi: new ethers.Interface( - require(`${CONTRACTS_FOLDER}/l2-contracts/artifacts-zk/contracts/ForceDeployUpgrader.sol/ForceDeployUpgrader.json`).abi + require(`${CONTRACTS_FOLDER}/l2-contracts/zkout/ForceDeployUpgrader.sol/ForceDeployUpgrader.json`).abi ), complexUpgraderAbi: new ethers.Interface( - require(`${CONTRACTS_FOLDER}/system-contracts/artifacts-zk/contracts-preprocessed/ComplexUpgrader.sol/ComplexUpgrader.json`).abi + require(`${CONTRACTS_FOLDER}/system-contracts/zkout/ComplexUpgrader.sol/ComplexUpgrader.json`).abi ), counterBytecode: require(`${pathToHome}/core/tests/ts-integration/artifacts-zk/contracts/counter/counter.sol/Counter.json`) diff --git a/core/tests/vm-benchmark/Cargo.toml b/core/tests/vm-benchmark/Cargo.toml index 4586c637e12..59c1e21493b 100644 --- a/core/tests/vm-benchmark/Cargo.toml +++ b/core/tests/vm-benchmark/Cargo.toml @@ -11,6 +11,7 @@ zksync_multivm.workspace = true zksync_types.workspace = true zksync_utils.workspace = true zksync_vlog.workspace = true +zksync_vm2.workspace = true criterion.workspace = true once_cell.workspace = true diff --git a/core/tests/vm-benchmark/benches/iai.rs b/core/tests/vm-benchmark/benches/iai.rs index 6b8965afa4f..8cbb9f10dd8 100644 --- a/core/tests/vm-benchmark/benches/iai.rs +++ b/core/tests/vm-benchmark/benches/iai.rs @@ -31,4 +31,5 @@ make_functions_and_main!( write_and_decode => write_and_decode_legacy, event_spam => event_spam_legacy, slot_hash_collision => slot_hash_collision_legacy, + heap_read_write => heap_read_write_legacy, ); diff --git a/core/tests/vm-benchmark/src/bin/compare_iai_results.rs b/core/tests/vm-benchmark/src/bin/compare_iai_results.rs index faf72a18f45..c274b039c9b 100644 --- a/core/tests/vm-benchmark/src/bin/compare_iai_results.rs +++ b/core/tests/vm-benchmark/src/bin/compare_iai_results.rs @@ -25,14 +25,7 @@ fn main() { .keys() .collect::<HashSet<_>>() .intersection(&iai_after.keys().collect()) - .filter_map(|&name| { let diff = percent_difference(iai_before[name], iai_after[name]); if diff.abs() > 2.
{ Some((name, format!("{:+.1}%", diff))) } else { None } }) + .map(|&name| (name, percent_difference(iai_before[name], iai_after[name]))) .collect::<HashMap<_, _>>(); let duration_changes = opcodes_before @@ -47,12 +40,17 @@ fn main() { let mut nonzero_diff = false; - for name in perf_changes.keys().collect::<HashSet<_>>().union( - &duration_changes - .iter() - .filter_map(|(key, value)| (*value != 0).then_some(key)) - .collect(), - ) { + for name in perf_changes + .iter() + .filter_map(|(key, value)| (value.abs() > 2.).then_some(key)) + .collect::<HashSet<_>>() + .union( + &duration_changes + .iter() + .filter_map(|(key, value)| (*value != 0).then_some(key)) + .collect(), + ) + { // write the header before writing the first line of diff if !nonzero_diff { println!("Benchmark name | change in estimated runtime | change in number of opcodes executed \n--- | --- | ---"); } @@ -63,7 +61,10 @@ fn main() { println!( "{} | {} | {}", name, - perf_changes.get(**name).unwrap_or(&n_a.clone()), + perf_changes + .get(**name) + .map(|percent| format!("{:+.1}%", percent)) + .unwrap_or(n_a.clone()), duration_changes .get(**name) .map(|abs_diff| format!( diff --git a/core/tests/vm-benchmark/src/bin/instruction_counts.rs b/core/tests/vm-benchmark/src/bin/instruction_counts.rs index f9bb04c01bf..96208007fd9 100644 --- a/core/tests/vm-benchmark/src/bin/instruction_counts.rs +++ b/core/tests/vm-benchmark/src/bin/instruction_counts.rs @@ -1,11 +1,16 @@ //! Runs all benchmarks and prints out the number of zkEVM opcodes each one executed. -use vm_benchmark::{BenchmarkingVm, BYTECODES}; +use vm_benchmark::{BenchmarkingVmFactory, Fast, Legacy, BYTECODES}; fn main() { for bytecode in BYTECODES { let tx = bytecode.deploy_tx(); let name = bytecode.name; - println!("{name} {}", BenchmarkingVm::new().instruction_count(&tx)); + println!("{name} {}", Fast::<()>::count_instructions(&tx)); + println!( + "{} {}", + name.to_string() + "_legacy", + Legacy::count_instructions(&tx) + ); } } diff --git a/core/tests/vm-benchmark/src/instruction_counter.rs b/core/tests/vm-benchmark/src/instruction_counter.rs index 48b1e3527ad..0899c4c9171 100644 --- a/core/tests/vm-benchmark/src/instruction_counter.rs +++ b/core/tests/vm-benchmark/src/instruction_counter.rs @@ -13,7 +13,6 @@ pub struct InstructionCounter { /// A tracer that counts the number of instructions executed by the VM.
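/// The running count is written into a shared `Rc<RefCell<usize>>` output cell so the caller can read it once execution finishes.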
impl InstructionCounter { - #[allow(dead_code)] // FIXME: re-enable instruction counting once new tracers are merged pub fn new(output: Rc<RefCell<usize>>) -> Self { Self { count: 0, output } } diff --git a/core/tests/vm-benchmark/src/vm.rs b/core/tests/vm-benchmark/src/vm.rs index 410c0e071b4..922fb24512b 100644 --- a/core/tests/vm-benchmark/src/vm.rs +++ b/core/tests/vm-benchmark/src/vm.rs @@ -5,12 +5,12 @@ use zksync_contracts::BaseSystemContracts; use zksync_multivm::{ interface::{ storage::{InMemoryStorage, StorageView}, - ExecutionResult, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, + ExecutionResult, InspectExecutionMode, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, }, - vm_fast, vm_latest, - vm_latest::{constants::BATCH_COMPUTATIONAL_GAS_LIMIT, HistoryEnabled}, + vm_fast, + vm_latest::{self, constants::BATCH_COMPUTATIONAL_GAS_LIMIT, HistoryEnabled, ToTracerPointer}, zk_evm_latest::ethereum_types::{Address, U256}, }; use zksync_types::{ @@ -20,7 +20,7 @@ use zksync_types::{ }; use zksync_utils::bytecode::hash_bytecode; -use crate::transaction::PRIVATE_KEY; +use crate::{instruction_counter::InstructionCounter, transaction::PRIVATE_KEY}; static SYSTEM_CONTRACTS: Lazy<BaseSystemContracts> = Lazy::new(BaseSystemContracts::load_from_disk); @@ -72,16 +72,19 @@ pub trait BenchmarkingVmFactory { system_env: SystemEnv, storage: &'static InMemoryStorage, ) -> Self::Instance; + + /// Counts instructions executed by the VM while processing the transaction. + fn count_instructions(tx: &Transaction) -> usize; } /// Factory for the new / fast VM. #[derive(Debug)] -pub struct Fast(()); +pub struct Fast<Tr = ()>(Tr); -impl BenchmarkingVmFactory for Fast { +impl<Tr: vm_fast::Tracer + Default + 'static> BenchmarkingVmFactory for Fast<Tr> { const LABEL: VmLabel = VmLabel::Fast; - type Instance = vm_fast::Vm<&'static InMemoryStorage>; + type Instance = vm_fast::Vm<&'static InMemoryStorage, Tr>; fn create( batch_env: L1BatchEnv, @@ -90,6 +93,29 @@ impl BenchmarkingVmFactory for Fast { ) -> Self::Instance { vm_fast::Vm::custom(batch_env, system_env, storage) } + + fn count_instructions(tx: &Transaction) -> usize { + let mut vm = BenchmarkingVm::<Fast<InstructionCount>>::default(); + vm.0.push_transaction(tx.clone()); + + #[derive(Default)] + struct InstructionCount(usize); + impl vm_fast::Tracer for InstructionCount { + fn before_instruction< + OP: zksync_vm2::interface::OpcodeType, + S: zksync_vm2::interface::GlobalStateInterface, + >( + &mut self, + _: &mut S, + ) { + self.0 += 1; + } + } + let mut tracer = InstructionCount(0); + + vm.0.inspect(&mut tracer, InspectExecutionMode::OneTx); + tracer.0 + } } /// Factory for the legacy VM (latest version).
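/// Instruction counting for this VM goes through the `InstructionCounter` tracer defined in `instruction_counter.rs`.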
@@ -109,6 +135,19 @@ impl BenchmarkingVmFactory for Legacy { let storage = StorageView::new(storage).to_rc_ptr(); vm_latest::Vm::new(batch_env, system_env, storage) } + + fn count_instructions(tx: &Transaction) -> usize { + let mut vm = BenchmarkingVm::<Legacy>::default(); + vm.0.push_transaction(tx.clone()); + let count = Rc::new(RefCell::new(0)); + vm.0.inspect( + &mut InstructionCounter::new(count.clone()) + .into_tracer_pointer() + .into(), + InspectExecutionMode::OneTx, + ); + count.take() + } } #[derive(Debug)] @@ -143,7 +182,6 @@ impl<VM: BenchmarkingVmFactory> Default for BenchmarkingVm<VM> { execution_mode: TxExecutionMode::VerifyExecute, default_validation_computational_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, chain_id: L2ChainId::from(270), - pubdata_params: Default::default(), }, &STORAGE, )) @@ -153,7 +191,7 @@ impl<VM: BenchmarkingVmFactory> BenchmarkingVm<VM> { pub fn run_transaction(&mut self, tx: &Transaction) -> VmExecutionResultAndLogs { self.0.push_transaction(tx.clone()); - self.0.execute(VmExecutionMode::OneTx) + self.0.execute(InspectExecutionMode::OneTx) } pub fn run_transaction_full(&mut self, tx: &Transaction) -> VmExecutionResultAndLogs { @@ -170,13 +208,6 @@ impl<VM: BenchmarkingVmFactory> BenchmarkingVm<VM> { } tx_result } - - pub fn instruction_count(&mut self, tx: &Transaction) -> usize { - self.0.push_transaction(tx.clone()); - let count = Rc::new(RefCell::new(0)); - self.0.execute(VmExecutionMode::OneTx); // FIXME: re-enable instruction counting once new tracers are merged - count.take() - } } impl BenchmarkingVm<Fast> { @@ -191,64 +222,64 @@ impl BenchmarkingVm<Fast> { } } -#[cfg(test)] -mod tests { - use assert_matches::assert_matches; - use zksync_contracts::read_bytecode; - use zksync_multivm::interface::ExecutionResult; - - use super::*; - use crate::{ - get_deploy_tx, get_heavy_load_test_tx, get_load_test_deploy_tx, get_load_test_tx, - get_realistic_load_test_tx, get_transfer_tx, LoadTestParams, - }; - - #[test] - fn can_deploy_contract() { - let test_contract = read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json", - ); - let mut vm = BenchmarkingVm::new(); - let res = vm.run_transaction(&get_deploy_tx(&test_contract)); - - assert_matches!(res.result, ExecutionResult::Success { .. }); - } - - #[test] - fn can_transfer() { - let mut vm = BenchmarkingVm::new(); - let res = vm.run_transaction(&get_transfer_tx(0)); - assert_matches!(res.result, ExecutionResult::Success { .. }); - } - - #[test] - fn can_load_test() { - let mut vm = BenchmarkingVm::new(); - let res = vm.run_transaction(&get_load_test_deploy_tx()); - assert_matches!(res.result, ExecutionResult::Success { .. }); - - let params = LoadTestParams::default(); - let res = vm.run_transaction(&get_load_test_tx(1, 10_000_000, params)); - assert_matches!(res.result, ExecutionResult::Success { .. }); - } - - #[test] - fn can_load_test_with_realistic_txs() { - let mut vm = BenchmarkingVm::new(); - let res = vm.run_transaction(&get_load_test_deploy_tx()); - assert_matches!(res.result, ExecutionResult::Success { .. }); - - let res = vm.run_transaction(&get_realistic_load_test_tx(1)); - assert_matches!(res.result, ExecutionResult::Success { .. }); - } - - #[test] - fn can_load_test_with_heavy_txs() { - let mut vm = BenchmarkingVm::new(); - let res = vm.run_transaction(&get_load_test_deploy_tx()); - assert_matches!(res.result, ExecutionResult::Success { .. }); - - let res = vm.run_transaction(&get_heavy_load_test_tx(1)); - assert_matches!(res.result, ExecutionResult::Success { ..
}); - } -} +// #[cfg(test)] +// mod tests { +// use assert_matches::assert_matches; +// use zksync_contracts::read_bytecode; +// use zksync_multivm::interface::ExecutionResult; +// +// use super::*; +// use crate::{ +// get_deploy_tx, get_heavy_load_test_tx, get_load_test_deploy_tx, get_load_test_tx, +// get_realistic_load_test_tx, get_transfer_tx, LoadTestParams, +// }; +// +// #[test] +// fn can_deploy_contract() { +// let test_contract = read_bytecode( +// "etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json", +// ); +// let mut vm = BenchmarkingVm::new(); +// let res = vm.run_transaction(&get_deploy_tx(&test_contract)); +// +// assert_matches!(res.result, ExecutionResult::Success { .. }); +// } +// +// #[test] +// fn can_transfer() { +// let mut vm = BenchmarkingVm::new(); +// let res = vm.run_transaction(&get_transfer_tx(0)); +// assert_matches!(res.result, ExecutionResult::Success { .. }); +// } +// +// #[test] +// fn can_load_test() { +// let mut vm = BenchmarkingVm::new(); +// let res = vm.run_transaction(&get_load_test_deploy_tx()); +// assert_matches!(res.result, ExecutionResult::Success { .. }); +// +// let params = LoadTestParams::default(); +// let res = vm.run_transaction(&get_load_test_tx(1, 10_000_000, params)); +// assert_matches!(res.result, ExecutionResult::Success { .. }); +// } +// +// #[test] +// fn can_load_test_with_realistic_txs() { +// let mut vm = BenchmarkingVm::new(); +// let res = vm.run_transaction(&get_load_test_deploy_tx()); +// assert_matches!(res.result, ExecutionResult::Success { .. }); +// +// let res = vm.run_transaction(&get_realistic_load_test_tx(1)); +// assert_matches!(res.result, ExecutionResult::Success { .. }); +// } +// +// #[test] +// fn can_load_test_with_heavy_txs() { +// let mut vm = BenchmarkingVm::new(); +// let res = vm.run_transaction(&get_load_test_deploy_tx()); +// assert_matches!(res.result, ExecutionResult::Success { .. }); +// +// let res = vm.run_transaction(&get_heavy_load_test_tx(1)); +// assert_matches!(res.result, ExecutionResult::Success { .. 
}); +// } +// } diff --git a/docker-compose-gpu-runner-cuda-12-0.yml b/docker-compose-gpu-runner-cuda-12-0.yml index 35a0faeb962..bd91a5a5b0e 100644 --- a/docker-compose-gpu-runner-cuda-12-0.yml +++ b/docker-compose-gpu-runner-cuda-12-0.yml @@ -3,20 +3,20 @@ services: reth: restart: always image: "ghcr.io/paradigmxyz/reth:v1.0.6" + ports: + - 127.0.0.1:8545:8545 volumes: - - type: bind - source: ./volumes/reth/data + - type: volume + source: reth-data target: /rethdata - type: bind source: ./etc/reth/chaindata target: /chaindata command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 300ms --chain /chaindata/reth_config - ports: - - 127.0.0.1:8545:8545 zk: - image: ghcr.io/matter-labs/zk-environment:cuda-12-0-latest + image: ghcr.io/matter-labs/zk-environment:cuda-12_0-latest depends_on: - reth - postgres @@ -49,11 +49,18 @@ services: - /dev/nvidia-uvm-tools:/dev/nvidia-uvm-tools env_file: - ./.env + extra_hosts: + - "host:host-gateway" + profiles: + - runner + network_mode: host + pid: host deploy: resources: reservations: devices: - capabilities: [ gpu ] + postgres: image: "postgres:14" command: postgres -c 'max_connections=200' @@ -62,3 +69,7 @@ services: environment: # We bind only to 127.0.0.1, so setting insecure password is acceptable here - POSTGRES_PASSWORD=notsecurepassword + +volumes: + postgres-data: + reth-data: \ No newline at end of file diff --git a/docker-compose-gpu-runner.yml b/docker-compose-gpu-runner.yml index f95ae0d5f54..32665eb7010 100644 --- a/docker-compose-gpu-runner.yml +++ b/docker-compose-gpu-runner.yml @@ -16,7 +16,7 @@ services: - 127.0.0.1:8545:8545 zk: - image: "ghcr.io/matter-labs/zk-environment:cuda-11-8-latest" + image: "ghcr.io/matter-labs/zk-environment:cuda-11_8-latest" container_name: zk depends_on: - reth @@ -40,6 +40,11 @@ services: - GITHUB_WORKSPACE=$GITHUB_WORKSPACE env_file: - ./.env + extra_hosts: + - "host:host-gateway" + profiles: + - runner + network_mode: host deploy: resources: reservations: diff --git a/docker-compose-runner-nightly.yml b/docker-compose-runner-nightly.yml index cadd1009f7a..4a854aa0b0a 100644 --- a/docker-compose-runner-nightly.yml +++ b/docker-compose-runner-nightly.yml @@ -1,4 +1,3 @@ -version: '3.2' services: zk: image: ghcr.io/matter-labs/zk-environment:latest2.0-lightweight-nightly @@ -15,3 +14,7 @@ services: extends: file: docker-compose.yml service: reth + +volumes: + postgres-data: + reth-data: \ No newline at end of file diff --git a/docker-compose-unit-tests.yml b/docker-compose-unit-tests.yml index ddbc76bb196..b839be2d9f4 100644 --- a/docker-compose-unit-tests.yml +++ b/docker-compose-unit-tests.yml @@ -1,4 +1,3 @@ -version: '3.2' name: unit_tests services: # An instance of postgres configured to execute Rust unit-tests, tuned for performance. 
diff --git a/docker-compose.yml b/docker-compose.yml index 1e3a273ec9a..d8f40720fe8 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,4 +1,3 @@ -version: '3.2' services: reth: restart: always @@ -6,8 +5,8 @@ services: ports: - 127.0.0.1:8545:8545 volumes: - - type: bind - source: ./volumes/reth/data + - type: volume + source: reth-data target: /rethdata - type: bind source: ./etc/reth/chaindata @@ -22,8 +21,8 @@ services: ports: - 127.0.0.1:5432:5432 volumes: - - type: bind - source: ./volumes/postgres + - type: volume + source: postgres-data target: /var/lib/postgresql/data environment: # We bind only to 127.0.0.1, so setting insecure password is acceptable here @@ -56,3 +55,7 @@ services: profiles: - runner network_mode: host + +volumes: + postgres-data: + reth-data: \ No newline at end of file diff --git a/docker/Makefile b/docker/Makefile index 72189902aa1..4e0ca51f904 100644 --- a/docker/Makefile +++ b/docker/Makefile @@ -55,18 +55,20 @@ check-tools: check-nodejs check-yarn check-rust check-sqlx-cli check-docker chec # Check that contracts are checkout properly check-contracts: - @if [ ! -d ../contracts/l1-contracts/lib/forge-std/foundry.toml ] || [ -z "$$(ls -A ../contracts/l1-contracts/lib/forge-std/foundry.toml)" ]; then \ - echo "l1-contracts git submodule is missing. Please re-download repo with `git clone --recurse-submodules https://github.com/matter-labs/zksync-era.git`"; \ + @if [ -z "$$(ls -A ../contracts/l1-contracts/lib/forge-std/foundry.toml)" ]; then \ + echo "l1-contracts git submodule is missing. Please re-download repo with 'git clone --recurse-submodules https://github.com/matter-labs/zksync-era.git'"; \ exit 1; \ fi # Build and download needed contracts +# TODO Remove mkdir once we use foundry inside contracts repo prepare-contracts: check-tools check-contracts @cd ../ && \ export ZKSYNC_HOME=$$(pwd) && \ - export PATH=$$PATH:$${ZKSYNC_HOME}/bin && \ - zkt || true && \ - zk_supervisor contracts + export PATH=$$PATH:$${ZKSYNC_HOME}/bin:$${ZKSYNC_HOME}/zkstack_cli/zkstackup && \ + zkstackup -g --local || true && \ + zkstack dev contracts && \ + mkdir -p contracts/l1-contracts/artifacts # Download setup-key prepare-keys: @@ -91,9 +93,12 @@ build-witness-generator: check-tools prepare-keys $(DOCKER_BUILD_CMD) --file witness-generator/Dockerfile --load \ --tag witness-generator:$(PROTOCOL_VERSION) $(CONTEXT) +build-external-node: check-tools prepare-contracts + $(DOCKER_BUILD_CMD) --file external-node/Dockerfile --load \ + --tag external-node:$(PROTOCOL_VERSION) $(CONTEXT) # Build all containers -build-all: build-contract-verifier build-server-v2 build-witness-generator build-circuit-prover-gpu cleanup +build-all: build-contract-verifier build-server-v2 build-witness-generator build-circuit-prover-gpu build-external-node cleanup # Clean generated images clean-all: @@ -102,3 +107,4 @@ clean-all: docker rmi server-v2:$(PROTOCOL_VERSION) >/dev/null 2>&1 docker rmi prover:$(PROTOCOL_VERSION) >/dev/null 2>&1 docker rmi witness-generator:$(PROTOCOL_VERSION) >/dev/null 2>&1 + docker rmi external-node:$(PROTOCOL_VERSION) >/dev/null 2>&1 diff --git a/docker/contract-verifier/Dockerfile b/docker/contract-verifier/Dockerfile index 7943dae835a..e9d83903d11 100644 --- a/docker/contract-verifier/Dockerfile +++ b/docker/contract-verifier/Dockerfile @@ -47,7 +47,7 @@ RUN mkdir -p /etc/zksolc-bin/vm-1.5.0-a167aa3 && \ chmod +x /etc/zksolc-bin/vm-1.5.0-a167aa3/zksolc # install zksolc 1.5.x -RUN for VERSION in $(seq -f "v1.5.%g" 0 4); do \ +RUN for VERSION in $(seq -f 
"v1.5.%g" 0 6); do \ mkdir -p /etc/zksolc-bin/$VERSION && \ wget https://github.com/matter-labs/zksolc-bin/raw/main/linux-amd64/zksolc-linux-amd64-musl-$VERSION -O /etc/zksolc-bin/$VERSION/zksolc && \ chmod +x /etc/zksolc-bin/$VERSION/zksolc; \ @@ -68,7 +68,7 @@ RUN for VERSION in $(seq -f "v1.4.%g" 0 1); do \ done # install zkvyper 1.5.x -RUN for VERSION in $(seq -f "v1.5.%g" 0 4); do \ +RUN for VERSION in $(seq -f "v1.5.%g" 0 6); do \ mkdir -p /etc/zkvyper-bin/$VERSION && \ wget https://github.com/matter-labs/zkvyper-bin/raw/main/linux-amd64/zkvyper-linux-amd64-musl-$VERSION -O /etc/zkvyper-bin/$VERSION/zkvyper && \ chmod +x /etc/zkvyper-bin/$VERSION/zkvyper; \ @@ -91,10 +91,13 @@ RUN mkdir -p /etc/vyper-bin/0.3.10 \ && wget -O vyper0.3.10 https://github.com/vyperlang/vyper/releases/download/v0.3.10/vyper.0.3.10%2Bcommit.91361694.linux \ && mv vyper0.3.10 /etc/vyper-bin/0.3.10/vyper \ && chmod +x /etc/vyper-bin/0.3.10/vyper +RUN mkdir -p /etc/vyper-bin/0.4.0 \ + && wget -O vyper0.4.0 https://github.com/vyperlang/vyper/releases/download/v0.4.0/vyper.0.4.0+commit.e9db8d9f.linux \ + && mv vyper0.4.0 /etc/vyper-bin/0.4.0/vyper \ + && chmod +x /etc/vyper-bin/0.4.0/vyper COPY --from=builder /usr/src/zksync/target/release/zksync_contract_verifier /usr/bin/ -COPY contracts/system-contracts/bootloader/build/artifacts/ /contracts/system-contracts/bootloader/build/artifacts/ -COPY contracts/system-contracts/artifacts-zk /contracts/system-contracts/artifacts-zk +COPY contracts/system-contracts/zkout/ /contracts/system-contracts/zkout/ # CMD tail -f /dev/null ENTRYPOINT ["zksync_contract_verifier"] diff --git a/docker/contract-verifier/install-all-solc.sh b/docker/contract-verifier/install-all-solc.sh index 4fe992f8357..0c24b074130 100755 --- a/docker/contract-verifier/install-all-solc.sh +++ b/docker/contract-verifier/install-all-solc.sh @@ -26,7 +26,7 @@ done # Download zkVM solc list=( "0.8.25-1.0.0" "0.8.24-1.0.0" "0.8.23-1.0.0" "0.8.22-1.0.0" "0.8.21-1.0.0" "0.8.20-1.0.0" "0.8.19-1.0.0" "0.8.18-1.0.0" "0.8.17-1.0.0" "0.8.16-1.0.0" "0.8.15-1.0.0" "0.8.14-1.0.0" "0.8.13-1.0.0" "0.8.12-1.0.0" "0.8.11-1.0.0" "0.8.10-1.0.0" "0.8.9-1.0.0" "0.8.8-1.0.0" "0.8.7-1.0.0" "0.8.6-1.0.0" "0.8.5-1.0.0" "0.8.4-1.0.0" "0.8.3-1.0.0" "0.8.2-1.0.0" "0.8.1-1.0.0" "0.8.0-1.0.0" "0.7.6-1.0.0" "0.7.5-1.0.0" "0.7.4-1.0.0" "0.7.3-1.0.0" "0.7.2-1.0.0" "0.7.1-1.0.0" "0.7.0-1.0.0" "0.6.12-1.0.0" "0.6.11-1.0.0" "0.6.10-1.0.0" "0.6.9-1.0.0" "0.6.8-1.0.0" "0.6.7-1.0.0" "0.6.6-1.0.0" "0.6.5-1.0.0" "0.6.4-1.0.0" "0.6.3-1.0.0" "0.6.2-1.0.0" "0.6.1-1.0.0" "0.6.0-1.0.0" "0.5.17-1.0.0" "0.5.16-1.0.0" "0.5.15-1.0.0" "0.5.14-1.0.0" "0.5.13-1.0.0" "0.5.12-1.0.0" "0.5.11-1.0.0" "0.5.10-1.0.0" "0.5.9-1.0.0" "0.5.8-1.0.0" "0.5.7-1.0.0" "0.5.6-1.0.0" "0.5.5-1.0.0" "0.5.4-1.0.0" "0.5.3-1.0.0" "0.5.2-1.0.0" "0.5.1-1.0.0" "0.5.0-1.0.0" "0.4.26-1.0.0" "0.4.25-1.0.0" "0.4.24-1.0.0" "0.4.23-1.0.0" "0.4.22-1.0.0" "0.4.21-1.0.0" "0.4.20-1.0.0" "0.4.19-1.0.0" "0.4.18-1.0.0" "0.4.17-1.0.0" "0.4.16-1.0.0" "0.4.15-1.0.0" "0.4.14-1.0.0" "0.4.13-1.0.0" "0.4.12-1.0.0" - "0.8.27-1.0.1" "0.8.26-1.0.1" "0.8.25-1.0.1" "0.8.24-1.0.1" "0.8.23-1.0.1" "0.8.22-1.0.1" "0.8.21-1.0.1" "0.8.20-1.0.1" "0.8.19-1.0.1" "0.8.18-1.0.1" "0.8.17-1.0.1" "0.8.16-1.0.1" "0.8.15-1.0.1" "0.8.14-1.0.1" "0.8.13-1.0.1" "0.8.12-1.0.1" "0.8.11-1.0.1" "0.8.10-1.0.1" "0.8.9-1.0.1" "0.8.8-1.0.1" "0.8.7-1.0.1" "0.8.6-1.0.1" "0.8.5-1.0.1" "0.8.4-1.0.1" "0.8.3-1.0.1" "0.8.2-1.0.1" "0.8.1-1.0.1" "0.8.0-1.0.1" "0.7.6-1.0.1" "0.7.5-1.0.1" "0.7.4-1.0.1" "0.7.3-1.0.1" "0.7.2-1.0.1" "0.7.1-1.0.1" 
"0.7.0-1.0.1" "0.6.12-1.0.1" "0.6.11-1.0.1" "0.6.10-1.0.1" "0.6.9-1.0.1" "0.6.8-1.0.1" "0.6.7-1.0.1" "0.6.6-1.0.1" "0.6.5-1.0.1" "0.6.4-1.0.1" "0.6.3-1.0.1" "0.6.2-1.0.1" "0.6.1-1.0.1" "0.6.0-1.0.1" "0.5.17-1.0.1" "0.5.16-1.0.1" "0.5.15-1.0.1" "0.5.14-1.0.1" "0.5.13-1.0.1" "0.5.12-1.0.1" "0.5.11-1.0.1" "0.5.10-1.0.1" "0.5.9-1.0.1" "0.5.8-1.0.1" "0.5.7-1.0.1" "0.5.6-1.0.1" "0.5.5-1.0.1" "0.5.4-1.0.1" "0.5.3-1.0.1" "0.5.2-1.0.1" "0.5.1-1.0.1" "0.5.0-1.0.1" "0.4.26-1.0.1" "0.4.25-1.0.1" "0.4.24-1.0.1" "0.4.23-1.0.1" "0.4.22-1.0.1" "0.4.21-1.0.1" "0.4.20-1.0.1" "0.4.19-1.0.1" "0.4.18-1.0.1" "0.4.17-1.0.1" "0.4.16-1.0.1" "0.4.15-1.0.1" "0.4.14-1.0.1" "0.4.13-1.0.1" "0.4.12-1.0.1" + "0.8.28-1.0.1" "0.8.27-1.0.1" "0.8.26-1.0.1" "0.8.25-1.0.1" "0.8.24-1.0.1" "0.8.23-1.0.1" "0.8.22-1.0.1" "0.8.21-1.0.1" "0.8.20-1.0.1" "0.8.19-1.0.1" "0.8.18-1.0.1" "0.8.17-1.0.1" "0.8.16-1.0.1" "0.8.15-1.0.1" "0.8.14-1.0.1" "0.8.13-1.0.1" "0.8.12-1.0.1" "0.8.11-1.0.1" "0.8.10-1.0.1" "0.8.9-1.0.1" "0.8.8-1.0.1" "0.8.7-1.0.1" "0.8.6-1.0.1" "0.8.5-1.0.1" "0.8.4-1.0.1" "0.8.3-1.0.1" "0.8.2-1.0.1" "0.8.1-1.0.1" "0.8.0-1.0.1" "0.7.6-1.0.1" "0.7.5-1.0.1" "0.7.4-1.0.1" "0.7.3-1.0.1" "0.7.2-1.0.1" "0.7.1-1.0.1" "0.7.0-1.0.1" "0.6.12-1.0.1" "0.6.11-1.0.1" "0.6.10-1.0.1" "0.6.9-1.0.1" "0.6.8-1.0.1" "0.6.7-1.0.1" "0.6.6-1.0.1" "0.6.5-1.0.1" "0.6.4-1.0.1" "0.6.3-1.0.1" "0.6.2-1.0.1" "0.6.1-1.0.1" "0.6.0-1.0.1" "0.5.17-1.0.1" "0.5.16-1.0.1" "0.5.15-1.0.1" "0.5.14-1.0.1" "0.5.13-1.0.1" "0.5.12-1.0.1" "0.5.11-1.0.1" "0.5.10-1.0.1" "0.5.9-1.0.1" "0.5.8-1.0.1" "0.5.7-1.0.1" "0.5.6-1.0.1" "0.5.5-1.0.1" "0.5.4-1.0.1" "0.5.3-1.0.1" "0.5.2-1.0.1" "0.5.1-1.0.1" "0.5.0-1.0.1" "0.4.26-1.0.1" "0.4.25-1.0.1" "0.4.24-1.0.1" "0.4.23-1.0.1" "0.4.22-1.0.1" "0.4.21-1.0.1" "0.4.20-1.0.1" "0.4.19-1.0.1" "0.4.18-1.0.1" "0.4.17-1.0.1" "0.4.16-1.0.1" "0.4.15-1.0.1" "0.4.14-1.0.1" "0.4.13-1.0.1" "0.4.12-1.0.1" ) for version in ${list[@]}; do diff --git a/docker/external-node/Dockerfile b/docker/external-node/Dockerfile index aa1089ae7b3..f5c55860740 100644 --- a/docker/external-node/Dockerfile +++ b/docker/external-node/Dockerfile @@ -23,12 +23,9 @@ COPY --from=builder /usr/src/zksync/target/release/zksync_external_node /usr/bin COPY --from=builder /usr/src/zksync/target/release/block_reverter /usr/bin COPY --from=builder /usr/local/cargo/bin/sqlx /usr/bin COPY --from=builder /usr/src/zksync/docker/external-node/entrypoint.sh /usr/bin -COPY contracts/system-contracts/bootloader/build/artifacts/ /contracts/system-contracts/bootloader/build/artifacts/ -COPY contracts/system-contracts/contracts-preprocessed/artifacts/ /contracts/system-contracts/contracts-preprocessed/artifacts/ -COPY contracts/system-contracts/contracts-preprocessed/precompiles/artifacts/ /contracts/system-contracts/contracts-preprocessed/precompiles/artifacts/ -COPY contracts/system-contracts/artifacts-zk /contracts/system-contracts/artifacts-zk +COPY contracts/system-contracts/zkout/ /contracts/system-contracts/zkout/ COPY contracts/l1-contracts/out/ /contracts/l1-contracts/out/ -COPY contracts/l2-contracts/artifacts-zk/ /contracts/l2-contracts/artifacts-zk/ +COPY contracts/l2-contracts/zkout/ /contracts/l2-contracts/zkout/ COPY etc/tokens/ /etc/tokens/ COPY etc/ERC20/ /etc/ERC20/ COPY etc/multivm_bootloaders/ /etc/multivm_bootloaders/ diff --git a/docker/prover-autoscaler/Dockerfile b/docker/prover-autoscaler/Dockerfile new file mode 100644 index 00000000000..246e8099ffd --- /dev/null +++ b/docker/prover-autoscaler/Dockerfile @@ -0,0 +1,25 @@ +FROM 
ghcr.io/matter-labs/zksync-build-base:latest AS builder + +ARG DEBIAN_FRONTEND=noninteractive + +# set of args for use of sccache +ARG SCCACHE_GCS_BUCKET="" +ARG SCCACHE_GCS_SERVICE_ACCOUNT="" +ARG SCCACHE_GCS_RW_MODE="" +ARG RUSTC_WRAPPER="" + +ENV SCCACHE_GCS_BUCKET=${SCCACHE_GCS_BUCKET} +ENV SCCACHE_GCS_SERVICE_ACCOUNT=${SCCACHE_GCS_SERVICE_ACCOUNT} +ENV SCCACHE_GCS_RW_MODE=${SCCACHE_GCS_RW_MODE} +ENV RUSTC_WRAPPER=${RUSTC_WRAPPER} + +WORKDIR /usr/src/zksync +COPY . . + +RUN cd prover && cargo build --release --bin zksync_prover_autoscaler + +FROM ghcr.io/matter-labs/zksync-runtime-base:latest + +COPY --from=builder /usr/src/zksync/prover/target/release/zksync_prover_autoscaler /usr/bin/ + +ENTRYPOINT ["/usr/bin/zksync_prover_autoscaler"] diff --git a/docker/server-v2/Dockerfile b/docker/server-v2/Dockerfile index 3e8b4f16bca..319d0cefbe3 100644 --- a/docker/server-v2/Dockerfile +++ b/docker/server-v2/Dockerfile @@ -31,12 +31,9 @@ EXPOSE 3030 COPY --from=builder /usr/src/zksync/target/release/zksync_server /usr/bin COPY --from=builder /usr/src/zksync/target/release/block_reverter /usr/bin COPY --from=builder /usr/src/zksync/target/release/merkle_tree_consistency_checker /usr/bin -COPY contracts/system-contracts/bootloader/build/artifacts/ /contracts/system-contracts/bootloader/build/artifacts/ -COPY contracts/system-contracts/contracts-preprocessed/artifacts/ /contracts/system-contracts/contracts-preprocessed/artifacts/ -COPY contracts/system-contracts/contracts-preprocessed/precompiles/artifacts/ /contracts/system-contracts/contracts-preprocessed/precompiles/artifacts/ -COPY contracts/system-contracts/artifacts-zk /contracts/system-contracts/artifacts-zk +COPY contracts/system-contracts/zkout/ /contracts/system-contracts/zkout/ COPY contracts/l1-contracts/out/ /contracts/l1-contracts/out/ -COPY contracts/l2-contracts/artifacts-zk/ /contracts/l2-contracts/artifacts-zk/ +COPY contracts/l2-contracts/zkout/ /contracts/l2-contracts/zkout/ COPY etc/tokens/ /etc/tokens/ COPY etc/ERC20/ /etc/ERC20/ COPY etc/multivm_bootloaders/ /etc/multivm_bootloaders/ diff --git a/docker/zk-environment/20.04_amd64_cuda_11_8.Dockerfile b/docker/zk-environment/22.04_amd64_cuda_11_8.Dockerfile similarity index 94% rename from docker/zk-environment/20.04_amd64_cuda_11_8.Dockerfile rename to docker/zk-environment/22.04_amd64_cuda_11_8.Dockerfile index 0c0fd7a9bb3..fe44d55acbb 100644 --- a/docker/zk-environment/20.04_amd64_cuda_11_8.Dockerfile +++ b/docker/zk-environment/22.04_amd64_cuda_11_8.Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:20.04@sha256:3246518d9735254519e1b2ff35f95686e4a5011c90c85344c1f38df7bae9dd37 as base +FROM ubuntu:22.04@sha256:3d1556a8a18cf5307b121e0a98e93f1ddf1f3f8e092f1fddfd941254785b95d7 as base # Link Docker Image with repository # https://docs.github.com/en/packages/learn-github-packages/connecting-a-repository-to-a-package#connecting-a-repository-to-a-container-image-using-the-command-line @@ -16,7 +16,7 @@ RUN apt-get update && apt-get install -y \ git \ openssl \ libssl-dev \ - gcc \ + gcc-10 \ g++ \ curl \ pkg-config \ @@ -31,19 +31,19 @@ RUN apt-get update && apt-get install -y \ wget \ bzip2 \ unzip \ - hub + hub \ + curl \ + gnutls-bin git \ + build-essential \ + clang \ + lldb \ + lld # Install dependencies for RocksDB. 
`liburing` is not available for Ubuntu 20.04, # so we use a PPA with the backport RUN add-apt-repository ppa:savoury1/virtualisation && \ apt-get update && \ apt-get install -y \ - curl \ - gnutls-bin git \ - build-essential \ - clang \ - lldb \ - lld \ liburing-dev \ libclang-dev @@ -83,6 +83,11 @@ RUN rustup default stable RUN cargo install --version=0.8.0 sqlx-cli RUN cargo install cargo-nextest +RUN git clone https://github.com/matter-labs/foundry-zksync +RUN cd foundry-zksync && cargo build --release --bins +RUN mv ./foundry-zksync/target/release/forge /usr/local/cargo/bin/ +RUN mv ./foundry-zksync/target/release/cast /usr/local/cargo/bin/ + # Copy compiler (both solc and zksolc) binaries # Obtain `solc` 0.8.20. RUN wget -c https://github.com/ethereum/solc-bin/raw/gh-pages/linux-amd64/solc-linux-amd64-v0.8.20%2Bcommit.a1b79de6 \ @@ -104,7 +109,7 @@ RUN wget -c https://sourceware.org/pub/valgrind/valgrind-3.20.0.tar.bz2 && \ # Setup the environment ENV ZKSYNC_HOME=/usr/src/zksync -ENV PATH="${ZKSYNC_HOME}/bin:${PATH}" +ENV PATH="${ZKSYNC_HOME}/bin:${ZKSYNC_HOME}/zkstack_cli/zkstackup:${HOME}/.local/bin:${PATH}" ENV CI=1 RUN cargo install sccache ENV RUSTC_WRAPPER=/usr/local/cargo/bin/sccache diff --git a/docker/zk-environment/20.04_amd64_cuda_12_0.Dockerfile b/docker/zk-environment/22.04_amd64_cuda_12_0.Dockerfile similarity index 95% rename from docker/zk-environment/20.04_amd64_cuda_12_0.Dockerfile rename to docker/zk-environment/22.04_amd64_cuda_12_0.Dockerfile index 5bd569b7d20..da041b12181 100644 --- a/docker/zk-environment/20.04_amd64_cuda_12_0.Dockerfile +++ b/docker/zk-environment/22.04_amd64_cuda_12_0.Dockerfile @@ -1,4 +1,4 @@ -FROM ubuntu:20.04 as base +FROM ubuntu:22.04@sha256:3d1556a8a18cf5307b121e0a98e93f1ddf1f3f8e092f1fddfd941254785b95d7 as base # Link Docker Image with repository # https://docs.github.com/en/packages/learn-github-packages/connecting-a-repository-to-a-package#connecting-a-repository-to-a-container-image-using-the-command-line @@ -16,7 +16,7 @@ RUN apt-get update && apt-get install -y \ git \ openssl \ libssl-dev \ - gcc \ + gcc-10 \ g++ \ curl \ pkg-config \ @@ -30,18 +30,18 @@ RUN apt-get update && apt-get install -y \ gnupg2 \ postgresql-client \ hub \ - unzip + unzip \ + gnutls-bin \ + build-essential \ + clang \ + lldb\ + lld # Install dependencies for RocksDB. `liburing` is not available for Ubuntu 20.04, # so we use a PPA with the backport RUN add-apt-repository ppa:savoury1/virtualisation && \ apt-get update && \ apt-get install -y \ - gnutls-bin \ - build-essential \ - clang \ - lldb\ - lld \ liburing-dev \ libclang-dev @@ -81,6 +81,11 @@ RUN rustup default stable RUN cargo install --version=0.8.0 sqlx-cli RUN cargo install cargo-nextest +RUN git clone https://github.com/matter-labs/foundry-zksync +RUN cd foundry-zksync && cargo build --release --bins +RUN mv ./foundry-zksync/target/release/forge /usr/local/cargo/bin/ +RUN mv ./foundry-zksync/target/release/cast /usr/local/cargo/bin/ + # Copy compiler (both solc and zksolc) binaries # Obtain `solc` 0.8.20. 
RUN wget -c https://github.com/ethereum/solc-bin/raw/gh-pages/linux-amd64/solc-linux-amd64-v0.8.20%2Bcommit.a1b79de6 \ @@ -93,7 +98,7 @@ RUN wget -c https://github.com/matter-labs/zksolc-bin/raw/main/linux-amd64/zksol # Setup the environment ENV ZKSYNC_HOME=/usr/src/zksync -ENV PATH="${ZKSYNC_HOME}/bin:${PATH}" +ENV PATH="${ZKSYNC_HOME}/bin:${ZKSYNC_HOME}/zkstack_cli/zkstackup:${HOME}/.local/bin:${PATH}" ENV CI=1 RUN cargo install sccache ENV RUSTC_WRAPPER=/usr/local/cargo/bin/sccache diff --git a/docker/zk-environment/Dockerfile b/docker/zk-environment/Dockerfile index 53e53265311..c04e5720e4d 100644 --- a/docker/zk-environment/Dockerfile +++ b/docker/zk-environment/Dockerfile @@ -164,7 +164,7 @@ RUN wget -c https://sourceware.org/pub/valgrind/valgrind-3.20.0.tar.bz2 && \ # Setup the environment ENV ZKSYNC_HOME=/usr/src/zksync -ENV PATH="${ZKSYNC_HOME}/bin:${PATH}" +ENV PATH="${ZKSYNC_HOME}/bin:${ZKSYNC_HOME}/zkstack_cli/zkstackup:${HOME}/.local/bin:${PATH}" ENV CI=1 ENV RUSTC_WRAPPER=/usr/local/cargo/bin/sccache diff --git a/docs/guides/build-docker.md b/docs/guides/build-docker.md index a9e8f5d3e76..5dd9cff022b 100644 --- a/docs/guides/build-docker.md +++ b/docs/guides/build-docker.md @@ -25,6 +25,7 @@ contract-verifier:2.0 server-v2:2.0 prover:2.0 witness-generator:2.0 +external-node:2.0 ``` Alternatively, you may build only needed components - available targets are @@ -34,6 +35,7 @@ make -C ./docker build-contract-verifier make -C ./docker build-server-v2 make -C ./docker build-circuit-prover-gpu make -C ./docker build-witness-generator +make -C ./docker build-external-node ``` ## Building updated images diff --git a/docs/guides/external-node/00_quick_start.md b/docs/guides/external-node/00_quick_start.md index 67a1b89eef5..07e52085cf4 100644 --- a/docs/guides/external-node/00_quick_start.md +++ b/docs/guides/external-node/00_quick_start.md @@ -6,6 +6,8 @@ Install `docker compose` and `Docker` ## Running ZKsync node locally +These commands start ZKsync node locally inside docker. + To start a mainnet instance, run: ```sh @@ -34,9 +36,10 @@ cd docker-compose-examples sudo docker compose --file testnet-external-node-docker-compose.yml down --volumes ``` -You can see the status of the node (after recovery) in [local grafana dashboard](http://localhost:3000/dashboards). +### Observability -Those commands start ZKsync node locally inside docker. +You can see the status of the node (after recovery) in [local grafana dashboard](http://localhost:3000/dashboards). You +can also access a debug page with more information about the node [here](http://localhost:5000). The HTTP JSON-RPC API can be accessed on port `3060` and WebSocket API can be accessed on port `3061`. 
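A quick way to verify the endpoints mentioned above once the containers are up — a minimal sketch assuming the default ports from these compose examples; `eth_blockNumber` is a standard Ethereum JSON-RPC method, and the hostnames/ports come from the text above, not from the compose files themselves: ```sh # Probe the HTTP JSON-RPC API on port 3060: ask the node for its latest L2 block. curl -s -X POST -H 'Content-Type: application/json' \ --data '{"jsonrpc":"2.0","id":1,"method":"eth_blockNumber","params":[]}' \ http://localhost:3060 # Probe the consensus debug page (port 5000) and Grafana (port 3000). curl -sf -o /dev/null http://localhost:5000 && echo "debug page is up" curl -sf -o /dev/null http://localhost:3000 && echo "grafana is up" ``` Note that the RPC may not answer meaningfully until the recovery mentioned above has finished.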
diff --git a/docs/guides/external-node/docker-compose-examples/configs/mainnet_consensus_config.yaml b/docs/guides/external-node/docker-compose-examples/configs/mainnet_consensus_config.yaml index c2bef23b2e4..f2a0ce31875 100644 --- a/docs/guides/external-node/docker-compose-examples/configs/mainnet_consensus_config.yaml +++ b/docs/guides/external-node/docker-compose-examples/configs/mainnet_consensus_config.yaml @@ -1,6 +1,6 @@ server_addr: '0.0.0.0:3054' public_addr: '127.0.0.1:3054' -debug_page_addr: '127.0.0.1:5000' +debug_page_addr: '0.0.0.0:5000' max_payload_size: 5000000 gossip_dynamic_inbound_limit: 100 gossip_static_outbound: diff --git a/docs/guides/external-node/docker-compose-examples/configs/testnet_consensus_config.yaml b/docs/guides/external-node/docker-compose-examples/configs/testnet_consensus_config.yaml index 7a82705990c..a5f752fe405 100644 --- a/docs/guides/external-node/docker-compose-examples/configs/testnet_consensus_config.yaml +++ b/docs/guides/external-node/docker-compose-examples/configs/testnet_consensus_config.yaml @@ -1,6 +1,6 @@ server_addr: '0.0.0.0:3054' public_addr: '127.0.0.1:3054' -debug_page_addr: '127.0.0.1:5000' +debug_page_addr: '0.0.0.0:5000' max_payload_size: 5000000 gossip_dynamic_inbound_limit: 100 gossip_static_outbound: diff --git a/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/Consensus.json b/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/Consensus.json index be869ead40b..74b4b822801 100644 --- a/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/Consensus.json +++ b/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/Consensus.json @@ -24,7 +24,7 @@ "editable": true, "fiscalYearStartMonth": 0, "graphTooltip": 0, - "id": 3, + "id": 2, "links": [], "liveNow": false, "panels": [ @@ -1005,7 +1005,7 @@ "type": "prometheus", "uid": "PBFA97CFB590B2093" }, - "description": "Rate of RPC client requests, in packets per second.", + "description": "Rate of RPC client requests, in requests per second.", "fieldConfig": { "defaults": { "color": { @@ -1054,7 +1054,7 @@ } ] }, - "unit": "pps" + "unit": "reqps" }, "overrides": [] }, @@ -1098,7 +1098,7 @@ "type": "prometheus", "uid": "PBFA97CFB590B2093" }, - "description": "Rate of RPC server responses, in packets per second.", + "description": "Rate of RPC server responses, in requests per second.", "fieldConfig": { "defaults": { "color": { @@ -1147,7 +1147,7 @@ } ] }, - "unit": "pps" + "unit": "reqps" }, "overrides": [] }, @@ -1202,6 +1202,6 @@ "timezone": "", "title": "Consensus", "uid": "STAAEORNk", - "version": 4, + "version": 2, "weekStart": "" } \ No newline at end of file diff --git a/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/General.json b/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/General.json index d7177ae802e..0b3cb681e3b 100644 --- a/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/General.json +++ b/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/General.json @@ -24,7 +24,7 @@ "editable": true, "fiscalYearStartMonth": 0, "graphTooltip": 0, - "id": 2, + "id": 1, "links": [], "liveNow": false, "panels": [ @@ -103,13 +103,49 @@ "mode": "palette-classic" }, "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "bars", + 
"fillOpacity": 0, + "gradientMode": "none", "hideFrom": { "legend": false, "tooltip": false, "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "normal" + }, + "thresholdsStyle": { + "mode": "off" } }, "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, "unit": "bytes" }, "overrides": [] @@ -123,18 +159,11 @@ "id": 2, "options": { "legend": { + "calcs": [], "displayMode": "list", "placement": "bottom", "showLegend": true }, - "pieType": "pie", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, "tooltip": { "mode": "single", "sort": "none" @@ -167,7 +196,7 @@ } ], "title": "Total disk space usage", - "type": "piechart" + "type": "timeseries" }, { "datasource": { @@ -409,6 +438,7 @@ "type": "prometheus", "uid": "PBFA97CFB590B2093" }, + "description": "Shows the batch numbers on the local node and the server node.", "fieldConfig": { "defaults": { "color": { @@ -421,7 +451,7 @@ "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", - "fillOpacity": 33, + "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, @@ -470,13 +500,13 @@ "x": 12, "y": 16 }, - "id": 4, + "id": 39, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", - "showLegend": false + "showLegend": true }, "tooltip": { "mode": "single", @@ -489,14 +519,28 @@ "type": "prometheus", "uid": "PBFA97CFB590B2093" }, + "editorMode": "builder", "exemplar": true, - "expr": "sum by (stage) (external_node_sync_lag)", + "expr": "sum by(stage) (external_node_fetcher_l1_batch{stage=\"open\"})", "interval": "", - "legendFormat": "", + "legendFormat": "Server", + "range": true, "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "builder", + "expr": "zksync_consensus_storage_batch_store_next_persisted_batch", + "hide": false, + "legendFormat": "Local", + "range": true, + "refId": "B" } ], - "title": "Sync lag (blocks)", + "title": "L1 batch sync lag", "transformations": [], "type": "timeseries" }, @@ -546,8 +590,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -598,7 +641,6 @@ "type": "prometheus", "uid": "PBFA97CFB590B2093" }, - "description": "The percentage of transactions that are being reverted or that are succeeding.", "fieldConfig": { "defaults": { "color": { @@ -610,8 +652,8 @@ "axisLabel": "", "axisPlacement": "auto", "barAlignment": 0, - "drawStyle": "bars", - "fillOpacity": 0, + "drawStyle": "line", + "fillOpacity": 33, "gradientMode": "none", "hideFrom": { "legend": false, @@ -619,16 +661,19 @@ "viz": false }, "lineInterpolation": "linear", - "lineWidth": 2, + "lineStyle": { + "fill": "solid" + }, + "lineWidth": 1, "pointSize": 5, "scaleDistribution": { "type": "linear" }, "showPoints": "auto", - "spanNulls": false, + "spanNulls": true, "stacking": { "group": "A", - "mode": "percent" + "mode": "none" }, "thresholdsStyle": { "mode": "off" @@ -639,8 +684,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -657,13 +701,13 @@ "x": 12, "y": 24 }, - "id": 38, + "id": 4, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", - "showLegend": 
true + "showLegend": false }, "tooltip": { "mode": "single", @@ -676,14 +720,15 @@ "type": "prometheus", "uid": "PBFA97CFB590B2093" }, - "editorMode": "builder", - "expr": "sum by(status) (increase(server_state_keeper_tx_execution_result[1h]))", - "legendFormat": "__auto", - "range": true, + "exemplar": true, + "expr": "sum by (stage) (external_node_sync_lag)", + "interval": "", + "legendFormat": "", "refId": "A" } ], - "title": "Transactions execution status (%)", + "title": "L2 blocks sync lag", + "transformations": [], "type": "timeseries" }, { @@ -731,8 +776,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -778,6 +822,98 @@ "title": "Avg number of transactions in L2 block", "type": "timeseries" }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "description": "The percentage of transactions that are being reverted or that are succeeding.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "bars", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 2, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "percent" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 32 + }, + "id": 38, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "builder", + "expr": "sum by(status) (increase(server_state_keeper_tx_execution_result[1h]))", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Transactions execution status (%)", + "type": "timeseries" + }, { "datasource": { "type": "prometheus", @@ -823,8 +959,7 @@ "mode": "absolute", "steps": [ { - "color": "green", - "value": null + "color": "green" }, { "color": "red", @@ -839,7 +974,7 @@ "h": 8, "w": 12, "x": 12, - "y": 32 + "y": 40 }, "id": 34, "options": { @@ -886,6 +1021,6 @@ "timezone": "", "title": "General", "uid": "1", - "version": 9, + "version": 3, "weekStart": "" } \ No newline at end of file diff --git a/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/default.yml b/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/default.yml index 65f33c78b0e..fac65298bbc 100644 --- a/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/default.yml +++ b/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/default.yml @@ -5,6 +5,7 @@ providers: orgId: 1 folder: '' type: file + allowUiUpdates: true disableDeletion: false updateIntervalSeconds: 10 # How often Grafana will scan for changed dashboards options: diff --git a/docs/specs/README.md b/docs/specs/README.md index 1f163bf7845..d0b087ae93e 100644 --- a/docs/specs/README.md +++ 
b/docs/specs/README.md @@ -33,4 +33,4 @@ 1. [ZK Chain ecosystem](./zk_chains/README.md) - [Overview](./zk_chains/overview.md) - [Shared Bridge](./zk_chains/shared_bridge.md) - - [Hyperbridges](./zk_chains/hyperbridges.md) + - [Interop](./zk_chains/interop.md) diff --git a/docs/specs/zk_chains/README.md b/docs/specs/zk_chains/README.md index 4de575899dd..ce0a7c311a2 100644 --- a/docs/specs/zk_chains/README.md +++ b/docs/specs/zk_chains/README.md @@ -2,4 +2,4 @@ - [Overview](./overview.md) - [Shared Bridge](./shared_bridge.md) -- [Hyperbridges](./hyperbridges.md) +- [Interop](./interop.md) diff --git a/docs/specs/zk_chains/gateway.md b/docs/specs/zk_chains/gateway.md new file mode 100644 index 00000000000..f4ee68e242e --- /dev/null +++ b/docs/specs/zk_chains/gateway.md @@ -0,0 +1 @@ +# Gateway diff --git a/docs/specs/zk_chains/hyperbridges.md b/docs/specs/zk_chains/hyperbridges.md deleted file mode 100644 index 614fe61427e..00000000000 --- a/docs/specs/zk_chains/hyperbridges.md +++ /dev/null @@ -1,41 +0,0 @@ -# Hyperbridges - -## Introduction - -In the Shared bridge document we described how the L1 smart contracts work to support multiple chains, and we emphasized -that the core feature is hyperbridging, but we did not outline the hyperbridges themselves. This is because hyperbridges -are mostly L2 contracts. In this document we describe what hyperbridges are, and specify the necessary infrastructure. - -### Hyperbridge description - -Hyperbridges are trustless and cheap general native bridges between ZK Chains, allowing cross-chain function calls. -Trustlessness is achieved by relying on the main ZK Chain bridge to send a compressed message to L1, which is then sent -to and expanded on the destination ZK Chain. - -Technically they are a system of smart contracts that build on top of the enshrined L1<>L2 validating bridges, and can -interpret messages sent from L2 to L2 by verifying Merkle proofs. They are built alongside the protocol, they can -transfer the native asset of the ecosystem, and they can be used for asynchronous function calls between ZK Chains. - -![Hyperbridges](./img/hyperbridges.png) - -The trustless nature of hyperbridges allows the ecosystem to resemble a single VM. To illustrate imagine a new ZK Chain -joining the ecosystem. We will want ether/Dai/etc. to be accessible on this ZK Chain. This can be done automatically. -There will be a central erc20 deployer contract in the ecosystem, which will deploy the new ERC20 contract via the -hyperbridge. After the contract is deployed it will be able to interact other Dai contracts in the ecosystem. - -### High Level design - -![Hyperbridging](./img/hyperbridging.png) - -### L1 - -For the larger context see the [Shared Bridge](./shared_bridge.md) document, here we will focus on - -- HyperMailbox (part of Bridgehub). Contains the Hyperroot, root of Merkle tree of Hyperlogs. Hyperlogs are the L2->L1 - SysLogs that record the sent hyperbridge messages from the L2s. - -### L2 Contracts - -- Outbox system contract. It collects the hyperbridge txs into the hyperlog of the ZK Chain. -- Inbox system contract. This is where the hyperroot is imported and sent to L1 for settlement. 
Merkle proofs are - verified here, tx calls are started from here, nullifiers are stored here (add epochs later) diff --git a/docs/specs/zk_chains/interop.md b/docs/specs/zk_chains/interop.md new file mode 100644 index 00000000000..947742909b8 --- /dev/null +++ b/docs/specs/zk_chains/interop.md @@ -0,0 +1,49 @@ +# Interop + +## Introduction + +In the Shared bridge document we described how the L1 smart contracts work to support multiple chains, and we emphasized +that the core feature is interop. Interop happens via the same L1->L2 interface as described in the L1SharedBridge doc. +There is (with the interop upgrade) a Bridgehub, AssetRouter, NativeTokenVault and Nullifier deployed on every L2, and +they serve the same feature as their L1 counterparts. Namely: + +- The Bridgehub is used to start the transaction. +- The AssetRouter and NativeTokenVault are the bridge contracts that handle the tokens. +- The Nullifier is used to prevent reexecution of xL2 txs. + +### Interop process + +![Interop](./img/hyperbridging.png) + +The interop process has 7 main steps, each with its substeps: + +1. Starting the transaction on the sending chain + + - The user calls the Bridgehub contract. If they want to use a bridge they call + `requestL2TransactionTwoBridges`, if they want to make a direct call they call the `requestL2TransactionDirect` + function. + - The Bridgehub collects the base token fees necessary for the interop tx to be processed on the destination chain, + and if using the TwoBridges method the calldata and the destination contract (for more details see the Shared bridge + doc). + - The Bridgehub emits a `NewPriorityRequest` event, which is the same as the one in our Mailbox contract. This event + specifies the xL2 tx, which uses the same format as L1->L2 txs. This event can be picked up and used to receive + the tx. + - This new priority request is sent as an L2->L1 message, and it is included in the chain's Merkle tree of emitted txs. + +2. The chain settles its proof on L1 or the Gateway, whichever is used as the settlement layer for the chain. +3. On the Settlement Layer (SL), the MessageRoot is updated in the MessageRoot contract. The new data includes all the + L2->L1 messages that are emitted from the settling chain. +4. The receiving chain picks up the updated MessageRoot from the Settlement Layer. +5. Now the xL2 tx can be imported on the destination chain. Along with the tx, a Merkle proof needs to be sent to link + it to the MessageRoot. +6. Receiving the tx on the destination chain + + - On the destination chain the xL2 tx is verified. This means the Merkle proof is checked against the MessageRoot. + This shows that the xL2 tx was indeed sent. + - After this the tx can be executed. The tx hash is stored in the L2Nullifier contract, so that the tx cannot be + replayed. + - The specified contract is called, with the calldata, and the message sender = + `keccak256(originalMessageSender, originChainId) >> 160`. This is to prevent collisions of msg.sender + addresses. + +7. The destination chain settles on the SL and the MessageRoot that it imported is checked. diff --git a/docs/specs/zk_chains/shared_bridge.md b/docs/specs/zk_chains/shared_bridge.md index c464a7a154b..b43d3082b62 100644 --- a/docs/specs/zk_chains/shared_bridge.md +++ b/docs/specs/zk_chains/shared_bridge.md @@ -17,7 +17,7 @@ If you want to know more about ZK Chains, check this We want to create a system where: - ZK Chains should be launched permissionlessly within the ecosystem. 
-- Hyperbridges should enable unified liquidity for assets across the ecosystem. +- Interop should enable unified liquidity for assets across the ecosystem. - Multi-chain smart contracts need to be easy to develop, which means easy access to traditional bridges, and other supporting architecture. @@ -58,20 +58,19 @@ be able to leverage them when available). #### Bridgehub - Acts as a hub for bridges, so that they have a single point of communication with all ZK Chain contracts. This allows - L1 assets to be locked in the same contract for all ZK Chains, including L3s and validiums. The `Bridgehub` also - implements the following: + L1 assets to be locked in the same contract for all ZK Chains. The `Bridgehub` also implements the following features: - `Registry` This is where ZK Chains can register, starting in a permissioned manner, but with the goal to be - permissionless in the future. This is where their `chainID` is determined. L3s will also register here. This - `Registry` is also where State Transition contracts should register. Each chain has to specify its desired ST when - registering (Initially, only one will be available). + permissionless in the future. This is where their `chainID` is determined. Chains on Gateway will also register here. + This `Registry` is also where Chain Type Manager contracts should register. Each chain has to specify its desired CTM + when registering (Initially, only one will be available). ``` function newChain( uint256 _chainId, - address _stateTransition + address _chainTypeManager ) external returns (uint256 chainId); - function newStateTransition(address _stateTransition) external; + function newChainTypeManager(address _chainTypeManager) external; ``` - `BridgehubMailbox` routes messages to the Diamond proxy’s Mailbox facet based on chainID @@ -79,43 +78,73 @@ be able to leverage them when available). - Same as the current zkEVM [Mailbox](https://github.com/matter-labs/era-contracts/blob/main/l1-contracts/contracts/zksync/facets/Mailbox.sol), just with chainId, - - Ether needs to be deposited and withdrawn from here. - This is where L2 transactions can be requested. ``` - function requestL2Transaction( - uint256 _chainId, - address _contractL2, - uint256 _l2Value, - bytes calldata _calldata, - uint256 _l2GasLimit, - uint256 _l2GasPerPubdataByteLimit, - bytes[] calldata _factoryDeps, - address _refundRecipient - ) public payable override returns (bytes32 canonicalTxHash) { - address proofChain = bridgeheadStorage.proofChain[_chainId]; - canonicalTxHash = IProofChain(proofChain).requestL2TransactionBridgehead( - _chainId, - msg.value, - msg.sender, - _contractL2, - _l2Value, - _calldata, - _l2GasLimit, - _l2GasPerPubdataByteLimit, - _factoryDeps, - _refundRecipient - ); - } + function requestL2TransactionTwoBridges( + L2TransactionRequestTwoBridgesOuter calldata _request + ) ``` -- `Hypermailbox` - - This will allow general message passing (L2<>L2, L2<>L3, etc). This is where the `Mailbox` sends the `Hyperlogs`. - `Hyperlogs` are commitments to these messages sent from a single ZK Chain. `Hyperlogs` are aggregated into a - `HyperRoot` in the `HyperMailbox`. 
- - This component has not been implemented yet + + ``` + struct L2TransactionRequestTwoBridgesOuter { + uint256 chainId; + uint256 mintValue; + uint256 l2Value; + uint256 l2GasLimit; + uint256 l2GasPerPubdataByteLimit; + address refundRecipient; + address secondBridgeAddress; + uint256 secondBridgeValue; + bytes secondBridgeCalldata; + } + ``` -#### Main asset shared bridges +``` + struct L2TransactionRequestTwoBridgesInner { + bytes32 magicValue; + address l2Contract; + bytes l2Calldata; + bytes[] factoryDeps; + bytes32 txDataHash; +} +``` + +- The `requestL2TransactionTwoBridges` function should be used most of the time when bridging to a chain (the exception + is when the user bridges directly to a contract on the L2, without using a bridge contract on L1). The logic of it is + the following: + + - The user wants to bridge to the chain with the provided `L2TransactionRequestTwoBridgesOuter.chainId`. + - Two bridges are called, the baseTokenBridge (i.e. the L1SharedBridge or L1AssetRouter after the Gateway upgrade) and + an arbitrary second bridge. The Bridgehub will provide the original caller address to both bridges, which can + request that the appropriate amount of tokens be transferred from the caller to the bridge. The caller has to set + the appropriate allowance for both bridges. (Often the bridges coincide, but they don't have to.) + - The `L2TransactionRequestTwoBridgesOuter.mintValue` is the amount of baseTokens that will be minted on L2. This is + the amount of tokens that the baseTokenBridge will request from the user. If the baseToken is Eth, it will be + forwarded to the baseTokenBridge. + - The `L2TransactionRequestTwoBridgesOuter.l2Value` is the amount of tokens that will be deposited on L2. The second + bridge and the Mailbox receive this as an input (although our second bridge does not use the value). + - The `L2TransactionRequestTwoBridgesOuter.l2GasLimit` is the maximum amount of gas that will be spent on L2 to + complete the transaction. The Mailbox receives this as an input. + - The `L2TransactionRequestTwoBridgesOuter.l2GasPerPubdataByteLimit` is the maximum amount of gas per pubdata byte + that will be spent on L2 to complete the transaction. The Mailbox receives this as an input. + - The `L2TransactionRequestTwoBridgesOuter.refundRecipient` is the address that will be refunded for the gas spent on + L2. The Mailbox receives this as an input. + - The `L2TransactionRequestTwoBridgesOuter.secondBridgeAddress` is the address of the second bridge that will be + called. This is the arbitrary address that is called from the Bridgehub. + - The `L2TransactionRequestTwoBridgesOuter.secondBridgeValue` is the amount of tokens that will be deposited on L2. + The second bridge receives this value as the baseToken (i.e. Eth on L1). + - The `L2TransactionRequestTwoBridgesOuter.secondBridgeCalldata` is the calldata that will be passed to the second + bridge. This is the arbitrary calldata that is passed from the Bridgehub to the second bridge. + - The secondBridge returns the `L2TransactionRequestTwoBridgesInner` struct to the Bridgehub. This is also passed to + the Mailbox as input. This is where the destination contract, calldata, factoryDeps are determined on the L2. 
+ + This setup allows the user to bridge the baseToken of the origin chain A to a chain B with some other baseToken, by + specifying chain A's token in the secondBridgeValue, which will be minted on the destination chain as an ERC20 token, + and specifying the amount of chain B's token in the mintValue, which will be minted as the baseToken and used to cover the + gas costs. + +#### Main asset shared bridges - Some assets have to be natively supported (ETH, WETH) and it also makes sense to support some generally accepted token standards (ERC20 tokens), as this makes it easy to bridge those tokens (and ensures a single version of them exists on @@ -147,25 +176,18 @@ be able to leverage them when available). ); ``` -This topic is now covered more thoroughly by the Custom native token discussion. - -[Custom native token compatible with Hyperbridging](https://www.notion.so/Custom-native-token-compatible-with-Hyperbridging-54e190a1a76f44248cf84a38304a0641?pvs=21) +#### Chain Type Manager -#### State Transition - -- `StateTransition` A state transition manages proof verification and DA for multiple chains. It also implements the +- `ChainTypeManager` A chain type manager manages proof verification and DA for multiple chains. It also implements the following functionalities: - - `StateTransitionRegistry` The ST is shared for multiple chains, so initialization and upgrades have to be the same - for all chains. Registration is not permissionless but happens based on the registrations in the bridgehub’s - `Registry`. At registration a `DiamondProxy` is deployed and initialized with the appropriate `Facets` for each ZK - Chain. + - `ChainTypeRegistry` The CTM is shared for multiple chains, so initialization and upgrades have to be the same for all + chains. Registration is not permissionless but happens based on the registrations in the bridgehub’s `Registry`. At + registration a `DiamondProxy` is deployed and initialized with the appropriate `Facets` for each ZK Chain. - `Facets` and `Verifier` are shared across chains that relies on the same ST: `Base`, `Executor` , `Getters`, `Admin` , `Mailbox.`The `Verifier` is the contract that actually verifies the proof, and is called by the `Executor`. - Upgrade Mechanism The system requires all chains to be up-to-date with the latest implementation, so whenever an update is needed, we have to “force” each chain to update, but due to decentralization, we have to give each chain a - time frame (more information in the - [Upgrade Mechanism](https://www.notion.so/ZK-Stack-shared-bridge-alpha-version-a37c4746f8b54fb899d67e474bfac3bb?pvs=21) - section). This is done in the update mechanism contract, which is where the bootloader and system contracts are + time frame. This is done in the update mechanism contract, which is where the bootloader and system contracts are published, and the `ProposedUpgrade` is stored. Then each chain can call this upgrade for themselves as needed. After the deadline is over, the not-updated chains are frozen, that is, cannot post new proofs. Frozen chains can unfreeze by updating their proof system. @@ -180,6 +202,7 @@ This topic is now covered more thoroughly by the Custom native token discussion. - A chain might implement its own specific consensus mechanism. This needs its own contracts. Only this contract will be able to submit proofs to the State Transition contract. +- DA contracts. - Currently, the `ValidatorTimelock` is an example of such a contract. 
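To make the TwoBridges flow above concrete, here is an illustrative sketch (not part of this patch) of how such a call could be encoded with foundry's `cast`, which this PR builds from foundry-zksync. The tuple layout is taken from the `L2TransactionRequestTwoBridgesOuter` struct above; all addresses, amounts, and environment variables are hypothetical placeholders, not real deployments. ```sh # Hypothetical values for illustration only; BRIDGEHUB, SECOND_BRIDGE, # REFUND_RECIPIENT, L1_RPC and PRIVATE_KEY are placeholders. BRIDGEHUB=0x0000000000000000000000000000000000001111 SECOND_BRIDGE=0x0000000000000000000000000000000000002222 REFUND_RECIPIENT=0x0000000000000000000000000000000000003333 # The single argument is the L2TransactionRequestTwoBridgesOuter struct, passed # as a tuple in declaration order: (chainId, mintValue, l2Value, l2GasLimit, # l2GasPerPubdataByteLimit, refundRecipient, secondBridgeAddress, # secondBridgeValue, secondBridgeCalldata). With ETH as the base token, # mintValue is forwarded as msg.value (hence --value matches mintValue). cast send "$BRIDGEHUB" \ "requestL2TransactionTwoBridges((uint256,uint256,uint256,uint256,uint256,address,address,uint256,bytes))" \ "(270,1000000000000000000,0,10000000,800,$REFUND_RECIPIENT,$SECOND_BRIDGE,0,0x)" \ --value 1ether --rpc-url "$L1_RPC" --private-key "$PRIVATE_KEY" ``` Whether such a call succeeds depends on the actual bridge deployment, allowances, and fee parameters; the sketch only shows how the struct fields map onto a calldata encoding.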
### Components interactions @@ -199,22 +222,6 @@ features required to process proofs. The chain ID is set in the VM in a special -#### WETH Contract - -Ether, the native gas token is part of the core system contracts, so deploying it is not necessary. But WETH is just a -smart contract, it needs to be deployed and initialised. This happens from the L1 WETH bridge. This deploys on L2 the -corresponding bridge and ERC20 contract. This is deployed from L1, but the L2 address is known at deployment time. - -![deployWeth.png](./img/deployWeth.png) - -#### Deposit WETH - -The user can deposit WETH into the ecosystem using the WETH bridge on L1. The destination chain ID has to be specified. -The Bridgehub unwraps the WETH, and keeps the ETH, and send a message to the destination L2 to mint WETH to the -specified address. - -![depositWeth.png](./img/depositWeth.png) - --- ### Common Standards and Upgrades diff --git a/etc/contracts-test-data/contracts/mock-evm/mock-evm.sol b/etc/contracts-test-data/contracts/mock-evm/mock-evm.sol new file mode 100644 index 00000000000..baa0d37b753 --- /dev/null +++ b/etc/contracts-test-data/contracts/mock-evm/mock-evm.sol @@ -0,0 +1,243 @@ +// SPDX-License-Identifier: MIT OR Apache-2.0 + +pragma solidity ^0.8.0; + +/** + * Mock `KnownCodeStorage` counterpart producing `MarkedAsKnown` events and having `publishEVMBytecode` method + * added for EVM emulation, calls to which should be traced by the host. + */ +contract MockKnownCodeStorage { + event MarkedAsKnown(bytes32 indexed bytecodeHash, bool indexed sendBytecodeToL1); + + function markFactoryDeps(bool _shouldSendToL1, bytes32[] calldata _hashes) external { + unchecked { + uint256 hashesLen = _hashes.length; + for (uint256 i = 0; i < hashesLen; ++i) { + _markBytecodeAsPublished(_hashes[i], _shouldSendToL1); + } + } + } + + function markBytecodeAsPublished(bytes32 _bytecodeHash) external { + _markBytecodeAsPublished(_bytecodeHash, false); + } + + function _markBytecodeAsPublished(bytes32 _bytecodeHash, bool _shouldSendToL1) internal { + if (getMarker(_bytecodeHash) == 0) { + assembly { + sstore(_bytecodeHash, 1) + } + emit MarkedAsKnown(_bytecodeHash, _shouldSendToL1); + } + } + + bytes32 evmBytecodeHash; // For tests, it's OK to potentially collide with the marker slot for hash `bytes32(0)` + + /// Sets the EVM bytecode hash to be used in the next `publishEVMBytecode` call. + function setEVMBytecodeHash(bytes32 _bytecodeHash) external { + evmBytecodeHash = _bytecodeHash; + } + + function publishEVMBytecode(bytes calldata _bytecode) external { + bytes32 hash = evmBytecodeHash; + require(hash != bytes32(0), "EVM bytecode hash not set"); + + if (getMarker(evmBytecodeHash) == 0) { + assembly { + sstore(hash, 1) + } + } + emit MarkedAsKnown(hash, getMarker(hash) == 0); + evmBytecodeHash = bytes32(0); + } + + function getMarker(bytes32 _hash) public view returns (uint256 marker) { + assembly { + marker := sload(_hash) + } + } +} + +/** + * Mock `ContractDeployer` counterpart focusing on EVM bytecode deployment (via `create`; this isn't how real EVM bytecode deployment works, + * but it's good enough for low-level tests). 
+ */ +contract MockContractDeployer { + enum AccountAbstractionVersion { + None, + Version1 + } + + IAccountCodeStorage constant ACCOUNT_CODE_STORAGE_CONTRACT = IAccountCodeStorage(address(0x8002)); + MockKnownCodeStorage constant KNOWN_CODE_STORAGE_CONTRACT = MockKnownCodeStorage(address(0x8004)); + + /// The returned value is obviously incorrect in the general case, but works well enough when called by the bootloader. + function extendedAccountVersion(address _address) public view returns (AccountAbstractionVersion) { + return AccountAbstractionVersion.Version1; + } + + /// Replaces real deployment with publishing a surrogate EVM "bytecode". + /// @param _salt bytecode hash + /// @param _input bytecode to publish + function create( + bytes32 _salt, + bytes32, // ignored, since it's not possible to set arbitrarily + bytes calldata _input + ) external payable returns (address) { + KNOWN_CODE_STORAGE_CONTRACT.setEVMBytecodeHash(_salt); + KNOWN_CODE_STORAGE_CONTRACT.publishEVMBytecode(_input); + address newAddress = address(uint160(msg.sender) + 1); + ACCOUNT_CODE_STORAGE_CONTRACT.storeAccountConstructedCodeHash(newAddress, _salt); + return newAddress; + } +} + +interface IAccountCodeStorage { + function getRawCodeHash(address _address) external view returns (bytes32); + function storeAccountConstructedCodeHash(address _address, bytes32 _hash) external; +} + +interface IRecursiveContract { + function recurse(uint _depth) external returns (uint); +} + +/// Native incrementing library. Not actually a library to simplify deployment. +contract IncrementingContract { + // Should not collide with other storage slots + uint constant INCREMENTED_SLOT = 0x123; + + function getIncrementedValue() public view returns (uint _value) { + assembly { + _value := sload(INCREMENTED_SLOT) + } + } + + function increment(address _thisAddress, uint _thisBalance) external { + require(msg.sender == tx.origin, "msg.sender not retained"); + require(address(this) == _thisAddress, "this address"); + require(address(this).balance == _thisBalance, "this balance"); + assembly { + sstore(INCREMENTED_SLOT, add(sload(INCREMENTED_SLOT), 1)) + } + } + + /// Tests delegation to a native or EVM contract at the specified target. + function testDelegateCall(address _target) external { + uint valueSnapshot = getIncrementedValue(); + (bool success, ) = _target.delegatecall(abi.encodeCall( + IncrementingContract.increment, + (address(this), address(this).balance) + )); + require(success, "delegatecall reverted"); + require(getIncrementedValue() == valueSnapshot + 1, "invalid value"); + } + + function testStaticCall(address _target, uint _expectedValue) external { + (bool success, bytes memory rawValue) = _target.staticcall(abi.encodeCall( + this.getIncrementedValue, + () + )); + require(success, "static call reverted"); + (uint value) = abi.decode(rawValue, (uint)); + require(value == _expectedValue, "value mismatch"); + + (success, ) = _target.staticcall(abi.encodeCall( + IncrementingContract.increment, + (address(this), address(this).balance) + )); + require(!success, "staticcall should've reverted"); + } +} + +uint constant EVM_EMULATOR_STIPEND = 1 << 30; + +/** + * Mock EVM emulator used in low-level tests. + */ +contract MockEvmEmulator is IRecursiveContract, IncrementingContract { + IAccountCodeStorage constant ACCOUNT_CODE_STORAGE_CONTRACT = IAccountCodeStorage(address(0x8002)); + + /// Set to `true` for testing logic sanity. 
+ bool isUserSpace; + + modifier validEvmEntry() { + if (!isUserSpace) { + require(gasleft() >= EVM_EMULATOR_STIPEND, "no stipend"); + // Fetch bytecode for the executed contract. + bytes32 bytecodeHash = ACCOUNT_CODE_STORAGE_CONTRACT.getRawCodeHash(address(this)); + require(bytecodeHash != bytes32(0), "called contract not deployed"); + uint bytecodeVersion = uint(bytecodeHash) >> 248; + require(bytecodeVersion == 2, "non-EVM bytecode"); + + // Check that members of the current address are well-defined. + require(address(this).code.length != 0, "invalid code"); + require(address(this).codehash == bytecodeHash, "bytecode hash mismatch"); + } + _; + } + + function testPayment(uint _expectedValue, uint _expectedBalance) public payable validEvmEntry { + require(msg.value == _expectedValue, "unexpected msg.value"); + require(address(this).balance == _expectedBalance, "unexpected balance"); + } + + IRecursiveContract recursionTarget; + + function recurse(uint _depth) public validEvmEntry returns (uint) { + require(gasleft() < 2 * EVM_EMULATOR_STIPEND, "stipend provided multiple times"); + + if (_depth <= 1) { + return 1; + } else { + IRecursiveContract target = (address(recursionTarget) == address(0)) ? this : recursionTarget; + // The real emulator limits amount of gas when performing far calls by EVM gas, so we emulate this behavior as well. + uint gasToSend = isUserSpace ? gasleft() : (gasleft() - EVM_EMULATOR_STIPEND); + return target.recurse{gas: gasToSend}(_depth - 1) * _depth; + } + } + + function testRecursion(uint _depth, uint _expectedValue) external validEvmEntry returns (uint) { + require(recurse(_depth) == _expectedValue, "incorrect recursion"); + } + + function testExternalRecursion(uint _depth, uint _expectedValue) external validEvmEntry returns (uint) { + recursionTarget = new NativeRecursiveContract(IRecursiveContract(this)); + uint returnedValue = recurse(_depth); + recursionTarget = this; // This won't work on revert, but for tests, it's good enough + require(returnedValue == _expectedValue, "incorrect recursion"); + } + + MockContractDeployer constant CONTRACT_DEPLOYER_CONTRACT = MockContractDeployer(address(0x8006)); + + /// Emulates EVM contract deployment and a subsequent call to it in a single transaction. + function testDeploymentAndCall(bytes32 _evmBytecodeHash, bytes calldata _evmBytecode) external validEvmEntry { + IRecursiveContract newContract = IRecursiveContract(CONTRACT_DEPLOYER_CONTRACT.create( + _evmBytecodeHash, + _evmBytecodeHash, + _evmBytecode + )); + require(uint160(address(newContract)) == uint160(address(this)) + 1, "unexpected address"); + require(address(newContract).code.length > 0, "contract code length"); + require(address(newContract).codehash != bytes32(0), "contract code hash"); + + uint gasToSend = gasleft() - EVM_EMULATOR_STIPEND; + require(newContract.recurse{gas: gasToSend}(5) == 120, "unexpected recursive result"); + } + + fallback() external validEvmEntry { + require(msg.data.length == 0, "unsupported call"); + } +} + +contract NativeRecursiveContract is IRecursiveContract { + IRecursiveContract target; + + constructor(IRecursiveContract _target) { + target = _target; + } + + function recurse(uint _depth) external returns (uint) { + require(gasleft() < EVM_EMULATOR_STIPEND, "stipend spilled to native contract"); + return (_depth <= 1) ? 
1 : target.recurse(_depth - 1) * _depth; + } +} diff --git a/etc/env/base/contracts.toml b/etc/env/base/contracts.toml index bf69fd48e7b..bda8b88b548 100644 --- a/etc/env/base/contracts.toml +++ b/etc/env/base/contracts.toml @@ -29,8 +29,8 @@ RECURSION_CIRCUITS_SET_VKS_HASH = "0x18c1639094f58177409186e8c48d9f577c9410901d2 GENESIS_TX_HASH = "0xb99ebfea46cbe05a21cd80fe5597d97b204befc52a16303f579c607dc1ac2e2e" PRIORITY_TX_MAX_GAS_LIMIT = 72000000 DEPLOY_L2_BRIDGE_COUNTERPART_GAS_LIMIT = 10000000 -GENESIS_PROTOCOL_VERSION = "25" -GENESIS_PROTOCOL_SEMANTIC_VERSION = "0.25.2" +GENESIS_PROTOCOL_VERSION = "27" +GENESIS_PROTOCOL_SEMANTIC_VERSION = "0.27.0" L1_WETH_BRIDGE_IMPL_ADDR = "0x5E6D086F5eC079ADFF4FB3774CDf3e8D6a34F7E9" L1_WETH_BRIDGE_PROXY_ADDR = "0x5E6D086F5eC079ADFF4FB3774CDf3e8D6a34F7E9" L1_WETH_TOKEN_ADDR = "0x5E6D086F5eC079ADFF4FB3774CDf3e8D6a34F7E9" diff --git a/etc/env/base/eth_sender.toml b/etc/env/base/eth_sender.toml index aed36bb697f..29a4a14e964 100644 --- a/etc/env/base/eth_sender.toml +++ b/etc/env/base/eth_sender.toml @@ -57,8 +57,8 @@ default_priority_fee_per_gas = 1_000_000_000 max_base_fee_samples = 10_000 # These two are parameters of the base_fee_per_gas formula in GasAdjuster. # The possible formulas are: -# 1. base_fee_median * (A + B * time_in_mempool) -# 2. base_fee_median * A * B ^ time_in_mempool +# 1. base_fee_median * (A + B * time_in_mempool_in_l1_blocks) +# 2. base_fee_median * A * B ^ time_in_mempool_in_l1_blocks # Currently the second is used. # To confirm, see core/bin/zksync_core/src/eth_sender/gas_adjuster/mod.rs pricing_formula_parameter_a = 1.5 diff --git a/etc/env/base/rust.toml b/etc/env/base/rust.toml index d8bef020c64..18107f0d4f9 100644 --- a/etc/env/base/rust.toml +++ b/etc/env/base/rust.toml @@ -1,6 +1,6 @@ # Environment configuration for the Rust code # We don't provide the group name like `[rust]` here, because we don't want -# these variables to be prefixed during the compiling. +# these variables to be prefixed during the compiling. # `RUST_LOG` environment variable for `env_logger` # Here we use TOML multiline strings: newlines will be trimmed. 
@@ -26,7 +26,6 @@ zksync_node_sync=info,\ zksync_node_consensus=info,\ zksync_contract_verification_server=info,\ zksync_node_api_server=info,\ -zksync_tee_verifier_input_producer=info,\ zksync_consensus_bft=info,\ zksync_consensus_network=info,\ zksync_consensus_storage=info,\ diff --git a/etc/env/consensus_config.yaml b/etc/env/consensus_config.yaml index 304ea31fac9..2564865eeb3 100644 --- a/etc/env/consensus_config.yaml +++ b/etc/env/consensus_config.yaml @@ -1,3 +1,4 @@ +port: 3054 server_addr: "127.0.0.1:3054" public_addr: "127.0.0.1:3054" max_payload_size: 2500000 diff --git a/etc/env/en_consensus_config.yaml b/etc/env/en_consensus_config.yaml index f759e72e891..5c428866cb6 100644 --- a/etc/env/en_consensus_config.yaml +++ b/etc/env/en_consensus_config.yaml @@ -1,3 +1,4 @@ +port: 3055 server_addr: '127.0.0.1:3055' public_addr: '127.0.0.1:3055' max_payload_size: 2500000 diff --git a/etc/env/file_based/general.yaml b/etc/env/file_based/general.yaml index a4ba8c0201a..587ba4614a5 100644 --- a/etc/env/file_based/general.yaml +++ b/etc/env/file_based/general.yaml @@ -312,7 +312,7 @@ prometheus: observability: log_format: plain - log_directives: "zksync_node_test_utils=info,zksync_state_keeper=info,zksync_reorg_detector=info,zksync_consistency_checker=info,zksync_metadata_calculator=info,zksync_node_sync=info,zksync_node_consensus=info,zksync_contract_verification_server=info,zksync_node_api_server=info,zksync_tee_verifier_input_producer=info,zksync_node_framework=info,zksync_block_reverter=info,zksync_commitment_generator=debug,zksync_node_db_pruner=info,zksync_eth_sender=info,zksync_node_fee_model=info,zksync_node_genesis=info,zksync_house_keeper=info,zksync_proof_data_handler=info,zksync_shared_metrics=info,zksync_node_test_utils=info,zksync_vm_runner=info,zksync_consensus_bft=info,zksync_consensus_network=info,zksync_consensus_storage=info,zksync_core_leftovers=debug,zksync_server=debug,zksync_contract_verifier=debug,zksync_dal=info,zksync_db_connection=info,zksync_eth_client=info,zksync_eth_watch=debug,zksync_storage=info,zksync_db_manager=info,zksync_merkle_tree=info,zksync_state=debug,zksync_utils=debug,zksync_queued_job_processor=info,zksync_types=info,zksync_mempool=debug,loadnext=info,vm=info,zksync_object_store=info,zksync_external_node=info,zksync_witness_generator=info,zksync_prover_fri=info,zksync_witness_vector_generator=info,zksync_web3_decl=debug,zksync_health_check=debug,zksync_proof_fri_compressor=info,vise_exporter=error,snapshots_creator=debug,zksync_base_token_adjuster=debug,zksync_external_price_api=debug,zksync_external_proof_integration_api=info" + log_directives: 
"zksync_node_test_utils=info,zksync_state_keeper=info,zksync_reorg_detector=info,zksync_consistency_checker=info,zksync_metadata_calculator=info,zksync_node_sync=info,zksync_node_consensus=info,zksync_contract_verification_server=info,zksync_node_api_server=info,zksync_node_framework=info,zksync_block_reverter=info,zksync_commitment_generator=debug,zksync_node_db_pruner=info,zksync_eth_sender=info,zksync_node_fee_model=info,zksync_node_genesis=info,zksync_house_keeper=info,zksync_proof_data_handler=info,zksync_shared_metrics=info,zksync_node_test_utils=info,zksync_vm_runner=info,zksync_consensus_bft=info,zksync_consensus_network=info,zksync_consensus_storage=info,zksync_core_leftovers=debug,zksync_server=debug,zksync_contract_verifier=debug,zksync_dal=info,zksync_db_connection=info,zksync_eth_client=info,zksync_eth_watch=debug,zksync_storage=info,zksync_db_manager=info,zksync_merkle_tree=info,zksync_state=debug,zksync_utils=debug,zksync_queued_job_processor=info,zksync_types=info,zksync_mempool=debug,loadnext=info,vm=info,zksync_object_store=info,zksync_external_node=info,zksync_witness_generator=info,zksync_prover_fri=info,zksync_witness_vector_generator=info,zksync_web3_decl=debug,zksync_health_check=debug,zksync_proof_fri_compressor=info,vise_exporter=error,snapshots_creator=debug,zksync_base_token_adjuster=debug,zksync_external_price_api=debug,zksync_external_proof_integration_api=info" # Uncomment only if needed # sentry: # url: unset @@ -375,3 +375,10 @@ da_dispatcher: external_proof_integration_api: http_port: 3073 + +consensus: + port: 3054 + server_addr: "127.0.0.1:3054" + public_addr: "127.0.0.1:3054" + max_payload_size: 2500000 + gossip_dynamic_inbound_limit: 100 diff --git a/etc/env/file_based/genesis.yaml b/etc/env/file_based/genesis.yaml index 33634c253ba..212c17c2bf4 100644 --- a/etc/env/file_based/genesis.yaml +++ b/etc/env/file_based/genesis.yaml @@ -1,7 +1,6 @@ genesis_root: 0x526a5d3e384ff95a976283c79a976e0a2fb749e4631233f29d3765201efd937d genesis_batch_commitment: 0xb9794246425fd654cf6a4c2e9adfdd48aaaf97bf3b8ba6bdc88e1d141bcfa5b3 genesis_rollup_leaf_index: 64 -genesis_protocol_version: 25 default_aa_hash: 0x0100055d3993e14104994ca4d8cfa91beb9b544ee86894b45708b4824d832ff2 bootloader_hash: 0x010008c753336bc8d1ddca235602b9f31d346412b2d463cd342899f7bfb73baf l1_chain_id: 9 @@ -9,6 +8,8 @@ l2_chain_id: 270 fee_account: '0x0000000000000000000000000000000000000001' prover: dummy_verifier: true - snark_wrapper_vk_hash: 0x14f97b81e54b35fe673d8708cc1a19e1ea5b5e348e12d31e39824ed4f42bbca2 -genesis_protocol_semantic_version: 0.25.0 + recursion_scheduler_level_vk_hash: 0x14f97b81e54b35fe673d8708cc1a19e1ea5b5e348e12d31e39824ed4f42bbca2 +genesis_protocol_semantic_version: 0.27.0 l1_batch_commit_data_generator_mode: Rollup +# TODO: uncomment once EVM emulator is present in the `contracts` submodule +# evm_emulator_hash: 0x01000e53aa35d9d19fa99341c2e2901cf93b3668f01569dd5c6ca409c7696b91 diff --git a/etc/env/file_based/overrides/mainnet.yaml b/etc/env/file_based/overrides/mainnet.yaml index 0600abf694c..7565aac869a 100644 --- a/etc/env/file_based/overrides/mainnet.yaml +++ b/etc/env/file_based/overrides/mainnet.yaml @@ -1,5 +1,6 @@ state_keeper: - block_commit_deadline_ms: 3600000 + # Default batch seal time deadline: 8 hours + block_commit_deadline_ms: 28000000 minimal_l2_gas_price: 45250000 eth: sender: diff --git a/etc/env/file_based/overrides/testnet.yaml b/etc/env/file_based/overrides/testnet.yaml index e4da1ac96e2..d36cf9fc7bc 100644 --- a/etc/env/file_based/overrides/testnet.yaml 
+++ b/etc/env/file_based/overrides/testnet.yaml @@ -1,5 +1,6 @@ state_keeper: - block_commit_deadline_ms: 3600000 + # Default batch seal time deadline: 8 hours + block_commit_deadline_ms: 28000000 minimal_l2_gas_price: 25000000 eth: sender: diff --git a/etc/lint-config/ignore.yaml b/etc/lint-config/ignore.yaml index 3d0c4869df8..b4456a6c3fd 100644 --- a/etc/lint-config/ignore.yaml +++ b/etc/lint-config/ignore.yaml @@ -2,7 +2,8 @@ files: [ "KeysWithPlonkVerifier.sol", "TokenInit.sol", ".tslintrc.js", - ".prettierrc.js" + ".prettierrc.js", + "era-observability/README.md" ] dirs: [ "target", diff --git a/etc/multivm_bootloaders/vm_gateway/commit b/etc/multivm_bootloaders/vm_gateway/commit new file mode 100644 index 00000000000..a3547f57703 --- /dev/null +++ b/etc/multivm_bootloaders/vm_gateway/commit @@ -0,0 +1 @@ +a8bf0ca28d43899882a2e123e2fdf1379f0fd656 diff --git a/etc/multivm_bootloaders/vm_protocol_defense/fee_estimate.yul/fee_estimate.yul.zbin b/etc/multivm_bootloaders/vm_protocol_defense/fee_estimate.yul/fee_estimate.yul.zbin index f1e4fea448d..f1b46172d6d 100644 Binary files a/etc/multivm_bootloaders/vm_protocol_defense/fee_estimate.yul/fee_estimate.yul.zbin and b/etc/multivm_bootloaders/vm_protocol_defense/fee_estimate.yul/fee_estimate.yul.zbin differ diff --git a/etc/multivm_bootloaders/vm_protocol_defense/gas_test.yul/gas_test.yul.zbin b/etc/multivm_bootloaders/vm_protocol_defense/gas_test.yul/gas_test.yul.zbin index febc7363df0..34d17f1752f 100644 Binary files a/etc/multivm_bootloaders/vm_protocol_defense/gas_test.yul/gas_test.yul.zbin and b/etc/multivm_bootloaders/vm_protocol_defense/gas_test.yul/gas_test.yul.zbin differ diff --git a/etc/multivm_bootloaders/vm_protocol_defense/playground_batch.yul/playground_batch.yul.zbin b/etc/multivm_bootloaders/vm_protocol_defense/playground_batch.yul/playground_batch.yul.zbin index 8a27d4617fd..55d570779dc 100644 Binary files a/etc/multivm_bootloaders/vm_protocol_defense/playground_batch.yul/playground_batch.yul.zbin and b/etc/multivm_bootloaders/vm_protocol_defense/playground_batch.yul/playground_batch.yul.zbin differ diff --git a/etc/multivm_bootloaders/vm_protocol_defense/proved_batch.yul/proved_batch.yul.zbin b/etc/multivm_bootloaders/vm_protocol_defense/proved_batch.yul/proved_batch.yul.zbin index c784db5a53e..4ba51692817 100644 Binary files a/etc/multivm_bootloaders/vm_protocol_defense/proved_batch.yul/proved_batch.yul.zbin and b/etc/multivm_bootloaders/vm_protocol_defense/proved_batch.yul/proved_batch.yul.zbin differ diff --git a/etc/nix/tee_prover.nix b/etc/nix/tee_prover.nix index 0b424522dff..55545d1bb8e 100644 --- a/etc/nix/tee_prover.nix +++ b/etc/nix/tee_prover.nix @@ -1,12 +1,19 @@ -{ cargoArtifacts -, craneLib +{ craneLib , commonArgs }: -craneLib.buildPackage (commonArgs // { +let pname = "zksync_tee_prover"; + cargoExtraArgs = "--locked -p zksync_tee_prover"; +in +craneLib.buildPackage (commonArgs // { + inherit pname; version = (builtins.fromTOML (builtins.readFile ../../core/bin/zksync_tee_prover/Cargo.toml)).package.version; - cargoExtraArgs = "-p zksync_tee_prover --bin zksync_tee_prover"; - inherit cargoArtifacts; + inherit cargoExtraArgs; + + cargoArtifacts = craneLib.buildDepsOnly (commonArgs // { + inherit pname; + inherit cargoExtraArgs; + }); postInstall = '' strip $out/bin/zksync_tee_prover diff --git a/etc/nix/zksync.nix b/etc/nix/zksync.nix index c5fffc48b09..1ecac58b5d9 100644 --- a/etc/nix/zksync.nix +++ b/etc/nix/zksync.nix @@ -1,12 +1,14 @@ -{ cargoArtifacts -, craneLib +{ craneLib , commonArgs }: 
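An aside on the state_keeper override hunks above: both the mainnet and testnet overrides raise block_commit_deadline_ms from 3600000 (one hour) to 28000000. Strictly speaking, 28,000,000 ms is about 7 hours 47 minutes, slightly under the "8 hours" stated in the new comment (an exact 8 hours would be 28,800,000 ms), so the comment reads as an approximation. A quick check in Python:

    >>> 28_000_000 / 3_600_000   # hours
    7.777777777777778
    >>> 8 * 3_600_000            # ms in exactly 8 hours
    28800000

Separately, the Nix hunks in this region make an independent change: cargoArtifacts is no longer passed in as a shared argument; each package derivation now builds its own dependency cache via craneLib.buildDepsOnly (scoped with cargoExtraArgs to the zksync_tee_prover crate in tee_prover.nix above, and as a workspace-wide "zksync-era-workspace" cache in zksync.nix just below).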
craneLib.buildPackage (commonArgs // { pname = "zksync"; version = (builtins.fromTOML (builtins.readFile ../../core/bin/zksync_tee_prover/Cargo.toml)).package.version; cargoExtraArgs = "--all"; - inherit cargoArtifacts; + + cargoArtifacts = craneLib.buildDepsOnly (commonArgs // { + pname = "zksync-era-workspace"; + }); outputs = [ "out" diff --git a/etc/upgrades/1728066632-protocol-defense/common.json b/etc/upgrades/1728066632-protocol-defense/common.json new file mode 100644 index 00000000000..4011159e2dc --- /dev/null +++ b/etc/upgrades/1728066632-protocol-defense/common.json @@ -0,0 +1,5 @@ +{ + "name": "protocol-defense", + "creationTimestamp": 1728066632, + "protocolVersion": "0.25.0" +} \ No newline at end of file diff --git a/etc/upgrades/1728066632-protocol-defense/stage/crypto.json b/etc/upgrades/1728066632-protocol-defense/stage/crypto.json new file mode 100644 index 00000000000..65f8a3cc066 --- /dev/null +++ b/etc/upgrades/1728066632-protocol-defense/stage/crypto.json @@ -0,0 +1,6 @@ +{ + "verifier": { + "address": "0x06aa7a7B07108F7C5539645e32DD5c21cBF9EB66", + "txHash": "0x1e14eaa49a225d6707016cb7525ba3839e9589c0a85307105d1036133ce6c319" + } +} \ No newline at end of file diff --git a/etc/upgrades/1728066632-protocol-defense/stage/facetCuts.json b/etc/upgrades/1728066632-protocol-defense/stage/facetCuts.json new file mode 100644 index 00000000000..7389360d64e --- /dev/null +++ b/etc/upgrades/1728066632-protocol-defense/stage/facetCuts.json @@ -0,0 +1,198 @@ +[ + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0x0e18b681", + "0x64bf8d66", + "0xa9f6d941", + "0x27ae4c16", + "0x4dd18bf5", + "0x1cc5d103", + "0xbe6f11cf", + "0xe76db865", + "0x235d9eb5", + "0x21f603d7", + "0x4623c91d", + "0x17338945", + "0xfc57565f" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0x1de72e34", + "0xea6c029c", + "0xcdffacc6", + "0x52ef6b2c", + "0xadfca15e", + "0x7a0ed627", + "0x6e9960c3", + "0x98acd7a6", + "0x086a56f8", + "0x3591c1a0", + "0x79823c9a", + "0xd86970d8", + "0xfd791f3c", + "0xe5355c75", + "0x9d1b5a81", + "0x7b30c8da", + "0xd0468156", + "0x631f4bac", + "0x0ec6b0b7", + "0x33ce93fe", + "0x06d49e5b", + "0xf5c1182c", + "0x5518c73b", + "0xdb1f0bf9", + "0xb8c2f66f", + "0xef3f0bae", + "0xfe26699e", + "0x39607382", + "0xaf6a2dcd", + "0xa1954fc5", + "0x46657fe9", + "0x18e3a941", + "0x29b98c67", + "0xbd7c5412", + "0xc3bbd2d7", + "0xe81e0ba1", + "0xfacd743b", + "0x9cd939e4", + "0x56142d7a", + "0xb22dd78e", + "0x74f4d30d" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0x12f43dab", + "0x6c0960f9", + "0xb473318e", + "0x042901c7", + "0x263b7f8e", + "0xe4948f43", + "0xeb672419", + "0xc924de35" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0x701f58c5", + "0x6edd4f12", + "0xc3d93e7c", + "0x6f497ac6", + "0x7f61885c", + "0xc37533bb", + "0x97c09d34", + "0x0f23da43" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x90C0A0a63d7ff47BfAA1e9F8fa554dabc986504a", + "selectors": [ + "0x0e18b681", + "0x64bf8d66", + "0xa9f6d941", + "0x27ae4c16", + "0x4dd18bf5", + "0x1cc5d103", + "0xbe6f11cf", + "0xe76db865", + "0x235d9eb5", + "0x21f603d7", + "0x4623c91d", + "0x17338945", + "0xfc57565f" + ], + "action": 0, + "isFreezable": false + }, + { + "facet": "0x81754d2E48e3e553ba6Dfd193FC72B3A0c6076d9", + "selectors": [ + "0x1de72e34", + 
"0xea6c029c", + "0xcdffacc6", + "0x52ef6b2c", + "0xadfca15e", + "0x7a0ed627", + "0x6e9960c3", + "0x98acd7a6", + "0x086a56f8", + "0x3591c1a0", + "0x79823c9a", + "0xd86970d8", + "0xfd791f3c", + "0xe5355c75", + "0x9d1b5a81", + "0x7b30c8da", + "0xd0468156", + "0x631f4bac", + "0x0ec6b0b7", + "0x33ce93fe", + "0x06d49e5b", + "0xf5c1182c", + "0x5518c73b", + "0xdb1f0bf9", + "0xb8c2f66f", + "0xef3f0bae", + "0xfe26699e", + "0x39607382", + "0xaf6a2dcd", + "0xa1954fc5", + "0x46657fe9", + "0x18e3a941", + "0x29b98c67", + "0xbd7c5412", + "0xc3bbd2d7", + "0xe81e0ba1", + "0xfacd743b", + "0x9cd939e4", + "0x56142d7a", + "0xb22dd78e", + "0x74f4d30d" + ], + "action": 0, + "isFreezable": false + }, + { + "facet": "0x922805Cf0C00C9A19C14603529Fb1a6f63861d80", + "selectors": [ + "0x12f43dab", + "0x6c0960f9", + "0xb473318e", + "0x042901c7", + "0x263b7f8e", + "0xe4948f43", + "0xeb672419", + "0xc924de35" + ], + "action": 0, + "isFreezable": true + }, + { + "facet": "0xBB13642F795014E0EAC2b0d52ECD5162ECb66712", + "selectors": [ + "0x701f58c5", + "0x6edd4f12", + "0xc3d93e7c", + "0x6f497ac6", + "0x7f61885c", + "0xc37533bb", + "0x97c09d34", + "0x0f23da43" + ], + "action": 0, + "isFreezable": true + } +] \ No newline at end of file diff --git a/etc/upgrades/1728066632-protocol-defense/stage/facets.json b/etc/upgrades/1728066632-protocol-defense/stage/facets.json new file mode 100644 index 00000000000..acc6456181e --- /dev/null +++ b/etc/upgrades/1728066632-protocol-defense/stage/facets.json @@ -0,0 +1,18 @@ +{ + "ExecutorFacet": { + "address": "0xBB13642F795014E0EAC2b0d52ECD5162ECb66712", + "txHash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "AdminFacet": { + "address": "0x90C0A0a63d7ff47BfAA1e9F8fa554dabc986504a", + "txHash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "GettersFacet": { + "address": "0x81754d2E48e3e553ba6Dfd193FC72B3A0c6076d9", + "txHash": "0x0000000000000000000000000000000000000000000000000000000000000000" + }, + "MailboxFacet": { + "address": "0x922805Cf0C00C9A19C14603529Fb1a6f63861d80", + "txHash": "0x0000000000000000000000000000000000000000000000000000000000000000" + } +} \ No newline at end of file diff --git a/etc/upgrades/1728066632-protocol-defense/stage/l2Upgrade.json b/etc/upgrades/1728066632-protocol-defense/stage/l2Upgrade.json new file mode 100644 index 00000000000..4ebb6009f3f --- /dev/null +++ b/etc/upgrades/1728066632-protocol-defense/stage/l2Upgrade.json @@ -0,0 +1,394 @@ +{ + "systemContracts": [ + { + "name": "EmptyContract", + "bytecodeHashes": [ + "0x010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd" + ], + "address": "0x0000000000000000000000000000000000000000" + }, + { + "name": "Ecrecover", + "bytecodeHashes": [ + "0x010000113d6b03e34605f26aa1fc6fb8953561eb55bb5ea192a5a38f7de3053b" + ], + "address": "0x0000000000000000000000000000000000000001" + }, + { + "name": "SHA256", + "bytecodeHashes": [ + "0x010000171e4e61b14feacd43cb555bffa5f194d38117132957708dcef83ac15a" + ], + "address": "0x0000000000000000000000000000000000000002" + }, + { + "name": "EcAdd", + "bytecodeHashes": [ + "0x01000087be6181fcb16bebb0567c58b658eec345822aec1d42d471e84f758b85" + ], + "address": "0x0000000000000000000000000000000000000006" + }, + { + "name": "EcMul", + "bytecodeHashes": [ + "0x010000bd553a916fcda3726f7b6b3ccfc17887166982915ced63abc78ba43b66" + ], + "address": "0x0000000000000000000000000000000000000007" + }, + { + "name": "EcPairing", + "bytecodeHashes": [ + 
"0x01000f1b5f8dd50a00b502d2663746a49a81a01857b6ee1e1b38c9959142b299" + ], + "address": "0x0000000000000000000000000000000000000008" + }, + { + "name": "EmptyContract", + "bytecodeHashes": [ + "0x010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd" + ], + "address": "0x0000000000000000000000000000000000008001" + }, + { + "name": "AccountCodeStorage", + "bytecodeHashes": [ + "0x0100005d05a277543946914759aa4a6c403604b828f80d00b900c669c3d224e1" + ], + "address": "0x0000000000000000000000000000000000008002" + }, + { + "name": "NonceHolder", + "bytecodeHashes": [ + "0x010000d97de8c14cd36b1ce06cd7f44a09f6093ec8eb4041629c0fc2116d0c73" + ], + "address": "0x0000000000000000000000000000000000008003" + }, + { + "name": "KnownCodesStorage", + "bytecodeHashes": [ + "0x0100006f0f209c9e6d06b1327db1257b15fa7a8b9864ee5ccd12cd3f8bc40ac9" + ], + "address": "0x0000000000000000000000000000000000008004" + }, + { + "name": "ImmutableSimulator", + "bytecodeHashes": [ + "0x01000039785a8e0d342a49b6b6c6e156801b816434d93bee85d33f56d56b4f9a" + ], + "address": "0x0000000000000000000000000000000000008005" + }, + { + "name": "ContractDeployer", + "bytecodeHashes": [ + "0x010004e5d52d692822d5c54ac87de3297f39be0e4a6f72f2830ae5ac856684ee" + ], + "address": "0x0000000000000000000000000000000000008006" + }, + { + "name": "L1Messenger", + "bytecodeHashes": [ + "0x010002955993e8ff8190e388e94a6bb791fbe9c6388e5011c52cb587a4ebf05e" + ], + "address": "0x0000000000000000000000000000000000008008" + }, + { + "name": "MsgValueSimulator", + "bytecodeHashes": [ + "0x0100005da36075675b98f85fe90df11c1d526f6b12925da3a55a8b9c02aaac5f" + ], + "address": "0x0000000000000000000000000000000000008009" + }, + { + "name": "L2BaseToken", + "bytecodeHashes": [ + "0x01000103174a90beadc2cffe3e81bdb7b8a576174e888809c1953175fd3015b4" + ], + "address": "0x000000000000000000000000000000000000800a" + }, + { + "name": "SystemContext", + "bytecodeHashes": [ + "0x010001a5eabf9e28288b7ab7e1db316148021347460cfb4314570956867d5af5" + ], + "address": "0x000000000000000000000000000000000000800b" + }, + { + "name": "BootloaderUtilities", + "bytecodeHashes": [ + "0x010007c7bb63f64649098bf75f4baa588db20f445b4d20b7cca972d5d8f973ce" + ], + "address": "0x000000000000000000000000000000000000800c" + }, + { + "name": "EventWriter", + "bytecodeHashes": [ + "0x010000159b30cba9e2096353695b63ca5cbf566416a545a6bcb2ff2e4e672f98" + ], + "address": "0x000000000000000000000000000000000000800d" + }, + { + "name": "Compressor", + "bytecodeHashes": [ + "0x0100014fb4f05ae09288cbcf4fa7a09ca456910f6e69be5ac2c2dfc8d71d1576" + ], + "address": "0x000000000000000000000000000000000000800e" + }, + { + "name": "ComplexUpgrader", + "bytecodeHashes": [ + "0x0100004da9f3aa5e4febcc53522cb7ee6949369fde25dd79e977752b82b9fd5d" + ], + "address": "0x000000000000000000000000000000000000800f" + }, + { + "name": "Keccak256", + "bytecodeHashes": [ + "0x0100000ff991d5847f1e9c10c5969d0f03b34a25411ad86d5cb3e0d9c3931e0b" + ], + "address": "0x0000000000000000000000000000000000008010" + }, + { + "name": "CodeOracle", + "bytecodeHashes": [ + "0x01000023d652655672eafbb0adc385bd423a4a59f752a28f3dde16e74fa205e3" + ], + "address": "0x0000000000000000000000000000000000008012" + }, + { + "name": "P256Verify", + "bytecodeHashes": [ + "0x010000116595cfcc96291f95d47ede2ce630f25ccbd7428f00dc7f8135fb565a" + ], + "address": "0x0000000000000000000000000000000000000100" + }, + { + "name": "PubdataChunkPublisher", + "bytecodeHashes": [ + "0x010000470e396f376539289b7975b6866914a8a0994008a02987edac8be81db7" + ], + 
"address": "0x0000000000000000000000000000000000008011" + }, + { + "name": "Create2Factory", + "bytecodeHashes": [ + "0x010000495bd172e90725e6bfafe73e36a288d616d4673f5347eeb819a78bf546" + ], + "address": "0x0000000000000000000000000000000000010000" + } + ], + "defaultAA": { + "name": "DefaultAccount", + "bytecodeHashes": [ + "0x0100055dba11508480be023137563caec69debc85f826cb3a4b68246a7cabe30" + ] + }, + "bootloader": { + "name": "Bootloader", + "bytecodeHashes": [ + "0x010008c3be57ae5800e077b6c2056d9d75ad1a7b4f0ce583407961cc6fe0b678" + ] + }, + "forcedDeployments": [ + { + "bytecodeHash": "0x010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd", + "newAddress": "0x0000000000000000000000000000000000000000", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010000113d6b03e34605f26aa1fc6fb8953561eb55bb5ea192a5a38f7de3053b", + "newAddress": "0x0000000000000000000000000000000000000001", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010000171e4e61b14feacd43cb555bffa5f194d38117132957708dcef83ac15a", + "newAddress": "0x0000000000000000000000000000000000000002", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x01000087be6181fcb16bebb0567c58b658eec345822aec1d42d471e84f758b85", + "newAddress": "0x0000000000000000000000000000000000000006", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010000bd553a916fcda3726f7b6b3ccfc17887166982915ced63abc78ba43b66", + "newAddress": "0x0000000000000000000000000000000000000007", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x01000f1b5f8dd50a00b502d2663746a49a81a01857b6ee1e1b38c9959142b299", + "newAddress": "0x0000000000000000000000000000000000000008", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd", + "newAddress": "0x0000000000000000000000000000000000008001", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x0100005d05a277543946914759aa4a6c403604b828f80d00b900c669c3d224e1", + "newAddress": "0x0000000000000000000000000000000000008002", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010000d97de8c14cd36b1ce06cd7f44a09f6093ec8eb4041629c0fc2116d0c73", + "newAddress": "0x0000000000000000000000000000000000008003", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x0100006f0f209c9e6d06b1327db1257b15fa7a8b9864ee5ccd12cd3f8bc40ac9", + "newAddress": "0x0000000000000000000000000000000000008004", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x01000039785a8e0d342a49b6b6c6e156801b816434d93bee85d33f56d56b4f9a", + "newAddress": "0x0000000000000000000000000000000000008005", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010004e5d52d692822d5c54ac87de3297f39be0e4a6f72f2830ae5ac856684ee", + "newAddress": "0x0000000000000000000000000000000000008006", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010002955993e8ff8190e388e94a6bb791fbe9c6388e5011c52cb587a4ebf05e", + "newAddress": "0x0000000000000000000000000000000000008008", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x0100005da36075675b98f85fe90df11c1d526f6b12925da3a55a8b9c02aaac5f", + "newAddress": "0x0000000000000000000000000000000000008009", + 
"value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x01000103174a90beadc2cffe3e81bdb7b8a576174e888809c1953175fd3015b4", + "newAddress": "0x000000000000000000000000000000000000800a", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010001a5eabf9e28288b7ab7e1db316148021347460cfb4314570956867d5af5", + "newAddress": "0x000000000000000000000000000000000000800b", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010007c7bb63f64649098bf75f4baa588db20f445b4d20b7cca972d5d8f973ce", + "newAddress": "0x000000000000000000000000000000000000800c", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010000159b30cba9e2096353695b63ca5cbf566416a545a6bcb2ff2e4e672f98", + "newAddress": "0x000000000000000000000000000000000000800d", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x0100014fb4f05ae09288cbcf4fa7a09ca456910f6e69be5ac2c2dfc8d71d1576", + "newAddress": "0x000000000000000000000000000000000000800e", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x0100004da9f3aa5e4febcc53522cb7ee6949369fde25dd79e977752b82b9fd5d", + "newAddress": "0x000000000000000000000000000000000000800f", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x0100000ff991d5847f1e9c10c5969d0f03b34a25411ad86d5cb3e0d9c3931e0b", + "newAddress": "0x0000000000000000000000000000000000008010", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x01000023d652655672eafbb0adc385bd423a4a59f752a28f3dde16e74fa205e3", + "newAddress": "0x0000000000000000000000000000000000008012", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010000116595cfcc96291f95d47ede2ce630f25ccbd7428f00dc7f8135fb565a", + "newAddress": "0x0000000000000000000000000000000000000100", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010000470e396f376539289b7975b6866914a8a0994008a02987edac8be81db7", + "newAddress": "0x0000000000000000000000000000000000008011", + "value": 0, + "input": "0x", + "callConstructor": false + }, + { + "bytecodeHash": "0x010000495bd172e90725e6bfafe73e36a288d616d4673f5347eeb819a78bf546", + "newAddress": "0x0000000000000000000000000000000000010000", + "value": 0, + "input": "0x", + "callConstructor": false + } + ], + "forcedDeploymentCalldata": 
"0xe9f18c1700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000019000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000003e000000000000000000000000000000000000000000000000000000000000004a00000000000000000000000000000000000000000000000000000000000000560000000000000000000000000000000000000000000000000000000000000062000000000000000000000000000000000000000000000000000000000000006e000000000000000000000000000000000000000000000000000000000000007a00000000000000000000000000000000000000000000000000000000000000860000000000000000000000000000000000000000000000000000000000000092000000000000000000000000000000000000000000000000000000000000009e00000000000000000000000000000000000000000000000000000000000000aa00000000000000000000000000000000000000000000000000000000000000b600000000000000000000000000000000000000000000000000000000000000c200000000000000000000000000000000000000000000000000000000000000ce00000000000000000000000000000000000000000000000000000000000000da00000000000000000000000000000000000000000000000000000000000000e600000000000000000000000000000000000000000000000000000000000000f200000000000000000000000000000000000000000000000000000000000000fe000000000000000000000000000000000000000000000000000000000000010a00000000000000000000000000000000000000000000000000000000000001160000000000000000000000000000000000000000000000000000000000000122000000000000000000000000000000000000000000000000000000000000012e000000000000000000000000000000000000000000000000000000000000013a000000000000000000000000000000000000000000000000000000000000014600000000000000000000000000000000000000000000000000000000000001520010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000113d6b03e34605f26aa1fc6fb8953561eb55bb5ea192a5a38f7de3053b00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000171e4e61b14feacd43cb555bffa5f194d38117132957708dcef83ac15a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000087be6181fcb16bebb0567c58b658eec345822aec1d42d471e84f758b8500000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000bd553a916fcda3726f7b6b3ccfc17887166982915ced63abc78ba43b66000000000000000000000000000000000000000000000000000000000000000700000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000f1b5f8dd50a00b502d2663746a49a81a01857b6ee1e1b38c9959142b29900000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd00000000000000000000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005d05a277543946914759aa4a6c403604b828f80d00b900c669c3d224e100000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000d97de8c14cd36b1ce06cd7f44a09f6093ec8eb4041629c0fc2116d0c7300000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100006f0f209c9e6d06b1327db1257b15fa7a8b9864ee5ccd12cd3f8bc40ac900000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000039785a8e0d342a49b6b6c6e156801b816434d93bee85d33f56d56b4f9a00000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010004e5d52d692822d5c54ac87de3297f39be0e4a6f72f2830ae5ac856684ee00000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010002955993e8ff8190e388e94a6bb791fbe9c6388e5011c52cb587a4ebf05e00000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005da36075675b98f85fe90df11c1d526f6b12925da3a55a8b9c02aaac5f00000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000
0000001000103174a90beadc2cffe3e81bdb7b8a576174e888809c1953175fd3015b4000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001a5eabf9e28288b7ab7e1db316148021347460cfb4314570956867d5af5000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010007c7bb63f64649098bf75f4baa588db20f445b4d20b7cca972d5d8f973ce000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000159b30cba9e2096353695b63ca5cbf566416a545a6bcb2ff2e4e672f98000000000000000000000000000000000000000000000000000000000000800d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100014fb4f05ae09288cbcf4fa7a09ca456910f6e69be5ac2c2dfc8d71d1576000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100004da9f3aa5e4febcc53522cb7ee6949369fde25dd79e977752b82b9fd5d000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100000ff991d5847f1e9c10c5969d0f03b34a25411ad86d5cb3e0d9c3931e0b00000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000023d652655672eafbb0adc385bd423a4a59f752a28f3dde16e74fa205e300000000000000000000000000000000000000000000000000000000000080120000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000116595cfcc96291f95d47ede2ce630f25ccbd7428f00dc7f8135fb565a00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000470e396f376539289b7975b6866914a8a0994008a02987edac8be81db7000000000000000000000000000000
00000000000000000000000000000080110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000495bd172e90725e6bfafe73e36a288d616d4673f5347eeb819a78bf54600000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000", + "calldata": "0xe9f18c1700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000019000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000003e000000000000000000000000000000000000000000000000000000000000004a00000000000000000000000000000000000000000000000000000000000000560000000000000000000000000000000000000000000000000000000000000062000000000000000000000000000000000000000000000000000000000000006e000000000000000000000000000000000000000000000000000000000000007a00000000000000000000000000000000000000000000000000000000000000860000000000000000000000000000000000000000000000000000000000000092000000000000000000000000000000000000000000000000000000000000009e00000000000000000000000000000000000000000000000000000000000000aa00000000000000000000000000000000000000000000000000000000000000b600000000000000000000000000000000000000000000000000000000000000c200000000000000000000000000000000000000000000000000000000000000ce00000000000000000000000000000000000000000000000000000000000000da00000000000000000000000000000000000000000000000000000000000000e600000000000000000000000000000000000000000000000000000000000000f200000000000000000000000000000000000000000000000000000000000000fe000000000000000000000000000000000000000000000000000000000000010a00000000000000000000000000000000000000000000000000000000000001160000000000000000000000000000000000000000000000000000000000000122000000000000000000000000000000000000000000000000000000000000012e000000000000000000000000000000000000000000000000000000000000013a000000000000000000000000000000000000000000000000000000000000014600000000000000000000000000000000000000000000000000000000000001520010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000113d6b03e34605f26aa1fc6fb8953561eb55bb5ea192a5a38f7de3053b00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000171e4e61b14feacd43cb555bffa5f194d38117132957708dcef83ac15a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000
00000000000000000000000000001000087be6181fcb16bebb0567c58b658eec345822aec1d42d471e84f758b8500000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000bd553a916fcda3726f7b6b3ccfc17887166982915ced63abc78ba43b6600000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000f1b5f8dd50a00b502d2663746a49a81a01857b6ee1e1b38c9959142b29900000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd00000000000000000000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005d05a277543946914759aa4a6c403604b828f80d00b900c669c3d224e100000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000d97de8c14cd36b1ce06cd7f44a09f6093ec8eb4041629c0fc2116d0c7300000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100006f0f209c9e6d06b1327db1257b15fa7a8b9864ee5ccd12cd3f8bc40ac900000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000039785a8e0d342a49b6b6c6e156801b816434d93bee85d33f56d56b4f9a00000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010004e5d52d692822d5c54ac87de3297f39be0e4a6f72f2830ae5ac856684ee00000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010002955993e8ff8190e388e94a6bb791fbe9c6388e5011c52cb587a4ebf05e00000000
000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005da36075675b98f85fe90df11c1d526f6b12925da3a55a8b9c02aaac5f00000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000103174a90beadc2cffe3e81bdb7b8a576174e888809c1953175fd3015b4000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001a5eabf9e28288b7ab7e1db316148021347460cfb4314570956867d5af5000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010007c7bb63f64649098bf75f4baa588db20f445b4d20b7cca972d5d8f973ce000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000159b30cba9e2096353695b63ca5cbf566416a545a6bcb2ff2e4e672f98000000000000000000000000000000000000000000000000000000000000800d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100014fb4f05ae09288cbcf4fa7a09ca456910f6e69be5ac2c2dfc8d71d1576000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100004da9f3aa5e4febcc53522cb7ee6949369fde25dd79e977752b82b9fd5d000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100000ff991d5847f1e9c10c5969d0f03b34a25411ad86d5cb3e0d9c3931e0b00000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000023d652655672eafbb0adc385bd423a4a59f752a28f3dde16e74fa205e300000000000000000000000000000000000000000000000000000000000080120000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000116595cfcc96291f95d47ede2ce630f25ccbd7428f00dc7f8135fb565a00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000470e396f376539289b7975b6866914a8a0994008a02987edac8be81db700000000000000000000000000000000000000000000000000000000000080110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000495bd172e90725e6bfafe73e36a288d616d4673f5347eeb819a78bf54600000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000", + "tx": { + "txType": 254, + "from": "0x0000000000000000000000000000000000008007", + "to": "0x0000000000000000000000000000000000008006", + "gasLimit": 72000000, + "gasPerPubdataByteLimit": 800, + "maxFeePerGas": 0, + "maxPriorityFeePerGas": 0, + "paymaster": 0, + "nonce": 25, + "value": 0, + "reserved": [ + 0, + 0, + 0, + 0 + ], + "data": "0xe9f18c1700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000019000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000003e000000000000000000000000000000000000000000000000000000000000004a00000000000000000000000000000000000000000000000000000000000000560000000000000000000000000000000000000000000000000000000000000062000000000000000000000000000000000000000000000000000000000000006e000000000000000000000000000000000000000000000000000000000000007a00000000000000000000000000000000000000000000000000000000000000860000000000000000000000000000000000000000000000000000000000000092000000000000000000000000000000000000000000000000000000000000009e00000000000000000000000000000000000000000000000000000000000000aa00000000000000000000000000000000000000000000000000000000000000b600000000000000000000000000000000000000000000000000000000000000c200000000000000000000000000000000000000000000000000000000000000ce00000000000000000000000000000000000000000000000000000000000000da00000000000000000000000000000000000000000000000000000000000000e600000000000000000000000000000000000000000000000000000000000000f200000000000000000000000000000000000000000000000000000000000000fe000000000000000000000000000000000000000000000000000000000000010a00000000000000000000000000000000000000000000000000000000000001160000000000000000000000000000000000000000000000000000000000000122000000000000000000000000000000000000000000000000000000000000012e000000000000000000000000000000000000000000000000000000000000013a000000000000000000000000000000000000000000000000000000000000014600000000000000000000000000000000000000000000000000000000000001520010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd0000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000113d6b03e34605f26aa1fc6fb8953561eb55bb5ea192a5a38f7de3053b00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000171e4e61b14feacd43cb555bffa5f194d38117132957708dcef83ac15a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000087be6181fcb16bebb0567c58b658eec345822aec1d42d471e84f758b8500000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000bd553a916fcda3726f7b6b3ccfc17887166982915ced63abc78ba43b6600000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000f1b5f8dd50a00b502d2663746a49a81a01857b6ee1e1b38c9959142b29900000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd00000000000000000000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005d05a277543946914759aa4a6c403604b828f80d00b900c669c3d224e100000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000d97de8c14cd36b1ce06cd7f44a09f6093ec8eb4041629c0fc2116d0c7300000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100006f0f209c9e6d06b1327db1257b15fa7a8b9864ee5ccd12cd3f8bc40ac90000000000000000000000000000000000000000000000000000000000008004000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000039785a8e0d342a49b6b6c6e156801b816434d93bee85d33f56d56b4f9a00000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010004e5d52d692822d5c54ac87de3297f39be0e4a6f72f2830ae5ac856684ee00000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010002955993e8ff8190e388e94a6bb791fbe9c6388e5011c52cb587a4ebf05e00000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005da36075675b98f85fe90df11c1d526f6b12925da3a55a8b9c02aaac5f00000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000103174a90beadc2cffe3e81bdb7b8a576174e888809c1953175fd3015b4000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001a5eabf9e28288b7ab7e1db316148021347460cfb4314570956867d5af5000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010007c7bb63f64649098bf75f4baa588db20f445b4d20b7cca972d5d8f973ce000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000159b30cba9e2096353695b63ca5cbf566416a545a6bcb2ff2e4e672f98000000000000000000000000000000000000000000000000000000000000800d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100014fb4f05ae09288cbcf4fa7a09ca456910f6e69be5ac2c2dfc8d71d1576000000000000000000000000000000000000000000000000000000000000800e000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000a000000000000000000000000000000000000000000000000000000000000000000100004da9f3aa5e4febcc53522cb7ee6949369fde25dd79e977752b82b9fd5d000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100000ff991d5847f1e9c10c5969d0f03b34a25411ad86d5cb3e0d9c3931e0b00000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000023d652655672eafbb0adc385bd423a4a59f752a28f3dde16e74fa205e300000000000000000000000000000000000000000000000000000000000080120000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000116595cfcc96291f95d47ede2ce630f25ccbd7428f00dc7f8135fb565a00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000470e396f376539289b7975b6866914a8a0994008a02987edac8be81db700000000000000000000000000000000000000000000000000000000000080110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000495bd172e90725e6bfafe73e36a288d616d4673f5347eeb819a78bf54600000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000", + "signature": "0x", + "factoryDeps": [], + "paymasterInput": "0x", + "reservedDynamic": "0x" + }, + "delegatedCalldata": 
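The tx object above is the L2 protocol upgrade transaction itself: txType 254 is the reserved protocol-upgrade transaction type (priority operations use 255), from is the force-deployer system address 0x…8007, to is the ContractDeployer at 0x…8006, and the nonce of 25 matches the protocolVersion of 0.25.0 recorded in common.json above. The delegatedCalldata value that follows should repeat the same ABI blob once more. A few sanity assertions one could run against this file (Python; the path is the file introduced by this diff, and the four-way equality is our reading of the visible prefixes):

    import json

    with open("etc/upgrades/1728066632-protocol-defense/stage/l2Upgrade.json") as f:
        up = json.load(f)

    # All four encodings of the forced-deploy call should be byte-identical.
    assert (up["forcedDeploymentCalldata"] == up["calldata"]
            == up["tx"]["data"] == up["delegatedCalldata"])
    assert up["tx"]["txType"] == 254        # protocol-upgrade tx type (0xFE)
    assert up["tx"]["nonce"] == 25          # matches the new minor protocol version
    assert up["tx"]["to"].endswith("8006")  # ContractDeployer system contract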
"0xe9f18c1700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000019000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000003e000000000000000000000000000000000000000000000000000000000000004a00000000000000000000000000000000000000000000000000000000000000560000000000000000000000000000000000000000000000000000000000000062000000000000000000000000000000000000000000000000000000000000006e000000000000000000000000000000000000000000000000000000000000007a00000000000000000000000000000000000000000000000000000000000000860000000000000000000000000000000000000000000000000000000000000092000000000000000000000000000000000000000000000000000000000000009e00000000000000000000000000000000000000000000000000000000000000aa00000000000000000000000000000000000000000000000000000000000000b600000000000000000000000000000000000000000000000000000000000000c200000000000000000000000000000000000000000000000000000000000000ce00000000000000000000000000000000000000000000000000000000000000da00000000000000000000000000000000000000000000000000000000000000e600000000000000000000000000000000000000000000000000000000000000f200000000000000000000000000000000000000000000000000000000000000fe000000000000000000000000000000000000000000000000000000000000010a00000000000000000000000000000000000000000000000000000000000001160000000000000000000000000000000000000000000000000000000000000122000000000000000000000000000000000000000000000000000000000000012e000000000000000000000000000000000000000000000000000000000000013a000000000000000000000000000000000000000000000000000000000000014600000000000000000000000000000000000000000000000000000000000001520010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000113d6b03e34605f26aa1fc6fb8953561eb55bb5ea192a5a38f7de3053b00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000171e4e61b14feacd43cb555bffa5f194d38117132957708dcef83ac15a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000087be6181fcb16bebb0567c58b658eec345822aec1d42d471e84f758b8500000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000bd553a916fcda3726f7b6b3ccfc17887166982915ced63abc78ba43b66000000000000000000000000000000000000000000000000000000000000000700000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000f1b5f8dd50a00b502d2663746a49a81a01857b6ee1e1b38c9959142b29900000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd00000000000000000000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005d05a277543946914759aa4a6c403604b828f80d00b900c669c3d224e100000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000d97de8c14cd36b1ce06cd7f44a09f6093ec8eb4041629c0fc2116d0c7300000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100006f0f209c9e6d06b1327db1257b15fa7a8b9864ee5ccd12cd3f8bc40ac900000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000039785a8e0d342a49b6b6c6e156801b816434d93bee85d33f56d56b4f9a00000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010004e5d52d692822d5c54ac87de3297f39be0e4a6f72f2830ae5ac856684ee00000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010002955993e8ff8190e388e94a6bb791fbe9c6388e5011c52cb587a4ebf05e00000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005da36075675b98f85fe90df11c1d526f6b12925da3a55a8b9c02aaac5f00000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000
0000001000103174a90beadc2cffe3e81bdb7b8a576174e888809c1953175fd3015b4000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001a5eabf9e28288b7ab7e1db316148021347460cfb4314570956867d5af5000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010007c7bb63f64649098bf75f4baa588db20f445b4d20b7cca972d5d8f973ce000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000159b30cba9e2096353695b63ca5cbf566416a545a6bcb2ff2e4e672f98000000000000000000000000000000000000000000000000000000000000800d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100014fb4f05ae09288cbcf4fa7a09ca456910f6e69be5ac2c2dfc8d71d1576000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100004da9f3aa5e4febcc53522cb7ee6949369fde25dd79e977752b82b9fd5d000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100000ff991d5847f1e9c10c5969d0f03b34a25411ad86d5cb3e0d9c3931e0b00000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000023d652655672eafbb0adc385bd423a4a59f752a28f3dde16e74fa205e300000000000000000000000000000000000000000000000000000000000080120000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000116595cfcc96291f95d47ede2ce630f25ccbd7428f00dc7f8135fb565a00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000470e396f376539289b7975b6866914a8a0994008a02987edac8be81db7000000000000000000000000000000
00000000000000000000000000000080110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000495bd172e90725e6bfafe73e36a288d616d4673f5347eeb819a78bf54600000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000"
+}
\ No newline at end of file
diff --git a/etc/upgrades/1728066632-protocol-defense/stage/transactions.json b/etc/upgrades/1728066632-protocol-defense/stage/transactions.json
new file mode 100644
index 00000000000..fceeb9bd407
--- /dev/null
+++ b/etc/upgrades/1728066632-protocol-defense/stage/transactions.json
@@ -0,0 +1,253 @@
+{
+  "proposeUpgradeTx": {
+    "l2ProtocolUpgradeTx": {
+      "txType": 254,
+      "from": "0x0000000000000000000000000000000000008007",
+      "to": "0x0000000000000000000000000000000000008006",
+      "gasLimit": 72000000,
+      "gasPerPubdataByteLimit": 800,
+      "maxFeePerGas": 0,
+      "maxPriorityFeePerGas": 0,
+      "paymaster": 0,
+      "nonce": 25,
+      "value": 0,
+      "reserved": [
+        0,
+        0,
+        0,
+        0
+      ],
+      "data": "0xe9f18c1700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000019000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000003e000000000000000000000000000000000000000000000000000000000000004a00000000000000000000000000000000000000000000000000000000000000560000000000000000000000000000000000000000000000000000000000000062000000000000000000000000000000000000000000000000000000000000006e000000000000000000000000000000000000000000000000000000000000007a00000000000000000000000000000000000000000000000000000000000000860000000000000000000000000000000000000000000000000000000000000092000000000000000000000000000000000000000000000000000000000000009e00000000000000000000000000000000000000000000000000000000000000aa00000000000000000000000000000000000000000000000000000000000000b600000000000000000000000000000000000000000000000000000000000000c200000000000000000000000000000000000000000000000000000000000000ce00000000000000000000000000000000000000000000000000000000000000da00000000000000000000000000000000000000000000000000000000000000e600000000000000000000000000000000000000000000000000000000000000f200000000000000000000000000000000000000000000000000000000000000fe000000000000000000000000000000000000000000000000000000000000010a00000000000000000000000000000000000000000000000000000000000001160000000000000000000000000000000000000000000000000000000000000122000000000000000000000000000000000000000000000000000000000000012e000000000000000000000000000000000000000000000000000000000000013a000000000000000000000000000000000000000000000000000000000000014600000000000000000000000000000000000000000000000000000000000001520010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000113d6b03e34605f26aa1fc6fb8953561eb
55bb5ea192a5a38f7de3053b00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000171e4e61b14feacd43cb555bffa5f194d38117132957708dcef83ac15a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000087be6181fcb16bebb0567c58b658eec345822aec1d42d471e84f758b8500000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000bd553a916fcda3726f7b6b3ccfc17887166982915ced63abc78ba43b6600000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000f1b5f8dd50a00b502d2663746a49a81a01857b6ee1e1b38c9959142b29900000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd00000000000000000000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005d05a277543946914759aa4a6c403604b828f80d00b900c669c3d224e100000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000d97de8c14cd36b1ce06cd7f44a09f6093ec8eb4041629c0fc2116d0c7300000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100006f0f209c9e6d06b1327db1257b15fa7a8b9864ee5ccd12cd3f8bc40ac900000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000039785a8e0d342a49b6b6c6e156801b816434d93bee85d33f56d56b4f9a000000000000000000000000000000000000000000000000000000000000800500000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010004e5d52d692822d5c54ac87de3297f39be0e4a6f72f2830ae5ac856684ee00000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010002955993e8ff8190e388e94a6bb791fbe9c6388e5011c52cb587a4ebf05e00000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005da36075675b98f85fe90df11c1d526f6b12925da3a55a8b9c02aaac5f00000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000103174a90beadc2cffe3e81bdb7b8a576174e888809c1953175fd3015b4000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001a5eabf9e28288b7ab7e1db316148021347460cfb4314570956867d5af5000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010007c7bb63f64649098bf75f4baa588db20f445b4d20b7cca972d5d8f973ce000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000159b30cba9e2096353695b63ca5cbf566416a545a6bcb2ff2e4e672f98000000000000000000000000000000000000000000000000000000000000800d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100014fb4f05ae09288cbcf4fa7a09ca456910f6e69be5ac2c2dfc8d71d1576000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100004da9f3aa5e4febcc53522cb7ee6949369fde25dd79e977752b82b9fd5d000000000000000000000000000000000000000000000000000000000000800f00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100000ff991d5847f1e9c10c5969d0f03b34a25411ad86d5cb3e0d9c3931e0b00000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000023d652655672eafbb0adc385bd423a4a59f752a28f3dde16e74fa205e300000000000000000000000000000000000000000000000000000000000080120000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000116595cfcc96291f95d47ede2ce630f25ccbd7428f00dc7f8135fb565a00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000470e396f376539289b7975b6866914a8a0994008a02987edac8be81db700000000000000000000000000000000000000000000000000000000000080110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000495bd172e90725e6bfafe73e36a288d616d4673f5347eeb819a78bf54600000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000",
+      "signature": "0x",
+      "factoryDeps": [],
+      "paymasterInput": "0x",
+      "reservedDynamic": "0x"
+    },
+    "bootloaderHash": "0x010008c3be57ae5800e077b6c2056d9d75ad1a7b4f0ce583407961cc6fe0b678",
+    "defaultAccountHash": "0x0100055dba11508480be023137563caec69debc85f826cb3a4b68246a7cabe30",
+    "verifier": "0x06aa7a7B07108F7C5539645e32DD5c21cBF9EB66",
+    "verifierParams": {
+      "recursionNodeLevelVkHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+      "recursionLeafLevelVkHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
+      "recursionCircuitsSetVksHash": "0x0000000000000000000000000000000000000000000000000000000000000000"
+    },
+    "l1ContractsUpgradeCalldata": "0x",
+    "postUpgradeCalldata": "0x",
+    "upgradeTimestamp": {
+      "type": "BigNumber",
+      "hex": "0x6704ae40"
+    },
+    "factoryDeps": [],
+    "newProtocolVersion": 107374182400,
+    "newAllowList": "0x0000000000000000000000000000000000000000"
+  },
+  "l1upgradeCalldata":
"0x08284e57000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000001800000000000000000000000000000000000000000000000000000000000001ac0010008c3be57ae5800e077b6c2056d9d75ad1a7b4f0ce583407961cc6fe0b6780100055dba11508480be023137563caec69debc85f826cb3a4b68246a7cabe3000000000000000000000000006aa7a7b07108f7c5539645e32dd5c21cbf9eb660000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001ae00000000000000000000000000000000000000000000000000000000000001b00000000000000000000000000000000000000000000000000000000006704ae40000000000000000000000000000000000000000000000000000000190000000000000000000000000000000000000000000000000000000000000000000000fe0000000000000000000000000000000000000000000000000000000000008007000000000000000000000000000000000000000000000000000000000000800600000000000000000000000000000000000000000000000000000000044aa2000000000000000000000000000000000000000000000000000000000000000320000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001900000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000026000000000000000000000000000000000000000000000000000000000000018c000000000000000000000000000000000000000000000000000000000000018e0000000000000000000000000000000000000000000000000000000000000190000000000000000000000000000000000000000000000000000000000000019200000000000000000000000000000000000000000000000000000000000001624e9f18c1700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000019000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000003e000000000000000000000000000000000000000000000000000000000000004a00000000000000000000000000000000000000000000000000000000000000560000000000000000000000000000000000000000000000000000000000000062000000000000000000000000000000000000000000000000000000000000006e000000000000000000000000000000000000000000000000000000000000007a00000000000000000000000000000000000000000000000000000000000000860000000000000000000000000000000000000000000000000000000000000092000000000000000000000000000000000000000000000000000000000000009e00000000000000000000000000000000000000000000000000000000000000aa00000000000000000000000000000000000000000000000000000000000000b600000000000000000000000000000000000000000000000000000000000000c200000000000000000000000000000000000000000000000000000000000000ce00000000000000000000000000000000000000000000000000000000000000da00000000000000000000000000000000000000000000000000000000000000e600000000000000000000000000000000000000000000000000000000000000f200000000000000000000000000000000000000000000000000000000000000fe000000000000000000000000000000000000000000000000000000000000010a000000000000000000000000000000000000000000000000000000000000011600000000000000000
00000000000000000000000000000000000000000000122000000000000000000000000000000000000000000000000000000000000012e000000000000000000000000000000000000000000000000000000000000013a000000000000000000000000000000000000000000000000000000000000014600000000000000000000000000000000000000000000000000000000000001520010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000113d6b03e34605f26aa1fc6fb8953561eb55bb5ea192a5a38f7de3053b00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000171e4e61b14feacd43cb555bffa5f194d38117132957708dcef83ac15a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000087be6181fcb16bebb0567c58b658eec345822aec1d42d471e84f758b8500000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000bd553a916fcda3726f7b6b3ccfc17887166982915ced63abc78ba43b6600000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000f1b5f8dd50a00b502d2663746a49a81a01857b6ee1e1b38c9959142b29900000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd00000000000000000000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005d05a277543946914759aa4a6c403604b828f80d00b900c669c3d224e100000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000d97de8c14cd36b1ce06cd7f44a09f6093ec8eb4041629c0fc2116d0c730000000000000000000000000000000000000000000000000000000000008003000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100006f0f209c9e6d06b1327db1257b15fa7a8b9864ee5ccd12cd3f8bc40ac900000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000039785a8e0d342a49b6b6c6e156801b816434d93bee85d33f56d56b4f9a00000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010004e5d52d692822d5c54ac87de3297f39be0e4a6f72f2830ae5ac856684ee00000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010002955993e8ff8190e388e94a6bb791fbe9c6388e5011c52cb587a4ebf05e00000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005da36075675b98f85fe90df11c1d526f6b12925da3a55a8b9c02aaac5f00000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000103174a90beadc2cffe3e81bdb7b8a576174e888809c1953175fd3015b4000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001a5eabf9e28288b7ab7e1db316148021347460cfb4314570956867d5af5000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010007c7bb63f64649098bf75f4baa588db20f445b4d20b7cca972d5d8f973ce000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000159b30cba9e2096353695b63ca5cbf566416a545a6bcb2ff2e4e672f98000000000000000000000000000000000000000000000000000000000000800d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100014fb4f05ae09288cbcf4fa7a09ca456910f6e69be5ac2c2dfc8d71d1576000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100004da9f3aa5e4febcc53522cb7ee6949369fde25dd79e977752b82b9fd5d000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100000ff991d5847f1e9c10c5969d0f03b34a25411ad86d5cb3e0d9c3931e0b00000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000023d652655672eafbb0adc385bd423a4a59f752a28f3dde16e74fa205e300000000000000000000000000000000000000000000000000000000000080120000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000116595cfcc96291f95d47ede2ce630f25ccbd7428f00dc7f8135fb565a00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000470e396f376539289b7975b6866914a8a0994008a02987edac8be81db700000000000000000000000000000000000000000000000000000000000080110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000495bd172e90725e6bfafe73e36a288d616d4673f5347eeb819a78bf54600000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000",
+  "upgradeAddress": "0xc11816734C1961ed67a9e2A34d9956eF8d03AD72",
+  "protocolVersionSemVer": "0.25.0",
+  "packedProtocolVersion": 107374182400,
+  "upgradeTimestamp": "1728360000",
+  "stmUpgradeCalldata":
"0x2e52285100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000001800000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000019000000000000000000000000000000000000000000000000000000000000000000000060000000000000000000000000c11816734c1961ed67a9e2a34d9956ef8d03ad72000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000034000000000000000000000000000000000000000000000000000000000000009000000000000000000000000000000000000000000000000000000000000000aa00000000000000000000000000000000000000000000000000000000000000c400000000000000000000000000000000000000000000000000000000000000e80000000000000000000000000000000000000000000000000000000000000144000000000000000000000000000000000000000000000000000000000000015e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000d0e18b6810000000000000000000000000000000000000000000000000000000064bf8d6600000000000000000000000000000000000000000000000000000000a9f6d9410000000000000000000000000000000000000000000000000000000027ae4c16000000000000000000000000000000000000000000000000000000004dd18bf5000000000000000000000000000000000000000000000000000000001cc5d10300000000000000000000000000000000000000000000000000000000be6f11cf00000000000000000000000000000000000000000000000000000000e76db86500000000000000000000000000000000000000000000000000000000235d9eb50000000000000000000000000000000000000000000000000000000021f603d7000000000000000000000000000000000000000000000000000000004623c91d000000000000000000000000000000000000000000000000000000001733894500000000000000000000000000000000000000000000000000000000fc57565f00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000291de72e3400000000000000000000000000000000000000000000000000000000ea6c029c00000000000000000000000000000000000000000000000000000000cdffacc60000000000000000000000000000000000000000000000000000000052ef6b2c00000000000000000000000000000000000000000000000000000000adfca15e000000000000000000000000000000000000000000000000000000007a0ed627000000000000000000000000000000000000000000000000000000006e9960c30000000000000000000000000000000000000000000000000000000098acd7a600000000000000000000000000000000000000000000000000000000086a56f8000000000000000000000000000000000000000000000000000000003591c1a00000000000000000000000000000000000000000000000000000000079823c9a00000000000000000000000000000000000000000000000000000000d86970d800000000000000000000000000000000000000000000000000000000fd791f3c00000000000000000000000000000000000000000000000000000000e5355c75000000000000000000000000000000000000000000000000000000009d1b5a81000000000000000000000000000000000000000000000000000000007b30c8da00000000000000000000000000000000000000000000000000000000d04681560000000000000000
0000000000000000000000000000000000000000631f4bac000000000000000000000000000000000000000000000000000000000ec6b0b70000000000000000000000000000000000000000000000000000000033ce93fe0000000000000000000000000000000000000000000000000000000006d49e5b00000000000000000000000000000000000000000000000000000000f5c1182c000000000000000000000000000000000000000000000000000000005518c73b00000000000000000000000000000000000000000000000000000000db1f0bf900000000000000000000000000000000000000000000000000000000b8c2f66f00000000000000000000000000000000000000000000000000000000ef3f0bae00000000000000000000000000000000000000000000000000000000fe26699e000000000000000000000000000000000000000000000000000000003960738200000000000000000000000000000000000000000000000000000000af6a2dcd00000000000000000000000000000000000000000000000000000000a1954fc50000000000000000000000000000000000000000000000000000000046657fe90000000000000000000000000000000000000000000000000000000018e3a9410000000000000000000000000000000000000000000000000000000029b98c6700000000000000000000000000000000000000000000000000000000bd7c541200000000000000000000000000000000000000000000000000000000c3bbd2d700000000000000000000000000000000000000000000000000000000e81e0ba100000000000000000000000000000000000000000000000000000000facd743b000000000000000000000000000000000000000000000000000000009cd939e40000000000000000000000000000000000000000000000000000000056142d7a00000000000000000000000000000000000000000000000000000000b22dd78e0000000000000000000000000000000000000000000000000000000074f4d30d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000812f43dab000000000000000000000000000000000000000000000000000000006c0960f900000000000000000000000000000000000000000000000000000000b473318e00000000000000000000000000000000000000000000000000000000042901c700000000000000000000000000000000000000000000000000000000263b7f8e00000000000000000000000000000000000000000000000000000000e4948f4300000000000000000000000000000000000000000000000000000000eb67241900000000000000000000000000000000000000000000000000000000c924de350000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000008701f58c5000000000000000000000000000000000000000000000000000000006edd4f1200000000000000000000000000000000000000000000000000000000c3d93e7c000000000000000000000000000000000000000000000000000000006f497ac6000000000000000000000000000000000000000000000000000000007f61885c00000000000000000000000000000000000000000000000000000000c37533bb0000000000000000000000000000000000000000000000000000000097c09d34000000000000000000000000000000000000000000000000000000000f23da430000000000000000000000000000000000000000000000000000000000000000000000000000000090c0a0a63d7ff47bfaa1e9f8fa554dabc986504a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000
0000d0e18b6810000000000000000000000000000000000000000000000000000000064bf8d6600000000000000000000000000000000000000000000000000000000a9f6d9410000000000000000000000000000000000000000000000000000000027ae4c16000000000000000000000000000000000000000000000000000000004dd18bf5000000000000000000000000000000000000000000000000000000001cc5d10300000000000000000000000000000000000000000000000000000000be6f11cf00000000000000000000000000000000000000000000000000000000e76db86500000000000000000000000000000000000000000000000000000000235d9eb50000000000000000000000000000000000000000000000000000000021f603d7000000000000000000000000000000000000000000000000000000004623c91d000000000000000000000000000000000000000000000000000000001733894500000000000000000000000000000000000000000000000000000000fc57565f0000000000000000000000000000000000000000000000000000000000000000000000000000000081754d2e48e3e553ba6dfd193fc72b3a0c6076d900000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000291de72e3400000000000000000000000000000000000000000000000000000000ea6c029c00000000000000000000000000000000000000000000000000000000cdffacc60000000000000000000000000000000000000000000000000000000052ef6b2c00000000000000000000000000000000000000000000000000000000adfca15e000000000000000000000000000000000000000000000000000000007a0ed627000000000000000000000000000000000000000000000000000000006e9960c30000000000000000000000000000000000000000000000000000000098acd7a600000000000000000000000000000000000000000000000000000000086a56f8000000000000000000000000000000000000000000000000000000003591c1a00000000000000000000000000000000000000000000000000000000079823c9a00000000000000000000000000000000000000000000000000000000d86970d800000000000000000000000000000000000000000000000000000000fd791f3c00000000000000000000000000000000000000000000000000000000e5355c75000000000000000000000000000000000000000000000000000000009d1b5a81000000000000000000000000000000000000000000000000000000007b30c8da00000000000000000000000000000000000000000000000000000000d046815600000000000000000000000000000000000000000000000000000000631f4bac000000000000000000000000000000000000000000000000000000000ec6b0b70000000000000000000000000000000000000000000000000000000033ce93fe0000000000000000000000000000000000000000000000000000000006d49e5b00000000000000000000000000000000000000000000000000000000f5c1182c000000000000000000000000000000000000000000000000000000005518c73b00000000000000000000000000000000000000000000000000000000db1f0bf900000000000000000000000000000000000000000000000000000000b8c2f66f00000000000000000000000000000000000000000000000000000000ef3f0bae00000000000000000000000000000000000000000000000000000000fe26699e000000000000000000000000000000000000000000000000000000003960738200000000000000000000000000000000000000000000000000000000af6a2dcd00000000000000000000000000000000000000000000000000000000a1954fc50000000000000000000000000000000000000000000000000000000046657fe90000000000000000000000000000000000000000000000000000000018e3a9410000000000000000000000000000000000000000000000000000000029b98c6700000000000000000000000000000000000000000000000000000000bd7c541200000000000000000000000000000000000000000000000000000000c3bbd2d700000000000000000000000000000000000000000000000000000000e81e0ba100000000000000000000000000000000000000000000000000000000facd743b000000000000000000000000000000000000000000000000000000009cd939e40000000000000000000000
000000000000000000000000000000000056142d7a00000000000000000000000000000000000000000000000000000000b22dd78e0000000000000000000000000000000000000000000000000000000074f4d30d00000000000000000000000000000000000000000000000000000000000000000000000000000000922805cf0c00c9a19c14603529fb1a6f63861d80000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000812f43dab000000000000000000000000000000000000000000000000000000006c0960f900000000000000000000000000000000000000000000000000000000b473318e00000000000000000000000000000000000000000000000000000000042901c700000000000000000000000000000000000000000000000000000000263b7f8e00000000000000000000000000000000000000000000000000000000e4948f4300000000000000000000000000000000000000000000000000000000eb67241900000000000000000000000000000000000000000000000000000000c924de3500000000000000000000000000000000000000000000000000000000000000000000000000000000bb13642f795014e0eac2b0d52ecd5162ecb667120000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000008701f58c5000000000000000000000000000000000000000000000000000000006edd4f1200000000000000000000000000000000000000000000000000000000c3d93e7c000000000000000000000000000000000000000000000000000000006f497ac6000000000000000000000000000000000000000000000000000000007f61885c00000000000000000000000000000000000000000000000000000000c37533bb0000000000000000000000000000000000000000000000000000000097c09d34000000000000000000000000000000000000000000000000000000000f23da43000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001b4408284e57000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000001800000000000000000000000000000000000000000000000000000000000001ac0010008c3be57ae5800e077b6c2056d9d75ad1a7b4f0ce583407961cc6fe0b6780100055dba11508480be023137563caec69debc85f826cb3a4b68246a7cabe3000000000000000000000000006aa7a7b07108f7c5539645e32dd5c21cbf9eb660000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001ae00000000000000000000000000000000000000000000000000000000000001b00000000000000000000000000000000000000000000000000000000006704ae40000000000000000000000000000000000000000000000000000000190000000000000000000000000000000000000000000000000000000000000000000000fe0000000000000000000000000000000000000000000000000000000000008007000000000000000000000000000000000000000000000000000000000000800600000000000000000000000000000000000000000000000000000000044aa2000000000000000000000000000000000000000000000000000000000000000320000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001900000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000026000000000000000000000000000000000000000000000000000000000000018c000000000000000000000000000000000000000000000000000000000000018e0000000000000000000000000000000000000000000000000000000000000190000000000000000000000000000000000000000000000000000000000000019200000000000000000000000000000000000000000000000000000000000001624e9f18c1700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000019000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000003e000000000000000000000000000000000000000000000000000000000000004a00000000000000000000000000000000000000000000000000000000000000560000000000000000000000000000000000000000000000000000000000000062000000000000000000000000000000000000000000000000000000000000006e000000000000000000000000000000000000000000000000000000000000007a00000000000000000000000000000000000000000000000000000000000000860000000000000000000000000000000000000000000000000000000000000092000000000000000000000000000000000000000000000000000000000000009e00000000000000000000000000000000000000000000000000000000000000aa00000000000000000000000000000000000000000000000000000000000000b600000000000000000000000000000000000000000000000000000000000000c200000000000000000000000000000000000000000000000000000000000000ce00000000000000000000000000000000000000000000000000000000000000da00000000000000000000000000000000000000000000000000000000000000e600000000000000000000000000000000000000000000000000000000000000f200000000000000000000000000000000000000000000000000000000000000fe000000000000000000000000000000000000000000000000000000000000010a00000000000000000000000000000000000000000000000000000000000001160000000000000000000000000000000000000000000000000000000000000122000000000000000000000000000000000000000000000000000000000000012e000000000000000000000000000000000000000000000000000000000000013a000000000000000000000000000000000000000000000000000000000000014600000000000000000000000000000000000000000000000000000000000001520010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000113d6b03e34605f26aa1fc6fb8953561eb55bb5ea192a5a38f7de3053b00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000171e4e61b14feacd43cb555bffa5f194d38117132957708dcef83ac15a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000087be6181fcb16bebb0567c58b658eec345822aec1d42d471e84f758b85000000000000000000000000000000000000000000000000000000000000000600000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000bd553a916fcda3726f7b6b3ccfc17887166982915ced63abc78ba43b6600000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000f1b5f8dd50a00b502d2663746a49a81a01857b6ee1e1b38c9959142b29900000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd00000000000000000000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005d05a277543946914759aa4a6c403604b828f80d00b900c669c3d224e100000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000d97de8c14cd36b1ce06cd7f44a09f6093ec8eb4041629c0fc2116d0c7300000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100006f0f209c9e6d06b1327db1257b15fa7a8b9864ee5ccd12cd3f8bc40ac900000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000039785a8e0d342a49b6b6c6e156801b816434d93bee85d33f56d56b4f9a00000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010004e5d52d692822d5c54ac87de3297f39be0e4a6f72f2830ae5ac856684ee00000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010002955993e8ff8190e388e94a6bb791fbe9c6388e5011c52cb587a4ebf05e000000000000000000000000000000000000000000000000000000000000800800000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005da36075675b98f85fe90df11c1d526f6b12925da3a55a8b9c02aaac5f00000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000103174a90beadc2cffe3e81bdb7b8a576174e888809c1953175fd3015b4000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001a5eabf9e28288b7ab7e1db316148021347460cfb4314570956867d5af5000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010007c7bb63f64649098bf75f4baa588db20f445b4d20b7cca972d5d8f973ce000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000159b30cba9e2096353695b63ca5cbf566416a545a6bcb2ff2e4e672f98000000000000000000000000000000000000000000000000000000000000800d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100014fb4f05ae09288cbcf4fa7a09ca456910f6e69be5ac2c2dfc8d71d1576000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100004da9f3aa5e4febcc53522cb7ee6949369fde25dd79e977752b82b9fd5d000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100000ff991d5847f1e9c10c5969d0f03b34a25411ad86d5cb3e0d9c3931e0b00000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000023d652655672eafbb0adc385bd423a4a59f752a28f3dde16e74fa205e300000000000000000000000000000000000000000000000000000000000080120000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000
00000000000000000000000000000000000000010000116595cfcc96291f95d47ede2ce630f25ccbd7428f00dc7f8135fb565a00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000470e396f376539289b7975b6866914a8a0994008a02987edac8be81db700000000000000000000000000000000000000000000000000000000000080110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000495bd172e90725e6bfafe73e36a288d616d4673f5347eeb819a78bf54600000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "chainAdminUpgradeCalldata": 
"0x69340beb00000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000200000000000000000000000006d6e010a2680e2e5a3b097ce411528b36d880ef60000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000033c4fc57565f000000000000000000000000000000000000000000000000000000180000000200000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000060000000000000000000000000c11816734c1961ed67a9e2a34d9956ef8d03ad72000000000000000000000000000000000000000000000000000000000000180000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000034000000000000000000000000000000000000000000000000000000000000009000000000000000000000000000000000000000000000000000000000000000aa00000000000000000000000000000000000000000000000000000000000000c400000000000000000000000000000000000000000000000000000000000000e80000000000000000000000000000000000000000000000000000000000000144000000000000000000000000000000000000000000000000000000000000015e00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000d0e18b6810000000000000000000000000000000000000000000000000000000064bf8d6600000000000000000000000000000000000000000000000000000000a9f6d9410000000000000000000000000000000000000000000000000000000027ae4c16000000000000000000000000000000000000000000000000000000004dd18bf5000000000000000000000000000000000000000000000000000000001cc5d10300000000000000000000000000000000000000000000000000000000be6f11cf00000000000000000000000000000000000000000000000000000000e76db86500000000000000000000000000000000000000000000000000000000235d9eb50000000000000000000000000000000000000000000000000000000021f603d7000000000000000000000000000000000000000000000000000000004623c91d000000000000000000000000000000000000000000000000000000001733894500000000000000000000000000000000000000000000000000000000fc57565f00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000291de72e3400000000000000000000000000000000000000000000000000000000ea6c029c00000000000000000000000000000000000000000000000000000000cdffacc60000000000000000000000000000000000000000000000000000000052ef6b2c00000000000000000000000000000000000000000000000000000000adfca15e000000000000000000000000000000000000000000000000000000007a0ed627000000000000000000000000000000000000000000000000000000006e9960c30000000000000000000000000000000000000000000000000000000098acd7a600000000000000000000000000000000000000000000000000000000086a56f8000000000000000000000000000000000000000000000000000000003591c1a00000000000000000000000000000000000000000000000000000000079823c9a00000000
000000000000000000000000000000000000000000000000d86970d800000000000000000000000000000000000000000000000000000000fd791f3c00000000000000000000000000000000000000000000000000000000e5355c75000000000000000000000000000000000000000000000000000000009d1b5a81000000000000000000000000000000000000000000000000000000007b30c8da00000000000000000000000000000000000000000000000000000000d046815600000000000000000000000000000000000000000000000000000000631f4bac000000000000000000000000000000000000000000000000000000000ec6b0b70000000000000000000000000000000000000000000000000000000033ce93fe0000000000000000000000000000000000000000000000000000000006d49e5b00000000000000000000000000000000000000000000000000000000f5c1182c000000000000000000000000000000000000000000000000000000005518c73b00000000000000000000000000000000000000000000000000000000db1f0bf900000000000000000000000000000000000000000000000000000000b8c2f66f00000000000000000000000000000000000000000000000000000000ef3f0bae00000000000000000000000000000000000000000000000000000000fe26699e000000000000000000000000000000000000000000000000000000003960738200000000000000000000000000000000000000000000000000000000af6a2dcd00000000000000000000000000000000000000000000000000000000a1954fc50000000000000000000000000000000000000000000000000000000046657fe90000000000000000000000000000000000000000000000000000000018e3a9410000000000000000000000000000000000000000000000000000000029b98c6700000000000000000000000000000000000000000000000000000000bd7c541200000000000000000000000000000000000000000000000000000000c3bbd2d700000000000000000000000000000000000000000000000000000000e81e0ba100000000000000000000000000000000000000000000000000000000facd743b000000000000000000000000000000000000000000000000000000009cd939e40000000000000000000000000000000000000000000000000000000056142d7a00000000000000000000000000000000000000000000000000000000b22dd78e0000000000000000000000000000000000000000000000000000000074f4d30d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000812f43dab000000000000000000000000000000000000000000000000000000006c0960f900000000000000000000000000000000000000000000000000000000b473318e00000000000000000000000000000000000000000000000000000000042901c700000000000000000000000000000000000000000000000000000000263b7f8e00000000000000000000000000000000000000000000000000000000e4948f4300000000000000000000000000000000000000000000000000000000eb67241900000000000000000000000000000000000000000000000000000000c924de350000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000008701f58c5000000000000000000000000000000000000000000000000000000006edd4f1200000000000000000000000000000000000000000000000000000000c3d93e7c000000000000000000000000000000000000000000000000000000006f497ac6000000000000000000000000000000000000000000000000000000007f61885c00000000000000000000000000000000000000000000000000000000c37533bb0000000000000000000000000000000000000000000000000000000097c09d340000000000000000000000000000000000000000000
00000000000000f23da430000000000000000000000000000000000000000000000000000000000000000000000000000000090c0a0a63d7ff47bfaa1e9f8fa554dabc986504a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000d0e18b6810000000000000000000000000000000000000000000000000000000064bf8d6600000000000000000000000000000000000000000000000000000000a9f6d9410000000000000000000000000000000000000000000000000000000027ae4c16000000000000000000000000000000000000000000000000000000004dd18bf5000000000000000000000000000000000000000000000000000000001cc5d10300000000000000000000000000000000000000000000000000000000be6f11cf00000000000000000000000000000000000000000000000000000000e76db86500000000000000000000000000000000000000000000000000000000235d9eb50000000000000000000000000000000000000000000000000000000021f603d7000000000000000000000000000000000000000000000000000000004623c91d000000000000000000000000000000000000000000000000000000001733894500000000000000000000000000000000000000000000000000000000fc57565f0000000000000000000000000000000000000000000000000000000000000000000000000000000081754d2e48e3e553ba6dfd193fc72b3a0c6076d900000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000291de72e3400000000000000000000000000000000000000000000000000000000ea6c029c00000000000000000000000000000000000000000000000000000000cdffacc60000000000000000000000000000000000000000000000000000000052ef6b2c00000000000000000000000000000000000000000000000000000000adfca15e000000000000000000000000000000000000000000000000000000007a0ed627000000000000000000000000000000000000000000000000000000006e9960c30000000000000000000000000000000000000000000000000000000098acd7a600000000000000000000000000000000000000000000000000000000086a56f8000000000000000000000000000000000000000000000000000000003591c1a00000000000000000000000000000000000000000000000000000000079823c9a00000000000000000000000000000000000000000000000000000000d86970d800000000000000000000000000000000000000000000000000000000fd791f3c00000000000000000000000000000000000000000000000000000000e5355c75000000000000000000000000000000000000000000000000000000009d1b5a81000000000000000000000000000000000000000000000000000000007b30c8da00000000000000000000000000000000000000000000000000000000d046815600000000000000000000000000000000000000000000000000000000631f4bac000000000000000000000000000000000000000000000000000000000ec6b0b70000000000000000000000000000000000000000000000000000000033ce93fe0000000000000000000000000000000000000000000000000000000006d49e5b00000000000000000000000000000000000000000000000000000000f5c1182c000000000000000000000000000000000000000000000000000000005518c73b00000000000000000000000000000000000000000000000000000000db1f0bf900000000000000000000000000000000000000000000000000000000b8c2f66f00000000000000000000000000000000000000000000000000000000ef3f0bae00000000000000000000000000000000000000000000000000000000fe26699e000000000000000000000000000000000000000000000000000000003960738200000000000000000000000000000000000000000000000000000000af6a2dcd00000000000000000000000000000000000000000000000000000000a1954fc50000000000000000000000000000000000000000000000000000000046657fe90000000000000000000000000000000000000000000000000000000018e3a94100000000000000
00000000000000000000000000000000000000000029b98c6700000000000000000000000000000000000000000000000000000000bd7c541200000000000000000000000000000000000000000000000000000000c3bbd2d700000000000000000000000000000000000000000000000000000000e81e0ba100000000000000000000000000000000000000000000000000000000facd743b000000000000000000000000000000000000000000000000000000009cd939e40000000000000000000000000000000000000000000000000000000056142d7a00000000000000000000000000000000000000000000000000000000b22dd78e0000000000000000000000000000000000000000000000000000000074f4d30d00000000000000000000000000000000000000000000000000000000000000000000000000000000922805cf0c00c9a19c14603529fb1a6f63861d80000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000812f43dab000000000000000000000000000000000000000000000000000000006c0960f900000000000000000000000000000000000000000000000000000000b473318e00000000000000000000000000000000000000000000000000000000042901c700000000000000000000000000000000000000000000000000000000263b7f8e00000000000000000000000000000000000000000000000000000000e4948f4300000000000000000000000000000000000000000000000000000000eb67241900000000000000000000000000000000000000000000000000000000c924de3500000000000000000000000000000000000000000000000000000000000000000000000000000000bb13642f795014e0eac2b0d52ecd5162ecb667120000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000008701f58c5000000000000000000000000000000000000000000000000000000006edd4f1200000000000000000000000000000000000000000000000000000000c3d93e7c000000000000000000000000000000000000000000000000000000006f497ac6000000000000000000000000000000000000000000000000000000007f61885c00000000000000000000000000000000000000000000000000000000c37533bb0000000000000000000000000000000000000000000000000000000097c09d34000000000000000000000000000000000000000000000000000000000f23da43000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001b4408284e57000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000001800000000000000000000000000000000000000000000000000000000000001ac0010008c3be57ae5800e077b6c2056d9d75ad1a7b4f0ce583407961cc6fe0b6780100055dba11508480be023137563caec69debc85f826cb3a4b68246a7cabe3000000000000000000000000006aa7a7b07108f7c5539645e32dd5c21cbf9eb660000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001ae00000000000000000000000000000000000000000000000000000000000001b00000000000000000000000000000000000000000000000000000000006704ae40000000000000000000000000000000000000000000000000000000190000000000000000000000000000000000000000000000000000000000000000000000fe0000000000000000000000000000000000000000000000000000000000008007000000000000000000000000000000000000000000000000000000000000800600000000000000000000000000000000000000000000000000000000044aa20000000000000000000000000000000000000000000000000000000000000003200000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001900000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000026000000000000000000000000000000000000000000000000000000000000018c000000000000000000000000000000000000000000000000000000000000018e0000000000000000000000000000000000000000000000000000000000000190000000000000000000000000000000000000000000000000000000000000019200000000000000000000000000000000000000000000000000000000000001624e9f18c1700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000019000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000003e000000000000000000000000000000000000000000000000000000000000004a00000000000000000000000000000000000000000000000000000000000000560000000000000000000000000000000000000000000000000000000000000062000000000000000000000000000000000000000000000000000000000000006e000000000000000000000000000000000000000000000000000000000000007a00000000000000000000000000000000000000000000000000000000000000860000000000000000000000000000000000000000000000000000000000000092000000000000000000000000000000000000000000000000000000000000009e00000000000000000000000000000000000000000000000000000000000000aa00000000000000000000000000000000000000000000000000000000000000b600000000000000000000000000000000000000000000000000000000000000c200000000000000000000000000000000000000000000000000000000000000ce00000000000000000000000000000000000000000000000000000000000000da00000000000000000000000000000000000000000000000000000000000000e600000000000000000000000000000000000000000000000000000000000000f200000000000000000000000000000000000000000000000000000000000000fe000000000000000000000000000000000000000000000000000000000000010a00000000000000000000000000000000000000000000000000000000000001160000000000000000000000000000000000000000000000000000000000000122000000000000000000000000000000000000000000000000000000000000012e000000000000000000000000000000000000000000000000000000000000013a000000000000000000000000000000000000000000000000000000000000014600000000000000000000000000000000000000000000000000000000000001520010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000113d6b03e34605f26aa1fc6fb8953561eb55bb5ea192a5a38f7de3053b00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000171e4e61b14feacd43cb555bffa5f194d38117132957708dcef83ac15a0000000000000000000000000000000000000000000000000000000000000002000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000087be6181fcb16bebb0567c58b658eec345822aec1d42d471e84f758b8500000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000bd553a916fcda3726f7b6b3ccfc17887166982915ced63abc78ba43b6600000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000f1b5f8dd50a00b502d2663746a49a81a01857b6ee1e1b38c9959142b29900000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd00000000000000000000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005d05a277543946914759aa4a6c403604b828f80d00b900c669c3d224e100000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000d97de8c14cd36b1ce06cd7f44a09f6093ec8eb4041629c0fc2116d0c7300000000000000000000000000000000000000000000000000000000000080030000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100006f0f209c9e6d06b1327db1257b15fa7a8b9864ee5ccd12cd3f8bc40ac900000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000039785a8e0d342a49b6b6c6e156801b816434d93bee85d33f56d56b4f9a00000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010004e5d52d692822d5c54ac87de3297f39be0e4a6f72f2830ae5ac856684ee0000000000000000000000000000000000000000000000000000000000008006000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010002955993e8ff8190e388e94a6bb791fbe9c6388e5011c52cb587a4ebf05e00000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005da36075675b98f85fe90df11c1d526f6b12925da3a55a8b9c02aaac5f00000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000103174a90beadc2cffe3e81bdb7b8a576174e888809c1953175fd3015b4000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001a5eabf9e28288b7ab7e1db316148021347460cfb4314570956867d5af5000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010007c7bb63f64649098bf75f4baa588db20f445b4d20b7cca972d5d8f973ce000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000159b30cba9e2096353695b63ca5cbf566416a545a6bcb2ff2e4e672f98000000000000000000000000000000000000000000000000000000000000800d0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100014fb4f05ae09288cbcf4fa7a09ca456910f6e69be5ac2c2dfc8d71d1576000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100004da9f3aa5e4febcc53522cb7ee6949369fde25dd79e977752b82b9fd5d000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100000ff991d5847f1e9c10c5969d0f03b34a25411ad86d5cb3e0d9c3931e0b00000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000
000000000000000000000000000000000000000000000001000023d652655672eafbb0adc385bd423a4a59f752a28f3dde16e74fa205e300000000000000000000000000000000000000000000000000000000000080120000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000116595cfcc96291f95d47ede2ce630f25ccbd7428f00dc7f8135fb565a00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000470e396f376539289b7975b6866914a8a0994008a02987edac8be81db700000000000000000000000000000000000000000000000000000000000080110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000495bd172e90725e6bfafe73e36a288d616d4673f5347eeb819a78bf54600000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000", + "diamondCut": { + "facetCuts": [ + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0x0e18b681", + "0x64bf8d66", + "0xa9f6d941", + "0x27ae4c16", + "0x4dd18bf5", + "0x1cc5d103", + "0xbe6f11cf", + "0xe76db865", + "0x235d9eb5", + "0x21f603d7", + "0x4623c91d", + "0x17338945", + "0xfc57565f" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0x1de72e34", + "0xea6c029c", + "0xcdffacc6", + "0x52ef6b2c", + "0xadfca15e", + "0x7a0ed627", + "0x6e9960c3", + "0x98acd7a6", + "0x086a56f8", + "0x3591c1a0", + "0x79823c9a", + "0xd86970d8", + "0xfd791f3c", + "0xe5355c75", + "0x9d1b5a81", + "0x7b30c8da", + "0xd0468156", + "0x631f4bac", + "0x0ec6b0b7", + "0x33ce93fe", + "0x06d49e5b", + "0xf5c1182c", + "0x5518c73b", + "0xdb1f0bf9", + "0xb8c2f66f", + "0xef3f0bae", + "0xfe26699e", + "0x39607382", + "0xaf6a2dcd", + "0xa1954fc5", + "0x46657fe9", + "0x18e3a941", + "0x29b98c67", + "0xbd7c5412", + "0xc3bbd2d7", + "0xe81e0ba1", + "0xfacd743b", + "0x9cd939e4", + "0x56142d7a", + "0xb22dd78e", + "0x74f4d30d" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0x12f43dab", + "0x6c0960f9", + "0xb473318e", + "0x042901c7", + "0x263b7f8e", + "0xe4948f43", + "0xeb672419", + "0xc924de35" 
+ ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x0000000000000000000000000000000000000000", + "selectors": [ + "0x701f58c5", + "0x6edd4f12", + "0xc3d93e7c", + "0x6f497ac6", + "0x7f61885c", + "0xc37533bb", + "0x97c09d34", + "0x0f23da43" + ], + "action": 2, + "isFreezable": false + }, + { + "facet": "0x90C0A0a63d7ff47BfAA1e9F8fa554dabc986504a", + "selectors": [ + "0x0e18b681", + "0x64bf8d66", + "0xa9f6d941", + "0x27ae4c16", + "0x4dd18bf5", + "0x1cc5d103", + "0xbe6f11cf", + "0xe76db865", + "0x235d9eb5", + "0x21f603d7", + "0x4623c91d", + "0x17338945", + "0xfc57565f" + ], + "action": 0, + "isFreezable": false + }, + { + "facet": "0x81754d2E48e3e553ba6Dfd193FC72B3A0c6076d9", + "selectors": [ + "0x1de72e34", + "0xea6c029c", + "0xcdffacc6", + "0x52ef6b2c", + "0xadfca15e", + "0x7a0ed627", + "0x6e9960c3", + "0x98acd7a6", + "0x086a56f8", + "0x3591c1a0", + "0x79823c9a", + "0xd86970d8", + "0xfd791f3c", + "0xe5355c75", + "0x9d1b5a81", + "0x7b30c8da", + "0xd0468156", + "0x631f4bac", + "0x0ec6b0b7", + "0x33ce93fe", + "0x06d49e5b", + "0xf5c1182c", + "0x5518c73b", + "0xdb1f0bf9", + "0xb8c2f66f", + "0xef3f0bae", + "0xfe26699e", + "0x39607382", + "0xaf6a2dcd", + "0xa1954fc5", + "0x46657fe9", + "0x18e3a941", + "0x29b98c67", + "0xbd7c5412", + "0xc3bbd2d7", + "0xe81e0ba1", + "0xfacd743b", + "0x9cd939e4", + "0x56142d7a", + "0xb22dd78e", + "0x74f4d30d" + ], + "action": 0, + "isFreezable": false + }, + { + "facet": "0x922805Cf0C00C9A19C14603529Fb1a6f63861d80", + "selectors": [ + "0x12f43dab", + "0x6c0960f9", + "0xb473318e", + "0x042901c7", + "0x263b7f8e", + "0xe4948f43", + "0xeb672419", + "0xc924de35" + ], + "action": 0, + "isFreezable": true + }, + { + "facet": "0xBB13642F795014E0EAC2b0d52ECD5162ECb66712", + "selectors": [ + "0x701f58c5", + "0x6edd4f12", + "0xc3d93e7c", + "0x6f497ac6", + "0x7f61885c", + "0xc37533bb", + "0x97c09d34", + "0x0f23da43" + ], + "action": 0, + "isFreezable": true + } + ], + "initAddress": "0xc11816734C1961ed67a9e2A34d9956eF8d03AD72", + "initCalldata": 
"0x08284e57000000000000000000000000000000000000000000000000000000000000002000000000000000000000000000000000000000000000000000000000000001800000000000000000000000000000000000000000000000000000000000001ac0010008c3be57ae5800e077b6c2056d9d75ad1a7b4f0ce583407961cc6fe0b6780100055dba11508480be023137563caec69debc85f826cb3a4b68246a7cabe3000000000000000000000000006aa7a7b07108f7c5539645e32dd5c21cbf9eb660000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001ae00000000000000000000000000000000000000000000000000000000000001b00000000000000000000000000000000000000000000000000000000006704ae40000000000000000000000000000000000000000000000000000000190000000000000000000000000000000000000000000000000000000000000000000000fe0000000000000000000000000000000000000000000000000000000000008007000000000000000000000000000000000000000000000000000000000000800600000000000000000000000000000000000000000000000000000000044aa2000000000000000000000000000000000000000000000000000000000000000320000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001900000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000026000000000000000000000000000000000000000000000000000000000000018c000000000000000000000000000000000000000000000000000000000000018e0000000000000000000000000000000000000000000000000000000000000190000000000000000000000000000000000000000000000000000000000000019200000000000000000000000000000000000000000000000000000000000001624e9f18c1700000000000000000000000000000000000000000000000000000000000000200000000000000000000000000000000000000000000000000000000000000019000000000000000000000000000000000000000000000000000000000000032000000000000000000000000000000000000000000000000000000000000003e000000000000000000000000000000000000000000000000000000000000004a00000000000000000000000000000000000000000000000000000000000000560000000000000000000000000000000000000000000000000000000000000062000000000000000000000000000000000000000000000000000000000000006e000000000000000000000000000000000000000000000000000000000000007a00000000000000000000000000000000000000000000000000000000000000860000000000000000000000000000000000000000000000000000000000000092000000000000000000000000000000000000000000000000000000000000009e00000000000000000000000000000000000000000000000000000000000000aa00000000000000000000000000000000000000000000000000000000000000b600000000000000000000000000000000000000000000000000000000000000c200000000000000000000000000000000000000000000000000000000000000ce00000000000000000000000000000000000000000000000000000000000000da00000000000000000000000000000000000000000000000000000000000000e600000000000000000000000000000000000000000000000000000000000000f200000000000000000000000000000000000000000000000000000000000000fe000000000000000000000000000000000000000000000000000000000000010a000000000000000000000000000000000000000000000000000000000000011600000000000000000
00000000000000000000000000000000000000000000122000000000000000000000000000000000000000000000000000000000000012e000000000000000000000000000000000000000000000000000000000000013a000000000000000000000000000000000000000000000000000000000000014600000000000000000000000000000000000000000000000000000000000001520010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000113d6b03e34605f26aa1fc6fb8953561eb55bb5ea192a5a38f7de3053b00000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000171e4e61b14feacd43cb555bffa5f194d38117132957708dcef83ac15a00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000087be6181fcb16bebb0567c58b658eec345822aec1d42d471e84f758b8500000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000bd553a916fcda3726f7b6b3ccfc17887166982915ced63abc78ba43b6600000000000000000000000000000000000000000000000000000000000000070000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000f1b5f8dd50a00b502d2663746a49a81a01857b6ee1e1b38c9959142b29900000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000078f32964c38fbd138a0369f4723f07ac6f4919c45ef738c18bf874ccd00000000000000000000000000000000000000000000000000000000000080010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005d05a277543946914759aa4a6c403604b828f80d00b900c669c3d224e100000000000000000000000000000000000000000000000000000000000080020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000d97de8c14cd36b1ce06cd7f44a09f6093ec8eb4041629c0fc2116d0c730000000000000000000000000000000000000000000000000000000000008003000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100006f0f209c9e6d06b1327db1257b15fa7a8b9864ee5ccd12cd3f8bc40ac900000000000000000000000000000000000000000000000000000000000080040000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000039785a8e0d342a49b6b6c6e156801b816434d93bee85d33f56d56b4f9a00000000000000000000000000000000000000000000000000000000000080050000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010004e5d52d692822d5c54ac87de3297f39be0e4a6f72f2830ae5ac856684ee00000000000000000000000000000000000000000000000000000000000080060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010002955993e8ff8190e388e94a6bb791fbe9c6388e5011c52cb587a4ebf05e00000000000000000000000000000000000000000000000000000000000080080000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100005da36075675b98f85fe90df11c1d526f6b12925da3a55a8b9c02aaac5f00000000000000000000000000000000000000000000000000000000000080090000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000103174a90beadc2cffe3e81bdb7b8a576174e888809c1953175fd3015b4000000000000000000000000000000000000000000000000000000000000800a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010001a5eabf9e28288b7ab7e1db316148021347460cfb4314570956867d5af5000000000000000000000000000000000000000000000000000000000000800b0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010007c7bb63f64649098bf75f4baa588db20f445b4d20b7cca972d5d8f973ce000000000000000000000000000000000000000000000000000000000000800c0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000159b30cba9e2096353695b63ca5cbf566416a545a6bcb2ff2e4e672f98000000000000000000000000000000000000000000000000000000000000800d000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
0000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100014fb4f05ae09288cbcf4fa7a09ca456910f6e69be5ac2c2dfc8d71d1576000000000000000000000000000000000000000000000000000000000000800e0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100004da9f3aa5e4febcc53522cb7ee6949369fde25dd79e977752b82b9fd5d000000000000000000000000000000000000000000000000000000000000800f0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000000100000ff991d5847f1e9c10c5969d0f03b34a25411ad86d5cb3e0d9c3931e0b00000000000000000000000000000000000000000000000000000000000080100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000000001000023d652655672eafbb0adc385bd423a4a59f752a28f3dde16e74fa205e300000000000000000000000000000000000000000000000000000000000080120000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000116595cfcc96291f95d47ede2ce630f25ccbd7428f00dc7f8135fb565a00000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000470e396f376539289b7975b6866914a8a0994008a02987edac8be81db700000000000000000000000000000000000000000000000000000000000080110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000010000495bd172e90725e6bfafe73e36a288d616d4673f5347eeb819a78bf54600000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000a00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" + } +} \ No newline at end of file diff --git a/etc/utils/src/index.ts b/etc/utils/src/index.ts index 0e2d5e25eda..6246a209c84 100644 --- a/etc/utils/src/index.ts +++ b/etc/utils/src/index.ts @@ -26,6 +26,7 @@ const IGNORED_DIRS = [ 'cache-zk', // Ignore 
directories with OZ and forge submodules. 'contracts/l1-contracts/lib', + 'contracts/lib', 'era-observability' ]; const IGNORED_FILES = ['KeysWithPlonkVerifier.sol', 'TokenInit.sol', '.tslintrc.js', '.prettierrc.js']; diff --git a/flake.nix b/flake.nix index ef618816f9c..8c08e880910 100644 --- a/flake.nix +++ b/flake.nix @@ -47,7 +47,7 @@ packages = { # to ease potential cross-compilation, the overlay is used inherit (appliedOverlay.zksync-era) zksync tee_prover container-tee-prover-azure container-tee-prover-dcap; - default = appliedOverlay.zksync-era.zksync; + default = appliedOverlay.zksync-era.tee_prover; }; devShells.default = appliedOverlay.zksync-era.devShell; @@ -91,7 +91,7 @@ ./Cargo.toml ./core ./prover - ./zk_toolbox + ./zkstack_cli ./.github/release-please/manifest.json ]; }; @@ -107,10 +107,6 @@ strictDeps = true; inherit hardeningEnable; }; - - cargoArtifacts = craneLib.buildDepsOnly (commonArgs // { - pname = "zksync-era-workspace"; - }); in { zksync-era = rec { @@ -120,12 +116,11 @@ }; zksync = pkgs.callPackage ./etc/nix/zksync.nix { - inherit cargoArtifacts; inherit craneLib; inherit commonArgs; }; + tee_prover = pkgs.callPackage ./etc/nix/tee_prover.nix { - inherit cargoArtifacts; inherit craneLib; inherit commonArgs; }; diff --git a/infrastructure/protocol-upgrade/README.md b/infrastructure/protocol-upgrade/README.md index da5ee313dab..c7998b96123 100644 --- a/infrastructure/protocol-upgrade/README.md +++ b/infrastructure/protocol-upgrade/README.md @@ -25,13 +25,15 @@ If not provided as arguments, the tool can retrieve certain values from environm 2. `l2rpc` - `API_WEB3_JSON_RPC_HTTP_URL` 3. `create2-address` - `CONTRACTS_CREATE2_FACTORY_ADDR` 4. `zksync-address` - `CONTRACTS_DIAMOND_PROXY_ADDR` -5. `nonce` - Taken from the node via `l1rpc` -6. `gas-price` - Taken from the node via `l1rpc` -7. `environment` - By default, set to `localhost`. Always specify it explicitly. Possible values: `localhost`, +5. `upgrade-address` - `CONTRACTS_DEFAULT_UPGRADE_ADDR` +6. `l2-upgrader-address` - `CONTRACTS_L2_DEFAULT_UPGRADE_ADDR` +7. `nonce` - Taken from the node via `l1rpc` +8. `gas-price` - Taken from the node via `l1rpc` +9. `environment` - By default, set to `localhost`. Always specify it explicitly. Possible values: `localhost`, `testnet2`, `stage2`, `mainnet2`. Each upgrade on different environments is performed separately since the contract addresses differ between environments. -8. `private-key` - If not specified, the default key from the default mnemonic will be used. Always specify it - explicitly. +10. `private-key` - If not specified, the default key from the default mnemonic will be used. Always specify it + explicitly. 
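The fallback order documented in the list above is: explicit CLI flag first, then the environment variable, then (for `nonce` and `gas-price`) the node reached via `l1rpc`. A minimal sketch of that resolution for `zksync-address`; the helper name is illustrative only, since the real tool wires this through commander options:

```typescript
// Hypothetical helper illustrating the documented fallback:
// CLI flag -> CONTRACTS_DIAMOND_PROXY_ADDR -> error.
function resolveZkSyncAddress(cliValue?: string): string {
    const addr = cliValue ?? process.env.CONTRACTS_DIAMOND_PROXY_ADDR;
    if (!addr) {
        throw new Error('Provide --zksync-address or set CONTRACTS_DIAMOND_PROXY_ADDR');
    }
    return addr;
}
```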
### Create a Protocol Upgrade Proposal @@ -215,8 +217,7 @@ $ zk f yarn start transactions build-default \ --l2-upgrader-address \ --diamond-upgrade-proposal-id \ --l1rpc \ ---zksync-address \ ---use-new-governance +--zksync-address ``` To execute the `proposeTransparentUpgrade` transaction on L1, use the following command: @@ -228,7 +229,6 @@ $ zk f yarn start transactions propose-upgrade \ --gas-price \ --nonce \ --zksync-address \ ---new-governance \ --environment ``` @@ -241,7 +241,6 @@ $ zk f yarn start transactions execute-upgrade \ --gas-price \ --nonce \ --zksync-address \ ---new-governance \ --environment ``` @@ -254,6 +253,5 @@ $ zk f yarn start transactions cancel-upgrade \ --zksync-address \ --gas-price \ --nonce \ ---new-governance \ --environment ``` diff --git a/infrastructure/protocol-upgrade/src/transaction.ts b/infrastructure/protocol-upgrade/src/transaction.ts index dfea3a3bfc3..bd7df8ab456 100644 --- a/infrastructure/protocol-upgrade/src/transaction.ts +++ b/infrastructure/protocol-upgrade/src/transaction.ts @@ -3,12 +3,10 @@ import { ForceDeployUpgraderFactory as ForceDeployUpgraderFactoryL2 } from 'l2-c import { DefaultUpgradeFactory as DefaultUpgradeFactoryL1, AdminFacetFactory, - GovernanceFactory, StateTransitionManagerFactory, ChainAdminFactory } from 'l1-contracts/typechain'; import { FacetCut } from 'l1-contracts/src.ts/diamondCut'; -import { IZkSyncFactory } from '../pre-boojum/IZkSyncFactory'; import { ComplexUpgraderFactory } from 'system-contracts/typechain'; import { getCommonDataFileName, @@ -29,12 +27,26 @@ import * as path from 'path'; const testConfigPath = path.join(process.env.ZKSYNC_HOME as string, `etc/test_config/constant`); const ethTestConfig = JSON.parse(fs.readFileSync(`${testConfigPath}/eth.json`, { encoding: 'utf-8' })); +export enum Action { + Add = 0, + Replace = 1, + Remove = 2 +} + export interface DiamondCutData { facetCuts: FacetCut[]; initAddress: string; initCalldata: string; } +export interface ChainCreationParams { + genesisUpgrade: string; + genesisBatchHash: string; + genesisIndexRepeatedStorageChanges: number; + genesisBatchCommitment: string; + diamondCut: DiamondCutData; +} + export interface ForceDeployment { // The bytecode hash to put on an address bytecodeHash: BytesLike; @@ -89,7 +101,6 @@ export interface ProposedUpgrade { postUpgradeCalldata: BytesLike; upgradeTimestamp: ethers.BigNumber; newProtocolVersion: BigNumberish; - newAllowList: string; } function buildNoopL2UpgradeTx(): L2CanonicalTransaction { @@ -123,10 +134,8 @@ export function buildProposeUpgrade( bootloaderHash?: BytesLike, defaultAccountHash?: BytesLike, verifier?: string, - newAllowList?: string, l2ProtocolUpgradeTx?: L2CanonicalTransaction ): ProposedUpgrade { - newAllowList = newAllowList ?? ethers.constants.AddressZero; bootloaderHash = bootloaderHash ?? ethers.constants.HashZero; defaultAccountHash = defaultAccountHash ?? ethers.constants.HashZero; l1ContractsUpgradeCalldata = l1ContractsUpgradeCalldata ?? 
'0x'; @@ -142,8 +151,7 @@ export function buildProposeUpgrade( postUpgradeCalldata, upgradeTimestamp, factoryDeps: [], - newProtocolVersion, - newAllowList + newProtocolVersion }; } @@ -171,43 +179,6 @@ export function prepareDefaultCalldataForL2upgrade(forcedDeployments: ForceDeplo return complexUpgraderCalldata; } -interface GovernanceTx { - scheduleCalldata: string; - executeCalldata: string; - operation: any; -} - -function prepareGovernanceTxs(target: string, data: BytesLike): GovernanceTx { - const govCall = { - target: target, - value: 0, - data: data - }; - - const operation = { - calls: [govCall], - predecessor: ethers.constants.HashZero, - salt: ethers.constants.HashZero - }; - - const governance = new GovernanceFactory(); - - // Get transaction data of the `scheduleTransparent` - const scheduleCalldata = governance.interface.encodeFunctionData('scheduleTransparent', [ - operation, - 0 // delay - ]); - - // Get transaction data of the `execute` - const executeCalldata = governance.interface.encodeFunctionData('execute', [operation]); - - return { - scheduleCalldata, - executeCalldata, - operation - }; -} - function prepareChainAdminCalldata(target: string, data: BytesLike): string { const call = { target: target, @@ -221,15 +192,18 @@ function prepareChainAdminCalldata(target: string, data: BytesLike): string { return calldata; } -export function prepareTransparentUpgradeCalldataForNewGovernance( +export function prepareUpgradeCalldata( oldProtocolVersion, oldProtocolVersionDeadline, newProtocolVersion, initCalldata, upgradeAddress: string, facetCuts: FacetCut[], - stmAddress: string, zksyncAddress: string, + genesisUpgradeAddress: string, + genesisBatchHash: string, + genesisIndexRepeatedStorageChanges: number, + genesisBatchCommitment: string, prepareDirectOperation?: boolean, chainId?: string ) { @@ -238,6 +212,21 @@ export function prepareTransparentUpgradeCalldataForNewGovernance( initAddress: upgradeAddress, initCalldata }; + + let chainCreationDiamondCut: DiamondCutData = { + facetCuts: facetCuts.filter((cut) => cut.action == Action.Add), + initAddress: genesisUpgradeAddress, + initCalldata: '0x' + }; + + let chainCreationParams: ChainCreationParams = { + genesisUpgrade: genesisUpgradeAddress, + genesisBatchHash, + genesisIndexRepeatedStorageChanges, + genesisBatchCommitment, + diamondCut: chainCreationDiamondCut + }; + // Prepare calldata for STM let stm = new StateTransitionManagerFactory(); const stmUpgradeCalldata = stm.interface.encodeFunctionData('setNewVersionUpgrade', [ @@ -247,8 +236,9 @@ export function prepareTransparentUpgradeCalldataForNewGovernance( newProtocolVersion ]); - const { scheduleCalldata: stmScheduleTransparentOperation, executeCalldata: stmExecuteOperation } = - prepareGovernanceTxs(stmAddress, stmUpgradeCalldata); + const stmSetChainCreationCalldata = stm.interface.encodeFunctionData('setChainCreationParams', [ + chainCreationParams + ]); // Prepare calldata for upgrading diamond proxy let adminFacet = new AdminFacetFactory(); @@ -257,30 +247,13 @@ export function prepareTransparentUpgradeCalldataForNewGovernance( diamondCut ]); - const { - scheduleCalldata: scheduleTransparentOperation, - executeCalldata: executeOperation, - operation: governanceOperation - } = prepareGovernanceTxs(zksyncAddress, diamondProxyUpgradeCalldata); - - const newExecuteChainUpgradeCalldata = prepareChainAdminCalldata(zksyncAddress, diamondProxyUpgradeCalldata); - - const legacyScheduleTransparentOperation = adminFacet.interface.encodeFunctionData('executeUpgrade', 
[diamondCut]); - const { scheduleCalldata: legacyScheduleOperation, executeCalldata: legacyExecuteOperation } = prepareGovernanceTxs( - zksyncAddress, - legacyScheduleTransparentOperation - ); + const chainAdminUpgradeCalldata = prepareChainAdminCalldata(zksyncAddress, diamondProxyUpgradeCalldata); let result: any = { - stmScheduleTransparentOperation, - stmExecuteOperation, - scheduleTransparentOperation, - executeOperation, - newExecuteChainUpgradeCalldata, + stmUpgradeCalldata, + chainAdminUpgradeCalldata, diamondCut, - governanceOperation, - legacyScheduleOperation, - legacyExecuteOperation + stmSetChainCreationCalldata }; if (prepareDirectOperation) { @@ -290,13 +263,9 @@ export function prepareTransparentUpgradeCalldataForNewGovernance( const stmDirecUpgradeCalldata = stm.interface.encodeFunctionData('executeUpgrade', [chainId, diamondCut]); - const { scheduleCalldata: stmScheduleOperationDirect, executeCalldata: stmExecuteOperationDirect } = - prepareGovernanceTxs(stmAddress, stmDirecUpgradeCalldata); - result = { ...result, - stmScheduleOperationDirect, - stmExecuteOperationDirect + stmDirecUpgradeCalldata }; } @@ -305,16 +274,16 @@ export function prepareTransparentUpgradeCalldataForNewGovernance( export function buildDefaultUpgradeTx( environment, - diamondUpgradeProposalId, upgradeAddress, - l2UpgraderAddress, oldProtocolVersion, oldProtocolVersionDeadline, upgradeTimestamp, - newAllowList, - stmAddress, zksyncAddress, postUpgradeCalldataFlag, + genesisUpgradeAddress, + genesisBatchHash, + genesisIndexRepeatedStorageChanges, + genesisBatchCommitment, prepareDirectOperation?, chainId? ) { @@ -389,21 +358,23 @@ export function buildDefaultUpgradeTx( bootloaderHash, defaultAAHash, cryptoVerifierAddress, - newAllowList, l2UpgradeTx ); let l1upgradeCalldata = prepareDefaultCalldataForL1upgrade(proposeUpgradeTx); - let upgradeData = prepareTransparentUpgradeCalldataForNewGovernance( + let upgradeData = prepareUpgradeCalldata( oldProtocolVersion, oldProtocolVersionDeadline, packedNewProtocolVersion, l1upgradeCalldata, upgradeAddress, facetCuts, - stmAddress, zksyncAddress, + genesisUpgradeAddress, + genesisBatchHash, + genesisIndexRepeatedStorageChanges, + genesisBatchCommitment, prepareDirectOperation, chainId ); @@ -414,7 +385,6 @@ export function buildDefaultUpgradeTx( upgradeAddress, protocolVersionSemVer: newProtocolVersionSemVer, packedProtocolVersion: packedNewProtocolVersion, - diamondUpgradeProposalId, upgradeTimestamp, ...upgradeData }; @@ -423,31 +393,6 @@ export function buildDefaultUpgradeTx( console.log('Default upgrade transactions are generated'); } -async function sendTransaction( - calldata: BytesLike, - privateKey: string, - l1rpc: string, - to: string, - environment: string, - gasPrice: ethers.BigNumber, - nonce: number -) { - const wallet = getWallet(l1rpc, privateKey); - gasPrice = gasPrice ?? (await wallet.provider.getGasPrice()); - nonce = nonce ?? 
(await wallet.getTransactionCount()); - const tx = await wallet.sendTransaction({ - to, - data: calldata, - value: 0, - gasLimit: 10_000_000, - gasPrice, - nonce - }); - console.log('Transaction hash: ', tx.hash); - await tx.wait(); - console.log('Transaction is executed'); -} - export function getWallet(l1rpc, privateKey) { if (!l1rpc) { l1rpc = web3Url(); @@ -462,99 +407,6 @@ export function getWallet(l1rpc, privateKey) { ).connect(provider); } -async function sendPreparedTx( - privateKey: string, - l1rpc: string, - environment: string, - gasPrice: ethers.BigNumber, - nonce: number, - governanceAddr: string, - transactionsJsonField: string, - logText: string -) { - const transactions = JSON.parse(fs.readFileSync(getL2TransactionsFileName(environment)).toString()); - const calldata = transactions[transactionsJsonField]; - - console.log(`${logText} for protocolVersion ${transactions.protocolVersion}`); - await sendTransaction(calldata, privateKey, l1rpc, governanceAddr, environment, gasPrice, nonce); -} - -async function cancelUpgrade( - privateKey: string, - l1rpc: string, - zksyncAddress: string, - environment: string, - gasPrice: ethers.BigNumber, - nonce: number, - execute: boolean, - newGovernanceAddress: string -) { - if (newGovernanceAddress != null) { - let wallet = getWallet(l1rpc, privateKey); - const transactions = JSON.parse(fs.readFileSync(getL2TransactionsFileName(environment)).toString()); - - let governance = GovernanceFactory.connect(newGovernanceAddress, wallet); - const operation = transactions.governanceOperation; - - const operationId = await governance.hashOperation(operation); - - console.log(`Cancel upgrade operation with id: ${operationId}`); - if (execute) { - const tx = await governance.cancel(operationId); - await tx.wait(); - console.log('Operation canceled'); - } else { - const calldata = governance.interface.encodeFunctionData('cancel', [operationId]); - console.log(`Cancel upgrade calldata: ${calldata}`); - } - } else { - zksyncAddress = zksyncAddress ?? process.env.CONTRACTS_DIAMOND_PROXY_ADDR; - let wallet = getWallet(l1rpc, privateKey); - let zkSync = IZkSyncFactory.connect(zksyncAddress, wallet); - const transactions = JSON.parse(fs.readFileSync(getL2TransactionsFileName(environment)).toString()); - - const transparentUpgrade = transactions.transparentUpgrade; - const diamondUpgradeProposalId = transactions.diamondUpgradeProposalId; - - const proposalHash = await zkSync.upgradeProposalHash( - transparentUpgrade, - diamondUpgradeProposalId, - ethers.constants.HashZero - ); - - console.log(`Cancel upgrade with hash: ${proposalHash}`); - let cancelUpgradeCalldata = zkSync.interface.encodeFunctionData('cancelUpgradeProposal', [proposalHash]); - if (execute) { - await sendTransaction( - cancelUpgradeCalldata, - privateKey, - l1rpc, - zksyncAddress, - environment, - gasPrice, - nonce - ); - } else { - console.log(`Cancel upgrade calldata: ${cancelUpgradeCalldata}`); - } - } -} - -async function getNewDiamondUpgradeProposalId(l1rpc: string, zksyncAddress: string) { - zksyncAddress = zksyncAddress ?? process.env.CONTRACTS_DIAMOND_PROXY_ADDR; - // We don't care about the wallet here, we just need to make a get call. 
- let wallet = getWallet(l1rpc, undefined); - let zkSync = IZkSyncFactory.connect(zksyncAddress, wallet); - let proposalId = await zkSync.getCurrentProposalId(); - proposalId = proposalId.add(1); - console.log( - `New proposal id: ${proposalId} for ${zksyncAddress} network: ${JSON.stringify( - await wallet.provider.getNetwork() - )}` - ); - return proposalId; -} - export const command = new Command('transactions').description( 'prepare the transactions and their calldata for the upgrade' ); @@ -564,223 +416,31 @@ command .requiredOption('--upgrade-timestamp <upgradeTimestamp>') .option('--upgrade-address <upgradeAddress>') .option('--environment <environment>') - .option('--new-allow-list <newAllowList>') - .option('--l2-upgrader-address <l2UpgraderAddress>') - .option('--diamond-upgrade-proposal-id <diamondUpgradeProposalId>') .option('--old-protocol-version <oldProtocolVersion>') .option('--old-protocol-version-deadline <oldProtocolVersionDeadline>') .option('--l1rpc <l1rpc>') .option('--zksync-address <zksyncAddress>') - .option('--state-transition-manager-address <stateTransitionManagerAddress>') .option('--chain-id <chainId>') .option('--prepare-direct-operation <prepareDirectOperation>') - .option('--use-new-governance') - .option('--post-upgrade-calldata') + .option('--post-upgrade-calldata <postUpgradeCalldata>') + .option('--genesis-upgrade-address <genesisUpgradeAddress>') + .option('--genesis-batch-hash <genesisBatchHash>') + .option('--genesis-index-repeated-storage-changes <genesisIndexRepeatedStorageChanges>') + .option('--genesis-batch-commitment <genesisBatchCommitment>') .action(async (options) => { - if (!options.useNewGovernance) { - // TODO(X): remove old governance functionality from the protocol upgrade tool - throw new Error('Old governance is not supported anymore'); - } - - let diamondUpgradeProposalId = options.diamondUpgradeProposalId; - if (!diamondUpgradeProposalId && !options.useNewGovernance) { - diamondUpgradeProposalId = await getNewDiamondUpgradeProposalId(options.l1rpc, options.zksyncAddress); - } - buildDefaultUpgradeTx( options.environment, - diamondUpgradeProposalId, options.upgradeAddress, - options.l2UpgraderAddress, options.oldProtocolVersion, options.oldProtocolVersionDeadline, options.upgradeTimestamp, - options.newAllowList, - options.stateTransitionManagerAddress, options.zksyncAddress, options.postUpgradeCalldata, + options.genesisUpgradeAddress, + options.genesisBatchHash, + options.genesisIndexRepeatedStorageChanges, + options.genesisBatchCommitment, options.prepareDirectOperation, options.chainId ); }); - -command - .command('propose-upgrade-stm') - .option('--environment <environment>') - .option('--private-key <privateKey>') - .option('--gas-price <gasPrice>') - .option('--nonce <nonce>') - .option('--l1rpc <l1rpc>') - .option('--governance-addr <governanceAddr>') - .action(async (options) => { - if (!options.governanceAddr) { - throw new Error('Governance address must be provided'); - } - - await sendPreparedTx( - options.privateKey, - options.l1rpc, - options.environment, - options.gasPrice, - options.nonce, - options.governanceAddr, - 'stmScheduleTransparentOperation', - 'Proposing upgrade for STM' - ); - }); - -command - .command('execute-upgrade-stm') - .option('--environment <environment>') - .option('--private-key <privateKey>') - .option('--gas-price <gasPrice>') - .option('--nonce <nonce>') - .option('--l1rpc <l1rpc>') - .option('--governance-addr <governanceAddr>') - .action(async (options) => { - if (!options.governanceAddr) { - throw new Error('Governance address must be provided'); - } - - await sendPreparedTx( - options.privateKey, - options.l1rpc, - options.environment, - options.gasPrice, - options.nonce, - options.governanceAddr, - 'stmExecuteOperation', - 'Executing upgrade for STM' - ); - }); - -command - .command('propose-upgrade') - .option('--environment <environment>') - .option('--private-key <privateKey>') - .option('--zksync-address <zksyncAddress>') - .option('--gas-price <gasPrice>') - .option('--nonce <nonce>') - .option('--l1rpc <l1rpc>') - 
.option('--governance-addr <governanceAddr>') - .action(async (options) => { - if (!options.governanceAddr) { - throw new Error('Governance address must be provided'); - } - - await sendPreparedTx( - options.privateKey, - options.l1rpc, - options.environment, - options.gasPrice, - options.nonce, - options.governanceAddr, - 'scheduleTransparentOperation', - 'Proposing "upgradeChainFromVersion" upgrade' - ); - }); - -command - .command('execute-upgrade') - .option('--environment <environment>') - .option('--private-key <privateKey>') - .option('--zksync-address <zksyncAddress>') - .option('--gas-price <gasPrice>') - .option('--nonce <nonce>') - .option('--l1rpc <l1rpc>') - .option('--governance-addr <governanceAddr>') - .action(async (options) => { - if (!options.governanceAddr) { - throw new Error('Governance address must be provided'); - } - - await sendPreparedTx( - options.privateKey, - options.l1rpc, - options.environment, - options.gasPrice, - options.nonce, - options.governanceAddr, - 'executeOperation', - 'Executing "upgradeChainFromVersion" upgrade' - ); - }); - -command - .command('propose-upgrade-direct') - .option('--environment <environment>') - .option('--private-key <privateKey>') - .option('--zksync-address <zksyncAddress>') - .option('--gas-price <gasPrice>') - .option('--nonce <nonce>') - .option('--l1rpc <l1rpc>') - .option('--governance-addr <governanceAddr>') - .action(async (options) => { - if (!options.governanceAddr) { - throw new Error('Governance address must be provided'); - } - - await sendPreparedTx( - options.privateKey, - options.l1rpc, - options.environment, - options.gasPrice, - options.nonce, - options.governanceAddr, - 'stmScheduleOperationDirect', - 'Executing direct upgrade via STM' - ); - }); - -command - .command('execute-upgrade-direct') - .option('--environment <environment>') - .option('--private-key <privateKey>') - .option('--zksync-address <zksyncAddress>') - .option('--gas-price <gasPrice>') - .option('--nonce <nonce>') - .option('--l1rpc <l1rpc>') - .option('--governance-addr <governanceAddr>') - .action(async (options) => { - if (!options.governanceAddr) { - throw new Error('Governance address must be provided'); - } - - await sendPreparedTx( - options.privateKey, - options.l1rpc, - options.environment, - options.gasPrice, - options.nonce, - options.governanceAddr, - 'stmExecuteOperationDirect', - 'Executing direct upgrade via STM' - ); - }); - -command - .command('cancel-upgrade') - .option('--environment <environment>') - .option('--private-key <privateKey>') - .option('--zksync-address <zksyncAddress>') - .option('--gas-price <gasPrice>') - .option('--nonce <nonce>') - .option('--l1rpc <l1rpc>') - .option('--execute') - .option('--governance-addr <governanceAddr>') - .action(async (options) => { - if (!options.governanceAddr) { - throw new Error('Governance address must be provided'); - } - - await cancelUpgrade( - options.privateKey, - options.l1rpc, - options.zksyncAddress, - options.environment, - options.gasPrice, - options.nonce, - options.execute, - options.newGovernance - ); - }); diff --git a/infrastructure/zk/src/docker.ts b/infrastructure/zk/src/docker.ts index 063777a671b..dc716a0b257 100644 --- a/infrastructure/zk/src/docker.ts +++ b/infrastructure/zk/src/docker.ts @@ -16,7 +16,8 @@ const IMAGES = [ 'prover-job-monitor', 'proof-fri-gpu-compressor', 'snapshots-creator', - 'verified-sources-fetcher' + 'verified-sources-fetcher', + 'prover-autoscaler' ]; const DOCKER_REGISTRIES = ['us-docker.pkg.dev/matterlabs-infra/matterlabs-docker', 'matterlabs']; @@ -76,7 +77,8 @@ function defaultTagList(image: string, imageTagSha: string, imageTagShaTS: strin 'contract-verifier', 'prover-fri-gateway', 'prover-job-monitor', - 'snapshots-creator' + 'snapshots-creator', + 'prover-autoscaler' ].includes(image) ? 
['latest', 'latest2.0', `2.0-${imageTagSha}`, `${imageTagSha}`, `2.0-${imageTagShaTS}`, `${imageTagShaTS}`] : [`latest2.0`, 'latest']; diff --git a/infrastructure/zk/src/fmt.ts b/infrastructure/zk/src/fmt.ts index e58cdbc8e54..b9f7f1b9d60 100644 --- a/infrastructure/zk/src/fmt.ts +++ b/infrastructure/zk/src/fmt.ts @@ -48,7 +48,7 @@ export async function rustfmt(check: boolean = false) { const dirs = [ process.env.ZKSYNC_HOME as string, `${process.env.ZKSYNC_HOME}/prover`, - `${process.env.ZKSYNC_HOME}/zk_toolbox` + `${process.env.ZKSYNC_HOME}/zkstack_cli` ]; for (const dir of dirs) { diff --git a/infrastructure/zk/src/lint.ts b/infrastructure/zk/src/lint.ts index 7a24881c0f9..49ae4d0753e 100644 --- a/infrastructure/zk/src/lint.ts +++ b/infrastructure/zk/src/lint.ts @@ -38,12 +38,12 @@ async function proverClippy() { await utils.spawn('cargo clippy --tests --locked -- -D warnings'); } -async function toolboxClippy() { - process.chdir(`${process.env.ZKSYNC_HOME}/zk_toolbox`); +async function zkstackClippy() { + process.chdir(`${process.env.ZKSYNC_HOME}/zkstack_cli`); await utils.spawn('cargo clippy --tests --locked -- -D warnings'); } -const ARGS = [...EXTENSIONS, 'rust', 'prover', 'contracts', 'toolbox'] as const; +const ARGS = [...EXTENSIONS, 'rust', 'prover', 'contracts', 'zkstack_cli'] as const; export const command = new Command('lint') .description('lint code') @@ -61,8 +61,8 @@ export const command = new Command('lint') case 'contracts': await lintContracts(cmd.check); break; - case 'toolbox': - await toolboxClippy(); + case 'zkstack_cli': + await zkstackClippy(); break; default: await lint(extension, cmd.check); @@ -72,7 +72,7 @@ export const command = new Command('lint') promises.push(lintContracts(cmd.check)); promises.push(clippy()); promises.push(proverClippy()); - promises.push(toolboxClippy()); + promises.push(zkstackClippy()); await Promise.all(promises); } }); diff --git a/prover/Cargo.lock b/prover/Cargo.lock index d433c4e1a3b..928d105582f 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -46,6 +46,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if", + "getrandom", "once_cell", "version_check", "zerocopy", @@ -92,9 +93,9 @@ dependencies = [ [[package]] name = "anstream" -version = "0.6.15" +version = "0.6.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64e15c1ab1f89faffbf04a634d5e1962e9074f2741eef6d97f3c4e322426d526" +checksum = "418c75fa768af9c03be99d17643f93f79bbba589895012a80e3452a19ddda15b" dependencies = [ "anstyle", "anstyle-parse", @@ -107,33 +108,33 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.8" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1bec1de6f59aedf83baf9ff929c98f2ad654b97c9510f4e70cf6f661d49fd5b1" +checksum = "038dfcf04a5feb68e9c60b21c9625a54c2c0616e79b72b0fd87075a056ae1d1b" [[package]] name = "anstyle-parse" -version = "0.2.5" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb47de1e80c2b463c735db5b217a0ddc39d612e7ac9e2e96a5aed1f57616c1cb" +checksum = "c03a11a9034d92058ceb6ee011ce58af4a9bf61491aa7e1e59ecd24bd40d22d4" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.1.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d36fc52c7f6c869915e99412912f22093507da8d9e942ceaf66fe4b7c14422a" +checksum = 
"ad186efb764318d35165f1758e7dcef3b10628e26d41a44bc5550652e6804391" dependencies = [ "windows-sys 0.52.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.4" +version = "3.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bf74e1b6e971609db8ca7a9ce79fd5768ab6ae46441c572e46cf596f59e57f8" +checksum = "61a38449feb7068f52bb06c12759005cf459ee52bb4adc1d5a7c4322d716fb19" dependencies = [ "anstyle", "windows-sys 0.52.0", @@ -168,9 +169,9 @@ dependencies = [ [[package]] name = "arrayref" -version = "0.3.8" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d151e35f61089500b617991b791fc8bfd237ae50cd5950803758a179b41e67a" +checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545" [[package]] name = "arrayvec" @@ -208,6 +209,18 @@ dependencies = [ "wait-timeout", ] +[[package]] +name = "async-broadcast" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20cd0e2e25ea8e5f7e9df04578dc6cf5c83577fd09b1a46aaf5c85e1c33f2a7e" +dependencies = [ + "event-listener", + "event-listener-strategy", + "futures-core", + "pin-project-lite", +] + [[package]] name = "async-stream" version = "0.3.5" @@ -225,20 +238,20 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] name = "async-trait" -version = "0.1.81" +version = "0.1.80" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e0c28dcc82d7c8ead5cb13beb15405b57b8546e93215673ff8ca0349a028107" +checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] @@ -287,11 +300,11 @@ dependencies = [ [[package]] name = "aws-lc-sys" -version = "0.20.0" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e89b6941c2d1a7045538884d6e760ccfffdf8e1ffc2613d8efa74305e1f3752" +checksum = "0f0e249228c6ad2d240c2dc94b714d711629d52bad946075d8e9b2f5391f0703" dependencies = [ - "bindgen 0.69.4", + "bindgen", "cc", "cmake", "dunce", @@ -311,9 +324,9 @@ dependencies = [ "bytes", "futures-util", "http 1.1.0", - "http-body 1.0.1", + "http-body 1.0.0", "http-body-util", - "hyper 1.4.1", + "hyper 1.3.1", "hyper-util", "itoa", "matchit", @@ -344,7 +357,7 @@ dependencies = [ "bytes", "futures-util", "http 1.1.0", - "http-body 1.0.1", + "http-body 1.0.0", "http-body-util", "mime", "pin-project-lite", @@ -355,11 +368,22 @@ dependencies = [ "tracing", ] +[[package]] +name = "backoff" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b62ddb9cb1ec0a098ad4bbf9344d0713fa193ae1a80af55febcff2627b6a00c1" +dependencies = [ + "getrandom", + "instant", + "rand 0.8.5", +] + [[package]] name = "backtrace" -version = "0.3.73" +version = "0.3.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cc23269a4f8976d0a4d2e7109211a419fe30e8d88d677cd60b6bc79c5732e0a" +checksum = "17c6a35df3749d2e8bb1b7b21a976d82b15548788d2735b9d82f329268f71a11" dependencies = [ "addr2line", "cc", @@ -423,7 +447,7 @@ checksum = "51d712318a27c7150326677b321a5fa91b55f6d9034ffd67f20319e147d40cee" dependencies = [ "autocfg", "libm", - "num-bigint 0.4.6", + "num-bigint 0.4.5", 
"num-integer", "num-traits", ] @@ -437,29 +461,6 @@ dependencies = [ "serde", ] -[[package]] -name = "bindgen" -version = "0.59.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bd2a9a458e8f4304c52c43ebb0cfbd520289f8379a52e329a38afda99bf8eb8" -dependencies = [ - "bitflags 1.3.2", - "cexpr", - "clang-sys", - "clap 2.34.0", - "env_logger 0.9.3", - "lazy_static", - "lazycell", - "log", - "peeking_take_while", - "proc-macro2 1.0.86", - "quote 1.0.36", - "regex", - "rustc-hash", - "shlex", - "which", -] - [[package]] name = "bindgen" version = "0.69.4" @@ -474,12 +475,12 @@ dependencies = [ "lazycell", "log", "prettyplease", - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", "regex", "rustc-hash", "shlex", - "syn 2.0.72", + "syn 2.0.66", "which", ] @@ -650,9 +651,9 @@ dependencies = [ [[package]] name = "boojum-cuda" -version = "0.150.9" +version = "0.151.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04f9a6d958dd58a0899737e5a1fc6597aefcf7980bf8be5be5329e701cbd45ca" +checksum = "98c681a3f867afe40bcc188e5cb5260bbf5699531823affa3cbe28f7ca9b7bc9" dependencies = [ "boojum", "cmake", @@ -664,25 +665,25 @@ dependencies = [ [[package]] name = "borsh" -version = "1.5.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6362ed55def622cddc70a4746a68554d7b687713770de539e59a739b249f8ed" +checksum = "26d4d6dafc1a3bb54687538972158f07b2c948bc57d5890df22c0739098b3028" dependencies = [ "borsh-derive", - "cfg_aliases 0.2.1", + "cfg_aliases", ] [[package]] name = "borsh-derive" -version = "1.5.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3ef8005764f53cd4dca619f5bf64cafd4664dada50ece25e4d81de54c80cc0b" +checksum = "bf4918709cc4dd777ad2b6303ed03cb37f3ca0ccede8c1b0d28ac6db8f4710e0" dependencies = [ "once_cell", - "proc-macro-crate 3.1.0", - "proc-macro2 1.0.86", + "proc-macro-crate 2.0.0", + "proc-macro2 1.0.85", "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", "syn_derive", ] @@ -693,7 +694,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "05efc5cfd9110c8416e471df0e96702d58690178e206e61b7173706673c93706" dependencies = [ "memchr", - "regex-automata 0.4.7", + "regex-automata 0.4.6", "serde", ] @@ -726,7 +727,7 @@ version = "0.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3db406d29fbcd95542e92559bed4d8ad92636d1ca8b3b72ede10b4bcc010e659" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", "syn 1.0.109", ] @@ -781,12 +782,6 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" -[[package]] -name = "cfg_aliases" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" - [[package]] name = "chrono" version = "0.4.38" @@ -799,16 +794,16 @@ dependencies = [ "num-traits", "serde", "wasm-bindgen", - "windows-targets 0.52.6", + "windows-targets 0.52.5", ] [[package]] name = "circuit_definitions" -version = "0.150.5" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b532214f063e5e0ee5c0fc1d3afd56dec541efa68b8985f14cc55cc324f4c48" +checksum = "492404ea63c934d8e894325f0a741723bf91cd035cb34a92fddd8617c4a00fd3" dependencies = [ - "circuit_encodings 0.150.5", + "circuit_encodings 0.150.6", 
"crossbeam", "derivative", "seq-macro", @@ -854,14 +849,14 @@ dependencies = [ [[package]] name = "circuit_encodings" -version = "0.150.5" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e67617688c66640c84f9b98ff26d48f7898dca4faeb45241a4f21ec333788e7b" +checksum = "f5128d4b8fbb27ac453f573a95601058e74487bdafd22a3168cded66bf340c28" dependencies = [ "derivative", "serde", - "zk_evm 0.150.5", - "zkevm_circuits 0.150.5", + "zk_evm 0.150.6", + "zkevm_circuits 0.150.6", ] [[package]] @@ -921,11 +916,11 @@ dependencies = [ [[package]] name = "circuit_sequencer_api" -version = "0.150.5" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21017310971d4a051e4a52ad70eed11d1ae69defeca8314f73a3a4bad16705a9" +checksum = "093d0c2c0b39144ddb4e1e88d73d95067ce34ec7750808b2eed01edbb510b88e" dependencies = [ - "circuit_encodings 0.150.5", + "circuit_encodings 0.150.6", "derivative", "rayon", "serde", @@ -960,9 +955,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.11" +version = "4.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35723e6a11662c2afb578bcf0b88bf6ea8e21282a953428f240574fcc3a2b5b3" +checksum = "90bc066a67923782aa8515dbaea16946c5bcc5addbd668bb80af688e53e548a0" dependencies = [ "clap_builder", "clap_derive", @@ -970,9 +965,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.11" +version = "4.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49eb96cbfa7cfa35017b7cd548c75b14c3118c98b423041d70562665e07fb0fa" +checksum = "ae129e2e766ae0ec03484e609954119f123cc1fe650337e155d03b022f24f7b4" dependencies = [ "anstream", "anstyle", @@ -982,21 +977,21 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.11" +version = "4.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d029b67f89d30bbb547c89fd5161293c0aec155fc691d7924b64550662db93e" +checksum = "528131438037fd55894f62d6e9f068b8f45ac57ffa77517819645d10aed04f64" dependencies = [ "heck 0.5.0", - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] name = "clap_lex" -version = "0.7.2" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97" +checksum = "98cc8fbded0c607b7ba9dd60cd98df59af97e84d24e49c8557331cfc26d301ce" [[package]] name = "cmake" @@ -1018,9 +1013,9 @@ dependencies = [ [[package]] name = "colorchoice" -version = "1.0.2" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3fd119d74b830634cea2a0f58bbd0d54540518a14397557951e79340abc28c0" +checksum = "0b6a852b24ab71dffc585bcb46eaf7959d175cb865a7152e35b348d1b2960422" [[package]] name = "colored" @@ -1097,7 +1092,7 @@ version = "0.2.32" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c7f6ff08fd20f4f299298a28e2dfa8a8ba1036e6cd2460ac1de7b425d76f2500" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", "unicode-xid 0.2.4", ] @@ -1279,7 +1274,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24cf603ca4299c6e20e644da88897f7b81d688510f4887e818b0bfe0b792081b" dependencies = [ "proc-macro-error", - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", "syn 1.0.109", ] @@ -1291,7 +1286,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"edb49164822f3ee45b17acd4a208cfc1251410cf0cad9a833234c9890774dd9f" dependencies = [ "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] @@ -1326,9 +1321,9 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] @@ -1337,8 +1332,18 @@ version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a01d95850c592940db9b8194bc39f4bc0e89dee5c4265e4b1807c34a9aba453c" dependencies = [ - "darling_core", - "darling_macro", + "darling_core 0.13.4", + "darling_macro 0.13.4", +] + +[[package]] +name = "darling" +version = "0.20.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6f63b86c8a8826a49b8c21f08a2d07338eec8d900540f8630dc76284be802989" +dependencies = [ + "darling_core 0.20.10", + "darling_macro 0.20.10", ] [[package]] @@ -1349,23 +1354,57 @@ checksum = "859d65a907b6852c9361e3185c862aae7fafd2887876799fa55f5f99dc40d610" dependencies = [ "fnv", "ident_case", - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", "strsim 0.10.0", "syn 1.0.109", ] +[[package]] +name = "darling_core" +version = "0.20.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95133861a8032aaea082871032f5815eb9e98cef03fa916ab4500513994df9e5" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2 1.0.85", + "quote 1.0.36", + "strsim 0.11.1", + "syn 2.0.66", +] + [[package]] name = "darling_macro" version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" dependencies = [ - "darling_core", + "darling_core 0.13.4", "quote 1.0.36", "syn 1.0.109", ] +[[package]] +name = "darling_macro" +version = "0.20.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" +dependencies = [ + "darling_core 0.20.10", + "quote 1.0.36", + "syn 2.0.66", +] + +[[package]] +name = "debug-map-sorted" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75c7dfa83618734bf9fa07aadaa1166b634e9427bb9bc5a1c2332d04d73fb721" +dependencies = [ + "itertools 0.10.5", +] + [[package]] name = "debugid" version = "0.8.0" @@ -1413,7 +1452,7 @@ version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", "syn 1.0.109", ] @@ -1433,9 +1472,9 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", "unicode-xid 0.2.4", ] @@ -1509,6 +1548,12 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "56ce8c6da7551ec6c462cbaf3bfbc75131ebbfa1c944aeaa9dab51ca1c5f0c3b" +[[package]] +name = "dyn-clone" +version = "1.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d6ef0072f8a535281e4876be788938b528e9a1d43900b82c2569af7da799125" + [[package]] name = "ecdsa" version = "0.14.8" @@ -1562,9 +1607,9 @@ dependencies = [ [[package]] 
name = "either" -version = "1.13.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" +checksum = "3dca9240753cf90908d7e4aac30f630662b02aebaa1b58a3cadabdb23385b58b" dependencies = [ "serde", ] @@ -1640,16 +1685,16 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "aa18ce2bc66555b3218614519ac839ddb759a7d6720732f979ef8d13be147ecd" dependencies = [ "once_cell", - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] name = "env_filter" -version = "0.1.2" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f2c92ceda6ceec50f43169f9ee8424fe2db276791afde7b2cd8bc084cb376ab" +checksum = "a009aa4810eb158359dda09d0c87378e4bbb89b5a801f016885a4707ba24f7ea" dependencies = [ "log", ] @@ -1669,9 +1714,9 @@ dependencies = [ [[package]] name = "env_logger" -version = "0.11.5" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e13fa619b91fb2381732789fc5de83b45675e882f66623b7d8cb4f643017018d" +checksum = "38b35839ba51819680ba087cd351788c9a3c476841207e0b8cee0b04722343b9" dependencies = [ "anstream", "anstyle", @@ -1696,9 +1741,9 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "era_cudart" -version = "0.150.9" +version = "0.151.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51f0d6e329b2c11d134c3140951209be968ef316ed64ddde75640eaed7f10264" +checksum = "c1e1990fee6e9d25b40524ce53ca7977a211155a17bc7277f4dd354633e4fc22" dependencies = [ "bitflags 2.6.0", "era_cudart_sys", @@ -1707,9 +1752,9 @@ dependencies = [ [[package]] name = "era_cudart_sys" -version = "0.150.9" +version = "0.151.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "060e8186234c7a281021fb95614e06e94e1fc7ab78938360a5c27af0f8fc6105" +checksum = "d84e8d300c28cd91ceb56340f66da8607409f44a45f5e694e23723630db8c852" dependencies = [ "serde_json", ] @@ -1790,6 +1835,16 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "event-listener-strategy" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0f214dc438f977e6d4e3500aaa277f5ad94ca83fbbd9b1a15713ce2344ccc5a1" +dependencies = [ + "event-listener", + "pin-project-lite", +] + [[package]] name = "fastrand" version = "2.1.0" @@ -1868,6 +1923,15 @@ dependencies = [ "miniz_oxide", ] +[[package]] +name = "fluent-uri" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "17c704e9dbe1ddd863da1e6ff3567795087b1eb201ce80d8fa81162e1516500d" +dependencies = [ + "bitflags 1.3.2", +] + [[package]] name = "flume" version = "0.11.0" @@ -1928,7 +1992,7 @@ dependencies = [ "indexmap 1.9.3", "itertools 0.10.5", "lazy_static", - "num-bigint 0.4.6", + "num-bigint 0.4.5", "num-derive", "num-integer", "num-traits", @@ -2043,9 +2107,9 @@ version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] @@ -2306,6 +2370,20 @@ dependencies = [ "tracing", ] +[[package]] +name = "handlebars" +version = "3.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"4498fc115fa7d34de968184e473529abb40eeb6be8bc5f7faba3d08c316cb3e3" +dependencies = [ + "log", + "pest", + "pest_derive", + "quick-error 2.0.1", + "serde", + "serde_json", +] + [[package]] name = "hashbrown" version = "0.12.3" @@ -2334,6 +2412,30 @@ dependencies = [ "hashbrown 0.14.5", ] +[[package]] +name = "headers" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "322106e6bd0cba2d5ead589ddb8150a13d7c4217cf80d7c4f682ca994ccc6aa9" +dependencies = [ + "base64 0.21.7", + "bytes", + "headers-core", + "http 1.1.0", + "httpdate", + "mime", + "sha1", +] + +[[package]] +name = "headers-core" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "54b4a22553d4242c49fddb9ba998a99962b5cc6f22cb5a3482bec22522403ce4" +dependencies = [ + "http 1.1.0", +] + [[package]] name = "heck" version = "0.3.3" @@ -2443,9 +2545,9 @@ dependencies = [ [[package]] name = "http-body" -version = "1.0.1" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" +checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643" dependencies = [ "bytes", "http 1.1.0", @@ -2460,15 +2562,15 @@ dependencies = [ "bytes", "futures-util", "http 1.1.0", - "http-body 1.0.1", + "http-body 1.0.0", "pin-project-lite", ] [[package]] name = "httparse" -version = "1.9.4" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fcc0b4a115bf80b728eb8ea024ad5bd707b615bfed49e0665b6e0f86fd082d9" +checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" [[package]] name = "httpdate" @@ -2484,9 +2586,9 @@ checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" [[package]] name = "hyper" -version = "0.14.30" +version = "0.14.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a152ddd61dfaec7273fe8419ab357f33aee0d914c5f4efbf0d96fa749eea5ec9" +checksum = "f361cde2f109281a220d4307746cdfd5ee3f410da58a70377762396775634b33" dependencies = [ "bytes", "futures-channel", @@ -2508,16 +2610,16 @@ dependencies = [ [[package]] name = "hyper" -version = "1.4.1" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50dfd22e0e76d0f662d429a5f80fcaf3855009297eab6a0a9f8543834744ba05" +checksum = "fe575dd17d0862a9a33781c8c4696a55c320909004a67a00fb286ba8b1bc496d" dependencies = [ "bytes", "futures-channel", "futures-util", "h2 0.4.5", "http 1.1.0", - "http-body 1.0.1", + "http-body 1.0.0", "httparse", "httpdate", "itoa", @@ -2527,6 +2629,26 @@ dependencies = [ "want", ] +[[package]] +name = "hyper-http-proxy" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d06dbdfbacf34d996c6fb540a71a684a7aae9056c71951163af8a8a4c07b9a4" +dependencies = [ + "bytes", + "futures-util", + "headers", + "http 1.1.0", + "hyper 1.3.1", + "hyper-rustls", + "hyper-util", + "pin-project-lite", + "rustls-native-certs", + "tokio", + "tokio-rustls", + "tower-service", +] + [[package]] name = "hyper-rustls" version = "0.27.2" @@ -2535,10 +2657,11 @@ checksum = "5ee4be2c948921a1a5320b629c4193916ed787a7f7f293fd3f7f5a6c9de74155" dependencies = [ "futures-util", "http 1.1.0", - "hyper 1.4.1", + "hyper 1.3.1", "hyper-util", "log", "rustls", + "rustls-native-certs", "rustls-pki-types", "tokio", "tokio-rustls", @@ -2551,7 +2674,7 @@ version = "0.5.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "3203a961e5c83b6f5498933e78b6b263e208c197b63e9c6c53cc82ffd3f63793" dependencies = [ - "hyper 1.4.1", + "hyper 1.3.1", "hyper-util", "pin-project-lite", "tokio", @@ -2565,7 +2688,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" dependencies = [ "bytes", - "hyper 0.14.30", + "hyper 0.14.29", "native-tls", "tokio", "tokio-native-tls", @@ -2579,7 +2702,7 @@ checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" dependencies = [ "bytes", "http-body-util", - "hyper 1.4.1", + "hyper 1.3.1", "hyper-util", "native-tls", "tokio", @@ -2589,16 +2712,16 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.6" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ab92f4f49ee4fb4f997c784b7a2e0fa70050211e0b6a287f898c3c9785ca956" +checksum = "7b875924a60b96e5d7b9ae7b066540b1dd1cbd90d1828f54c92e02a283351c56" dependencies = [ "bytes", "futures-channel", "futures-util", "http 1.1.0", - "http-body 1.0.1", - "hyper 1.4.1", + "http-body 1.0.0", + "hyper 1.3.1", "pin-project-lite", "socket2", "tokio", @@ -2679,7 +2802,7 @@ version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", "syn 1.0.109", ] @@ -2716,6 +2839,15 @@ dependencies = [ "regex", ] +[[package]] +name = "instant" +version = "0.1.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" +dependencies = [ + "cfg-if", +] + [[package]] name = "ipnet" version = "2.9.0" @@ -2733,9 +2865,9 @@ dependencies = [ [[package]] name = "is_terminal_polyfill" -version = "1.70.1" +version = "1.70.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" +checksum = "f8478577c03552c21db0e2724ffb8986a5ce7af88107e6be5d2ee6e158c12800" [[package]] name = "itertools" @@ -2812,9 +2944,9 @@ checksum = "8eaf4bc02d17cbdd7ff4c7438cafcdf7fb9a4613313ad11b4f8fefe7d3fa0130" [[package]] name = "jobserver" -version = "0.1.32" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0" +checksum = "d2b099aaa34a9751c5bf0878add70444e1ed2dd73f347be99003d4577277de6e" dependencies = [ "libc", ] @@ -2828,6 +2960,44 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "json-patch" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5b1fb8864823fad91877e6caea0baca82e49e8db50f8e5c9f9a453e27d3330fc" +dependencies = [ + "jsonptr", + "serde", + "serde_json", + "thiserror", +] + +[[package]] +name = "jsonpath-rust" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19d8fe85bd70ff715f31ce8c739194b423d79811a19602115d611a3ec85d6200" +dependencies = [ + "lazy_static", + "once_cell", + "pest", + "pest_derive", + "regex", + "serde_json", + "thiserror", +] + +[[package]] +name = "jsonptr" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1c6e529149475ca0b2820835d3dce8fcc41c6b943ca608d32f35b449255e4627" +dependencies = [ + "fluent-uri", + "serde", + 
"serde_json", +] + [[package]] name = "jsonrpsee" version = "0.23.2" @@ -2882,7 +3052,7 @@ dependencies = [ "futures-timer", "futures-util", "http 1.1.0", - "http-body 1.0.1", + "http-body 1.0.0", "http-body-util", "jsonrpsee-types", "pin-project", @@ -2904,8 +3074,8 @@ checksum = "2d90064e04fb9d7282b1c71044ea94d0bbc6eff5621c66f1a0bce9e9de7cf3ac" dependencies = [ "async-trait", "base64 0.22.1", - "http-body 1.0.1", - "hyper 1.4.1", + "http-body 1.0.0", + "hyper 1.3.1", "hyper-rustls", "hyper-util", "jsonrpsee-core", @@ -2929,9 +3099,9 @@ checksum = "7895f186d5921065d96e16bd795e5ca89ac8356ec423fafc6e3d7cf8ec11aee4" dependencies = [ "heck 0.5.0", "proc-macro-crate 3.1.0", - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] @@ -3013,12 +3183,135 @@ dependencies = [ ] [[package]] -name = "keccak" -version = "0.1.5" +name = "k8s-openapi" +version = "0.23.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8847402328d8301354c94d605481f25a6bdc1ed65471fd96af8eca71141b13" +dependencies = [ + "base64 0.22.1", + "chrono", + "serde", + "serde-value", + "serde_json", +] + +[[package]] +name = "keccak" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654" +dependencies = [ + "cpufeatures", +] + +[[package]] +name = "kube" +version = "0.95.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa21063c854820a77c5d7f8deeb7ffa55246d8304e4bcd8cce2956752c6604f8" +dependencies = [ + "k8s-openapi", + "kube-client", + "kube-core", + "kube-derive", + "kube-runtime", +] + +[[package]] +name = "kube-client" +version = "0.95.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "31c2355f5c9d8a11900e71a6fe1e47abd5ec45bf971eb4b162ffe97b46db9bb7" +dependencies = [ + "base64 0.22.1", + "bytes", + "chrono", + "either", + "futures 0.3.30", + "home", + "http 1.1.0", + "http-body 1.0.0", + "http-body-util", + "hyper 1.3.1", + "hyper-http-proxy", + "hyper-rustls", + "hyper-timeout", + "hyper-util", + "jsonpath-rust", + "k8s-openapi", + "kube-core", + "pem", + "rustls", + "rustls-pemfile 2.1.2", + "secrecy", + "serde", + "serde_json", + "serde_yaml", + "thiserror", + "tokio", + "tokio-util", + "tower", + "tower-http", + "tracing", +] + +[[package]] +name = "kube-core" +version = "0.95.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3030bd91c9db544a50247e7d48d7db9cf633c172732dce13351854526b1e666" +dependencies = [ + "chrono", + "form_urlencoded", + "http 1.1.0", + "json-patch", + "k8s-openapi", + "schemars", + "serde", + "serde-value", + "serde_json", + "thiserror", +] + +[[package]] +name = "kube-derive" +version = "0.95.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa98be978eddd70a773aa8e86346075365bfb7eb48783410852dbf7cb57f0c27" +dependencies = [ + "darling 0.20.10", + "proc-macro2 1.0.85", + "quote 1.0.36", + "serde_json", + "syn 2.0.66", +] + +[[package]] +name = "kube-runtime" +version = "0.95.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654" +checksum = "5895cb8aa641ac922408f128b935652b34c2995f16ad7db0984f6caa50217914" dependencies = [ - "cpufeatures", + "ahash 0.8.11", + "async-broadcast", + "async-stream", + "async-trait", + "backoff", + "derivative", + "futures 0.3.30", + "hashbrown 0.14.5", + 
"json-patch", + "jsonptr", + "k8s-openapi", + "kube-client", + "parking_lot", + "pin-project", + "serde", + "serde_json", + "thiserror", + "tokio", + "tokio-util", + "tracing", ] [[package]] @@ -3044,12 +3337,12 @@ checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" [[package]] name = "libloading" -version = "0.8.5" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" +checksum = "0c2a198fb6b0eada2a8df47933734e6d35d350665a33a3593d7164fa52c75c19" dependencies = [ "cfg-if", - "windows-targets 0.48.5", + "windows-targets 0.52.5", ] [[package]] @@ -3099,9 +3392,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.22" +version = "0.4.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" +checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" [[package]] name = "logos" @@ -3120,10 +3413,10 @@ checksum = "dc487311295e0002e452025d6b580b77bb17286de87b57138f3b5db711cded68" dependencies = [ "beef", "fnv", - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", "regex-syntax 0.6.29", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] @@ -3174,9 +3467,9 @@ checksum = "490cc448043f947bae3cbee9c203358d62dbee0db12107a74be5c30ccfd09771" [[package]] name = "memchr" -version = "2.7.4" +version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" +checksum = "6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d" [[package]] name = "miette" @@ -3196,9 +3489,9 @@ version = "5.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49e7bc1560b95a3c4a25d03de42fe76ca718ab92d1a22a55b9b4cf67b3ae635c" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] @@ -3209,9 +3502,9 @@ checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "mime_guess" -version = "2.0.5" +version = "2.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7c44f8e672c00fe5308fa235f821cb4198414e1c77935c1ab6948d3fd78550e" +checksum = "4192263c238a5f0d0c6bfd21f336a313a4ce1c450542449ca191bb657b4642ef" dependencies = [ "mime", "unicase", @@ -3225,23 +3518,22 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.7.4" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8a240ddb74feaf34a79a7add65a741f3167852fba007066dcac1ca548d89c08" +checksum = "87dfd01fe195c66b572b37921ad8803d010623c0aca821bea2302239d155cdae" dependencies = [ "adler", ] [[package]] name = "mio" -version = "1.0.1" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4569e456d394deccd22ce1c1913e6ea0e54519f577285001215d33557431afe4" +checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" dependencies = [ - "hermit-abi 0.3.9", "libc", "wasi", - "windows-sys 0.52.0", + "windows-sys 0.48.0", ] [[package]] @@ -3292,7 +3584,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c168194d373b1e134786274020dae7fc5513d565ea2ebb9bc9ff17ffb69106d4" dependencies = [ "either", - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", "serde", 
"syn 1.0.109", @@ -3306,7 +3598,7 @@ checksum = "ab2156c4fce2f8df6c499cc1c763e4394b7482525bf2a9701c9d79d215f519e4" dependencies = [ "bitflags 2.6.0", "cfg-if", - "cfg_aliases 0.1.1", + "cfg_aliases", "libc", ] @@ -3342,7 +3634,7 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "35bd024e8b2ff75562e5f34e7f4905839deb4b22955ef5e73d2fea1b9813cb23" dependencies = [ - "num-bigint 0.4.6", + "num-bigint 0.4.5", "num-complex", "num-integer", "num-iter", @@ -3363,9 +3655,9 @@ dependencies = [ [[package]] name = "num-bigint" -version = "0.4.6" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" +checksum = "c165a9ab64cf766f73521c0dd2cfdff64f488b8f0b3e621face3462d3db536d7" dependencies = [ "num-integer", "num-traits", @@ -3452,7 +3744,7 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f83d14da390562dca69fc84082e73e548e1ad308d24accdedd2720017cb37824" dependencies = [ - "num-bigint 0.4.6", + "num-bigint 0.4.5", "num-integer", "num-traits", "serde", @@ -3503,9 +3795,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96667db765a921f7b295ffee8b60472b686a51d4f21c2ee4ffdb94c7013b65a6" dependencies = [ "proc-macro-crate 1.3.1", - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] @@ -3515,9 +3807,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "681030a937600a36906c185595136d26abfebb4aa9c65701cefcaf8578bb982b" dependencies = [ "proc-macro-crate 3.1.0", - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] @@ -3528,9 +3820,9 @@ checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" [[package]] name = "object" -version = "0.36.2" +version = "0.35.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f203fa8daa7bb185f760ae12bd8e097f63d17041dcdcaf675ac54cdf863170e" +checksum = "b8ec7ab813848ba4522158d5517a6093db1ded27575b070f4177b8d12b41db5e" dependencies = [ "memchr", ] @@ -3568,9 +3860,9 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] @@ -3729,9 +4021,9 @@ dependencies = [ [[package]] name = "parity-scale-codec" -version = "3.6.12" +version = "3.6.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "306800abfa29c7f16596b5970a588435e3d5b3149683d00c12b699cc19f895ee" +checksum = "a1b5927e4a9ae8d6cdb6a69e4e04a0ec73381a358e21b8a576f44769f34e7c24" dependencies = [ "arrayvec 0.7.4", "bitvec", @@ -3743,12 +4035,12 @@ dependencies = [ [[package]] name = "parity-scale-codec-derive" -version = "3.6.12" +version = "3.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d830939c76d294956402033aee57a6da7b438f2294eb94864c37b0569053a42c" +checksum = "be30eaf4b0a9fba5336683b38de57bb86d179a35862ba6bfcf57625d006bde5b" dependencies = [ - "proc-macro-crate 3.1.0", - "proc-macro2 1.0.86", + "proc-macro-crate 2.0.0", + "proc-macro2 1.0.85", "quote 1.0.36", "syn 1.0.109", ] @@ -3777,9 +4069,9 @@ checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", - 
"redox_syscall 0.5.3", + "redox_syscall 0.5.1", "smallvec", - "windows-targets 0.52.6", + "windows-targets 0.52.5", ] [[package]] @@ -3788,12 +4080,6 @@ version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" -[[package]] -name = "peeking_take_while" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" - [[package]] name = "pem" version = "3.0.4" @@ -3819,6 +4105,51 @@ version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" +[[package]] +name = "pest" +version = "2.7.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd53dff83f26735fdc1ca837098ccf133605d794cdae66acfc2bfac3ec809d95" +dependencies = [ + "memchr", + "thiserror", + "ucd-trie", +] + +[[package]] +name = "pest_derive" +version = "2.7.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a548d2beca6773b1c244554d36fcf8548a8a58e74156968211567250e48e49a" +dependencies = [ + "pest", + "pest_generator", +] + +[[package]] +name = "pest_generator" +version = "2.7.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c93a82e8d145725dcbaf44e5ea887c8a869efdcc28706df2d08c69e17077183" +dependencies = [ + "pest", + "pest_meta", + "proc-macro2 1.0.85", + "quote 1.0.36", + "syn 2.0.66", +] + +[[package]] +name = "pest_meta" +version = "2.7.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a941429fea7e08bedec25e4f6785b6ffaacc6b755da98df5ef3e7dcf4a124c4f" +dependencies = [ + "once_cell", + "pest", + "sha2 0.10.8", +] + [[package]] name = "petgraph" version = "0.6.5" @@ -3844,9 +4175,9 @@ version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] @@ -3953,8 +4284,8 @@ version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f12335488a2f3b0a83b14edad48dca9879ce89b2edd10e80237e4e852dd645e" dependencies = [ - "proc-macro2 1.0.86", - "syn 2.0.72", + "proc-macro2 1.0.85", + "syn 2.0.66", ] [[package]] @@ -3989,6 +4320,15 @@ dependencies = [ "toml_edit 0.19.15", ] +[[package]] +name = "proc-macro-crate" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e8366a6159044a37876a2b9817124296703c586a5c92e2c53751fa06d8d43e8" +dependencies = [ + "toml_edit 0.20.2", +] + [[package]] name = "proc-macro-crate" version = "3.1.0" @@ -4005,7 +4345,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", "syn 1.0.109", "version_check", @@ -4017,7 +4357,7 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", "version_check", ] @@ -4039,18 +4379,18 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.86" +version = "1.0.85" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" +checksum = "22244ce15aa966053a896d1accb3a6e68469b97c7f33f284b99f0d576879fc23" dependencies = [ "unicode-ident", ] [[package]] name = "prometheus-client" -version = "0.22.3" +version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "504ee9ff529add891127c4827eb481bd69dc0ebc72e9a682e187db4caa60c3ca" +checksum = "c1ca959da22a332509f2a73ae9e5f23f9dcfc31fd3a54d71f159495bd5909baa" dependencies = [ "dtoa", "itoa", @@ -4064,16 +4404,16 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] name = "proptest" -version = "1.5.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4c2511913b88df1637da85cc8d96ec8e43a3f8bb8ccb71ee1ac240d6f3df58d" +checksum = "31b476131c3c86cb68032fdc5cb6d5a1045e3e42d96b69fa599fd77701e1f5bf" dependencies = [ "bit-set", "bit-vec", @@ -4083,7 +4423,7 @@ dependencies = [ "rand 0.8.5", "rand_chacha", "rand_xorshift", - "regex-syntax 0.8.4", + "regex-syntax 0.8.3", "rusty-fork", "tempfile", "unarray", @@ -4126,7 +4466,7 @@ dependencies = [ "prost 0.12.6", "prost-types", "regex", - "syn 2.0.72", + "syn 2.0.66", "tempfile", ] @@ -4138,9 +4478,9 @@ checksum = "81bddcdb20abf9501610992b6759a4c888aef7d1a7247ef75e2404275ac24af1" dependencies = [ "anyhow", "itertools 0.12.1", - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] @@ -4151,9 +4491,9 @@ checksum = "18bec9b0adc4eba778b33684b7ba3e7137789434769ee3ce3930463ef904cfca" dependencies = [ "anyhow", "itertools 0.13.0", - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] @@ -4217,7 +4557,7 @@ dependencies = [ "bincode", "chrono", "circuit_definitions", - "clap 4.5.11", + "clap 4.5.4", "colored", "dialoguer", "hex", @@ -4263,7 +4603,7 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16b845dbfca988fa33db069c0e230574d15a3088f147a87b64c7589eb662c9ac" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", "syn 1.0.109", ] @@ -4274,6 +4614,12 @@ version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" +[[package]] +name = "quick-error" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a993555f31e5a609f617c12db6250dedcac1b0a85076912c436e6fc9b2c8e6a3" + [[package]] name = "quick-protobuf" version = "0.8.1" @@ -4298,7 +4644,7 @@ version = "1.0.36" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", ] [[package]] @@ -4414,9 +4760,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.3" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a908a6e00f1fdd0dfd9c0eb08ce85126f6d8bbda50017e74bc4a4b7d4a926a4" +checksum = "469052894dcb553421e483e4209ee581a45100d31b4018de03e5a7ad86374a7e" dependencies = [ "bitflags 2.6.0", ] @@ -4429,8 +4775,8 
@@ checksum = "4219d74c6b67a3654a9fbebc4b419e22126d13d2f3c4a07ee0cb61ff79a79619" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.7", - "regex-syntax 0.8.4", + "regex-automata 0.4.6", + "regex-syntax 0.8.3", ] [[package]] @@ -4444,13 +4790,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.7" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df" +checksum = "86b83b8b9847f9bf95ef68afb0b8e6cdb80f498442f5179a29fad448fcc1eaea" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.4", + "regex-syntax 0.8.3", ] [[package]] @@ -4461,9 +4807,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.8.4" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b" +checksum = "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56" [[package]] name = "rend" @@ -4488,7 +4834,7 @@ dependencies = [ "h2 0.3.26", "http 0.2.12", "http-body 0.4.6", - "hyper 0.14.30", + "hyper 0.14.29", "hyper-tls 0.5.0", "ipnet", "js-sys", @@ -4528,9 +4874,9 @@ dependencies = [ "futures-util", "h2 0.4.5", "http 1.1.0", - "http-body 1.0.1", + "http-body 1.0.0", "http-body-util", - "hyper 1.4.1", + "hyper 1.3.1", "hyper-rustls", "hyper-tls 0.6.0", "hyper-util", @@ -4661,7 +5007,7 @@ version = "0.7.44" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a7dddfff8de25e6f62b9d64e6e432bf1c6736c57d20323e15ee10435fbda7c65" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", "syn 1.0.109", ] @@ -4770,9 +5116,9 @@ dependencies = [ [[package]] name = "rustls-native-certs" -version = "0.7.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a88d6d420651b496bdd98684116959239430022a115c1240e6c3993be0b15fba" +checksum = "8f1fb85efa936c42c6d5fc28d2629bb51e4b2f4b8a5211e297d599cc5a093792" dependencies = [ "openssl-probe", "rustls-pemfile 2.1.2", @@ -4835,9 +5181,9 @@ checksum = "84e217e7fdc8466b5b35d30f8c0a30febd29173df4a3a0c2115d306b9c4117ad" [[package]] name = "rustls-webpki" -version = "0.102.6" +version = "0.102.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e6b52d4fda176fd835fdc55a835d4a89b8499cad995885a21149d5ad62f852e" +checksum = "84678086bd54edf2b415183ed7a94d0efb049f1b646a33e22a36f3794be6ae56" dependencies = [ "aws-lc-rs", "ring", @@ -4858,7 +5204,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cb3dcc6e454c328bb824492db107ab7c0ae8fcffe4ad210136ef014458c1bc4f" dependencies = [ "fnv", - "quick-error", + "quick-error 1.2.3", "tempfile", "wait-timeout", ] @@ -4887,6 +5233,30 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "schemars" +version = "0.8.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09c024468a378b7e36765cd36702b7a90cc3cba11654f6685c8f233408e89e92" +dependencies = [ + "dyn-clone", + "schemars_derive", + "serde", + "serde_json", +] + +[[package]] +name = "schemars_derive" +version = "0.8.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1eee588578aff73f856ab961cd2f79e36bc45d7ded33a7562adba4667aecc0e" +dependencies = [ + "proc-macro2 1.0.85", + "quote 1.0.36", + "serde_derive_internals", + "syn 2.0.66", +] + [[package]] name = 
"scopeguard" version = "1.2.0" @@ -4951,28 +5321,29 @@ version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9bd1c54ea06cfd2f6b63219704de0b9b4f72dcc2b8fdef820be6cd799780e91e" dependencies = [ + "serde", "zeroize", ] [[package]] name = "security-framework" -version = "2.11.1" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" +checksum = "c627723fd09706bacdb5cf41499e95098555af3c3c29d014dc3c458ef6be11c0" dependencies = [ "bitflags 2.6.0", "core-foundation", "core-foundation-sys", "libc", - "num-bigint 0.4.6", + "num-bigint 0.4.5", "security-framework-sys", ] [[package]] name = "security-framework-sys" -version = "2.11.1" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75da29fe9b9b08fe9d6b22b5b4bcbc75d8db3aa31e639aa56bb62e9d46bfceaf" +checksum = "317936bbbd05227752583946b9e66d7ce3b489f84e11a94a510b4437fef407d7" dependencies = [ "core-foundation-sys", "libc", @@ -5129,16 +5500,27 @@ version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", + "quote 1.0.36", + "syn 2.0.66", +] + +[[package]] +name = "serde_derive_internals" +version = "0.29.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18d26a20a969b9e3fdf2fc2d9f21eda6c40e2de84c9408bb5d3b05d499aae711" +dependencies = [ + "proc-macro2 1.0.85", "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] name = "serde_json" -version = "1.0.121" +version = "1.0.128" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ab380d7d9f22ef3f21ad3e6c1ebe8e4fc7a2000ccba2e4d71fc96f15b2cb609" +checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" dependencies = [ "itoa", "memchr", @@ -5186,8 +5568,8 @@ version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" dependencies = [ - "darling", - "proc-macro2 1.0.86", + "darling 0.13.4", + "proc-macro2 1.0.85", "quote 1.0.36", "syn 1.0.109", ] @@ -5300,9 +5682,9 @@ checksum = "24188a676b6ae68c3b2cb3a01be17fbf7240ce009799bb56d5b1409051e78fde" [[package]] name = "shivini" -version = "0.150.9" +version = "0.151.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebb6d928451f0779f14da02ee9d51d4bde560328edc6471f0d5c5c11954345c4" +checksum = "92776ca824f49c255a7417939706d759e0fd3dd4217420d01da68beae04f0bd6" dependencies = [ "bincode", "blake2 0.10.6", @@ -5366,7 +5748,7 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "adc4e5204eb1910f40f9cfa375f6f05b68c3abac4b6fd879c8ff5e7ae8a0a085" dependencies = [ - "num-bigint 0.4.6", + "num-bigint 0.4.5", "num-traits", "thiserror", "time", @@ -5463,10 +5845,11 @@ checksum = "c85070f382340e8b23a75808e83573ddf65f9ad9143df9573ca37c1ed2ee956a" [[package]] name = "sqlformat" -version = "0.2.4" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f895e3734318cc55f1fe66258926c9b910c124d47520339efecbb6c59cec7c1f" +checksum = "ce81b7bd7c4493975347ef60d8c7e8b742d4694f4c49f93e0a12ea263938176c" dependencies = [ + "itertools 0.12.1", "nom", "unicode_categories", ] @@ -5534,11 +5917,11 @@ version = "0.8.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "2f998a9defdbd48ed005a89362bd40dd2117502f15294f61c8d47034107dbbdc" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", "sqlx-core", "sqlx-macros-core", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] @@ -5552,7 +5935,7 @@ dependencies = [ "heck 0.5.0", "hex", "once_cell", - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", "serde", "serde_json", @@ -5561,7 +5944,7 @@ dependencies = [ "sqlx-mysql", "sqlx-postgres", "sqlx-sqlite", - "syn 2.0.72", + "syn 2.0.66", "tempfile", "tokio", "url", @@ -5640,7 +6023,7 @@ dependencies = [ "log", "md-5", "memchr", - "num-bigint 0.4.6", + "num-bigint 0.4.5", "once_cell", "rand 0.8.5", "rust_decimal", @@ -5739,7 +6122,7 @@ checksum = "dcb5ae327f9cc13b68763b5749770cb9e048a99bd9dfdfa58d0cf05d5f64afe0" dependencies = [ "heck 0.3.3", "proc-macro-error", - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", "syn 1.0.109", ] @@ -5760,17 +6143,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" dependencies = [ "heck 0.5.0", - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", "rustversion", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] name = "subtle" -version = "2.6.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" +checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" [[package]] name = "syn" @@ -5789,18 +6172,18 @@ version = "1.0.109" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", "unicode-ident", ] [[package]] name = "syn" -version = "2.0.72" +version = "2.0.66" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc4b9b9bf2add8093d3f2c0204471e951b2285580335de42f9d2534f3ae7a8af" +checksum = "c42f3f41a2de00b01c0aaad383c5a45241efc8b2d1eda5661812fda5f3cdcff5" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", "unicode-ident", ] @@ -5812,9 +6195,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1329189c02ff984e9736652b1631330da25eaa6bc639089ed4915d25446cbe7b" dependencies = [ "proc-macro-error", - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] @@ -5889,7 +6272,7 @@ version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3dffced63c2b5c7be278154d76b479f9f9920ed34e7574201407f0b14e2bbb93" dependencies = [ - "env_logger 0.11.5", + "env_logger 0.11.3", "test-log-macros", "tracing-subscriber", ] @@ -5900,9 +6283,9 @@ version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5999e24eaa32083191ba4e425deb75cdf25efefabe5aaccb7446dd0d4122a3f5" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] @@ -5916,22 +6299,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.63" +version = "1.0.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724" +checksum = "c546c80d6be4bc6a00c0f01730c08df82eaa7a7a61f11d656526506112cc1709" dependencies = [ 
"thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.63" +version = "1.0.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" +checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] @@ -6004,9 +6387,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.8.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "445e881f4f6d382d5f27c034e25eb92edd7c784ceab92a0937db7f2e9471b938" +checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" dependencies = [ "tinyvec_macros", ] @@ -6019,31 +6402,32 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.39.2" +version = "1.38.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "daa4fb1bc778bd6f04cbfc4bb2d06a7396a8f299dc33ea1900cedaa316f467b1" +checksum = "ba4f4a02a7a80d6f274636f0aa95c7e383b912d41fe721a31f29e29698585a4a" dependencies = [ "backtrace", "bytes", "libc", "mio", + "num_cpus", "parking_lot", "pin-project-lite", "signal-hook-registry", "socket2", "tokio-macros", - "windows-sys 0.52.0", + "windows-sys 0.48.0", ] [[package]] name = "tokio-macros" -version = "2.4.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" +checksum = "5f5ae998a069d4b5aba8ee9dad856af7d520c3699e6159b185c2acd48155d39a" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] @@ -6089,14 +6473,15 @@ dependencies = [ "futures-io", "futures-sink", "pin-project-lite", + "slab", "tokio", ] [[package]] name = "toml_datetime" -version = "0.6.7" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8fb9f64314842840f1d940ac544da178732128f1c78c21772e876579e0da1db" +checksum = "4badfd56924ae69bcc9039335b2e017639ce3f9b001c393c1b2d1ef846ce2cbf" [[package]] name = "toml_edit" @@ -6120,6 +6505,17 @@ dependencies = [ "winnow", ] +[[package]] +name = "toml_edit" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "396e4d48bbb2b7554c944bde63101b5ae446cff6ec4a24227428f15eb72ef338" +dependencies = [ + "indexmap 2.2.6", + "toml_datetime", + "winnow", +] + [[package]] name = "toml_edit" version = "0.21.1" @@ -6144,9 +6540,9 @@ dependencies = [ "bytes", "h2 0.4.5", "http 1.1.0", - "http-body 1.0.1", + "http-body 1.0.0", "http-body-util", - "hyper 1.4.1", + "hyper 1.3.1", "hyper-timeout", "hyper-util", "percent-encoding", @@ -6181,6 +6577,25 @@ dependencies = [ "tracing", ] +[[package]] +name = "tower-http" +version = "0.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5" +dependencies = [ + "base64 0.21.7", + "bitflags 2.6.0", + "bytes", + "http 1.1.0", + "http-body 1.0.0", + "http-body-util", + "mime", + "pin-project-lite", + "tower-layer", + "tower-service", + "tracing", +] + [[package]] name = "tower-layer" version = "0.3.2" @@ -6211,9 +6626,9 @@ version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] @@ -6287,6 +6702,27 @@ dependencies = [ "tracing-serde", ] +[[package]] +name = "tracing-test" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "557b891436fe0d5e0e363427fc7f217abf9ccd510d5136549847bdcbcd011d68" +dependencies = [ + "tracing-core", + "tracing-subscriber", + "tracing-test-macro", +] + +[[package]] +name = "tracing-test-macro" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "04659ddb06c87d233c566112c1c9c5b9e98256d9af50ec3bc9c8327f873a7568" +dependencies = [ + "quote 1.0.36", + "syn 2.0.66", +] + [[package]] name = "try-lock" version = "0.2.5" @@ -6305,6 +6741,12 @@ version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" +[[package]] +name = "ucd-trie" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed646292ffc8188ef8ea4d1e0e0150fb15a5c2e12ad9b8fc191ae7a8a7f3c4b9" + [[package]] name = "uint" version = "0.9.5" @@ -6422,9 +6864,9 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "ureq" -version = "2.10.0" +version = "2.9.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72139d247e5f97a3eff96229a7ae85ead5328a39efe76f8bf5a06313d505b6ea" +checksum = "d11a831e3c0b56e438a28308e7c810799e3c118417f342d30ecec080105395cd" dependencies = [ "base64 0.22.1", "log", @@ -6453,15 +6895,15 @@ checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da" [[package]] name = "utf8parse" -version = "0.2.2" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" +checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" [[package]] name = "uuid" -version = "1.10.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81dfa00651efa65069b0b6b651f4aaa31ba9e3c3ce0137aaad053604ee7e0314" +checksum = "a183cf7feeba97b4dd1c0d46788634f6221d87fa961b305bed08c851829efcc0" dependencies = [ "serde", ] @@ -6486,9 +6928,9 @@ checksum = "f1bddf1187be692e79c5ffeab891132dfb0f236ed36a43c7ed39f1165ee20191" [[package]] name = "version_check" -version = "0.9.5" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "vise" @@ -6510,7 +6952,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "671d3b894d5d0849f0a597f56bf071f42d4f2a1cbcf2f78ca21f870ab7c0cc2b" dependencies = [ - "hyper 0.14.30", + "hyper 0.14.29", "once_cell", "tokio", "tracing", @@ -6523,9 +6965,9 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a511871dc5de990a3b2a0e715facfbc5da848c0c0395597a1415029fb7c250a" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] @@ -6587,9 +7029,9 @@ dependencies = [ "bumpalo", "log", "once_cell", - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 
1.0.36", - "syn 2.0.72", + "syn 2.0.66", "wasm-bindgen-shared", ] @@ -6621,9 +7063,9 @@ version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -6669,9 +7111,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.26.3" +version = "0.26.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd7c23921eeb1713a4e851530e9b9756e4fb0e89978582942612524cf09f01cd" +checksum = "3c452ad30530b54a4d8e71952716a212b08efd0f3562baa66c29a618b07da7c3" dependencies = [ "rustls-pki-types", ] @@ -6735,7 +7177,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-targets 0.52.6", + "windows-targets 0.52.5", ] [[package]] @@ -6753,7 +7195,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.6", + "windows-targets 0.52.5", ] [[package]] @@ -6773,18 +7215,18 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb" dependencies = [ - "windows_aarch64_gnullvm 0.52.6", - "windows_aarch64_msvc 0.52.6", - "windows_i686_gnu 0.52.6", + "windows_aarch64_gnullvm 0.52.5", + "windows_aarch64_msvc 0.52.5", + "windows_i686_gnu 0.52.5", "windows_i686_gnullvm", - "windows_i686_msvc 0.52.6", - "windows_x86_64_gnu 0.52.6", - "windows_x86_64_gnullvm 0.52.6", - "windows_x86_64_msvc 0.52.6", + "windows_i686_msvc 0.52.5", + "windows_x86_64_gnu 0.52.5", + "windows_x86_64_gnullvm 0.52.5", + "windows_x86_64_msvc 0.52.5", ] [[package]] @@ -6795,9 +7237,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" +checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263" [[package]] name = "windows_aarch64_msvc" @@ -6807,9 +7249,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" +checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6" [[package]] name = "windows_i686_gnu" @@ -6819,15 +7261,15 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" +checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670" [[package]] name = "windows_i686_gnullvm" -version = "0.52.6" +version = "0.52.5" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" +checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9" [[package]] name = "windows_i686_msvc" @@ -6837,9 +7279,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" +checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf" [[package]] name = "windows_x86_64_gnu" @@ -6849,9 +7291,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" +checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9" [[package]] name = "windows_x86_64_gnullvm" @@ -6861,9 +7303,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" +checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596" [[package]] name = "windows_x86_64_msvc" @@ -6873,9 +7315,9 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.52.6" +version = "0.52.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" +checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" [[package]] name = "winnow" @@ -6923,22 +7365,22 @@ checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" [[package]] name = "zerocopy" -version = "0.7.35" +version = "0.7.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" +checksum = "ae87e3fcd617500e5d106f0380cf7b77f3c6092aae37191433159dda23cfb087" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.35" +version = "0.7.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" +checksum = "15e934569e47891f7d9411f1a451d947a60e000ab3bd24fbb970f000387d1b3b" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] @@ -6956,9 +7398,9 @@ version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] @@ -7029,9 +7471,9 @@ dependencies = [ [[package]] name = "zk_evm" -version = "0.150.5" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a6e69931f24db5cf333b714721e8d80ff88bfdb7da8c3dc7882612ffddb8d27" +checksum = "c14bda6c101389145cd01fac900f1392876bc0284d98faf7f376237baa2cb19d" dependencies = [ 
"anyhow", "lazy_static", @@ -7039,7 +7481,7 @@ dependencies = [ "serde", "serde_json", "static_assertions", - "zk_evm_abstractions 0.150.5", + "zk_evm_abstractions 0.150.6", ] [[package]] @@ -7070,35 +7512,35 @@ dependencies = [ [[package]] name = "zk_evm_abstractions" -version = "0.150.5" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93d6b0720261ab55490fe3a96e96de30d5d7b277940b52ea7f52dbf564eb1748" +checksum = "a008f2442fc6a508bdd1f902380242cb6ff11b8b27acdac2677c6d9f75cbb004" dependencies = [ "anyhow", "num_enum 0.6.1", "serde", "static_assertions", - "zkevm_opcode_defs 0.150.5", + "zkevm_opcode_defs 0.150.6", ] [[package]] name = "zkevm-assembly" -version = "0.150.5" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e99106038062537c05b4e6e7754d1bbba28ba16185a3e5ee5ad22e2f8be883bb" +checksum = "5dc743ac7b0d618536dc3ace798fd4b8af78b057884afda5785c7970e15d62d0" dependencies = [ "env_logger 0.9.3", "hex", "lazy_static", "log", "nom", - "num-bigint 0.4.6", + "num-bigint 0.4.5", "num-traits", "sha3 0.10.8", "smallvec", "structopt", "thiserror", - "zkevm_opcode_defs 0.150.5", + "zkevm_opcode_defs 0.150.6", ] [[package]] @@ -7147,9 +7589,9 @@ dependencies = [ [[package]] name = "zkevm_circuits" -version = "0.150.5" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "784fa7cfb51e17c5ced112bca43da30b3468b2347b7af0427ad9638759fb140e" +checksum = "1f68518aedd5358b17224771bb78bacd912cf66011aeda98b1f887cfb9e0972f" dependencies = [ "arrayvec 0.7.4", "boojum", @@ -7161,7 +7603,7 @@ dependencies = [ "seq-macro", "serde", "smallvec", - "zkevm_opcode_defs 0.150.5", + "zkevm_opcode_defs 0.150.6", "zksync_cs_derive", ] @@ -7209,9 +7651,9 @@ dependencies = [ [[package]] name = "zkevm_opcode_defs" -version = "0.150.5" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79055eae1b6c1ab80793ed9d77d2964c9c896afa4b5dfed278cf58cd10acfe8f" +checksum = "762b5f1c1b283c5388995a85d40a05aef1c14f50eb904998b7e9364739f5b899" dependencies = [ "bitflags 2.6.0", "blake2 0.10.6", @@ -7226,13 +7668,13 @@ dependencies = [ [[package]] name = "zkevm_test_harness" -version = "0.150.5" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "550f82d3b7448c35168dc13bfadbccd5fd306097b6e1ea01793151c1c9137a36" +checksum = "73ad3e73d290a38a35dd245fd68cb6f498a8a8da4a52f846e88da3d3c31a34fd" dependencies = [ "bincode", "circuit_definitions", - "circuit_sequencer_api 0.150.5", + "circuit_sequencer_api 0.150.6", "codegen", "crossbeam", "derivative", @@ -7253,11 +7695,10 @@ dependencies = [ [[package]] name = "zksync-gpu-ffi" -version = "0.150.9" +version = "0.151.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86511b3957adfe415ecdbd1ee01c51aa3ca131a607e61ca024976312f613b0f9" +checksum = "d555e24b853359c5b076c52f9ff9e0ed62a7edc8c2f82f93517c524410c21ecb" dependencies = [ - "bindgen 0.59.2", "cmake", "crossbeam", "derivative", @@ -7269,9 +7710,9 @@ dependencies = [ [[package]] name = "zksync-gpu-prover" -version = "0.150.9" +version = "0.151.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e4c00f2db603d1b696bc2e9d822bb4c087050de5b65559067fc2232786cbc93" +checksum = "615dad34e5fe678ec3b3e029af3f19313bebb1b771a8ce963c9ab9a8cc3879d3" dependencies = [ "bit-vec", "cfg-if", @@ -7286,9 +7727,9 @@ dependencies = [ [[package]] name = "zksync-wrapper-prover" 
-version = "0.150.9" +version = "0.151.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d58df1ec10e0d5eb58563bb01abda5ed185c9b9621502e361848ca40eb7868ac" +checksum = "80721b2da2643bd43f664ac65673ee078e6973c0a88d75b73bfaeac8e1bf5432" dependencies = [ "circuit_definitions", "zkevm_test_harness", @@ -7344,7 +7785,7 @@ dependencies = [ "anyhow", "async-trait", "bincode", - "clap 4.5.11", + "clap 4.5.4", "shivini", "tokio", "tokio-util", @@ -7366,9 +7807,9 @@ dependencies = [ [[package]] name = "zksync_concurrency" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4724d51934e475c846ba9e6ed169e25587385188b928a9ecfbbf616092a1c17" +checksum = "035269d811b3770debca372141ab64cad067dce8e58cb39a48cb7617d30c626b" dependencies = [ "anyhow", "once_cell", @@ -7391,8 +7832,12 @@ dependencies = [ "rand 0.8.5", "secrecy", "serde", + "strum", + "strum_macros", + "time", "tracing", "url", + "vise", "zksync_basic_types", "zksync_concurrency", "zksync_consensus_utils", @@ -7402,9 +7847,9 @@ dependencies = [ [[package]] name = "zksync_consensus_crypto" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7760e7a140f16f0435fbf2ad9a4b09feaad74568d05b553751d222f4803a42e" +checksum = "49e38d1b5ed28c66e785caff53ea4863375555d818aafa03290397192dd3e665" dependencies = [ "anyhow", "blst", @@ -7412,7 +7857,7 @@ dependencies = [ "elliptic-curve 0.13.8", "hex", "k256 0.13.3", - "num-bigint 0.4.6", + "num-bigint 0.4.5", "num-traits", "rand 0.8.5", "sha3 0.10.8", @@ -7423,14 +7868,14 @@ dependencies = [ [[package]] name = "zksync_consensus_roles" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96f903187836210602beba27655e111e22efb229ef90bd2a95a3d6799b31685c" +checksum = "e49fbd4e69b276058f3dfc06cf6ada0e8caa6ed826e81289e4d596da95a0f17a" dependencies = [ "anyhow", "bit-vec", "hex", - "num-bigint 0.4.6", + "num-bigint 0.4.5", "prost 0.12.6", "rand 0.8.5", "serde", @@ -7445,9 +7890,9 @@ dependencies = [ [[package]] name = "zksync_consensus_storage" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff43cfd03ea205c763e74362dc6ec5a4d74b6b1baef0fb134dde92a8880397f7" +checksum = "b2b2aab4ed18b13cd584f4edcc2546c8da82f89ac62e525063e12935ff28c9be" dependencies = [ "anyhow", "async-trait", @@ -7465,9 +7910,9 @@ dependencies = [ [[package]] name = "zksync_consensus_utils" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1020308512c01ab80327fb874b5b61c6fd513a6b26c8a5fce3e077600da04e4b" +checksum = "10bac8f471b182d4fa3d40cf158aac3624fe636a1ff0b4cf3fe26a0e20c68a42" dependencies = [ "anyhow", "rand 0.8.5", @@ -7526,7 +7971,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5939e2df4288c263c706ff18ac718e984149223ad4289d6d957d767dcfc04c81" dependencies = [ "proc-macro-error", - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", "syn 1.0.109", ] @@ -7552,10 +7997,13 @@ dependencies = [ "tracing", "vise", "zksync_concurrency", + "zksync_consensus_crypto", "zksync_consensus_roles", "zksync_consensus_storage", + "zksync_consensus_utils", "zksync_contracts", "zksync_db_connection", + "zksync_l1_contract_interface", "zksync_protobuf", "zksync_protobuf_build", "zksync_system_constants", @@ -7615,7 +8063,8 @@ dependencies = [ "async-trait", "rlp", 
"thiserror", - "zksync_types", + "zksync_basic_types", + "zksync_crypto_primitives", ] [[package]] @@ -7637,10 +8086,10 @@ version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f91e58e75d65877f09f83bc3dca8f054847ae7ec4f3e64bfa610a557edd8e8e" dependencies = [ - "num-bigint 0.4.6", + "num-bigint 0.4.5", "num-integer", "num-traits", - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "quote 1.0.36", "serde", "syn 1.0.109", @@ -7648,9 +8097,9 @@ dependencies = [ [[package]] name = "zksync_kzg" -version = "0.150.5" +version = "0.150.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edb8a9c76c172a6d639855ee342b9a670e3ba472f5ae302f771b1c3ee777dc88" +checksum = "6c006b6b7a27cc50ff0c515b6d0b197dbb907bbf65d1d2ea42fc3ed21b315642" dependencies = [ "boojum", "derivative", @@ -7660,7 +8109,23 @@ dependencies = [ "serde", "serde_json", "serde_with", - "zkevm_circuits 0.150.5", + "zkevm_circuits 0.150.6", +] + +[[package]] +name = "zksync_l1_contract_interface" +version = "0.1.0" +dependencies = [ + "anyhow", + "hex", + "once_cell", + "sha2 0.10.8", + "sha3 0.10.8", + "zksync_kzg", + "zksync_prover_interface", + "zksync_solidity_vk_codegen", + "zksync_system_constants", + "zksync_types", ] [[package]] @@ -7682,7 +8147,8 @@ dependencies = [ "circuit_sequencer_api 0.140.3", "circuit_sequencer_api 0.141.2", "circuit_sequencer_api 0.142.2", - "circuit_sequencer_api 0.150.5", + "circuit_sequencer_api 0.150.6", + "ethabi", "hex", "itertools 0.10.5", "once_cell", @@ -7693,7 +8159,7 @@ dependencies = [ "zk_evm 0.133.0", "zk_evm 0.140.0", "zk_evm 0.141.0", - "zk_evm 0.150.5", + "zk_evm 0.150.6", "zksync_contracts", "zksync_mini_merkle_tree", "zksync_system_constants", @@ -7746,8 +8212,8 @@ dependencies = [ "anyhow", "async-trait", "bincode", - "circuit_sequencer_api 0.150.5", - "clap 4.5.11", + "circuit_sequencer_api 0.150.6", + "clap 4.5.4", "ctrlc", "futures 0.3.30", "reqwest 0.12.5", @@ -7775,9 +8241,9 @@ dependencies = [ [[package]] name = "zksync_protobuf" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d2d9ce9b9697daae6023c8da5cfe8764690a9d9c91ff32b8e1e54a7c8301fb3" +checksum = "abd55c64f54cb10967a435422f66ff5880ae14a232b245517c7ce38da32e0cab" dependencies = [ "anyhow", "bit-vec", @@ -7796,19 +8262,19 @@ dependencies = [ [[package]] name = "zksync_protobuf_build" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "903c23a12e160a703f9b68d0dd961daa24156af912ca1bc9efb74969f3acc645" +checksum = "4121952bcaf711005dd554612fc6e2de9b30cb58088508df87f1d38046ce8ac8" dependencies = [ "anyhow", "heck 0.5.0", "prettyplease", - "proc-macro2 1.0.86", + "proc-macro2 1.0.85", "prost-build", "prost-reflect", "protox", "quote 1.0.36", - "syn 2.0.72", + "syn 2.0.66", ] [[package]] @@ -7822,6 +8288,7 @@ dependencies = [ "secrecy", "serde_json", "serde_yaml", + "time", "tracing", "zksync_basic_types", "zksync_config", @@ -7830,6 +8297,45 @@ dependencies = [ "zksync_types", ] +[[package]] +name = "zksync_prover_autoscaler" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-trait", + "axum", + "chrono", + "clap 4.5.4", + "ctrlc", + "debug-map-sorted", + "futures 0.3.30", + "k8s-openapi", + "kube", + "once_cell", + "regex", + "reqwest 0.12.5", + "ring", + "rustls", + "serde", + "serde_json", + "structopt", + "strum", + "time", + "tokio", + "tracing", + "tracing-subscriber", + "tracing-test", + "url", + "vise", + 
"zksync_config", + "zksync_core_leftovers", + "zksync_protobuf_config", + "zksync_prover_job_monitor", + "zksync_types", + "zksync_utils", + "zksync_vlog", +] + [[package]] name = "zksync_prover_dal" version = "0.1.0" @@ -7847,7 +8353,7 @@ dependencies = [ "anyhow", "async-trait", "circuit_definitions", - "clap 4.5.11", + "clap 4.5.4", "ctrlc", "futures 0.3.30", "local-ip-address", @@ -7880,7 +8386,7 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", - "clap 4.5.11", + "clap 4.5.4", "ctrlc", "futures 0.3.30", "log", @@ -7933,7 +8439,7 @@ name = "zksync_prover_interface" version = "0.1.0" dependencies = [ "chrono", - "circuit_sequencer_api 0.150.5", + "circuit_sequencer_api 0.150.6", "serde", "serde_with", "strum", @@ -7949,7 +8455,7 @@ dependencies = [ "anyhow", "async-trait", "axum", - "clap 4.5.11", + "clap 4.5.4", "ctrlc", "serde", "tokio", @@ -7999,6 +8505,23 @@ dependencies = [ "zksync_utils", ] +[[package]] +name = "zksync_solidity_vk_codegen" +version = "0.30.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b310ab8a21681270e73f177ddf7974cabb7a96f0624ab8b008fd6ee1f9b4f687" +dependencies = [ + "ethereum-types", + "franklin-crypto", + "handlebars", + "hex", + "paste", + "rescue_poseidon", + "serde", + "serde_derive", + "serde_json", +] + [[package]] name = "zksync_system_constants" version = "0.1.0" @@ -8025,7 +8548,6 @@ dependencies = [ "once_cell", "prost 0.12.6", "rlp", - "secp256k1", "serde", "serde_json", "serde_with", @@ -8033,7 +8555,6 @@ dependencies = [ "thiserror", "tracing", "zksync_basic_types", - "zksync_config", "zksync_contracts", "zksync_crypto_primitives", "zksync_mini_merkle_tree", @@ -8070,7 +8591,7 @@ version = "0.1.0" dependencies = [ "anyhow", "circuit_definitions", - "clap 4.5.11", + "clap 4.5.4", "indicatif", "proptest", "toml_edit 0.14.4", @@ -8111,20 +8632,20 @@ dependencies = [ [[package]] name = "zksync_vm2" -version = "0.1.0" -source = "git+https://github.com/matter-labs/vm2.git?rev=74577d9be13b1bff9d1a712389731f669b179e47#74577d9be13b1bff9d1a712389731f669b179e47" +version = "0.2.1" +source = "git+https://github.com/matter-labs/vm2.git?rev=df5bec3d04d64d434f9b0ccb285ba4681008f7b3#df5bec3d04d64d434f9b0ccb285ba4681008f7b3" dependencies = [ "enum_dispatch", "primitive-types", - "zk_evm_abstractions 0.150.5", - "zkevm_opcode_defs 0.150.5", + "zk_evm_abstractions 0.150.6", + "zkevm_opcode_defs 0.150.6", "zksync_vm2_interface", ] [[package]] name = "zksync_vm2_interface" -version = "0.1.0" -source = "git+https://github.com/matter-labs/vm2.git?rev=74577d9be13b1bff9d1a712389731f669b179e47#74577d9be13b1bff9d1a712389731f669b179e47" +version = "0.2.1" +source = "git+https://github.com/matter-labs/vm2.git?rev=df5bec3d04d64d434f9b0ccb285ba4681008f7b3#df5bec3d04d64d434f9b0ccb285ba4681008f7b3" dependencies = [ "primitive-types", ] @@ -8211,7 +8732,7 @@ dependencies = [ "anyhow", "async-trait", "bincode", - "clap 4.5.11", + "clap 4.5.4", "ctrlc", "tokio", "tracing", diff --git a/prover/Cargo.toml b/prover/Cargo.toml index e95bae3d4c1..af022e691c1 100644 --- a/prover/Cargo.toml +++ b/prover/Cargo.toml @@ -1,8 +1,5 @@ [workspace] -members = [ - "crates/bin/*", - "crates/lib/*", -] +members = ["crates/bin/*", "crates/lib/*"] resolver = "2" @@ -19,20 +16,23 @@ categories = ["cryptography"] [workspace.dependencies] # Common dependencies anyhow = "1.0" -axum = "0.7.5" async-trait = "0.1" +axum = "0.7.5" bincode = "1" chrono = "0.4.38" clap = "4.4.6" colored = "2.0" const-decoder = "0.3.0" ctrlc = "3.1" +debug-map-sorted = "0.1.1" 
dialoguer = "0.11" futures = "0.3" hex = "0.4" -itertools = "0.10.5" indicatif = "0.16" +itertools = "0.10.5" jemallocator = "0.5" +k8s-openapi = { version = "0.23.0", features = ["v1_30"] } +kube = { version = "0.95.0", features = ["runtime", "derive"] } local-ip-address = "0.5.0" log = "0.4.20" md5 = "0.7.0" @@ -42,6 +42,8 @@ queues = "1.1.0" rand = "0.8" regex = "1.10.4" reqwest = "0.12" +ring = "0.17.8" +rustls = { version = "0.23.12", features = ["ring"] } serde = "1.0" serde_derive = "1.0" serde_json = "1.0" @@ -50,21 +52,24 @@ sqlx = { version = "0.8.1", default-features = false } structopt = "0.3.26" strum = { version = "0.26" } tempfile = "3" +time = "0.3.36" tokio = "1" tokio-util = "0.7.11" toml_edit = "0.14.4" tracing = "0.1" tracing-subscriber = "0.3" +tracing-test = "0.2.5" +url = "2.5.2" vise = "0.2.0" # Proving dependencies -circuit_definitions = "=0.150.5" -circuit_sequencer_api = "=0.150.5" -zkevm_test_harness = "=0.150.5" +circuit_definitions = "=0.150.6" +circuit_sequencer_api = "=0.150.6" +zkevm_test_harness = "=0.150.6" # GPU proving dependencies -wrapper_prover = { package = "zksync-wrapper-prover", version = "=0.150.9" } -shivini = "=0.150.9" +wrapper_prover = { package = "zksync-wrapper-prover", version = "=0.151.0" } +shivini = "=0.151.0" # Core workspace dependencies zksync_multivm = { path = "../core/lib/multivm", version = "0.1.0" } @@ -84,6 +89,7 @@ zksync_eth_client = { path = "../core/lib/eth_client" } zksync_contracts = { path = "../core/lib/contracts" } zksync_core_leftovers = { path = "../core/lib/zksync_core_leftovers" } zksync_periodic_job = { path = "../core/lib/periodic_job" } +zksync_protobuf_config = { path = "../core/lib/protobuf_config" } # Prover workspace dependencies zksync_prover_dal = { path = "crates/lib/prover_dal" } @@ -91,6 +97,7 @@ zksync_prover_fri_types = { path = "crates/lib/prover_fri_types" } zksync_prover_fri_utils = { path = "crates/lib/prover_fri_utils" } zksync_prover_keystore = { path = "crates/lib/keystore" } zksync_vk_setup_data_generator_server_fri = { path = "crates/bin/vk_setup_data_generator_server_fri" } +zksync_prover_job_monitor = { path = "crates/bin/prover_job_monitor" } # for `perf` profiling [profile.perf] diff --git a/prover/crates/bin/prover_autoscaler/Cargo.toml b/prover/crates/bin/prover_autoscaler/Cargo.toml new file mode 100644 index 00000000000..fbf3ecae909 --- /dev/null +++ b/prover/crates/bin/prover_autoscaler/Cargo.toml @@ -0,0 +1,46 @@ +[package] +name = "zksync_prover_autoscaler" +version.workspace = true +edition.workspace = true +authors.workspace = true +homepage.workspace = true +repository.workspace = true +license.workspace = true +keywords.workspace = true +categories.workspace = true + +[dependencies] +zksync_core_leftovers.workspace = true +zksync_vlog.workspace = true +zksync_utils.workspace = true +zksync_types.workspace = true +zksync_config = { workspace = true, features = ["observability_ext"] } +zksync_prover_job_monitor.workspace = true +zksync_protobuf_config.workspace = true + +debug-map-sorted.workspace = true +anyhow.workspace = true +async-trait.workspace = true +axum.workspace = true +chrono.workspace = true +clap = { workspace = true, features = ["derive"] } +ctrlc = { workspace = true, features = ["termination"] } +futures.workspace = true +k8s-openapi = { workspace = true, features = ["v1_30"] } +kube = { workspace = true, features = ["runtime", "derive"] } +once_cell.workspace = true +regex.workspace = true +reqwest = { workspace = true, features = ["json"] } +ring.workspace = 
true +rustls = { workspace = true, features = ["ring"] } +serde = { workspace = true, features = ["derive"] } +serde_json.workspace = true +structopt.workspace = true +strum.workspace = true +time.workspace = true +tokio = { workspace = true, features = ["time", "macros"] } +tracing-subscriber = { workspace = true, features = ["env-filter"] } +tracing.workspace = true +url.workspace = true +vise.workspace = true +tracing-test.workspace = true diff --git a/prover/crates/bin/prover_autoscaler/src/agent.rs b/prover/crates/bin/prover_autoscaler/src/agent.rs new file mode 100644 index 00000000000..f810bc41672 --- /dev/null +++ b/prover/crates/bin/prover_autoscaler/src/agent.rs @@ -0,0 +1,130 @@ +use std::net::SocketAddr; + +use anyhow::Context as _; +use axum::{ + extract::State, + response::IntoResponse, + routing::{get, post}, + Json, Router, +}; +use futures::future; +use reqwest::StatusCode; +use serde::{Deserialize, Serialize}; +use tokio::sync::watch; + +use crate::{ + cluster_types::Cluster, + k8s::{Scaler, Watcher}, +}; + +struct AppError(anyhow::Error); + +impl IntoResponse for AppError { + fn into_response(self) -> axum::response::Response { + ( + StatusCode::INTERNAL_SERVER_ERROR, + format!("Something went wrong: {}", self.0), + ) + .into_response() + } +} + +pub async fn run_server( + port: u16, + watcher: Watcher, + scaler: Scaler, + mut stop_receiver: watch::Receiver<bool>, +) -> anyhow::Result<()> { + let bind_address = SocketAddr::from(([0, 0, 0, 0], port)); + tracing::debug!("Starting Autoscaler agent on {bind_address}"); + let app = create_agent_router(watcher, scaler); + + let listener = tokio::net::TcpListener::bind(bind_address) + .await + .with_context(|| format!("Failed binding Autoscaler agent to {bind_address}"))?; + axum::serve(listener, app) + .with_graceful_shutdown(async move { + if stop_receiver.changed().await.is_err() { + tracing::warn!( + "Stop signal sender for Autoscaler agent was dropped without sending a signal" + ); + } + tracing::info!("Stop signal received, Autoscaler agent is shutting down"); + }) + .await + .context("Autoscaler agent failed")?; + tracing::info!("Autoscaler agent shut down"); + Ok(()) +} + +fn create_agent_router(watcher: Watcher, scaler: Scaler) -> Router { + let app = App { watcher, scaler }; + Router::new() + .route("/healthz", get(health)) + .route("/cluster", get(get_cluster)) + .route("/scale", post(scale)) + .with_state(app) +} + +// TODO: Use +// https://github.com/matter-labs/zksync-era/blob/9821a20018c367ce246dba656daab5c2e7757973/core/node/api_server/src/healthcheck.rs#L53 +// instead.
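+// NOTE: the handler below only signals liveness; it returns a constant body and does +// not inspect watcher or scaler state. Assuming the agent listens on port 8081 (as in +// the /scale example below), it can be probed with `curl http://localhost:8081/healthz`.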
+async fn health() -> &'static str { + "Ok\n" +} + +#[derive(Clone)] +struct App { + watcher: Watcher, + scaler: Scaler, +} + +async fn get_cluster(State(app): State<App>) -> Result<Json<Cluster>, AppError> { + let cluster = app.watcher.cluster.lock().await.clone(); + Ok(Json(cluster)) +} + +#[derive(Clone, Debug, Default, Serialize, Deserialize)] +pub struct ScaleDeploymentRequest { + pub namespace: String, + pub name: String, + pub size: i32, +} + +#[derive(Clone, Debug, Default, Serialize, Deserialize)] +pub struct ScaleRequest { + pub deployments: Vec<ScaleDeploymentRequest>, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct ScaleResponse { + pub scale_result: Vec<String>, +} + +/// To test or force scaling in a particular cluster, use: +/// $ curl -X POST -H "Content-Type: application/json" --data '{"deployments": [{"namespace": "prover-red", "name": "witness-vector-generator-spec-9-f", "size":0},{"namespace": "prover-red", "name": "witness-vector-generator-spec-9-c", "size":0}]}' <agent-ip>:8081/scale +async fn scale( + State(app): State<App>, + Json(payload): Json<ScaleRequest>, +) -> Result<Json<ScaleResponse>, AppError> { + let handles: Vec<_> = payload + .deployments + .into_iter() + .map(|d| { + let s = app.scaler.clone(); + tokio::spawn(async move { + match s.scale(&d.namespace, &d.name, d.size).await { + Ok(()) => "".to_string(), + Err(err) => err.to_string(), + } + }) + }) + .collect(); + + let scale_result = future::join_all(handles) + .await + .into_iter() + .map(Result::unwrap) + .collect(); + Ok(Json(ScaleResponse { scale_result })) +} diff --git a/prover/crates/bin/prover_autoscaler/src/cluster_types.rs b/prover/crates/bin/prover_autoscaler/src/cluster_types.rs new file mode 100644 index 00000000000..b800b86f3c2 --- /dev/null +++ b/prover/crates/bin/prover_autoscaler/src/cluster_types.rs @@ -0,0 +1,60 @@ +use std::collections::{BTreeMap, HashMap}; + +use chrono::{DateTime, Utc}; +use serde::{Deserialize, Serialize, Serializer}; +use strum::{Display, EnumString}; + +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +pub struct Pod { + // pub name: String, // TODO: Consider if it's needed. + pub owner: String, + pub status: String, + pub changed: DateTime<Utc>, +} +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +pub struct Deployment { + // pub name: String, // TODO: Consider if it's needed. + pub running: i32, + pub desired: i32, +} + +fn ordered_map<S>( + value: &HashMap<String, Deployment>, + serializer: S, +) -> Result<S::Ok, S::Error> +where + S: Serializer, +{ + let ordered: BTreeMap<_, _> = value.iter().collect(); + ordered.serialize(serializer) +} + +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +pub struct Namespace { + #[serde(serialize_with = "ordered_map")] + pub deployments: HashMap<String, Deployment>, + pub pods: HashMap<String, Pod>, +} + +#[derive(Debug, Clone, Default, Serialize, Deserialize)] +pub struct Cluster { + pub name: String, + pub namespaces: HashMap<String, Namespace>, +} + +#[derive(Debug, Default, Clone, Serialize, Deserialize)] +pub struct Clusters { + pub clusters: HashMap<String, Cluster>, + /// Map from cluster to index in agent URLs Vec.
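+ /// (i.e. an index into Watcher::cluster_agents, which stores the agents' base URLs).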
+ pub agent_ids: HashMap<String, usize>, +} + +#[derive(Default, Debug, EnumString, Display, Hash, PartialEq, Eq, Clone, Copy)] +pub enum PodStatus { + #[default] + Unknown, + Running, + Pending, + LongPending, + NeedToMove, +} diff --git a/prover/crates/bin/prover_autoscaler/src/global/mod.rs b/prover/crates/bin/prover_autoscaler/src/global/mod.rs new file mode 100644 index 00000000000..5e4afb93843 --- /dev/null +++ b/prover/crates/bin/prover_autoscaler/src/global/mod.rs @@ -0,0 +1,3 @@ +pub mod queuer; +pub mod scaler; +pub mod watcher; diff --git a/prover/crates/bin/prover_autoscaler/src/global/queuer.rs b/prover/crates/bin/prover_autoscaler/src/global/queuer.rs new file mode 100644 index 00000000000..32610ebf3c3 --- /dev/null +++ b/prover/crates/bin/prover_autoscaler/src/global/queuer.rs @@ -0,0 +1,49 @@ +use std::collections::HashMap; + +use anyhow::{Context, Ok}; +use reqwest::Method; +use zksync_prover_job_monitor::autoscaler_queue_reporter::VersionedQueueReport; +use zksync_utils::http_with_retries::send_request_with_retries; + +use crate::metrics::{AUTOSCALER_METRICS, DEFAULT_ERROR_CODE}; + +const MAX_RETRIES: usize = 5; + +#[derive(Debug)] +pub struct Queue { + pub queue: HashMap<String, u64>, +} + +#[derive(Default)] +pub struct Queuer { + pub prover_job_monitor_url: String, +} + +impl Queuer { + pub fn new(pjm_url: String) -> Self { + Self { + prover_job_monitor_url: pjm_url, + } + } + + pub async fn get_queue(&self) -> anyhow::Result<Queue> { + let url = &self.prover_job_monitor_url; + let response = send_request_with_retries(url, MAX_RETRIES, Method::GET, None, None).await; + let response = response.map_err(|err| { + AUTOSCALER_METRICS.calls[&(url.clone(), DEFAULT_ERROR_CODE)].inc(); + anyhow::anyhow!("Failed fetching queue from url: {url}: {err:?}") + })?; + + AUTOSCALER_METRICS.calls[&(url.clone(), response.status().as_u16())].inc(); + let response = response + .json::<Vec<VersionedQueueReport>>() + .await + .context("Failed to read response as json")?; + Ok(Queue { + queue: response + .iter() + .map(|x| (x.version.to_string(), x.report.prover_jobs.queued as u64)) + .collect::<HashMap<_, _>>(), + }) + } +} diff --git a/prover/crates/bin/prover_autoscaler/src/global/scaler.rs b/prover/crates/bin/prover_autoscaler/src/global/scaler.rs new file mode 100644 index 00000000000..884174562a1 --- /dev/null +++ b/prover/crates/bin/prover_autoscaler/src/global/scaler.rs @@ -0,0 +1,751 @@ +use std::{collections::HashMap, str::FromStr}; + +use chrono::Utc; +use debug_map_sorted::SortedOutputExt; +use once_cell::sync::Lazy; +use regex::Regex; +use zksync_config::configs::prover_autoscaler::{Gpu, ProverAutoscalerScalerConfig}; + +use super::{queuer, watcher}; +use crate::{ + agent::{ScaleDeploymentRequest, ScaleRequest}, + cluster_types::{Cluster, Clusters, Pod, PodStatus}, + metrics::AUTOSCALER_METRICS, + task_wiring::Task, +}; + +const DEFAULT_SPEED: u32 = 500; + +#[derive(Default, Debug, PartialEq, Eq)] +struct GPUPool { + name: String, + gpu: Gpu, + provers: HashMap<PodStatus, u32>, // TODO: consider using i64 everywhere to avoid type casts.
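+ // Tie-breaker used by sorted_clusters(): pools from clusters with fewer + // preemptions are preferred when placing provers.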
+ preemptions: u64, + max_pool_size: u32, +} + +impl GPUPool { + fn sum_by_pod_status(&self, ps: PodStatus) -> u32 { + self.provers.get(&ps).cloned().unwrap_or(0) + } + + fn to_key(&self) -> GPUPoolKey { + GPUPoolKey { + cluster: self.name.clone(), + gpu: self.gpu, + } + } +} + +#[derive(Debug, Eq, Hash, PartialEq)] +struct GPUPoolKey { + cluster: String, + gpu: Gpu, +} + +static PROVER_DEPLOYMENT_RE: Lazy<Regex> = + Lazy::new(|| Regex::new(r"^circuit-prover-gpu(-(?<gpu>[ltvpa]\d+))?$").unwrap()); +static PROVER_POD_RE: Lazy<Regex> = + Lazy::new(|| Regex::new(r"^circuit-prover-gpu(-(?<gpu>[ltvpa]\d+))?").unwrap()); + +/// gpu_to_prover converts a Gpu type to the corresponding deployment name. +fn gpu_to_prover(gpu: Gpu) -> String { + let s = "circuit-prover-gpu"; + match gpu { + Gpu::Unknown => "".into(), + Gpu::L4 => s.into(), + _ => format!("{}-{}", s, gpu.to_string().to_lowercase()), + } +} + +pub struct Scaler { + /// Map from namespace to protocol version configuration. + namespaces: HashMap<String, String>, + watcher: watcher::Watcher, + queuer: queuer::Queuer, + + /// Which cluster to use first. + cluster_priorities: HashMap<String, u32>, + min_provers: HashMap<String, u32>, + max_provers: HashMap<String, HashMap<Gpu, u32>>, + prover_speed: HashMap<Gpu, u32>, + long_pending_duration: chrono::Duration, +} + +struct ProverPodGpu<'a> { + name: &'a str, + pod: &'a Pod, + gpu: Gpu, +} + +impl<'a> ProverPodGpu<'a> { + fn new(name: &'a str, pod: &'a Pod) -> Option<ProverPodGpu<'a>> { + PROVER_POD_RE.captures(name).map(|caps| Self { + name, + pod, + gpu: Gpu::from_str(caps.name("gpu").map_or("l4", |m| m.as_str())).unwrap_or_default(), + }) + } +} + +impl Scaler { + pub fn new( + watcher: watcher::Watcher, + queuer: queuer::Queuer, + config: ProverAutoscalerScalerConfig, + ) -> Self { + config + .protocol_versions + .iter() + .for_each(|(namespace, version)| { + AUTOSCALER_METRICS.prover_protocol_version[&(namespace.clone(), version.clone())] + .set(1); + }); + Self { + namespaces: config.protocol_versions, + watcher, + queuer, + cluster_priorities: config.cluster_priorities, + min_provers: config.min_provers, + max_provers: config.max_provers, + prover_speed: config.prover_speed, + long_pending_duration: chrono::Duration::seconds( + config.long_pending_duration.whole_seconds(), + ), + } + } + + fn convert_to_gpu_pool(&self, namespace: &String, cluster: &Cluster) -> Vec<GPUPool> { + let mut gp_map = HashMap::new(); // <Gpu, GPUPool> + let Some(namespace_value) = &cluster.namespaces.get(namespace) else { + // No namespace in config, ignoring. + return vec![]; + }; + + for caps in namespace_value + .deployments + .keys() + .filter_map(|dn| PROVER_DEPLOYMENT_RE.captures(dn)) + { + // Processing only provers. + let gpu = + Gpu::from_str(caps.name("gpu").map_or("l4", |m| m.as_str())).unwrap_or_default(); + let e = gp_map.entry(gpu).or_insert(GPUPool { + name: cluster.name.clone(), + gpu, + max_pool_size: self + .max_provers + .get(&cluster.name) + .and_then(|inner_map| inner_map.get(&gpu)) + .copied() + .unwrap_or(0), + ..Default::default() + }); + + // Initialize pool only if we have ready deployments.
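+ // An explicit PodStatus::Running entry (even at 0) keeps the pool visible to + // run(), so it can receive provers before any pod exists.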
+ e.provers.insert(PodStatus::Running, 0); + } + + for ppg in namespace_value + .pods + .iter() + .filter_map(|(pn, pv)| ProverPodGpu::new(pn, pv)) + { + let e = gp_map.entry(ppg.gpu).or_insert(GPUPool { + name: cluster.name.clone(), + gpu: ppg.gpu, + ..Default::default() + }); + let mut status = PodStatus::from_str(&ppg.pod.status).unwrap_or_default(); + if status == PodStatus::Pending + && ppg.pod.changed < Utc::now() - self.long_pending_duration + { + status = PodStatus::LongPending; + } + tracing::info!( + "pod {}: status: {}, real status: {}", + ppg.name, + status, + ppg.pod.status + ); + e.provers.entry(status).and_modify(|n| *n += 1).or_insert(1); + } + + tracing::info!("From pods {:?}", gp_map.sorted_debug()); + + gp_map.into_values().collect() + } + + fn sorted_clusters(&self, namespace: &String, clusters: &Clusters) -> Vec<GPUPool> { + let mut gpu_pools: Vec<GPUPool> = clusters + .clusters + .values() + .flat_map(|c| self.convert_to_gpu_pool(namespace, c)) + .collect(); + + gpu_pools.sort_by(|a, b| { + a.gpu + .cmp(&b.gpu) // Sort by GPU first. + .then( + a.sum_by_pod_status(PodStatus::NeedToMove) + .cmp(&b.sum_by_pod_status(PodStatus::NeedToMove)), + ) // Sort by need to evict. + .then( + a.sum_by_pod_status(PodStatus::LongPending) + .cmp(&b.sum_by_pod_status(PodStatus::LongPending)), + ) // Sort by long Pending pods. + .then(a.preemptions.cmp(&b.preemptions)) // Sort by preemptions in the cluster. + .then( + self.cluster_priorities + .get(&a.name) + .unwrap_or(&1000) + .cmp(self.cluster_priorities.get(&b.name).unwrap_or(&1000)), + ) // Sort by priority. + .then(b.max_pool_size.cmp(&a.max_pool_size)) // Reverse sort by cluster size. + }); + + gpu_pools + } + + fn speed(&self, gpu: Gpu) -> u64 { + self.prover_speed + .get(&gpu) + .cloned() + .unwrap_or(DEFAULT_SPEED) + .into() + } + + fn provers_to_speed(&self, gpu: Gpu, n: u32) -> u64 { + self.speed(gpu) * n as u64 + } + + fn normalize_queue(&self, gpu: Gpu, queue: u64) -> u64 { + let speed = self.speed(gpu); + // Divide and round up if there's any remainder, i.e. round the queue up to a + // multiple of speed (e.g. with speed 500, a queue of 1499 normalizes to 1500). + (queue + speed - 1) / speed * speed + } + + fn run(&self, namespace: &String, queue: u64, clusters: &Clusters) -> HashMap<GPUPoolKey, u32> { + let sc = self.sorted_clusters(namespace, clusters); + tracing::debug!("Sorted clusters for namespace {}: {:?}", namespace, &sc); + + // Increase queue size, if it's too small, to make sure that the required min_provers are + // running. + let queue: u64 = self.min_provers.get(namespace).map_or(queue, |min| { + self.normalize_queue(Gpu::L4, queue) + .max(self.provers_to_speed(Gpu::L4, *min)) + }); + + let mut total: i64 = 0; + let mut provers: HashMap<GPUPoolKey, u32> = HashMap::new(); + for c in &sc { + for (status, p) in &c.provers { + match status { + PodStatus::Running | PodStatus::Pending => { + total += self.provers_to_speed(c.gpu, *p) as i64; + provers + .entry(c.to_key()) + .and_modify(|x| *x += p) + .or_insert(*p); + } + _ => (), // Ignore LongPending as not running here. + } + } + } + + // Remove unneeded pods. + if (total as u64) > self.normalize_queue(Gpu::L4, queue) { + for c in sc.iter().rev() { + let mut excess_queue = total as u64 - self.normalize_queue(c.gpu, queue); + let mut excess_provers = (excess_queue / self.speed(c.gpu)) as u32; + let p = provers.entry(c.to_key()).or_default(); + if *p < excess_provers { + excess_provers = *p; + excess_queue = *p as u64 * self.speed(c.gpu); + } + *p -= excess_provers; + total -= excess_queue as i64; + if total <= 0 { + break; + }; + } + } + + // Reduce load in over-capacity pools.
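+ // Provers above a cluster's max_pool_size can never be scheduled there, so the + // plan is clamped and the excess speed is subtracted from the covered total.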
+ for c in &sc { + let p = provers.entry(c.to_key()).or_default(); + if c.max_pool_size < *p { + let excess = *p - c.max_pool_size; + total -= excess as i64 * self.speed(c.gpu) as i64; + *p -= excess; + } + } + + tracing::debug!("Queue covered with provers: {}", total); + // Add required provers. + if (total as u64) < queue { + for c in &sc { + let mut required_queue = queue - total as u64; + let mut required_provers = + (self.normalize_queue(c.gpu, required_queue) / self.speed(c.gpu)) as u32; + let p = provers.entry(c.to_key()).or_default(); + if *p + required_provers > c.max_pool_size { + required_provers = c.max_pool_size - *p; + required_queue = required_provers as u64 * self.speed(c.gpu); + } + *p += required_provers; + total += required_queue as i64; + } + } + + tracing::debug!( + "run result for namespace {}: provers {:?}, total: {}", + namespace, + &provers, + total + ); + + provers + } +} + +fn diff( + namespace: &str, + provers: HashMap<GPUPoolKey, u32>, + clusters: &Clusters, + requests: &mut HashMap<String, ScaleRequest>, +) { + provers + .into_iter() + .for_each(|(GPUPoolKey { cluster, gpu }, n)| { + let prover = gpu_to_prover(gpu); + clusters + .clusters + .get(&cluster) + .and_then(|c| c.namespaces.get(namespace)) + .and_then(|ns| ns.deployments.get(&prover)) + .map_or_else( + || { + tracing::error!( + "Wasn't able to find deployment {} in cluster {}, namespace {}", + prover, + cluster, + namespace + ) + }, + |d| { + if d.desired != n as i32 { + requests + .entry(cluster.clone()) + .or_default() + .deployments + .push(ScaleDeploymentRequest { + namespace: namespace.into(), + name: prover.clone(), + size: n as i32, + }); + } + }, + ); + }) +} + +/// is_namespace_running returns true if any pods are running or expected to run in the namespace. +fn is_namespace_running(namespace: &str, clusters: &Clusters) -> bool { + clusters + .clusters + .values() + .flat_map(|v| v.namespaces.iter()) + .filter_map(|(k, v)| if k == namespace { Some(v) } else { None }) + .flat_map(|v| v.deployments.values()) + .map( + |d| d.running + d.desired, // If there is something running or expected to run, we + // should re-evaluate the namespace. + ) + .sum::<i32>() + > 0 +} + +#[async_trait::async_trait] +impl Task for Scaler { + async fn invoke(&self) -> anyhow::Result<()> { + let queue = self.queuer.get_queue().await.unwrap(); + + let mut scale_requests: HashMap<String, ScaleRequest> = HashMap::new(); + { + let guard = self.watcher.data.lock().await; // Keeping the lock during all calls of run() for + // consistency. + if let Err(err) = watcher::check_is_ready(&guard.is_ready) { + AUTOSCALER_METRICS.clusters_not_ready.inc(); + tracing::warn!("Skipping Scaler run: {}", err); + return Ok(()); + } + + for (ns, ppv) in &self.namespaces { + let q = queue.queue.get(ppv).cloned().unwrap_or(0); + tracing::debug!("Running eval for namespace {ns} and PPV {ppv} found queue {q}"); + if q > 0 || is_namespace_running(ns, &guard.clusters) { + let provers = self.run(ns, q, &guard.clusters); + for (k, num) in &provers { + AUTOSCALER_METRICS.provers[&(k.cluster.clone(), ns.clone(), k.gpu)] + .set(*num as u64); + } + diff(ns, provers, &guard.clusters, &mut scale_requests); + } + } + } // Unlock self.watcher.data.
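+ // The data lock is dropped above, so the scale requests below go out without + // holding it and slow agents cannot block the cluster watchers.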
+
+        if let Err(err) = self.watcher.send_scale(scale_requests).await {
+            tracing::error!("Failed scale request: {}", err);
+        }
+
+        Ok(())
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::{
+        cluster_types::{Deployment, Namespace, Pod},
+        global::{queuer, watcher},
+    };
+
+    #[tracing_test::traced_test]
+    #[test]
+    fn test_run() {
+        let scaler = Scaler::new(
+            watcher::Watcher::default(),
+            queuer::Queuer::default(),
+            ProverAutoscalerScalerConfig {
+                cluster_priorities: [("foo".into(), 0), ("bar".into(), 10)].into(),
+                min_provers: [("prover-other".into(), 2)].into(),
+                max_provers: [
+                    ("foo".into(), [(Gpu::L4, 100)].into()),
+                    ("bar".into(), [(Gpu::L4, 100)].into()),
+                ]
+                .into(),
+                ..Default::default()
+            },
+        );
+
+        assert_eq!(
+            scaler.run(
+                &"prover".into(),
+                1499,
+                &Clusters {
+                    clusters: [(
+                        "foo".into(),
+                        Cluster {
+                            name: "foo".into(),
+                            namespaces: [(
+                                "prover".into(),
+                                Namespace {
+                                    deployments: [(
+                                        "circuit-prover-gpu".into(),
+                                        Deployment::default(),
+                                    )]
+                                    .into(),
+                                    pods: [(
+                                        "circuit-prover-gpu-7c5f8fc747-gmtcr".into(),
+                                        Pod {
+                                            status: "Running".into(),
+                                            ..Default::default()
+                                        },
+                                    )]
+                                    .into(),
+                                },
+                            )]
+                            .into(),
+                        },
+                    )]
+                    .into(),
+                    ..Default::default()
+                },
+            ),
+            [(
+                GPUPoolKey {
+                    cluster: "foo".into(),
+                    gpu: Gpu::L4,
+                },
+                3,
+            )]
+            .into(),
+            "3 new provers"
+        );
+        assert_eq!(
+            scaler.run(
+                &"prover".into(),
+                499,
+                &Clusters {
+                    clusters: [
+                        (
+                            "foo".into(),
+                            Cluster {
+                                name: "foo".into(),
+                                namespaces: [(
+                                    "prover".into(),
+                                    Namespace {
+                                        deployments: [(
+                                            "circuit-prover-gpu".into(),
+                                            Deployment::default(),
+                                        )]
+                                        .into(),
+                                        ..Default::default()
+                                    },
+                                )]
+                                .into(),
+                            },
+                        ),
+                        (
+                            "bar".into(),
+                            Cluster {
+                                name: "bar".into(),
+                                namespaces: [(
+                                    "prover".into(),
+                                    Namespace {
+                                        deployments: [(
+                                            "circuit-prover-gpu".into(),
+                                            Deployment {
+                                                running: 1,
+                                                desired: 1,
+                                            },
+                                        )]
+                                        .into(),
+                                        pods: [(
+                                            "circuit-prover-gpu-7c5f8fc747-gmtcr".into(),
+                                            Pod {
+                                                status: "Running".into(),
+                                                ..Default::default()
+                                            },
+                                        )]
+                                        .into(),
+                                    },
+                                )]
+                                .into(),
+                            },
+                        )
+                    ]
+                    .into(),
+                    ..Default::default()
+                },
+            ),
+            [
+                (
+                    GPUPoolKey {
+                        cluster: "foo".into(),
+                        gpu: Gpu::L4,
+                    },
+                    0,
+                ),
+                (
+                    GPUPoolKey {
+                        cluster: "bar".into(),
+                        gpu: Gpu::L4,
+                    },
+                    1,
+                )
+            ]
+            .into(),
+            "Preserve running"
+        );
+    }
+
+    #[tracing_test::traced_test]
+    #[test]
+    fn test_run_min_provers() {
+        let scaler = Scaler::new(
+            watcher::Watcher::default(),
+            queuer::Queuer::default(),
+            ProverAutoscalerScalerConfig {
+                cluster_priorities: [("foo".into(), 0), ("bar".into(), 10)].into(),
+                min_provers: [("prover".into(), 2)].into(),
+                max_provers: [
+                    ("foo".into(), [(Gpu::L4, 100)].into()),
+                    ("bar".into(), [(Gpu::L4, 100)].into()),
+                ]
+                .into(),
+                ..Default::default()
+            },
+        );
+
+        assert_eq!(
+            scaler.run(
+                &"prover".into(),
+                10,
+                &Clusters {
+                    clusters: [
+                        (
+                            "foo".into(),
+                            Cluster {
+                                name: "foo".into(),
+                                namespaces: [(
+                                    "prover".into(),
+                                    Namespace {
+                                        deployments: [(
+                                            "circuit-prover-gpu".into(),
+                                            Deployment::default(),
+                                        )]
+                                        .into(),
+                                        ..Default::default()
+                                    },
+                                )]
+                                .into(),
+                            },
+                        ),
+                        (
+                            "bar".into(),
+                            Cluster {
+                                name: "bar".into(),
+                                namespaces: [(
+                                    "prover".into(),
+                                    Namespace {
+                                        deployments: [(
+                                            "circuit-prover-gpu".into(),
+                                            Deployment::default(),
+                                        )]
+                                        .into(),
+                                        ..Default::default()
+                                    },
+                                )]
+                                .into(),
+                            },
+                        )
+                    ]
+                    .into(),
+                    ..Default::default()
+                },
+            ),
+            [
+                (
+                    GPUPoolKey {
+                        cluster: "foo".into(),
+                        gpu: Gpu::L4,
+                    },
+                    2,
+                ),
+                (
+                    GPUPoolKey {
+                        cluster: "bar".into(),
+                        gpu: Gpu::L4,
+                    },
+                    0,
+                )
+            ]
+            .into(),
+            "Min 2 provers, non running"
+        );
+        assert_eq!(
+            scaler.run(
+                &"prover".into(),
+                0,
+                &Clusters {
+                    clusters: [
+                        (
+                            "foo".into(),
+                            Cluster {
+                                name: "foo".into(),
+                                namespaces: [(
+                                    "prover".into(),
+                                    Namespace {
+                                        deployments: [(
+                                            "circuit-prover-gpu".into(),
+                                            Deployment {
+                                                running: 3,
+                                                desired: 3,
+                                            },
+                                        )]
+                                        .into(),
+                                        pods: [
+                                            (
+                                                "circuit-prover-gpu-7c5f8fc747-gmtcr".into(),
+                                                Pod {
+                                                    status: "Running".into(),
+                                                    ..Default::default()
+                                                },
+                                            ),
+                                            (
+                                                "circuit-prover-gpu-7c5f8fc747-gmtc2".into(),
+                                                Pod {
+                                                    status: "Running".into(),
+                                                    ..Default::default()
+                                                },
+                                            ),
+                                            (
+                                                "circuit-prover-gpu-7c5f8fc747-gmtc3".into(),
+                                                Pod {
+                                                    status: "Running".into(),
+                                                    ..Default::default()
+                                                },
+                                            )
+                                        ]
+                                        .into(),
+                                    },
+                                )]
+                                .into(),
+                            },
+                        ),
+                        (
+                            "bar".into(),
+                            Cluster {
+                                name: "bar".into(),
+                                namespaces: [(
+                                    "prover".into(),
+                                    Namespace {
+                                        deployments: [(
+                                            "circuit-prover-gpu".into(),
+                                            Deployment {
+                                                running: 2,
+                                                desired: 2,
+                                            },
+                                        )]
+                                        .into(),
+                                        pods: [
+                                            (
+                                                "circuit-prover-gpu-7c5f8fc747-gmtcr".into(),
+                                                Pod {
+                                                    status: "Running".into(),
+                                                    ..Default::default()
+                                                },
+                                            ),
+                                            (
+                                                "circuit-prover-gpu-7c5f8fc747-gmtc2".into(),
+                                                Pod {
+                                                    status: "Running".into(),
+                                                    ..Default::default()
+                                                },
+                                            )
+                                        ]
+                                        .into(),
+                                    },
+                                )]
+                                .into(),
+                            },
+                        )
+                    ]
+                    .into(),
+                    ..Default::default()
+                },
+            ),
+            [
+                (
+                    GPUPoolKey {
+                        cluster: "foo".into(),
+                        gpu: Gpu::L4,
+                    },
+                    2,
+                ),
+                (
+                    GPUPoolKey {
+                        cluster: "bar".into(),
+                        gpu: Gpu::L4,
+                    },
+                    0,
+                )
+            ]
+            .into(),
+            "Min 2 provers, 5 running"
+        );
+    }
+}
diff --git a/prover/crates/bin/prover_autoscaler/src/global/watcher.rs b/prover/crates/bin/prover_autoscaler/src/global/watcher.rs
new file mode 100644
index 00000000000..6e02c0fe2fd
--- /dev/null
+++ b/prover/crates/bin/prover_autoscaler/src/global/watcher.rs
@@ -0,0 +1,203 @@
+use std::{collections::HashMap, sync::Arc};
+
+use anyhow::{anyhow, Context, Ok, Result};
+use futures::future;
+use reqwest::{
+    header::{HeaderMap, HeaderValue, CONTENT_TYPE},
+    Method,
+};
+use tokio::sync::Mutex;
+use url::Url;
+use zksync_utils::http_with_retries::send_request_with_retries;
+
+use crate::{
+    agent::{ScaleRequest, ScaleResponse},
+    cluster_types::{Cluster, Clusters},
+    metrics::{AUTOSCALER_METRICS, DEFAULT_ERROR_CODE},
+    task_wiring::Task,
+};
+
+const MAX_RETRIES: usize = 5;
+
+#[derive(Default)]
+pub struct WatchedData {
+    pub clusters: Clusters,
+    pub is_ready: Vec<bool>,
+}
+
+pub fn check_is_ready(v: &Vec<bool>) -> Result<()> {
+    for b in v {
+        if !b {
+            return Err(anyhow!("Clusters data is not ready"));
+        }
+    }
+    Ok(())
+}
+
+#[derive(Default, Clone)]
+pub struct Watcher {
+    /// List of base URLs of all agents.
+    pub cluster_agents: Vec<Arc<Url>>,
+    pub data: Arc<Mutex<WatchedData>>,
+}
+
+impl Watcher {
+    pub fn new(agent_urls: Vec<String>) -> Self {
+        let size = agent_urls.len();
+        Self {
+            cluster_agents: agent_urls
+                .into_iter()
+                .map(|u| {
+                    Arc::new(
+                        Url::parse(&u)
+                            .unwrap_or_else(|e| panic!("Unparsable Agent URL {}: {}", u, e)),
+                    )
+                })
+                .collect(),
+            data: Arc::new(Mutex::new(WatchedData {
+                clusters: Clusters::default(),
+                is_ready: vec![false; size],
+            })),
+        }
+    }
+
+    pub async fn send_scale(&self, requests: HashMap<String, ScaleRequest>) -> anyhow::Result<()> {
+        let id_requests: HashMap<usize, ScaleRequest>;
+        {
+            // Convert cluster names into ids. Holding the data lock.
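+            // (agent_ids is populated by the Watcher task, which maps each reported
+            // cluster name to the index of its agent URL in cluster_agents.)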
+            let guard = self.data.lock().await;
+            id_requests = requests
+                .into_iter()
+                .filter_map(|(cluster, scale_request)| {
+                    guard.clusters.agent_ids.get(&cluster).map_or_else(
+                        || {
+                            tracing::error!("Failed to find id for cluster {}", cluster);
+                            None
+                        },
+                        |id| Some((*id, scale_request)),
+                    )
+                })
+                .collect();
+        }
+
+        let handles: Vec<_> = id_requests
+            .into_iter()
+            .map(|(id, sr)| {
+                let url: String = self.cluster_agents[id]
+                    .clone()
+                    .join("/scale")
+                    .unwrap()
+                    .to_string();
+                tracing::debug!("Sending scale request to {}, data: {:?}.", url, sr);
+                tokio::spawn(async move {
+                    let mut headers = HeaderMap::new();
+                    headers.insert(CONTENT_TYPE, HeaderValue::from_static("application/json"));
+                    let response = send_request_with_retries(
+                        &url,
+                        MAX_RETRIES,
+                        Method::POST,
+                        Some(headers),
+                        Some(serde_json::to_vec(&sr)?),
+                    )
+                    .await;
+                    let response = response.map_err(|err| {
+                        AUTOSCALER_METRICS.calls[&(url.clone(), DEFAULT_ERROR_CODE)].inc();
+                        anyhow::anyhow!("Failed sending scale request to url: {url}: {err:?}")
+                    })?;
+                    AUTOSCALER_METRICS.calls[&(url, response.status().as_u16())].inc();
+                    let response = response
+                        .json::<ScaleResponse>()
+                        .await
+                        .context("Failed to read response as json");
+                    Ok((id, response))
+                })
+            })
+            .collect();
+
+        future::try_join_all(
+            future::join_all(handles)
+                .await
+                .into_iter()
+                .map(|h| async move {
+                    let (id, res) = h??;
+
+                    let errors: Vec<_> = res
+                        .expect("failed to send request to Agent")
+                        .scale_result
+                        .iter()
+                        .filter_map(|e| {
+                            if !e.is_empty() {
+                                Some(format!("Agent {} failed to scale: {}", id, e))
+                            } else {
+                                None
+                            }
+                        })
+                        .collect();
+
+                    if !errors.is_empty() {
+                        return Err(anyhow!(errors.join(";")));
+                    }
+                    Ok(())
+                })
+                .collect::<Vec<_>>(),
+        )
+        .await?;
+
+        Ok(())
+    }
+}
+
+#[async_trait::async_trait]
+impl Task for Watcher {
+    async fn invoke(&self) -> anyhow::Result<()> {
+        let handles: Vec<_> = self
+            .cluster_agents
+            .clone()
+            .into_iter()
+            .enumerate()
+            .map(|(i, a)| {
+                tracing::debug!("Getting cluster data from agent {}.", a);
+                tokio::spawn(async move {
+                    let url: String = a
+                        .clone()
+                        .join("/cluster")
+                        .context("Failed to join URL with /cluster")?
+                        .to_string();
+                    let response =
+                        send_request_with_retries(&url, MAX_RETRIES, Method::GET, None, None).await;
+
+                    let response = response.map_err(|err| {
+                        // TODO: refactor send_request_with_retries to return status.
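+                        // Until then, transport-level failures are recorded under the
+                        // catch-all DEFAULT_ERROR_CODE (500) instead of a real HTTP
+                        // status code.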
+                        AUTOSCALER_METRICS.calls[&(url.clone(), DEFAULT_ERROR_CODE)].inc();
+                        anyhow::anyhow!("Failed fetching cluster from url: {url}: {err:?}")
+                    })?;
+                    AUTOSCALER_METRICS.calls[&(url, response.status().as_u16())].inc();
+                    let response = response
+                        .json::<Cluster>()
+                        .await
+                        .context("Failed to read response as json");
+                    Ok((i, response))
+                })
+            })
+            .collect();
+
+        future::try_join_all(
+            future::join_all(handles)
+                .await
+                .into_iter()
+                .map(|h| async move {
+                    let (i, res) = h??;
+                    let c = res?;
+                    let mut guard = self.data.lock().await;
+                    guard.clusters.agent_ids.insert(c.name.clone(), i);
+                    guard.clusters.clusters.insert(c.name.clone(), c);
+                    guard.is_ready[i] = true;
+                    Ok(())
+                })
+                .collect::<Vec<_>>(),
+        )
+        .await?;
+
+        Ok(())
+    }
+}
diff --git a/prover/crates/bin/prover_autoscaler/src/k8s/mod.rs b/prover/crates/bin/prover_autoscaler/src/k8s/mod.rs
new file mode 100644
index 00000000000..0804b9eaa40
--- /dev/null
+++ b/prover/crates/bin/prover_autoscaler/src/k8s/mod.rs
@@ -0,0 +1,5 @@
+pub use scaler::Scaler;
+pub use watcher::Watcher;
+
+mod scaler;
+mod watcher;
diff --git a/prover/crates/bin/prover_autoscaler/src/k8s/scaler.rs b/prover/crates/bin/prover_autoscaler/src/k8s/scaler.rs
new file mode 100644
index 00000000000..5e6f56aacc9
--- /dev/null
+++ b/prover/crates/bin/prover_autoscaler/src/k8s/scaler.rs
@@ -0,0 +1,42 @@
+use k8s_openapi::api;
+use kube::api::{Api, Patch, PatchParams};
+
+#[derive(Clone)]
+pub struct Scaler {
+    pub client: kube::Client,
+    dry_run: bool,
+}
+
+impl Scaler {
+    pub fn new(client: kube::Client, dry_run: bool) -> Self {
+        Self { client, dry_run }
+    }
+
+    pub async fn scale(&self, namespace: &str, name: &str, size: i32) -> anyhow::Result<()> {
+        let deployments: Api<api::apps::v1::Deployment> =
+            Api::namespaced(self.client.clone(), namespace);
+
+        let patch = serde_json::json!({
+            "apiVersion": "apps/v1",
+            "kind": "Deployment",
+            "spec": {
+                "replicas": size
+            }
+        });
+
+        if self.dry_run {
+            tracing::info!(
+                "Dry run: would have scaled deployment/{} to {} replica(s).",
+                name,
+                size
+            );
+            return Ok(());
+        }
+
+        let pp = PatchParams::default();
+        deployments.patch(name, &pp, &Patch::Merge(patch)).await?;
+        tracing::info!("Scaled deployment/{} to {} replica(s).", name, size);
+
+        Ok(())
+    }
+}
diff --git a/prover/crates/bin/prover_autoscaler/src/k8s/watcher.rs b/prover/crates/bin/prover_autoscaler/src/k8s/watcher.rs
new file mode 100644
index 00000000000..f94dfc3704f
--- /dev/null
+++ b/prover/crates/bin/prover_autoscaler/src/k8s/watcher.rs
@@ -0,0 +1,133 @@
+use std::{collections::HashMap, sync::Arc};
+
+use chrono::Utc;
+use futures::{stream, StreamExt, TryStreamExt};
+use k8s_openapi::api;
+use kube::{
+    api::{Api, ResourceExt},
+    runtime::{watcher, WatchStreamExt},
+};
+use tokio::sync::Mutex;
+
+use crate::cluster_types::{Cluster, Deployment, Namespace, Pod};
+
+#[derive(Clone)]
+pub struct Watcher {
+    pub client: kube::Client,
+    pub cluster: Arc<Mutex<Cluster>>,
+}
+
+impl Watcher {
+    pub fn new(client: kube::Client, cluster_name: String, namespaces: Vec<String>) -> Self {
+        let mut ns = HashMap::new();
+        namespaces.into_iter().for_each(|n| {
+            ns.insert(n, Namespace::default());
+        });
+
+        Self {
+            client,
+            cluster: Arc::new(Mutex::new(Cluster {
+                name: cluster_name,
+                namespaces: ns,
+            })),
+        }
+    }
+
+    pub async fn run(self) -> anyhow::Result<()> {
+        // TODO: add actual metrics
+
+        // TODO: watch for a list of namespaces, get:
+        //   - deployments (name, running, desired) [done]
+        //   - pods (name, parent deployment, statuses, when the last status change) [~done]
+        //   - events (number of scheduling failures in last N seconds, which deployments)
+        //   - events (preemptions, which deployment, when, how many)
+        //   - pool size from GCP (name, size, which GPU)
+        let mut watchers = vec![];
+        for namespace in self.cluster.lock().await.namespaces.keys() {
+            let deployments: Api<api::apps::v1::Deployment> =
+                Api::namespaced(self.client.clone(), namespace);
+            watchers.push(
+                watcher(deployments, watcher::Config::default())
+                    .default_backoff()
+                    .applied_objects()
+                    .map_ok(Watched::Deploy)
+                    .boxed(),
+            );
+
+            let pods: Api<api::core::v1::Pod> = Api::namespaced(self.client.clone(), namespace);
+            watchers.push(
+                watcher(pods, watcher::Config::default())
+                    .default_backoff()
+                    .applied_objects()
+                    .map_ok(Watched::Pod)
+                    .boxed(),
+            );
+        }
+        // Select on applied events from all watchers.
+        let mut combo_stream = stream::select_all(watchers);
+        // SelectAll stream elements must all have the same Item, so both kinds are packed into
+        // this enum:
+        #[allow(clippy::large_enum_variant)]
+        enum Watched {
+            Deploy(api::apps::v1::Deployment),
+            Pod(api::core::v1::Pod),
+        }
+        while let Some(o) = combo_stream.try_next().await? {
+            match o {
+                Watched::Deploy(d) => {
+                    let namespace = match d.namespace() {
+                        Some(n) => n.to_string(),
+                        None => continue,
+                    };
+                    let mut cluster = self.cluster.lock().await;
+                    let v = cluster.namespaces.get_mut(&namespace).unwrap();
+                    let dep = v
+                        .deployments
+                        .entry(d.name_any())
+                        .or_insert(Deployment::default());
+                    let nums = d.status.clone().unwrap_or_default();
+                    dep.running = nums.available_replicas.unwrap_or_default();
+                    dep.desired = nums.replicas.unwrap_or_default();
+
+                    tracing::info!(
+                        "Got deployment: {}, size: {}/{}, unavailable: {}",
+                        d.name_any(),
+                        nums.available_replicas.unwrap_or_default(),
+                        nums.replicas.unwrap_or_default(),
+                        nums.unavailable_replicas.unwrap_or_default(),
+                    )
+                }
+                Watched::Pod(p) => {
+                    let namespace = match p.namespace() {
+                        Some(n) => n.to_string(),
+                        None => continue,
+                    };
+                    let mut cluster = self.cluster.lock().await;
+                    let v = cluster.namespaces.get_mut(&namespace).unwrap();
+                    let pod = v.pods.entry(p.name_any()).or_insert(Pod::default());
+                    pod.owner = p
+                        .owner_references()
+                        .iter()
+                        .map(|x| format!("{}/{}", x.kind.clone(), x.name.clone()))
+                        .collect::<Vec<_>>()
+                        .join(":");
+                    // TODO: Collect replica sets to match deployments and pods.
+                    let phase = p
+                        .status
+                        .clone()
+                        .unwrap_or_default()
+                        .phase
+                        .unwrap_or_default();
+                    if phase != pod.status {
+                        // TODO: figure out how to set the correct value on restart.
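+                        // (After an Agent restart every pod looks freshly changed,
+                        // so the Scaler's LongPending detection is delayed by up to
+                        // long_pending_duration.)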
+                        pod.changed = Utc::now();
+                    }
+                    pod.status = phase;
+
+                    tracing::info!("Got pod: {}", p.name_any())
+                }
+            }
+        }
+
+        Ok(())
+    }
+}
diff --git a/prover/crates/bin/prover_autoscaler/src/lib.rs b/prover/crates/bin/prover_autoscaler/src/lib.rs
new file mode 100644
index 00000000000..0b0d704c907
--- /dev/null
+++ b/prover/crates/bin/prover_autoscaler/src/lib.rs
@@ -0,0 +1,6 @@
+pub mod agent;
+pub(crate) mod cluster_types;
+pub mod global;
+pub mod k8s;
+pub(crate) mod metrics;
+pub mod task_wiring;
diff --git a/prover/crates/bin/prover_autoscaler/src/main.rs b/prover/crates/bin/prover_autoscaler/src/main.rs
new file mode 100644
index 00000000000..45e476079a5
--- /dev/null
+++ b/prover/crates/bin/prover_autoscaler/src/main.rs
@@ -0,0 +1,146 @@
+use std::time::Duration;
+
+use anyhow::Context;
+use structopt::StructOpt;
+use tokio::{
+    sync::{oneshot, watch},
+    task::JoinHandle,
+};
+use zksync_core_leftovers::temp_config_store::read_yaml_repr;
+use zksync_protobuf_config::proto::prover_autoscaler;
+use zksync_prover_autoscaler::{
+    agent,
+    global::{self},
+    k8s::{Scaler, Watcher},
+    task_wiring::TaskRunner,
+};
+use zksync_utils::wait_for_tasks::ManagedTasks;
+use zksync_vlog::prometheus::PrometheusExporterConfig;
+
+/// Represents the type of the Prover Autoscaler component to run.
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize)]
+pub enum AutoscalerType {
+    Scaler,
+    Agent,
+}
+
+impl std::str::FromStr for AutoscalerType {
+    type Err = String;
+
+    fn from_str(s: &str) -> Result<Self, Self::Err> {
+        match s {
+            "scaler" => Ok(AutoscalerType::Scaler),
+            "agent" => Ok(AutoscalerType::Agent),
+            other => Err(format!("{} is not a valid AutoscalerType", other)),
+        }
+    }
+}
+
+#[derive(Debug, StructOpt)]
+#[structopt(name = "Prover Autoscaler", about = "Run Prover Autoscaler components")]
+struct Opt {
+    /// Prover Autoscaler can run as an Agent or as a Scaler.
+    ///
+    /// Specify `agent` or `scaler`.
+    #[structopt(short, long, default_value = "agent")]
+    job: AutoscalerType,
+    /// Name of the cluster Agent is watching.
+    #[structopt(long)]
+    cluster_name: Option<String>,
+    /// Path to the configuration file.
+    #[structopt(long)]
+    config_path: std::path::PathBuf,
+}
+
+#[tokio::main]
+async fn main() -> anyhow::Result<()> {
+    let opt = Opt::from_args();
+    let general_config =
+        read_yaml_repr::<prover_autoscaler::ProverAutoscaler>(&opt.config_path)
+            .context("general config")?;
+    let observability_config = general_config
+        .observability
+        .context("observability config")?;
+    let _observability_guard = observability_config.install()?;
+    // It's unfortunate that there are at least 3 different Duration types in Rust, and we use all
+    // 3 of them in this repo.
+    // TODO: Consider updating zksync_protobuf to support std::time::Duration.
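+    // unsigned_abs() bridges the config's signed duration type to the
+    // std::time::Duration expected by tasks.complete() below.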
+    let graceful_shutdown_timeout = general_config.graceful_shutdown_timeout.unsigned_abs();
+
+    let (stop_signal_sender, stop_signal_receiver) = oneshot::channel();
+    let mut stop_signal_sender = Some(stop_signal_sender);
+    ctrlc::set_handler(move || {
+        if let Some(sender) = stop_signal_sender.take() {
+            sender.send(()).ok();
+        }
+    })
+    .context("Error setting Ctrl+C handler")?;
+
+    let (stop_sender, stop_receiver) = watch::channel(false);
+
+    let _ = rustls::crypto::ring::default_provider().install_default();
+    let client = kube::Client::try_default().await?;
+
+    let mut tasks = vec![];
+
+    match opt.job {
+        AutoscalerType::Agent => {
+            let cluster = opt
+                .cluster_name
+                .context("cluster_name is required for Agent")?;
+            tracing::info!("Starting ProverAutoscaler Agent for cluster {}", cluster);
+            let agent_config = general_config.agent_config.context("agent_config")?;
+            let exporter_config = PrometheusExporterConfig::pull(agent_config.prometheus_port);
+            tasks.push(tokio::spawn(exporter_config.run(stop_receiver.clone())));
+
+            // TODO: maybe get cluster name from curl -H "Metadata-Flavor: Google"
+            // http://metadata.google.internal/computeMetadata/v1/instance/attributes/cluster-name
+            let watcher = Watcher::new(client.clone(), cluster, agent_config.namespaces);
+            let scaler = Scaler::new(client, agent_config.dry_run);
+            tasks.push(tokio::spawn(watcher.clone().run()));
+            tasks.push(tokio::spawn(agent::run_server(
+                agent_config.http_port,
+                watcher,
+                scaler,
+                stop_receiver.clone(),
+            )))
+        }
+        AutoscalerType::Scaler => {
+            tracing::info!("Starting ProverAutoscaler Scaler");
+            let scaler_config = general_config.scaler_config.context("scaler_config")?;
+            let interval = scaler_config.scaler_run_interval.unsigned_abs();
+            let exporter_config = PrometheusExporterConfig::pull(scaler_config.prometheus_port);
+            tasks.push(tokio::spawn(exporter_config.run(stop_receiver.clone())));
+            let watcher = global::watcher::Watcher::new(scaler_config.agents.clone());
+            let queuer = global::queuer::Queuer::new(scaler_config.prover_job_monitor_url.clone());
+            let scaler = global::scaler::Scaler::new(watcher.clone(), queuer, scaler_config);
+            tasks.extend(get_tasks(watcher, scaler, interval, stop_receiver)?);
+        }
+    }
+
+    let mut tasks = ManagedTasks::new(tasks);
+
+    tokio::select! {
+        _ = tasks.wait_single() => {},
+        _ = stop_signal_receiver => {
+            tracing::info!("Stop signal received, shutting down");
+        }
+    }
+    stop_sender.send(true).ok();
+    tasks.complete(graceful_shutdown_timeout).await;
+
+    Ok(())
+}
+
+fn get_tasks(
+    watcher: global::watcher::Watcher,
+    scaler: global::scaler::Scaler,
+    interval: Duration,
+    stop_receiver: watch::Receiver<bool>,
+) -> anyhow::Result<Vec<JoinHandle<anyhow::Result<()>>>> {
+    let mut task_runner = TaskRunner::default();
+
+    task_runner.add("Watcher", interval, watcher);
+    task_runner.add("Scaler", interval, scaler);
+
+    Ok(task_runner.spawn(stop_receiver))
+}
diff --git a/prover/crates/bin/prover_autoscaler/src/metrics.rs b/prover/crates/bin/prover_autoscaler/src/metrics.rs
new file mode 100644
index 00000000000..d94ac8b97e9
--- /dev/null
+++ b/prover/crates/bin/prover_autoscaler/src/metrics.rs
@@ -0,0 +1,20 @@
+use vise::{Counter, Gauge, LabeledFamily, Metrics};
+use zksync_config::configs::prover_autoscaler::Gpu;
+
+pub const DEFAULT_ERROR_CODE: u16 = 500;
+
+#[derive(Debug, Metrics)]
+#[metrics(prefix = "autoscaler")]
+pub(crate) struct AutoscalerMetrics {
+    #[metrics(labels = ["target_namespace", "protocol_version"])]
+    pub prover_protocol_version: LabeledFamily<(String, String), Gauge, 2>,
+    #[metrics(labels = ["target_cluster", "target_namespace", "gpu"])]
+    pub provers: LabeledFamily<(String, String, Gpu), Gauge<u64>, 3>,
+    pub clusters_not_ready: Counter,
+    #[metrics(labels = ["target", "status"])]
+    pub calls: LabeledFamily<(String, u16), Counter, 2>,
+    // TODO: count of command send success/fail.
+}
+
+#[vise::register]
+pub(crate) static AUTOSCALER_METRICS: vise::Global<AutoscalerMetrics> = vise::Global::new();
diff --git a/prover/crates/bin/prover_autoscaler/src/task_wiring.rs b/prover/crates/bin/prover_autoscaler/src/task_wiring.rs
new file mode 100644
index 00000000000..9b60145ad9e
--- /dev/null
+++ b/prover/crates/bin/prover_autoscaler/src/task_wiring.rs
@@ -0,0 +1,72 @@
+use std::time::Duration;
+
+use anyhow::Context;
+use tracing::Instrument;
+
+/// Task trait to be run periodically by the Prover Autoscaler.
+#[async_trait::async_trait]
+pub trait Task {
+    async fn invoke(&self) -> anyhow::Result<()>;
+}
+
+/// Wrapper for a Task with a periodic interface. Holds information about the task and its run
+/// interval.
+struct PeriodicTask {
+    job: Box<dyn Task + Send + Sync>,
+    name: String,
+    interval: Duration,
+}
+
+impl PeriodicTask {
+    async fn run(
+        &self,
+        mut stop_receiver: tokio::sync::watch::Receiver<bool>,
+    ) -> anyhow::Result<()> {
+        tracing::info!(
+            "Started Task {} with run interval: {:?}",
+            self.name,
+            self.interval
+        );
+
+        let mut interval = tokio::time::interval(self.interval);
+
+        while !*stop_receiver.borrow_and_update() {
+            interval.tick().await;
+            self.job
+                .invoke()
+                .instrument(tracing::info_span!("run", service_name = %self.name))
+                .await
+                .context("failed to invoke task")?;
+        }
+        tracing::info!("Stop signal received; Task {} is shut down", self.name);
+        Ok(())
+    }
+}
+
+/// Wrapper over a vector of tasks. Makes adding/spawning tasks and sharing resources ergonomic.
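+///
+/// A minimal usage sketch (with a hypothetical `MyTask: Task` implementation):
+///
+/// ```ignore
+/// let mut runner = TaskRunner::default();
+/// runner.add("MyTask", Duration::from_secs(10), MyTask);
+/// let handles = runner.spawn(stop_receiver);
+/// ```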
+#[derive(Default)]
+pub struct TaskRunner {
    tasks: Vec<PeriodicTask>,
+}
+
+impl TaskRunner {
+    pub fn add<T: Task + Send + Sync + 'static>(&mut self, name: &str, interval: Duration, job: T) {
+        self.tasks.push(PeriodicTask {
+            name: name.into(),
+            interval,
+            job: Box::new(job),
+        });
+    }
+
+    pub fn spawn(
+        self,
+        stop_receiver: tokio::sync::watch::Receiver<bool>,
+    ) -> Vec<tokio::task::JoinHandle<anyhow::Result<()>>> {
+        self.tasks
+            .into_iter()
+            .map(|task| {
+                let receiver = stop_receiver.clone();
+                tokio::spawn(async move { task.run(receiver).await })
+            })
+            .collect()
+    }
+}
diff --git a/prover/crates/bin/witness_generator/README.md b/prover/crates/bin/witness_generator/README.md
index dc476ca44fc..6063c29b334 100644
--- a/prover/crates/bin/witness_generator/README.md
+++ b/prover/crates/bin/witness_generator/README.md
@@ -1,9 +1,5 @@
 # WitnessGenerator
 
-Please read this
-[doc](https://www.notion.so/matterlabs/Draft-FRI-Prover-Integration-Prover-Shadowing-c4b1373786eb43779a93118be4be5d99)
-for rationale of this binary, alongside the existing one in zk-core.
-
 The component is responsible for generating prover jobs and saving artifacts needed for the next round of proof
 aggregation. That is, every aggregation round needs two sets of input:
diff --git a/prover/crates/bin/witness_generator/src/rounds/basic_circuits/utils.rs b/prover/crates/bin/witness_generator/src/rounds/basic_circuits/utils.rs
index 23ae1b0f2af..31dc5481410 100644
--- a/prover/crates/bin/witness_generator/src/rounds/basic_circuits/utils.rs
+++ b/prover/crates/bin/witness_generator/src/rounds/basic_circuits/utils.rs
@@ -5,7 +5,7 @@ use std::{
 };
 
 use circuit_definitions::{
-    circuit_definitions::base_layer::{ZkSyncBaseLayerCircuit, ZkSyncBaseLayerStorage},
+    circuit_definitions::base_layer::ZkSyncBaseLayerStorage,
     encodings::recursion_request::RecursionQueueSimulator,
     zkevm_circuits::fsm_input_output::ClosedFormInputCompactFormWitness,
 };
@@ -21,7 +21,7 @@ use zksync_multivm::{
     zk_evm_latest::ethereum_types::Address,
 };
 use zksync_object_store::ObjectStore;
-use zksync_prover_fri_types::{keys::ClosedFormInputKey, CircuitAuxData};
+use zksync_prover_fri_types::keys::ClosedFormInputKey;
 use zksync_prover_interface::inputs::WitnessInputData;
 use zksync_system_constants::BOOTLOADER_ADDRESS;
 use zksync_types::L1BatchNumber;
@@ -31,8 +31,7 @@ use crate::{
     rounds::basic_circuits::Witness,
     storage_oracle::StorageOracle,
     utils::{
-        expand_bootloader_contents, save_circuit, save_ram_premutation_queue_witness,
-        ClosedFormInputWrapper, KZG_TRUSTED_SETUP_FILE,
+        expand_bootloader_contents, save_circuit, ClosedFormInputWrapper, KZG_TRUSTED_SETUP_FILE,
     },
     witness::WitnessStorage,
 };
@@ -64,17 +63,38 @@ pub(super) async fn generate_witness(
     let (circuit_sender, mut circuit_receiver) = tokio::sync::mpsc::channel(1);
     let (queue_sender, mut queue_receiver) = tokio::sync::mpsc::channel(1);
-    let (ram_permutation_queue_sender, mut ram_permutation_queue_receiver) =
-        tokio::sync::mpsc::channel(1);
 
     let make_circuits_span = tracing::info_span!("make_circuits");
     let make_circuits_span_copy = make_circuits_span.clone();
+
+    use std::{sync::mpsc::sync_channel, thread};
+    let (artifacts_sender, artifacts_receiver) = sync_channel(1);
+
+    let artifacts_receiver_handle = thread::spawn(move || {
+        let span = tracing::info_span!(parent: make_circuits_span_copy, "make_circuits_blocking");
+
+        while let Ok(artifact) = artifacts_receiver.recv() {
+            match artifact {
+                WitnessGenerationArtifact::BaseLayerCircuit(circuit) => {
+                    let parent_span = span.clone();
+                    tracing::info_span!(parent: parent_span, "send_circuit").in_scope(|| {
+                        circuit_sender
+                            .blocking_send(circuit)
+                            .expect("failed to send circuit from harness");
+                    });
+                }
+                WitnessGenerationArtifact::RecursionQueue((a, b, c)) => queue_sender
+                    .blocking_send((a as u8, b, c))
+                    .expect("failed to send recursion queue from harness"),
+                _ => {}
+            }
+        }
+    });
+
     // Blocking call from harness that does the CPU heavy lifting.
     // Provides circuits and recursion queue via callback functions and returns scheduler witnesses.
     // Circuits are "streamed" one by one as they're being generated.
     let make_circuits_handle = tokio::task::spawn_blocking(move || {
-        let span = tracing::info_span!(parent: make_circuits_span_copy, "make_circuits_blocking");
-
         let witness_storage = WitnessStorage::new(input.vm_run_data.witness_block_state);
         let storage_view = StorageView::new(witness_storage).to_rc_ptr();
@@ -91,28 +111,10 @@ pub(super) async fn generate_witness(
             .to_str()
             .expect("Path to KZG trusted setup is not a UTF-8 string");
 
-        let artifacts_callback = |artifact: WitnessGenerationArtifact| match artifact {
-            WitnessGenerationArtifact::BaseLayerCircuit(circuit) => {
-                let parent_span = span.clone();
-                tracing::info_span!(parent: parent_span, "send_circuit").in_scope(|| {
-                    circuit_sender
-                        .blocking_send(circuit)
-                        .expect("failed to send circuit from harness");
-                });
-            }
-            WitnessGenerationArtifact::RecursionQueue((a, b, c)) => queue_sender
-                .blocking_send((a as u8, b, c))
-                .expect("failed to send recursion queue from harness"),
-            a @ WitnessGenerationArtifact::MemoryQueueWitness(_) => {
-                let parent_span = span.clone();
-                tracing::info_span!(parent: parent_span, "send_ram_permutation_queue_witness")
-                    .in_scope(|| {
-                        ram_permutation_queue_sender
-                            .blocking_send(a)
-                            .expect("failed to send ram permutation queue sitness from harness");
-                    });
-            }
-        };
+        let evm_emulator_code_hash = input.vm_run_data.evm_emulator_code_hash;
+        // By convention, default AA is used instead of the EVM emulator if the latter is disabled.
+        let evm_emulator_code_hash =
+            evm_emulator_code_hash.unwrap_or(input.vm_run_data.default_account_code_hash);
 
         let (scheduler_witness, block_witness) = zkevm_test_harness::external_calls::run(
             Address::zero(),
@@ -121,17 +123,16 @@ pub(super) async fn generate_witness(
             bootloader_contents,
             false,
             input.vm_run_data.default_account_code_hash,
-            // NOTE: this will be evm_simulator_code_hash in future releases
-            input.vm_run_data.default_account_code_hash,
+            evm_emulator_code_hash,
             input.vm_run_data.used_bytecodes,
             Vec::default(),
             MAX_CYCLES_FOR_TX as usize,
             geometry_config,
             storage_oracle,
             tree,
-            path,
+            path.to_owned(),
             input.eip_4844_blobs.blobs(),
-            artifacts_callback,
+            artifacts_sender,
         );
         (scheduler_witness, block_witness)
     })
@@ -150,8 +151,6 @@ pub(super) async fn generate_witness(
     // If the order is tampered with, proving will fail (as the proof would be computed for a different sequence of instruction).
let mut circuit_sequence = 0; - let mut ram_circuit_sequence = 0; - while let Some(circuit) = circuit_receiver .recv() .instrument(tracing::info_span!("wait_for_circuit")) @@ -166,26 +165,9 @@ pub(super) async fn generate_witness( .await .expect("failed to get permit for running save circuit task"); - let partial_circuit_aux_data = match &circuit { - ZkSyncBaseLayerCircuit::RAMPermutation(_) => { - let circuit_subsequence_number = ram_circuit_sequence; - ram_circuit_sequence += 1; - Some(CircuitAuxData { - circuit_subsequence_number, - }) - } - _ => None, - }; - save_circuit_handles.push(tokio::task::spawn(async move { - let (circuit_id, circuit_url) = save_circuit( - block_number, - circuit, - sequence, - partial_circuit_aux_data, - object_store, - ) - .await; + let (circuit_id, circuit_url) = + save_circuit(block_number, circuit, sequence, object_store).await; drop(permit); (circuit_id, circuit_url) })); @@ -193,57 +175,6 @@ pub(super) async fn generate_witness( } .instrument(save_circuits_span); - let mut save_ram_queue_witness_handles = vec![]; - - let save_ram_queue_witness_span = tracing::info_span!("save_circuits"); - - // Future which receives part of RAM permutation circuits witnesses and saves them async. - // Uses semaphore because these artifacts are of significant size - let ram_queue_witness_receiver_handle = async { - let mut sorted_sequence = 0; - let mut unsorted_sequence = 0; - - while let Some(witness_artifact) = ram_permutation_queue_receiver - .recv() - .instrument(tracing::info_span!("wait_for_ram_witness")) - .await - { - let object_store = object_store.clone(); - let semaphore = semaphore.clone(); - let permit = semaphore - .acquire_owned() - .await - .expect("failed to get permit for running save ram permutation queue witness task"); - let (is_sorted, witness, sequence) = match witness_artifact { - WitnessGenerationArtifact::MemoryQueueWitness((witness, sorted)) => { - let sequence = if sorted { - let sequence = sorted_sequence; - sorted_sequence += 1; - sequence - } else { - let sequence = unsorted_sequence; - unsorted_sequence += 1; - sequence - }; - (sorted, witness, sequence) - } - _ => panic!("Invalid artifact received"), - }; - save_ram_queue_witness_handles.push(tokio::task::spawn(async move { - let _ = save_ram_premutation_queue_witness( - block_number, - sequence, - is_sorted, - witness, - object_store, - ) - .await; - drop(permit); - })); - } - } - .instrument(save_ram_queue_witness_span); - let mut save_queue_handles = vec![]; let save_queues_span = tracing::info_span!("save_queues"); @@ -269,11 +200,10 @@ pub(super) async fn generate_witness( } .instrument(save_queues_span); - let (witnesses, _, _, _) = tokio::join!( + let (witnesses, _, _) = tokio::join!( make_circuits_handle, circuit_receiver_handle, - queue_receiver_handle, - ram_queue_witness_receiver_handle + queue_receiver_handle ); let (mut scheduler_witness, block_aux_witness) = witnesses.unwrap(); @@ -298,11 +228,7 @@ pub(super) async fn generate_witness( .filter(|(circuit_id, _, _)| circuits_present.contains(circuit_id)) .collect(); - let _: Vec<_> = futures::future::join_all(save_ram_queue_witness_handles) - .await - .into_iter() - .map(|result| result.expect("failed to save ram permutation queue witness")) - .collect(); + artifacts_receiver_handle.join().unwrap(); scheduler_witness.previous_block_meta_hash = input.previous_batch_metadata.meta_hash.0; scheduler_witness.previous_block_aux_hash = input.previous_batch_metadata.aux_hash.0; diff --git 
a/prover/crates/bin/witness_generator/src/utils.rs b/prover/crates/bin/witness_generator/src/utils.rs
index 8524bdae9ff..ea631f19cd8 100644
--- a/prover/crates/bin/witness_generator/src/utils.rs
+++ b/prover/crates/bin/witness_generator/src/utils.rs
@@ -3,10 +3,7 @@ use std::{
     sync::Arc,
 };
 
-use circuit_definitions::{
-    circuit_definitions::base_layer::ZkSyncBaseLayerCircuit,
-    encodings::memory_query::MemoryQueueStateWitnesses,
-};
+use circuit_definitions::circuit_definitions::base_layer::ZkSyncBaseLayerCircuit;
 use once_cell::sync::Lazy;
 use zkevm_test_harness::boojum::field::goldilocks::GoldilocksField;
 use zksync_multivm::utils::get_used_bootloader_memory_bytes;
@@ -24,8 +21,8 @@ use zksync_prover_fri_types::{
         encodings::recursion_request::RecursionQueueSimulator,
         zkevm_circuits::scheduler::input::SchedulerCircuitInstanceWitness,
     },
-    keys::{AggregationsKey, ClosedFormInputKey, FriCircuitKey, RamPermutationQueueWitnessKey},
-    CircuitAuxData, CircuitWrapper, FriProofWrapper, RamPermutationQueueWitness,
+    keys::{AggregationsKey, ClosedFormInputKey, FriCircuitKey},
+    CircuitWrapper, FriProofWrapper,
 };
 use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber, ProtocolVersionId, U256};
 
@@ -121,7 +118,6 @@ pub async fn save_circuit(
     block_number: L1BatchNumber,
     circuit: ZkSyncBaseLayerCircuit,
     sequence_number: usize,
-    aux_data_for_partial_circuit: Option<CircuitAuxData>,
     object_store: Arc<dyn ObjectStore>,
 ) -> (u8, String) {
     let circuit_id = circuit.numeric_circuit_type();
@@ -133,43 +129,12 @@ pub async fn save_circuit(
         depth: 0,
     };
 
-    let blob_url = if let Some(aux_data_for_partial_circuit) = aux_data_for_partial_circuit {
-        object_store
-            .put(
-                circuit_key,
-                &CircuitWrapper::BasePartial((circuit, aux_data_for_partial_circuit)),
-            )
-            .await
-            .unwrap()
-    } else {
-        object_store
-            .put(circuit_key, &CircuitWrapper::Base(circuit))
-            .await
-            .unwrap()
-    };
-    (circuit_id, blob_url)
-}
-
-#[tracing::instrument(
-    skip_all,
-    fields(l1_batch = %block_number)
-)]
-pub async fn save_ram_premutation_queue_witness(
-    block_number: L1BatchNumber,
-    circuit_subsequence_number: usize,
-    is_sorted: bool,
-    witness: MemoryQueueStateWitnesses<GoldilocksField>,
-    object_store: Arc<dyn ObjectStore>,
-) -> String {
-    let witness_key = RamPermutationQueueWitnessKey {
-        block_number,
-        circuit_subsequence_number,
-        is_sorted,
-    };
-    object_store
-        .put(witness_key, &RamPermutationQueueWitness { witness })
+    let blob_url = object_store
+        .put(circuit_key, &CircuitWrapper::Base(circuit))
         .await
-        .unwrap()
+        .unwrap();
+
+    (circuit_id, blob_url)
 }
 
 #[tracing::instrument(
diff --git a/prover/crates/lib/keystore/src/keystore.rs b/prover/crates/lib/keystore/src/keystore.rs
index 6225943e3cd..ab3b115bc63 100644
--- a/prover/crates/lib/keystore/src/keystore.rs
+++ b/prover/crates/lib/keystore/src/keystore.rs
@@ -470,6 +470,7 @@ impl Keystore {
     }
 
     /// Async loads mapping of all circuits to setup key, if successful
+    #[cfg(feature = "gpu")]
     pub async fn load_all_setup_key_mapping(
         &self,
     ) -> anyhow::Result>> {
diff --git a/prover/crates/lib/prover_fri_types/src/lib.rs b/prover/crates/lib/prover_fri_types/src/lib.rs
index 4a8a1b3e406..37e004d54ec 100644
--- a/prover/crates/lib/prover_fri_types/src/lib.rs
+++ b/prover/crates/lib/prover_fri_types/src/lib.rs
@@ -28,8 +28,8 @@ pub mod keys;
 pub mod queue;
 
 // THESE VALUES SHOULD BE UPDATED ON ANY PROTOCOL UPGRADE OF PROVERS
-pub const PROVER_PROTOCOL_VERSION: ProtocolVersionId = ProtocolVersionId::Version24;
-pub const PROVER_PROTOCOL_PATCH: VersionPatch = VersionPatch(2);
+pub const PROVER_PROTOCOL_VERSION: ProtocolVersionId = ProtocolVersionId::Version25;
+pub const PROVER_PROTOCOL_PATCH: VersionPatch = VersionPatch(0);
 pub const PROVER_PROTOCOL_SEMANTIC_VERSION: ProtocolSemanticVersion = ProtocolSemanticVersion {
     minor: PROVER_PROTOCOL_VERSION,
     patch: PROVER_PROTOCOL_PATCH,
diff --git a/prover/docs/03_launch.md b/prover/docs/03_launch.md
index 203fb6e8cec..0465d888f61 100644
--- a/prover/docs/03_launch.md
+++ b/prover/docs/03_launch.md
@@ -47,7 +47,7 @@ We will be running a bunch of binaries, it's recommended to run each in a separa
 ### Server
 
 ```
-zk server --components=api,tree,eth,state_keeper,housekeeper,tee_verifier_input_producer,commitment_generator,da_dispatcher,proof_data_handler,vm_runner_protective_reads,vm_runner_bwip
+zk server --components=api,tree,eth,state_keeper,housekeeper,commitment_generator,da_dispatcher,proof_data_handler,vm_runner_protective_reads,vm_runner_bwip
 ```
 
 ### Proof data handler
diff --git a/prover/docs/05_proving_batch.md b/prover/docs/05_proving_batch.md
index e09a44cb0ff..c35de975bf7 100644
--- a/prover/docs/05_proving_batch.md
+++ b/prover/docs/05_proving_batch.md
@@ -14,17 +14,25 @@ GPU, which requires an NVIDIA A100 80GB GPU.
 
 ### Prerequisites
 
-First of all, you need to install CUDA drivers, all other things will be dealt with by `zk_inception` and `prover_cli`
-tools. For that, check the following [guide](./02_setup.md)(you can skip bellman-cuda step).
+First of all, you need to install CUDA drivers; everything else will be handled by the `zkstack` and `prover_cli` tools.
+For that, check the following [guide](./02_setup.md) (you can skip the bellman-cuda step).
 
 Install the prerequisites, which you can find
 [here](https://github.com/matter-labs/zksync-era/blob/main/docs/guides/setup-dev.md). Note, that if you are not using
 Google VM instance, you also need to install [gcloud](https://cloud.google.com/sdk/docs/install#deb).
 
-Now, you can use `zk_inception` and `prover_cli` tools for setting up the env and running prover subsystem.
+Now you can use the `zkstack` and `prover_cli` tools to set up the env and run the prover subsystem.
 
-```shell
-cargo +nightly-2024-08-01 install --git https://github.com/matter-labs/zksync-era/ --locked zk_inception zk_supervisor prover_cli --force
+First, install `zkstackup` with:
+
+```bash
+curl -L https://raw.githubusercontent.com/matter-labs/zksync-era/main/zkstack_cli/zkstackup/install | bash
+```
+
+Then install the most recent version of `zkstack` with:
+
+```bash
+zkstackup
 ```
 
 ## Initializing system
 
 After you have installed the tool, you can create ecosystem(you need to run only
 running:
 
 ```shell
-zk_inception ecosystem create --l1-network=localhost --prover-mode=gpu --wallet-creation=localhost --l1-batch-commit-data-generator-mode=rollup --start-containers=true
+zkstack ecosystem create --l1-network=localhost --prover-mode=gpu --wallet-creation=localhost --l1-batch-commit-data-generator-mode=rollup --start-containers=true
 ```
 
 The command will create the ecosystem and all the necessary components for the prover subsystem. You can leave default
 values for all the prompts you will see.
 
 Now, you need to initialize the prover subsystem by running:
 
 ```shell
-zk_inception prover init --shall-save-to-public-bucket=false --setup-database=true --use-default=true --dont-drop=false
+zkstack prover init --shall-save-to-public-bucket=false --setup-database=true --use-default=true --dont-drop=false
 ```
 
 For prompts you can leave default values as well.
@@ -87,13 +95,23 @@ After you have the data, you need to prepare the system to run the batch. So, da the protocol version it should use. You can do that with running ```shell -zk_supervisor prover-version +zkstack dev prover info ``` Example output: ```shell -Current protocol version found in zksync-era: 0.24.2, snark_wrapper: "0x14f97b81e54b35fe673d8708cc1a19e1ea5b5e348e12d31e39824ed4f42bbca2" +=============================== + +Current prover setup information: + +Protocol version: 0.24.2 + +Snark wrapper: 0x14f97b81e54b35fe673d8708cc1a19e1ea5b5e348e12d31e39824ed4f42bbca2 + +Database URL: postgres://postgres:notsecurepassword@localhost:5432/zksync_prover_localhost_era + +=============================== ``` This command will provide you with the information about the semantic protocol version(you need to know only minor and @@ -118,7 +136,7 @@ prover_cli insert-batch --number= --version==13.7.0": - version "22.0.0" - resolved "https://registry.yarnpkg.com/@types/node/-/node-22.0.0.tgz#04862a2a71e62264426083abe1e27e87cac05a30" - integrity sha512-VT7KSYudcPOzP5Q0wfbowyNLaVR8QWUdw+088uFWwfvpY6uCWaXpqV6ieLAu9WBcnTa7H4Z5RLK8I5t2FuOcqw== + version "20.12.7" + resolved "https://registry.yarnpkg.com/@types/node/-/node-20.12.7.tgz#04080362fa3dd6c5822061aa3124f5c152cff384" + integrity sha512-wq0cICSkRLVaf3UGLMGItu/PtdY7oaXaI/RVU+xliKVOtRna3PRY57ZDfztpDL0n11vfymMUnXv8QwYCO7L1wg== dependencies: - undici-types "~6.11.1" + undici-types "~5.26.4" "@types/node@11.11.6": version "11.11.6" @@ -2720,9 +2951,9 @@ integrity sha512-w+tIMs3rq2afQdsPJlODhoUEKzFP1ayaoyl1CcnwtIlsVe7K7bA1NGm4s3PraqTLlXnbIN84zuBlxBWo1u9BLw== "@types/node@^18.19.15": - version "18.19.42" - resolved "https://registry.yarnpkg.com/@types/node/-/node-18.19.42.tgz#b54ed4752c85427906aab40917b0f7f3d724bf72" - integrity sha512-d2ZFc/3lnK2YCYhos8iaNIYu9Vfhr92nHiyJHRltXWjXUBjEE+A4I58Tdbnw4VhggSW+2j5y5gTrLs4biNnubg== + version "18.19.31" + resolved "https://registry.yarnpkg.com/@types/node/-/node-18.19.31.tgz#b7d4a00f7cb826b60a543cebdbda5d189aaecdcd" + integrity sha512-ArgCD39YpyyrtFKIqMDvjz79jto5fcI/SVUs2HwB+f0dAzq68yqOdyaSivLiLugSziTpNXLQrVb7RZFmdZzbhA== dependencies: undici-types "~5.26.4" @@ -2739,9 +2970,9 @@ "@types/node" "*" "@types/pg@^8.10.3": - version "8.11.6" - resolved "https://registry.yarnpkg.com/@types/pg/-/pg-8.11.6.tgz#a2d0fb0a14b53951a17df5197401569fb9c0c54b" - integrity sha512-/2WmmBXHLsfRqzfHW7BNZ8SbYzE8OSk7i3WjFYvfgRHj7S1xj+16Je5fUKv3lVdVzk/zn9TXOqf+avFCFIE0yQ== + version "8.11.5" + resolved "https://registry.yarnpkg.com/@types/pg/-/pg-8.11.5.tgz#a1ffb4dc4a46a83bda096cb298051a5b171de167" + integrity sha512-2xMjVviMxneZHDHX5p5S6tsRRs7TpDHeeK7kTTMe/kAC/mRRNjWHjZg0rkiY+e17jXSZV3zJYDxXV8Cy72/Vuw== dependencies: "@types/node" "*" pg-protocol "*" @@ -2753,9 +2984,9 @@ integrity sha512-+68kP9yzs4LMp7VNh8gdzMSPZFL44MLGqiHWvttYJe+6qnuVr4Ek9wSBQoveqY/r+LwjCcU29kNVkidwim+kYA== "@types/qs@^6.2.31": - version "6.9.15" - resolved "https://registry.yarnpkg.com/@types/qs/-/qs-6.9.15.tgz#adde8a060ec9c305a82de1babc1056e73bd64dce" - integrity sha512-uXHQKES6DQKKCLh441Xv/dwxOq1TVS3JPUMlEqoEglvlhR6Mxnlew/Xq/LRVHpLyk7iK3zODe1qYHIMltO7XGg== + version "6.9.14" + resolved "https://registry.yarnpkg.com/@types/qs/-/qs-6.9.14.tgz#169e142bfe493895287bee382af6039795e9b75b" + integrity sha512-5khscbd3SwWMhFqylJBLQ0zIu7c1K6Vz0uBIt915BI3zV0q1nfjRQD3RqSBcPaO6PHEF4ov/t9y89fSiyThlPA== "@types/resolve@^0.0.8": version "0.0.8" @@ -3003,21 +3234,19 @@ acorn-jsx@^5.3.1, acorn-jsx@^5.3.2: integrity 
sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ== acorn-walk@^8.1.1: - version "8.3.3" - resolved "https://registry.yarnpkg.com/acorn-walk/-/acorn-walk-8.3.3.tgz#9caeac29eefaa0c41e3d4c65137de4d6f34df43e" - integrity sha512-MxXdReSRhGO7VlFe1bRG/oI7/mdLV9B9JJT0N8vZOhF7gFRR5l3M8W9G8JxmKV+JC5mGqJ0QvqfSOLsCPa4nUw== - dependencies: - acorn "^8.11.0" + version "8.3.2" + resolved "https://registry.yarnpkg.com/acorn-walk/-/acorn-walk-8.3.2.tgz#7703af9415f1b6db9315d6895503862e231d34aa" + integrity sha512-cjkyv4OtNCIeqhHrfS81QWXoCBPExR/J62oyEqepVw8WaQeSqpW2uhuLPh1m9eWhDuOo/jUXVTlifvesOWp/4A== acorn@^7.4.0: version "7.4.1" resolved "https://registry.yarnpkg.com/acorn/-/acorn-7.4.1.tgz#feaed255973d2e77555b83dbc08851a6c63520fa" integrity sha512-nQyp0o1/mNdbTO1PO6kHkwSrmgZ0MT/jCCpNiwbUjGoRN4dlBhqJtoQuCnEOKzgTVwg0ZWiCoQy6SxMebQVh8A== -acorn@^8.11.0, acorn@^8.4.1, acorn@^8.9.0: - version "8.12.1" - resolved "https://registry.yarnpkg.com/acorn/-/acorn-8.12.1.tgz#71616bdccbe25e27a54439e0046e89ca76df2248" - integrity sha512-tcpGyI9zbizT9JbV6oYE477V6mTlXvvi0T0G3SNIYE2apm/G5huBa1+K89VGeovbg+jycCrfhl3ADxErOuO6Jg== +acorn@^8.4.1, acorn@^8.9.0: + version "8.11.3" + resolved "https://registry.yarnpkg.com/acorn/-/acorn-8.11.3.tgz#71e0b14e13a4ec160724b38fb7b0f233b1b81d7a" + integrity sha512-Y9rRfJG5jcKOE0CLisYbojUjIrIEE7AGMzA/Sm4BslANhbS+cDMpgBdcPT91oJ7OuJ9hYJBx59RjbhxVnrF8Xg== adm-zip@^0.4.16: version "0.4.16" @@ -3060,14 +3289,14 @@ ajv@^6.10.0, ajv@^6.12.3, ajv@^6.12.4, ajv@^6.12.6: uri-js "^4.2.2" ajv@^8.0.1: - version "8.17.1" - resolved "https://registry.yarnpkg.com/ajv/-/ajv-8.17.1.tgz#37d9a5c776af6bc92d7f4f9510eba4c0a60d11a6" - integrity sha512-B/gBuNg5SiMTrPkC+A2+cW0RszwxYmn6VYxB/inlBStS5nx6xHIt/ehKRhIMhqusl7a8LjQoZnjCs5vhwxOQ1g== + version "8.12.0" + resolved "https://registry.yarnpkg.com/ajv/-/ajv-8.12.0.tgz#d1a0527323e22f53562c567c00991577dfbe19d1" + integrity sha512-sRu1kpcO9yLtYxBKvqfTeh9KzZEwO3STyX1HT+4CaDzC6HpTGYhIhPIzj9XuKU7KYDwnaeh5hcOwjy1QuJzBPA== dependencies: - fast-deep-equal "^3.1.3" - fast-uri "^3.0.1" + fast-deep-equal "^3.1.1" json-schema-traverse "^1.0.0" require-from-string "^2.0.2" + uri-js "^4.2.2" amdefine@>=0.0.4: version "1.0.1" @@ -3086,7 +3315,7 @@ ansi-colors@4.1.1: resolved "https://registry.yarnpkg.com/ansi-colors/-/ansi-colors-4.1.1.tgz#cbb9ae256bf750af1eab344f229aa27fe94ba348" integrity sha512-JoX0apGbHaUJBNl6yF+p6JAFYZ666/hhCGKN5t9QFjbJQKUU/g8MNbFDbvfrgKXvI1QpZplPOnwIo99lX/AAmA== -ansi-colors@^4.1.1, ansi-colors@^4.1.3: +ansi-colors@^4.1.1: version "4.1.3" resolved "https://registry.yarnpkg.com/ansi-colors/-/ansi-colors-4.1.3.tgz#37611340eb2243e70cc604cad35d63270d48781b" integrity sha512-/6w/C21Pm1A7aZitlI5Ni/2J6FFQN8i1Cvz3kHABAAbw93v/NlvKdVOqz7CCWz/3iv/JplRSEEZ83XION15ovw== @@ -3119,9 +3348,9 @@ ansi-regex@^5.0.1: integrity sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ== ansi-regex@^6.0.1: - version "6.0.1" - resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-6.0.1.tgz#3183e38fae9a65d7cb5e53945cd5897d0260a06a" - integrity sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA== + version "6.1.0" + resolved "https://registry.yarnpkg.com/ansi-regex/-/ansi-regex-6.1.0.tgz#95ec409c69619d6cb1b8b34f14b660ef28ebd654" + integrity sha512-7HSX4QQb4CspciLpVFwyRe79O3xsIZDDLER21kERQ71oaPodF8jL725AgJMFAYbooIqolJoRLuM81SpeUkpkvA== ansi-styles@^3.2.1: version "3.2.1" @@ -3153,9 +3382,9 @@ antlr4@^4.11.0: integrity 
sha512-kiXTspaRYvnIArgE97z5YVVf/cDVQABr3abFRR6mE7yesLMkgu4ujuyV/sgxafQ8wgve0DJQUJ38Z8tkgA2izA== antlr4@^4.13.1-patch-1: - version "4.13.1-patch-1" - resolved "https://registry.yarnpkg.com/antlr4/-/antlr4-4.13.1-patch-1.tgz#946176f863f890964a050c4f18c47fd6f7e57602" - integrity sha512-OjFLWWLzDMV9rdFhpvroCWR4ooktNg9/nvVYSA5z28wuVpU36QUNuioR1XLnQtcjVlf8npjyz593PxnU/f/Cow== + version "4.13.2" + resolved "https://registry.yarnpkg.com/antlr4/-/antlr4-4.13.2.tgz#0d084ad0e32620482a9c3a0e2470c02e72e4006d" + integrity sha512-QiVbZhyy4xAZ17UPEuG3YTOt8ZaoeOR1CvEAqrEsDBsOqINslaB147i9xqljZqoyf5S+EUlGStaj+t22LT9MOg== antlr4ts@^0.5.0-alpha.4: version "0.5.0-alpha.4" @@ -3338,11 +3567,6 @@ async@^2.4.0: dependencies: lodash "^4.17.14" -async@^3.2.3: - version "3.2.5" - resolved "https://registry.yarnpkg.com/async/-/async-3.2.5.tgz#ebd52a8fdaf7a2289a24df399f8d8485c8a46b66" - integrity sha512-baNZyqaaLhyLVKm/DlvdW051MSgO6b8eVfIezl9E5PqWxFgzLm/wQntEW4zOytVburDEr0JlALEpdOFwvErLsg== - asynckit@^0.4.0: version "0.4.0" resolved "https://registry.yarnpkg.com/asynckit/-/asynckit-0.4.0.tgz#c79ed97f7f34cb8f2ba1bc9790bcc366474b4b79" @@ -3361,9 +3585,9 @@ aws-sign2@~0.7.0: integrity sha512-08kcGqnYf/YmjoRhfxyu+CLxBjUtHLXLXX/vUfx9l2LYzG3c1m61nrpyFUZI6zeS+Li/wWMMidD9KgrqtGq3mA== aws4@^1.8.0: - version "1.13.0" - resolved "https://registry.yarnpkg.com/aws4/-/aws4-1.13.0.tgz#d9b802e9bb9c248d7be5f7f5ef178dc3684e9dcc" - integrity sha512-3AungXC4I8kKsS9PuS4JH2nc+0bVY/mjgrephHTIi8fpEeGsTHBUJeosp0Wc1myYMElmD0B3Oc4XL/HVJ4PV2g== + version "1.12.0" + resolved "https://registry.yarnpkg.com/aws4/-/aws4-1.12.0.tgz#ce1c9d143389679e253b314241ea9aa5cec980d3" + integrity sha512-NmWvPnx0F1SfrQbYwOi7OeaNGokp9XhzNioJ/CSBs8Qa4vxug81mhJEAVZwxXuBmYB5KDRfMq/F3RR0BIU7sWg== axios@^0.21.1: version "0.21.4" @@ -3372,7 +3596,16 @@ axios@^0.21.1: dependencies: follow-redirects "^1.14.0" -axios@^1.4.0, axios@^1.5.1, axios@^1.7.2: +axios@^1.4.0, axios@^1.5.1: + version "1.6.8" + resolved "https://registry.yarnpkg.com/axios/-/axios-1.6.8.tgz#66d294951f5d988a00e87a0ffb955316a619ea66" + integrity sha512-v/ZHtJDU39mDpyBoFVkETcd/uNdxrWRrg3bKpOKzXFA6Bvqopts6ALSMU3y6ijYxbw2B+wPrIv46egTzJXCLGQ== + dependencies: + follow-redirects "^1.15.6" + form-data "^4.0.0" + proxy-from-env "^1.1.0" + +axios@^1.7.2: version "1.7.2" resolved "https://registry.yarnpkg.com/axios/-/axios-1.7.2.tgz#b625db8a7051fbea61c35a3cbb3a1daa7b9c7621" integrity sha512-2A8QhOMrbomlDuiLeK9XibIBzuHeRcqqNOHp0Cyp5EoJ1IFDh+XZH3A6BkXtv0K4gFGCI0Y4BM7B1wOEi0Rmgw== @@ -3467,9 +3700,9 @@ balanced-match@^1.0.0: integrity sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw== base-x@^3.0.2: - version "3.0.10" - resolved "https://registry.yarnpkg.com/base-x/-/base-x-3.0.10.tgz#62de58653f8762b5d6f8d9fe30fa75f7b2585a75" - integrity sha512-7d0s06rR9rYaIWHkpfLIFICM/tkSVdoPC9qYAQRpxn9DdKNWNsKC0uk++akckyLq16Tx2WIinnZ6WRriAt6njQ== + version "3.0.9" + resolved "https://registry.yarnpkg.com/base-x/-/base-x-3.0.9.tgz#6349aaabb58526332de9f60995e548a53fe21320" + integrity sha512-H7JU6iBHTal1gp56aKoaa//YUxEaAOUiydvrV/pILqIHXTtqxSkATOnDA2u+jZ/61sD+L/412+7kzXRtWukhpQ== dependencies: safe-buffer "^5.0.1" @@ -3576,19 +3809,19 @@ brace-expansion@^2.0.1: dependencies: balanced-match "^1.0.0" -braces@^3.0.3, braces@~3.0.2: - version "3.0.3" - resolved "https://registry.yarnpkg.com/braces/-/braces-3.0.3.tgz#490332f40919452272d55a8480adc0c441358789" - integrity sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA== +braces@^3.0.2, braces@~3.0.2: 
+ version "3.0.2" + resolved "https://registry.yarnpkg.com/braces/-/braces-3.0.2.tgz#3454e1a462ee8d599e236df336cd9ea4f8afe107" + integrity sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A== dependencies: - fill-range "^7.1.1" + fill-range "^7.0.1" brorand@^1.0.1, brorand@^1.1.0: version "1.1.0" resolved "https://registry.yarnpkg.com/brorand/-/brorand-1.1.0.tgz#12c25efe40a45e3c323eb8675a0a0ce57b22371f" integrity sha512-cKV8tMCEpQs4hK/ik71d6LrPOnpkpGBR0wzxqr68g2m/LB2GxVYQroAjMJZRVM1Y4BCjCKc3vAamxSzOY2RP+w== -browser-stdout@1.3.1, browser-stdout@^1.3.1: +browser-stdout@1.3.1: version "1.3.1" resolved "https://registry.yarnpkg.com/browser-stdout/-/browser-stdout-1.3.1.tgz#baa559ee14ced73452229bad7326467c61fabd60" integrity sha512-qhAVI1+Av2X7qelOfAIYwXONood6XlZE/fXaBSmW/T5SzLAmCgzi+eiWE7fUvbHaeNBQH13UftjpXxsfLkMpgw== @@ -3605,15 +3838,15 @@ browserify-aes@^1.2.0: inherits "^2.0.1" safe-buffer "^5.0.1" -browserslist@^4.23.1: - version "4.23.2" - resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.23.2.tgz#244fe803641f1c19c28c48c4b6ec9736eb3d32ed" - integrity sha512-qkqSyistMYdxAcw+CzbZwlBy8AGmS/eEWs+sEV5TnLRGDOL+C5M2EnH6tlZyg0YoAxGJAFKh61En9BR941GnHA== +browserslist@^4.22.2: + version "4.23.0" + resolved "https://registry.yarnpkg.com/browserslist/-/browserslist-4.23.0.tgz#8f3acc2bbe73af7213399430890f86c63a5674ab" + integrity sha512-QW8HiM1shhT2GuzkvklfjcKDiWFXHOeFCIA/huJPwHsslwcydgk7X+z2zXpEijP98UCY7HbubZt5J2Zgvf0CaQ== dependencies: - caniuse-lite "^1.0.30001640" - electron-to-chromium "^1.4.820" + caniuse-lite "^1.0.30001587" + electron-to-chromium "^1.4.668" node-releases "^2.0.14" - update-browserslist-db "^1.1.0" + update-browserslist-db "^1.0.13" bs-logger@0.x: version "0.2.6" @@ -3767,10 +4000,10 @@ camelcase@^6.0.0, camelcase@^6.2.0: resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-6.3.0.tgz#5685b95eb209ac9c0c177467778c9c84df58ba9a" integrity sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA== -caniuse-lite@^1.0.30001640: - version "1.0.30001643" - resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001643.tgz#9c004caef315de9452ab970c3da71085f8241dbd" - integrity sha512-ERgWGNleEilSrHM6iUz/zJNSQTP8Mr21wDWpdgvRwcTXGAq6jMtOUPP4dqFPTdKqZ2wKTdtB+uucZ3MRpAUSmg== +caniuse-lite@^1.0.30001587: + version "1.0.30001608" + resolved "https://registry.yarnpkg.com/caniuse-lite/-/caniuse-lite-1.0.30001608.tgz#7ae6e92ffb300e4b4ec2f795e0abab456ec06cc0" + integrity sha512-cjUJTQkk9fQlJR2s4HMuPMvTiRggl0rAVMtthQuyOlDWuqHXqN8azLq+pi8B2TjwKJ32diHjUqRIKeFX4z1FoA== caseless@^0.12.0, caseless@~0.12.0: version "0.12.0" @@ -3797,13 +4030,26 @@ cbor@^9.0.2: nofilter "^3.1.0" chai-as-promised@^7.1.1: - version "7.1.2" - resolved "https://registry.yarnpkg.com/chai-as-promised/-/chai-as-promised-7.1.2.tgz#70cd73b74afd519754161386421fb71832c6d041" - integrity sha512-aBDHZxRzYnUYuIAIPBH2s511DjlKPzXNlXSGFC8CwmroWQLfrW0LtE1nK3MAwwNhJPa9raEjNCmRoFpG0Hurdw== + version "7.1.1" + resolved "https://registry.yarnpkg.com/chai-as-promised/-/chai-as-promised-7.1.1.tgz#08645d825deb8696ee61725dbf590c012eb00ca0" + integrity sha512-azL6xMoi+uxu6z4rhWQ1jbdUhOMhis2PvscD/xjLqNMkv3BPPp2JyyuTHOrf9BOosGpNQ11v6BKv/g57RXbiaA== dependencies: check-error "^1.0.2" -chai@^4.3.10, chai@^4.3.4, chai@^4.3.6, chai@^4.3.7: +chai@^4.3.10, chai@^4.3.4, chai@^4.3.6: + version "4.4.1" + resolved "https://registry.yarnpkg.com/chai/-/chai-4.4.1.tgz#3603fa6eba35425b0f2ac91a009fe924106e50d1" + integrity 
sha512-13sOfMv2+DWduEU+/xbun3LScLoqN17nBeTLUsmDfKdoiC1fr0n9PU4guu4AhRcOVFk/sW8LyZWHuhWtQZiF+g== + dependencies: + assertion-error "^1.1.0" + check-error "^1.0.3" + deep-eql "^4.1.3" + get-func-name "^2.0.2" + loupe "^2.3.6" + pathval "^1.1.1" + type-detect "^4.0.8" + +chai@^4.3.7: version "4.5.0" resolved "https://registry.yarnpkg.com/chai/-/chai-4.5.0.tgz#707e49923afdd9b13a8b0b47d33d732d13812fd8" integrity sha512-RITGBfijLkBddZvnn8jdqoTypxvqbOLYQkGGxXzeFjVHvudaPw0HNFD9x928/eUwYWd2dPCugVqspGALTZZQKw== @@ -3816,7 +4062,7 @@ chai@^4.3.10, chai@^4.3.4, chai@^4.3.6, chai@^4.3.7: pathval "^1.1.1" type-detect "^4.1.0" -chalk@4.1.2, chalk@^4.0.0, chalk@^4.0.2, chalk@^4.1.0, chalk@^4.1.2: +chalk@4.1.2, chalk@^4.0.0, chalk@^4.1.0, chalk@^4.1.2: version "4.1.2" resolved "https://registry.yarnpkg.com/chalk/-/chalk-4.1.2.tgz#aac4e2b7734a740867aeb16bf02aad556a1e7a01" integrity sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA== @@ -3870,7 +4116,7 @@ chokidar@3.5.3: optionalDependencies: fsevents "~2.3.2" -chokidar@^3.4.0, chokidar@^3.5.3: +chokidar@^3.4.0: version "3.6.0" resolved "https://registry.yarnpkg.com/chokidar/-/chokidar-3.6.0.tgz#197c6cc669ef2a8dc5e7b4d97ee4e092c3eb0d5b" integrity sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw== @@ -3885,6 +4131,13 @@ chokidar@^3.4.0, chokidar@^3.5.3: optionalDependencies: fsevents "~2.3.2" +chokidar@^4.0.0: + version "4.0.1" + resolved "https://registry.yarnpkg.com/chokidar/-/chokidar-4.0.1.tgz#4a6dff66798fb0f72a94f616abbd7e1a19f31d41" + integrity sha512-n8enUVCED/KVRQlab1hr3MVpcVMvxtZjmEa956u+4YijlmQED223XMSYj2tLuKvr4jcCTzNNMpQDUer72MMmzA== + dependencies: + readdirp "^4.0.1" + chownr@^1.0.1, chownr@^1.1.1: version "1.1.4" resolved "https://registry.yarnpkg.com/chownr/-/chownr-1.1.4.tgz#6fc9d7b42d32a583596337666e7d08084da2cc6b" @@ -3909,9 +4162,9 @@ cipher-base@^1.0.0, cipher-base@^1.0.1, cipher-base@^1.0.3: safe-buffer "^5.0.1" cjs-module-lexer@^1.0.0: - version "1.3.1" - resolved "https://registry.yarnpkg.com/cjs-module-lexer/-/cjs-module-lexer-1.3.1.tgz#c485341ae8fd999ca4ee5af2d7a1c9ae01e0099c" - integrity sha512-a3KdPAANPbNE4ZUv9h6LckSl9zLsYOP4MBmhIPkRaeyybt+r4UghLvq+xw/YwUcC1gqylCkL4rdVs3Lwupjm4Q== + version "1.2.3" + resolved "https://registry.yarnpkg.com/cjs-module-lexer/-/cjs-module-lexer-1.2.3.tgz#6c370ab19f8a3394e318fe682686ec0ac684d107" + integrity sha512-0TNiGstbQmCFwt4akjjBg5pLRTSyj/PkWQ1ZoO2zntmg9yLqSRxwEa4iCfQLGjqhiqBfOJa7W/E8wfGrTDmlZQ== clean-stack@^2.0.0: version "2.2.0" @@ -3941,9 +4194,9 @@ cli-table3@^0.5.0: colors "^1.1.2" cli-table3@^0.6.0: - version "0.6.5" - resolved "https://registry.yarnpkg.com/cli-table3/-/cli-table3-0.6.5.tgz#013b91351762739c16a9567c21a04632e449bf2f" - integrity sha512-+W/5efTR7y5HRD7gACw9yQjqMVvEMLBHmboM/kPWam+H+Hmyrgjh6YncVKK122YZkXrLudzTuAukUw9FnMf7IQ== + version "0.6.4" + resolved "https://registry.yarnpkg.com/cli-table3/-/cli-table3-0.6.4.tgz#d1c536b8a3f2e7bec58f67ac9e5769b1b30088b0" + integrity sha512-Lm3L0p+/npIQWNIiyF/nAn7T5dnOwR3xNTHXYEBFBFVPXzCVNZ5lqEC/1eo/EVfpDsQ1I+TX4ORPQgp+UI0CRw== dependencies: string-width "^4.2.0" optionalDependencies: @@ -3983,9 +4236,9 @@ code-block-writer@^12.0.0: integrity sha512-q4dMFMlXtKR3XNBHyMHt/3pwYNA69EDk00lloMOaaUMKPUXBw6lpXtbu3MMVG6/uOihGnRDOlkyqsONEUj60+w== code-block-writer@^13.0.1: - version "13.0.2" - resolved "https://registry.yarnpkg.com/code-block-writer/-/code-block-writer-13.0.2.tgz#e1c6c3dbe5d38b4ac76fb62c4d4b2fc4bf04c9c1" - integrity 
sha512-XfXzAGiStXSmCIwrkdfvc7FS5Dtj8yelCtyOf2p2skCAfvLd6zu0rGzuS9NSCO3bq1JKpFZ7tbKdKlcd5occQA== + version "13.0.3" + resolved "https://registry.yarnpkg.com/code-block-writer/-/code-block-writer-13.0.3.tgz#90f8a84763a5012da7af61319dd638655ae90b5b" + integrity sha512-Oofo0pq3IKnsFtuHqSF7TqBfr71aeyZDVJ0HpmqB7FBM2qEigL0iPONSCZSO9pE9dZTAxANe5XHG9Uy0YMv8cg== collect-v8-coverage@^1.0.0: version "1.0.2" @@ -4145,9 +4398,9 @@ cookie@^0.4.1: integrity sha512-aSWTXFzaKWkvHO1Ny/s+ePFpvKsPnjc551iI41v3ny/ow6tBG5Vd+FuqGNhh1LxOmVzOlGUriIlOaokOvhaStA== core-js-pure@^3.0.1: - version "3.37.1" - resolved "https://registry.yarnpkg.com/core-js-pure/-/core-js-pure-3.37.1.tgz#2b4b34281f54db06c9a9a5bd60105046900553bd" - integrity sha512-J/r5JTHSmzTxbiYYrzXg9w1VpqrYt+gexenBE9pugeyhwPZTAEJddyiReJWsLO6uNQ8xJZFbod6XC7KKwatCiA== + version "3.36.1" + resolved "https://registry.yarnpkg.com/core-js-pure/-/core-js-pure-3.36.1.tgz#1461c89e76116528b54eba20a0aff30164087a94" + integrity sha512-NXCvHvSVYSrewP0L5OhltzXeWFJLo2AL2TYnj6iLV3Bw8mM62wAQMNgUCRI6EBu6hVVpbCxmOPlxh1Ikw2PfUA== core-js@^2.4.0: version "2.6.12" @@ -4175,12 +4428,12 @@ cosmiconfig@^8.0.0: path-type "^4.0.0" cpu-features@~0.0.9: - version "0.0.10" - resolved "https://registry.yarnpkg.com/cpu-features/-/cpu-features-0.0.10.tgz#9aae536db2710c7254d7ed67cb3cbc7d29ad79c5" - integrity sha512-9IkYqtX3YHPCzoVg1Py+o9057a3i0fp7S530UWokCSaFVTc7CwXPRiOjRjBQQ18ZCNafx78YfnG+HALxtVmOGA== + version "0.0.9" + resolved "https://registry.yarnpkg.com/cpu-features/-/cpu-features-0.0.9.tgz#5226b92f0f1c63122b0a3eb84cb8335a4de499fc" + integrity sha512-AKjgn2rP2yJyfbepsmLfiYcmtNn/2eUvocUyM/09yB0YDiz39HteK/5/T4Onf0pmdYDMgkBoGvRLvEguzyL7wQ== dependencies: buildcheck "~0.0.6" - nan "^2.19.0" + nan "^2.17.0" crc-32@^1.2.0: version "1.2.2" @@ -4297,10 +4550,10 @@ death@^1.1.0: resolved "https://registry.yarnpkg.com/death/-/death-1.1.0.tgz#01aa9c401edd92750514470b8266390c66c67318" integrity sha512-vsV6S4KVHvTGxbEcij7hkWRv0It+sGGWVOM67dQde/o5Xjnr+KmLjxWJii2uEObIrt1CcM9w0Yaovx+iOlIL+w== -debug@4, debug@^4.0.1, debug@^4.1.0, debug@^4.1.1, debug@^4.3.1, debug@^4.3.2, debug@^4.3.3, debug@^4.3.4, debug@^4.3.5: - version "4.3.6" - resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.6.tgz#2ab2c38fbaffebf8aa95fdfe6d88438c7a13c52b" - integrity sha512-O/09Bd4Z1fBrU4VzkhFqVgpPzaGbw6Sm9FEkBT1A/YBXQFGuuSxa1dN2nxgxS34JmKXqYx8CZAwEVoJFImUXIg== +debug@4, debug@4.3.4, debug@^4.0.1, debug@^4.1.0, debug@^4.1.1, debug@^4.3.1, debug@^4.3.2, debug@^4.3.3, debug@^4.3.4: + version "4.3.4" + resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.4.tgz#1319f6579357f2338d3337d2cdd4914bb5dcc865" + integrity sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ== dependencies: ms "2.1.2" @@ -4325,6 +4578,13 @@ debug@^3.1.0, debug@^3.2.6, debug@^3.2.7: dependencies: ms "^2.1.1" +debug@^4.3.5: + version "4.3.5" + resolved "https://registry.yarnpkg.com/debug/-/debug-4.3.5.tgz#e83444eceb9fedd4a1da56d671ae2446a01a6e1e" + integrity sha512-pt0bNEmneDIvdL1Xsd9oDQ/wrQRkXDT4AUWlNZNPKvW5x/jyO9VFXkJUP07vQ2upmw5PlaITaPKc31jK13V+jg== + dependencies: + ms "2.1.2" + decamelize@^4.0.0: version "4.0.0" resolved "https://registry.yarnpkg.com/decamelize/-/decamelize-4.0.0.tgz#aa472d7bf660eb15f3494efd531cab7f2a709837" @@ -4338,14 +4598,14 @@ decompress-response@^6.0.0: mimic-response "^3.1.0" dedent@^1.0.0: - version "1.5.3" - resolved "https://registry.yarnpkg.com/dedent/-/dedent-1.5.3.tgz#99aee19eb9bae55a67327717b6e848d0bf777e5a" - integrity 
sha512-NHQtfOOW68WD8lgypbLA5oT+Bt0xXJhiYvoR6SmmNXZfpzOGXwdKWmcwG8N7PwVVWV3eF/68nmD9BaJSsTBhyQ== + version "1.5.1" + resolved "https://registry.yarnpkg.com/dedent/-/dedent-1.5.1.tgz#4f3fc94c8b711e9bb2800d185cd6ad20f2a90aff" + integrity sha512-+LxW+KLWxu3HW3M2w2ympwtqPrqYRzU8fqi6Fhd18fBALe15blJPI/I4+UHveMVG6lJqB4JNd4UG0S5cnVHwIg== deep-eql@^4.0.1, deep-eql@^4.1.3: - version "4.1.4" - resolved "https://registry.yarnpkg.com/deep-eql/-/deep-eql-4.1.4.tgz#d0d3912865911bb8fac5afb4e3acfa6a28dc72b7" - integrity sha512-SUwdGfqdKOwxCPeVYjwSyRpJ7Z+fhpwIAtmCUdZIWZ/YP5R9WAsyuSgpLVDi9bjWoN2LXHNss/dk3urXtdQxGg== + version "4.1.3" + resolved "https://registry.yarnpkg.com/deep-eql/-/deep-eql-4.1.3.tgz#7c7775513092f7df98d8df9996dd085eb668cc6d" + integrity sha512-WaEtAOpRA1MQ0eohqZjpGD8zdI0Ovsm8mmFhaDN8dvDZzyoUMcYDnf5Y6iu7HTXxf8JDS23qWa4a+hKCDyOPzw== dependencies: type-detect "^4.0.0" @@ -4391,7 +4651,7 @@ define-data-property@^1.0.1, define-data-property@^1.1.4: es-errors "^1.3.0" gopd "^1.0.1" -define-properties@^1.2.0, define-properties@^1.2.1: +define-properties@^1.1.3, define-properties@^1.2.0, define-properties@^1.2.1: version "1.2.1" resolved "https://registry.yarnpkg.com/define-properties/-/define-properties-1.2.1.tgz#10781cc616eb951a80a034bafcaa7377f6af2b6c" integrity sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg== @@ -4430,7 +4690,7 @@ diff@^4.0.1: resolved "https://registry.yarnpkg.com/diff/-/diff-4.0.2.tgz#60f3aecb89d5fae520c11aa19efc2bb982aade7d" integrity sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A== -diff@^5.2.0: +diff@^5.1.0, diff@^5.2.0: version "5.2.0" resolved "https://registry.yarnpkg.com/diff/-/diff-5.2.0.tgz#26ded047cd1179b78b9537d5ef725503ce1ae531" integrity sha512-uIFDxqpRZGZ6ThOk84hEfqWoHx2devRFvpTZcTHur85vImfaxUbTW9Ryh4CpCuDnToOP1CEtXKIgytHBPVff5A== @@ -4555,17 +4815,10 @@ ecdsa-sig-formatter@1.0.11: dependencies: safe-buffer "^5.0.1" -ejs@^3.1.10: - version "3.1.10" - resolved "https://registry.yarnpkg.com/ejs/-/ejs-3.1.10.tgz#69ab8358b14e896f80cc39e62087b88500c3ac3b" - integrity sha512-UeJmFfOrAQS8OJWPZ4qtgHyWExa088/MtK5UEyoJGFH67cDEXkZSviOiKRCZ4Xij0zxI3JECgYs3oKx+AizQBA== - dependencies: - jake "^10.8.5" - -electron-to-chromium@^1.4.820: - version "1.5.2" - resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.5.2.tgz#6126ad229ce45e781ec54ca40db0504787f23d19" - integrity sha512-kc4r3U3V3WLaaZqThjYz/Y6z8tJe+7K0bbjUVo3i+LWIypVdMx5nXCkwRe6SWbY6ILqLdc1rKcKmr3HoH7wjSQ== +electron-to-chromium@^1.4.668: + version "1.4.731" + resolved "https://registry.yarnpkg.com/electron-to-chromium/-/electron-to-chromium-1.4.731.tgz#d3dc19f359045b750a1fb0bc42315a502d950187" + integrity sha512-+TqVfZjpRz2V/5SPpmJxq9qK620SC5SqCnxQIOi7i/U08ZDcTpKbT7Xjj9FU5CbXTMUb4fywbIr8C7cGv4hcjw== elliptic@6.5.4: version "6.5.4" @@ -4581,9 +4834,9 @@ elliptic@6.5.4: minimalistic-crypto-utils "^1.0.1" elliptic@^6.5.2, elliptic@^6.5.4, elliptic@^6.5.5: - version "6.5.6" - resolved "https://registry.yarnpkg.com/elliptic/-/elliptic-6.5.6.tgz#ee5f7c3a00b98a2144ac84d67d01f04d438fa53e" - integrity sha512-mpzdtpeCLuS3BmE3pO3Cpp5bbjlOPY2Q0PgoF+Od1XZrHLYI28Xe3ossCmYCQt11FQKEYd9+PF8jymTvtWJSHQ== + version "6.5.5" + resolved "https://registry.yarnpkg.com/elliptic/-/elliptic-6.5.5.tgz#c715e09f78b6923977610d4c2346d6ce22e6dded" + integrity sha512-7EjbcmUm17NQFu4Pmgmq2olYMj8nwMnpcddByChSUjArp8F5DQWcIcpriwO4ZToLNAJig0yiyjswfyGNje/ixw== dependencies: bn.js "^4.11.9" brorand "^1.1.0" @@ -4636,9 +4889,9 @@ 
end-of-stream@^1.0.0, end-of-stream@^1.1.0, end-of-stream@^1.4.1: once "^1.4.0" enhanced-resolve@^5.12.0: - version "5.17.1" - resolved "https://registry.yarnpkg.com/enhanced-resolve/-/enhanced-resolve-5.17.1.tgz#67bfbbcc2f81d511be77d686a90267ef7f898a15" - integrity sha512-LMHl3dXhTcfv8gM4kEzIUeTQ+7fpdA0l2tUf34BddXPkz2A5xJ5L/Pchd5BL6rdccM9QGvu0sWZzK1Z1t4wwyg== + version "5.16.0" + resolved "https://registry.yarnpkg.com/enhanced-resolve/-/enhanced-resolve-5.16.0.tgz#65ec88778083056cb32487faa9aef82ed0864787" + integrity sha512-O+QWCviPNSSLAD9Ucn8Awv+poAkqn3T1XY5/N7kR7rQO9yfSGWkYZDwpJ+iKF7B8rxaQKWngSqACpgzeapSyoA== dependencies: graceful-fs "^4.2.4" tapable "^2.2.0" @@ -4781,7 +5034,7 @@ es6-promisify@^6.0.0: resolved "https://registry.yarnpkg.com/es6-promisify/-/es6-promisify-6.1.1.tgz#46837651b7b06bf6fff893d03f29393668d01621" integrity sha512-HBL8I3mIki5C1Cc9QjKUenHtnG0A5/xA8Q/AllRcfiwl2CZFXGK7ddBiCoRwAix4i2KxcQfjtIVcrVbB3vbmwg== -escalade@^3.1.1, escalade@^3.1.2: +escalade@^3.1.1: version "3.1.2" resolved "https://registry.yarnpkg.com/escalade/-/escalade-3.1.2.tgz#54076e9ab29ea5bf3d8f1ed62acffbb88272df27" integrity sha512-ErCHMCae19vR8vQGe50xIsVomy19rg6gFu3+r3jkEO46suLMWBksvVyoGgQV+jOfl84ZSOSlmv6Gxa89PmTGmA== @@ -4871,12 +5124,12 @@ eslint-plugin-import@^2.29.0: tsconfig-paths "^3.15.0" eslint-plugin-prettier@^5.0.1: - version "5.2.1" - resolved "https://registry.yarnpkg.com/eslint-plugin-prettier/-/eslint-plugin-prettier-5.2.1.tgz#d1c8f972d8f60e414c25465c163d16f209411f95" - integrity sha512-gH3iR3g4JfF+yYPaJYkN7jEl9QbweL/YfkoRlNnuIEHEz1vHVlCmWOS+eGGiRuzHQXdJFCOTxRgvju9b8VUmrw== + version "5.1.3" + resolved "https://registry.yarnpkg.com/eslint-plugin-prettier/-/eslint-plugin-prettier-5.1.3.tgz#17cfade9e732cef32b5f5be53bd4e07afd8e67e1" + integrity sha512-C9GCVAs4Eq7ZC/XFQHITLiHJxQngdtraXaM+LoUFoFp/lHNl2Zn8f3WQbe9HvTBBQ9YnKFB0/2Ajdqwo5D1EAw== dependencies: prettier-linter-helpers "^1.0.0" - synckit "^0.9.1" + synckit "^0.8.6" eslint-scope@^5.1.1: version "5.1.1" @@ -5035,9 +5288,9 @@ esprima@^4.0.0: integrity sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A== esquery@^1.4.0, esquery@^1.4.2: - version "1.6.0" - resolved "https://registry.yarnpkg.com/esquery/-/esquery-1.6.0.tgz#91419234f804d852a82dceec3e16cdc22cf9dae7" - integrity sha512-ca9pw9fomFcKPvFLXhBKUK90ZvGibiGOvRJNbjljY7s7uq/5YO4BOzcYtJqExdx99rF6aAcnRxHmcUHcz6sQsg== + version "1.5.0" + resolved "https://registry.yarnpkg.com/esquery/-/esquery-1.5.0.tgz#6ce17738de8577694edd7361c57182ac8cb0db0b" + integrity sha512-YQLXUplAwJgCydQ78IMJywZCceoqk1oH01OERdSAJc/7U2AylwjhSCLDEtqwg811idIS/9fIU5GjG73IgjKMVg== dependencies: estraverse "^5.1.0" @@ -5088,9 +5341,9 @@ eth-gas-reporter@^0.2.25: sync-request "^6.0.0" ethereum-bloom-filters@^1.0.6: - version "1.2.0" - resolved "https://registry.yarnpkg.com/ethereum-bloom-filters/-/ethereum-bloom-filters-1.2.0.tgz#8294f074c1a6cbd32c39d2cc77ce86ff14797dab" - integrity sha512-28hyiE7HVsWubqhpVLVmZXFd4ITeHi+BUu05o9isf0GUpMtzBUi+8/gFrGaGYzvGAJQmJ3JKj77Mk9G98T84rA== + version "1.1.0" + resolved "https://registry.yarnpkg.com/ethereum-bloom-filters/-/ethereum-bloom-filters-1.1.0.tgz#b3fc1eb789509ee30db0bf99a2988ccacb8d0397" + integrity sha512-J1gDRkLpuGNvWYzWslBQR9cDV4nd4kfvVTE/Wy4Kkm4yb3EYRSlyi0eB/inTsSTTVyA0+HyzHgbr95Fn/Z1fSw== dependencies: "@noble/hashes" "^1.4.0" @@ -5126,14 +5379,14 @@ ethereum-cryptography@^1.0.3: "@scure/bip39" "1.1.1" ethereum-cryptography@^2.0.0, ethereum-cryptography@^2.1.2: - version "2.2.1" - resolved 
"https://registry.yarnpkg.com/ethereum-cryptography/-/ethereum-cryptography-2.2.1.tgz#58f2810f8e020aecb97de8c8c76147600b0b8ccf" - integrity sha512-r/W8lkHSiTLxUxW8Rf3u4HGB0xQweG2RyETjywylKZSzLWoWAijRz8WCuOtJ6wah+avllXBqZuk29HCCvhEIRg== + version "2.1.3" + resolved "https://registry.yarnpkg.com/ethereum-cryptography/-/ethereum-cryptography-2.1.3.tgz#1352270ed3b339fe25af5ceeadcf1b9c8e30768a" + integrity sha512-BlwbIL7/P45W8FGW2r7LGuvoEZ+7PWsniMvQ4p5s2xCyw9tmaDlpfsN9HjAucbF+t/qpVHwZUisgfK24TCW8aA== dependencies: - "@noble/curves" "1.4.2" - "@noble/hashes" "1.4.0" - "@scure/bip32" "1.4.0" - "@scure/bip39" "1.3.0" + "@noble/curves" "1.3.0" + "@noble/hashes" "1.3.3" + "@scure/bip32" "1.3.3" + "@scure/bip39" "1.2.2" ethereum-waffle@^4.0.10: version "4.0.10" @@ -5227,9 +5480,9 @@ ethers@^5.0.2, ethers@^5.7.0, ethers@^5.7.2, ethers@~5.7.0, ethers@~5.7.2: "@ethersproject/wordlists" "5.7.0" ethers@^6.7.1: - version "6.13.2" - resolved "https://registry.yarnpkg.com/ethers/-/ethers-6.13.2.tgz#4b67d4b49e69b59893931a032560999e5e4419fe" - integrity sha512-9VkriTTed+/27BGuY1s0hf441kqwHJ1wtN2edksEtiRvXx+soxRX3iSXTfFqq2+YwrOqbDoTHjIhQnjJRlzKmg== + version "6.12.1" + resolved "https://registry.yarnpkg.com/ethers/-/ethers-6.12.1.tgz#517ff6d66d4fd5433e38e903051da3e57c87ff37" + integrity sha512-j6wcVoZf06nqEcBbDWkKg8Fp895SS96dSnTCjiXT+8vt2o02raTn4Lo9ERUuIVU5bAjoPYeA+7ytQFexFmLuVw== dependencies: "@adraffy/ens-normalize" "1.10.1" "@noble/curves" "1.2.0" @@ -5237,7 +5490,7 @@ ethers@^6.7.1: "@types/node" "18.15.13" aes-js "4.0.0-beta.5" tslib "2.4.0" - ws "8.17.1" + ws "8.5.0" ethers@~5.5.0: version "5.5.4" @@ -5484,11 +5737,6 @@ fast-levenshtein@^2.0.6, fast-levenshtein@~2.0.6: resolved "https://registry.yarnpkg.com/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz#3d8a5c66883a16a30ca8643e851f19baa7797917" integrity sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw== -fast-uri@^3.0.1: - version "3.0.1" - resolved "https://registry.yarnpkg.com/fast-uri/-/fast-uri-3.0.1.tgz#cddd2eecfc83a71c1be2cc2ef2061331be8a7134" - integrity sha512-MWipKbbYiYI0UC7cl8m/i/IWTqfC8YXsqjzybjddLsFjStroQzsHXkc73JutMvBiXmOvapk+axIl79ig5t55Bw== - fastq@^1.6.0: version "1.17.1" resolved "https://registry.yarnpkg.com/fastq/-/fastq-1.17.1.tgz#2a523f07a4e7b1e81a42b91b8bf2254107753b47" @@ -5517,17 +5765,10 @@ file-entry-cache@^6.0.1: dependencies: flat-cache "^3.0.4" -filelist@^1.0.4: - version "1.0.4" - resolved "https://registry.yarnpkg.com/filelist/-/filelist-1.0.4.tgz#f78978a1e944775ff9e62e744424f215e58352b5" - integrity sha512-w1cEuf3S+DrLCQL7ET6kz+gmlJdbq9J7yXCSjK/OZCPA+qEN1WyF4ZAf0YYJa4/shHJra2t/d/r8SV4Ji+x+8Q== - dependencies: - minimatch "^5.0.1" - -fill-range@^7.1.1: - version "7.1.1" - resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-7.1.1.tgz#44265d3cac07e3ea7dc247516380643754a05292" - integrity sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg== +fill-range@^7.0.1: + version "7.0.1" + resolved "https://registry.yarnpkg.com/fill-range/-/fill-range-7.0.1.tgz#1919a6a7c75fe38b2c7c77e5198535da9acdda40" + integrity sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ== dependencies: to-regex-range "^5.0.1" @@ -5601,9 +5842,9 @@ for-each@^0.3.3: is-callable "^1.1.3" foreground-child@^3.1.0: - version "3.2.1" - resolved "https://registry.yarnpkg.com/foreground-child/-/foreground-child-3.2.1.tgz#767004ccf3a5b30df39bed90718bab43fe0a59f7" - integrity 
sha512-PXUUyLqrR2XCWICfv6ukppP96sdFwWbNEnfEMt7jNsISjMsvaLNinAHNDYyvkyU+SZG2BTSbT5NjG+vZslfGTA== + version "3.3.0" + resolved "https://registry.yarnpkg.com/foreground-child/-/foreground-child-3.3.0.tgz#0ac8644c06e431439f8561db8ecf29a7b5519c77" + integrity sha512-Ld2g8rrAyMYFXBhEqMz8ZAHBi4J4uS1i/CxGMDnjyFWddMXLVcDp051DZfu+t7+ab7Wv6SMqpWmyFIj5UbfFvg== dependencies: cross-spawn "^7.0.0" signal-exit "^4.0.1" @@ -5826,9 +6067,9 @@ get-symbol-description@^1.0.2: get-intrinsic "^1.2.4" get-tsconfig@^4.5.0: - version "4.7.6" - resolved "https://registry.yarnpkg.com/get-tsconfig/-/get-tsconfig-4.7.6.tgz#118fd5b7b9bae234cc7705a00cd771d7eb65d62a" - integrity sha512-ZAqrLlu18NbDdRaHq+AKXzAmqIUPswPWKUchfytdAjiRFnCe5ojG2bstg6mRiZabkKfCoL/e98pbBELIV/YCeA== + version "4.7.3" + resolved "https://registry.yarnpkg.com/get-tsconfig/-/get-tsconfig-4.7.3.tgz#0498163d98f7b58484dd4906999c0c9d5f103f83" + integrity sha512-ZvkrzoUA0PQZM6fy6+/Hce561s+faD1rsNwhnO5FelNjyy7EMGJ3Rz1AQ8GYDWjhRs/7dBLOEJvhK8MiEJOAFg== dependencies: resolve-pkg-maps "^1.0.0" @@ -5885,6 +6126,17 @@ glob@7.2.0: once "^1.3.0" path-is-absolute "^1.0.0" +glob@8.1.0, glob@^8.0.3: + version "8.1.0" + resolved "https://registry.yarnpkg.com/glob/-/glob-8.1.0.tgz#d388f656593ef708ee3e34640fdfb99a9fd1c33e" + integrity sha512-r8hpEjiQEYlF2QU0df3dS+nxxSIreXQS1qRhMJM0Q5NDdR386C7jb7Hwwod8Fgiuex+k0GFjgft18yvxm5XoCQ== + dependencies: + fs.realpath "^1.0.0" + inflight "^1.0.4" + inherits "2" + minimatch "^5.0.1" + once "^1.3.0" + glob@^10.4.1: version "10.4.5" resolved "https://registry.yarnpkg.com/glob/-/glob-10.4.5.tgz#f4d9f0b90ffdbab09c9d77f5f29b4262517b0956" @@ -5920,17 +6172,6 @@ glob@^7.0.0, glob@^7.1.2, glob@^7.1.3, glob@^7.1.4, glob@^7.1.6: once "^1.3.0" path-is-absolute "^1.0.0" -glob@^8.0.3, glob@^8.1.0: - version "8.1.0" - resolved "https://registry.yarnpkg.com/glob/-/glob-8.1.0.tgz#d388f656593ef708ee3e34640fdfb99a9fd1c33e" - integrity sha512-r8hpEjiQEYlF2QU0df3dS+nxxSIreXQS1qRhMJM0Q5NDdR386C7jb7Hwwod8Fgiuex+k0GFjgft18yvxm5XoCQ== - dependencies: - fs.realpath "^1.0.0" - inflight "^1.0.4" - inherits "2" - minimatch "^5.0.1" - once "^1.3.0" - glob@~8.0.3: version "8.0.3" resolved "https://registry.yarnpkg.com/glob/-/glob-8.0.3.tgz#415c6eb2deed9e502c68fa44a272e6da6eeca42e" @@ -5971,12 +6212,11 @@ globals@^13.19.0, globals@^13.6.0, globals@^13.9.0: type-fest "^0.20.2" globalthis@^1.0.3: - version "1.0.4" - resolved "https://registry.yarnpkg.com/globalthis/-/globalthis-1.0.4.tgz#7430ed3a975d97bfb59bcce41f5cabbafa651236" - integrity sha512-DpLKbNU4WylpxJykQujfCcwYWiV/Jhm50Goo0wrVILAv5jOr9d+H+UR3PhSCD2rCCEIg0uc+G+muBTwD54JhDQ== + version "1.0.3" + resolved "https://registry.yarnpkg.com/globalthis/-/globalthis-1.0.3.tgz#5852882a52b80dc301b0660273e1ed082f0b6ccf" + integrity sha512-sFdI5LyBiNTHjRd7cGPWapiHWMOXKyuBNX/cWJ3NfzrZQVa8GI/8cofCl74AOVqq9W5kNmguTIzJ/1s2gyI9wA== dependencies: - define-properties "^1.2.1" - gopd "^1.0.1" + define-properties "^1.1.3" globby@^10.0.1: version "10.0.2" @@ -6151,13 +6391,13 @@ hardhat@=2.22.2: ws "^7.4.6" hardhat@^2.14.0: - version "2.22.10" - resolved "https://registry.yarnpkg.com/hardhat/-/hardhat-2.22.10.tgz#826ab56e47af98406e6dd105ba6d2dbb148013d9" - integrity sha512-JRUDdiystjniAvBGFmJRsiIZSOP2/6s++8xRDe3TzLeQXlWWHsXBrd9wd3JWFyKXvgMqMeLL5Sz/oNxXKYw9vg== + version "2.22.12" + resolved "https://registry.yarnpkg.com/hardhat/-/hardhat-2.22.12.tgz#a6d0be011fc009c50c454da367ad28c29f58d446" + integrity sha512-yok65M+LsOeTBHQsjg//QreGCyrsaNmeLVzhTFqlOvZ4ZE5y69N0wRxH1b2BC9dGK8S8OPUJMNiL9X0RAvbm8w== dependencies: 
"@ethersproject/abi" "^5.1.2" "@metamask/eth-sig-util" "^4.0.0" - "@nomicfoundation/edr" "^0.5.2" + "@nomicfoundation/edr" "^0.6.1" "@nomicfoundation/ethereumjs-common" "4.0.4" "@nomicfoundation/ethereumjs-tx" "5.0.4" "@nomicfoundation/ethereumjs-util" "9.0.4" @@ -6170,7 +6410,7 @@ hardhat@^2.14.0: ansi-escapes "^4.3.0" boxen "^5.1.2" chalk "^2.4.2" - chokidar "^3.4.0" + chokidar "^4.0.0" ci-info "^2.0.0" debug "^4.1.1" enquirer "^2.3.0" @@ -6183,6 +6423,7 @@ hardhat@^2.14.0: glob "7.2.0" immutable "^4.0.0-rc.12" io-ts "1.10.4" + json-stream-stringify "^3.1.4" keccak "^3.0.2" lodash "^4.17.11" mnemonist "^0.38.0" @@ -6199,6 +6440,55 @@ hardhat@^2.14.0: uuid "^8.3.2" ws "^7.4.6" +hardhat@^2.22.5: + version "2.22.5" + resolved "https://registry.yarnpkg.com/hardhat/-/hardhat-2.22.5.tgz#7e1a4311fa9e34a1cfe337784eae06706f6469a5" + integrity sha512-9Zq+HonbXCSy6/a13GY1cgHglQRfh4qkzmj1tpPlhxJDwNVnhxlReV6K7hCWFKlOrV13EQwsdcD0rjcaQKWRZw== + dependencies: + "@ethersproject/abi" "^5.1.2" + "@metamask/eth-sig-util" "^4.0.0" + "@nomicfoundation/edr" "^0.4.0" + "@nomicfoundation/ethereumjs-common" "4.0.4" + "@nomicfoundation/ethereumjs-tx" "5.0.4" + "@nomicfoundation/ethereumjs-util" "9.0.4" + "@nomicfoundation/solidity-analyzer" "^0.1.0" + "@sentry/node" "^5.18.1" + "@types/bn.js" "^5.1.0" + "@types/lru-cache" "^5.1.0" + adm-zip "^0.4.16" + aggregate-error "^3.0.0" + ansi-escapes "^4.3.0" + boxen "^5.1.2" + chalk "^2.4.2" + chokidar "^3.4.0" + ci-info "^2.0.0" + debug "^4.1.1" + enquirer "^2.3.0" + env-paths "^2.2.0" + ethereum-cryptography "^1.0.3" + ethereumjs-abi "^0.6.8" + find-up "^2.1.0" + fp-ts "1.19.3" + fs-extra "^7.0.1" + glob "7.2.0" + immutable "^4.0.0-rc.12" + io-ts "1.10.4" + keccak "^3.0.2" + lodash "^4.17.11" + mnemonist "^0.38.0" + mocha "^10.0.0" + p-map "^4.0.0" + raw-body "^2.4.1" + resolve "1.17.0" + semver "^6.3.0" + solc "0.7.3" + source-map-support "^0.5.13" + stacktrace-parser "^0.1.10" + tsort "0.0.1" + undici "^5.14.0" + uuid "^8.3.2" + ws "^7.4.6" + has-bigints@^1.0.1, has-bigints@^1.0.2: version "1.0.2" resolved "https://registry.yarnpkg.com/has-bigints/-/has-bigints-1.0.2.tgz#0871bd3e3d51626f6ca0966668ba35d5602d6eaa" @@ -6267,7 +6557,7 @@ hasown@^2.0.0, hasown@^2.0.1, hasown@^2.0.2: dependencies: function-bind "^1.1.2" -he@1.2.0, he@^1.2.0: +he@1.2.0: version "1.2.0" resolved "https://registry.yarnpkg.com/he/-/he-1.2.0.tgz#84ae65fa7eafb165fddb61566ae14baf05664f0f" integrity sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw== @@ -6402,9 +6692,9 @@ immediate@~3.2.3: integrity sha512-RrGCXRm/fRVqMIhqXrGEX9rRADavPiDFSoMb/k64i9XMk8uH4r/Omi5Ctierj6XzNecwDbO4WuFbDD1zmpl3Tg== immutable@^4.0.0-rc.12: - version "4.3.7" - resolved "https://registry.yarnpkg.com/immutable/-/immutable-4.3.7.tgz#c70145fc90d89fb02021e65c84eb0226e4e5a381" - integrity sha512-1hqclzwYwjRDFLjcFxOM5AYkkG0rpFPpr1RLPMEuGczoS7YA8gLhy8SWXYRAA/XwfEHpfo3cw5JGioS32fnMRw== + version "4.3.5" + resolved "https://registry.yarnpkg.com/immutable/-/immutable-4.3.5.tgz#f8b436e66d59f99760dc577f5c99a4fd2a5cc5a0" + integrity sha512-8eabxkth9gZatlwl5TBuJnCsoTADlL6ftEr7A4qgdaTsPyreilDSnUk57SO+jfKcNtxPa22U5KK6DSeAYhpBJw== import-fresh@^3.0.0, import-fresh@^3.2.1, import-fresh@^3.3.0: version "3.3.0" @@ -6415,9 +6705,9 @@ import-fresh@^3.0.0, import-fresh@^3.2.1, import-fresh@^3.3.0: resolve-from "^4.0.0" import-local@^3.0.2: - version "3.2.0" - resolved "https://registry.yarnpkg.com/import-local/-/import-local-3.2.0.tgz#c3d5c745798c02a6f8b897726aba5100186ee260" - integrity 
sha512-2SPlun1JUPWoM6t3F0dw0FkCF/jWY8kttcY4f599GLTSjh2OCuuhdTkJQsEcZzBqbXZGKMK2OqW1oZsjtf/gQA== + version "3.1.0" + resolved "https://registry.yarnpkg.com/import-local/-/import-local-3.1.0.tgz#b4479df8a5fd44f6cdce24070675676063c95cb4" + integrity sha512-ASB07uLtnDs1o6EHjKpX34BKYDSqnFerfTOJL2HvMqF70LnxpjkzDB8J44oT9pu4AMPkQwf8jl6szgvNd2tRIg== dependencies: pkg-dir "^4.2.0" resolve-cwd "^3.0.0" @@ -6546,11 +6836,11 @@ is-callable@^1.1.3, is-callable@^1.1.4, is-callable@^1.2.7: integrity sha512-1BC0BVFhS/p0qtw6enp8e+8OD0UrK0oFLztSjNzhcKA3WDuJxxAPXzPuPtKkjEY9UUoEWlX/8fgKeu2S8i9JTA== is-core-module@^2.11.0, is-core-module@^2.13.0, is-core-module@^2.13.1: - version "2.15.0" - resolved "https://registry.yarnpkg.com/is-core-module/-/is-core-module-2.15.0.tgz#71c72ec5442ace7e76b306e9d48db361f22699ea" - integrity sha512-Dd+Lb2/zvk9SKy1TGCt1wFJFo/MWBPMX5x7KcvLajWTGuomczdQX61PvY5yK6SVACwpoexWo81IfFyoKY2QnTA== + version "2.13.1" + resolved "https://registry.yarnpkg.com/is-core-module/-/is-core-module-2.13.1.tgz#ad0d7532c6fea9da1ebdc82742d74525c6273384" + integrity sha512-hHrIjvZsftOsvKSn2TRYl63zvxsgE0K+0mYMoH6gD4omR5IWB2KynivBQczo3+wF1cCkjzvptnI9Q0sPU66ilw== dependencies: - hasown "^2.0.2" + hasown "^2.0.0" is-data-view@^1.0.1: version "1.0.1" @@ -6735,9 +7025,9 @@ istanbul-lib-instrument@^5.0.4: semver "^6.3.0" istanbul-lib-instrument@^6.0.0: - version "6.0.3" - resolved "https://registry.yarnpkg.com/istanbul-lib-instrument/-/istanbul-lib-instrument-6.0.3.tgz#fa15401df6c15874bcb2105f773325d78c666765" - integrity sha512-Vtgk7L/R2JHyyGW07spoFlB8/lpjiOLTjMdms6AFMraYt3BaJauod/NGrfnVG/y4Ix1JEuMRPDPEj2ua+zz1/Q== + version "6.0.2" + resolved "https://registry.yarnpkg.com/istanbul-lib-instrument/-/istanbul-lib-instrument-6.0.2.tgz#91655936cf7380e4e473383081e38478b69993b1" + integrity sha512-1WUsZ9R1lA0HtBSohTkm39WTPlNKSJ5iFk7UwqXkBLoHQT+hfqPsfsTDVuZdKGaBwn7din9bS7SsnoAr943hvw== dependencies: "@babel/core" "^7.23.9" "@babel/parser" "^7.23.9" @@ -6780,16 +7070,6 @@ jackspeak@^3.1.2: optionalDependencies: "@pkgjs/parseargs" "^0.11.0" -jake@^10.8.5: - version "10.9.2" - resolved "https://registry.yarnpkg.com/jake/-/jake-10.9.2.tgz#6ae487e6a69afec3a5e167628996b59f35ae2b7f" - integrity sha512-2P4SQ0HrLQ+fw6llpLnOaGAvN2Zu6778SJMrCUwns4fOoG9ayrTiZk3VV8sCPkVZF8ab0zksVpS8FDY5pRCNBA== - dependencies: - async "^3.2.3" - chalk "^4.0.2" - filelist "^1.0.4" - minimatch "^3.1.2" - jest-changed-files@^29.7.0: version "29.7.0" resolved "https://registry.yarnpkg.com/jest-changed-files/-/jest-changed-files-29.7.0.tgz#1c06d07e77c78e1585d020424dedc10d6e17ac3a" @@ -7238,6 +7518,11 @@ json-stable-stringify-without-jsonify@^1.0.1: resolved "https://registry.yarnpkg.com/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz#9db7b59496ad3f3cfef30a75142d2d930ad72651" integrity sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw== +json-stream-stringify@^3.1.4: + version "3.1.5" + resolved "https://registry.yarnpkg.com/json-stream-stringify/-/json-stream-stringify-3.1.5.tgz#7184383b397a83ac5da33b62371217522e6ac2f6" + integrity sha512-wurRuTiw27mck9MWaUIGAunfwqhPDxnXQVN/+Rzi+IEQUUALU10AZs1nWkSdtjH7PAVuAUcqQjH11S/JHOWeaA== + json-stringify-safe@~5.0.1: version "5.0.1" resolved "https://registry.yarnpkg.com/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz#1296a2d58fd45f19a0f6ce01d65701e2c735b6eb" @@ -7663,7 +7948,7 @@ lodash@^4.17.11, lodash@^4.17.12, lodash@^4.17.14, lodash@^4.17.15, lodash@^4.17 resolved 
"https://registry.yarnpkg.com/lodash/-/lodash-4.17.21.tgz#679591c564c3bffaae8454cf0b3df370c3d6911c" integrity sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg== -log-symbols@4.1.0, log-symbols@^4.1.0: +log-symbols@4.1.0: version "4.1.0" resolved "https://registry.yarnpkg.com/log-symbols/-/log-symbols-4.1.0.tgz#3fbdbb95b4683ac9fc785111e792e558d4abd503" integrity sha512-8XPvpAA8uyhfteu8pIvQxpJZ7SYYdpUivZpGy6sFsBuKRY/7rQGavedeB8aK+Zkyq6upMFVL/9AW6vOYzfRyLg== @@ -7700,6 +7985,13 @@ lru-cache@^5.1.1: dependencies: yallist "^3.0.2" +lru-cache@^6.0.0: + version "6.0.0" + resolved "https://registry.yarnpkg.com/lru-cache/-/lru-cache-6.0.0.tgz#6d6fe6570ebd96aaf90fcad1dafa3b2566db3a94" + integrity sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA== + dependencies: + yallist "^4.0.0" + lru_map@^0.3.3: version "0.3.3" resolved "https://registry.yarnpkg.com/lru_map/-/lru_map-0.3.3.tgz#b5c8351b9464cbd750335a79650a0ec0e56118dd" @@ -7885,11 +8177,11 @@ micro-ftch@^0.3.1: integrity sha512-/0LLxhzP0tfiR5hcQebtudP56gUurs2CLkGarnCiB/OqEyUFQ6U3paQi/tgLv0hBJYt2rnr9MNpxz4fiiugstg== micromatch@^4.0.4: - version "4.0.7" - resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-4.0.7.tgz#33e8190d9fe474a9895525f5618eee136d46c2e5" - integrity sha512-LPP/3KorzCwBxfeUuZmaR6bG2kdeHSbe0P2tY3FLRU4vYrjYz5hI4QZwV0njUx3jeuKe67YukQ1LSPZBKDqO/Q== + version "4.0.5" + resolved "https://registry.yarnpkg.com/micromatch/-/micromatch-4.0.5.tgz#bc8999a7cbbf77cdc89f132f6e467051b49090c6" + integrity sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA== dependencies: - braces "^3.0.3" + braces "^3.0.2" picomatch "^2.3.1" miller-rabin@^4.0.0: @@ -7956,6 +8248,13 @@ minimatch@4.2.1: dependencies: brace-expansion "^1.1.7" +minimatch@5.0.1: + version "5.0.1" + resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-5.0.1.tgz#fb9022f7528125187c92bd9e9b6366be1cf3415b" + integrity sha512-nLDxIFRyhDblz3qMuq+SoRZED4+miJ/G+tdDrjkkkRnjAsBexeGpgjLEQ0blJy7rHhR2b93rhQY4SvyWu9v03g== + dependencies: + brace-expansion "^2.0.1" + minimatch@9.0.3: version "9.0.3" resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-9.0.3.tgz#a6e00c3de44c3a542bfaae70abfc22420a6da825" @@ -7963,7 +8262,7 @@ minimatch@9.0.3: dependencies: brace-expansion "^2.0.1" -minimatch@^5.0.1, minimatch@^5.1.6, minimatch@~5.1.2: +minimatch@^5.0.1, minimatch@~5.1.2: version "5.1.6" resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-5.1.6.tgz#1cfcb8cf5522ea69952cd2af95ae09477f122a96" integrity sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g== @@ -7977,14 +8276,7 @@ minimatch@^7.4.3: dependencies: brace-expansion "^2.0.1" -minimatch@^9.0.3: - version "9.0.4" - resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-9.0.4.tgz#8e49c731d1749cbec05050ee5145147b32496a51" - integrity sha512-KqWh+VchfxcMNRAJjj2tnsSJdNbHsVgnkBhTNrW7AjVo6OvLtxw8zfT9oLw1JSohlFzJ8jCoTgaoXvJ+kHt6fw== - dependencies: - brace-expansion "^2.0.1" - -minimatch@^9.0.4: +minimatch@^9.0.3, minimatch@^9.0.4: version "9.0.5" resolved "https://registry.yarnpkg.com/minimatch/-/minimatch-9.0.5.tgz#d74f9dd6b57d83d8e98cfb82133b03978bc929e5" integrity sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow== @@ -8003,12 +8295,7 @@ minimist@^1.2.0, minimist@^1.2.5, minimist@^1.2.6, minimist@^1.2.8, minimist@~1. 
resolved "https://registry.yarnpkg.com/minimist/-/minimist-1.2.8.tgz#c1a464e7693302e082a075cee0c057741ac4772c" integrity sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA== -"minipass@^5.0.0 || ^6.0.2 || ^7.0.0": - version "7.1.1" - resolved "https://registry.yarnpkg.com/minipass/-/minipass-7.1.1.tgz#f7f85aff59aa22f110b20e27692465cf3bf89481" - integrity sha512-UZ7eQ+h8ywIRAW1hIEl2AqdwzJucU/Kp59+8kkZeSvafXhZjul247BvIJjEVFVeON6d7lM46XX1HXCduKAS8VA== - -minipass@^7.1.2: +"minipass@^5.0.0 || ^6.0.2 || ^7.0.0", minipass@^7.1.2: version "7.1.2" resolved "https://registry.yarnpkg.com/minipass/-/minipass-7.1.2.tgz#93a9626ce5e5e66bd4db86849e7515e92340a707" integrity sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw== @@ -8053,30 +8340,30 @@ mocha-steps@^1.3.0: integrity sha512-KZvpMJTqzLZw3mOb+EEuYi4YZS41C9iTnb7skVFRxHjUd1OYbl64tCMSmpdIRM9LnwIrSOaRfPtNpF5msgv6Eg== mocha@^10.0.0, mocha@^10.2.0: - version "10.7.0" - resolved "https://registry.yarnpkg.com/mocha/-/mocha-10.7.0.tgz#9e5cbed8fa9b37537a25bd1f7fb4f6fc45458b9a" - integrity sha512-v8/rBWr2VO5YkspYINnvu81inSz2y3ODJrhO175/Exzor1RcEZZkizgE2A+w/CAXXoESS8Kys5E62dOHGHzULA== + version "10.4.0" + resolved "https://registry.yarnpkg.com/mocha/-/mocha-10.4.0.tgz#ed03db96ee9cfc6d20c56f8e2af07b961dbae261" + integrity sha512-eqhGB8JKapEYcC4ytX/xrzKforgEc3j1pGlAXVy3eRwrtAy5/nIfT1SvgGzfN0XZZxeLq0aQWkOUAmqIJiv+bA== dependencies: - ansi-colors "^4.1.3" - browser-stdout "^1.3.1" - chokidar "^3.5.3" - debug "^4.3.5" - diff "^5.2.0" - escape-string-regexp "^4.0.0" - find-up "^5.0.0" - glob "^8.1.0" - he "^1.2.0" - js-yaml "^4.1.0" - log-symbols "^4.1.0" - minimatch "^5.1.6" - ms "^2.1.3" - serialize-javascript "^6.0.2" - strip-json-comments "^3.1.1" - supports-color "^8.1.1" - workerpool "^6.5.1" - yargs "^16.2.0" - yargs-parser "^20.2.9" - yargs-unparser "^2.0.0" + ansi-colors "4.1.1" + browser-stdout "1.3.1" + chokidar "3.5.3" + debug "4.3.4" + diff "5.0.0" + escape-string-regexp "4.0.0" + find-up "5.0.0" + glob "8.1.0" + he "1.2.0" + js-yaml "4.1.0" + log-symbols "4.1.0" + minimatch "5.0.1" + ms "2.1.3" + serialize-javascript "6.0.0" + strip-json-comments "3.1.1" + supports-color "8.1.1" + workerpool "6.2.1" + yargs "16.2.0" + yargs-parser "20.2.4" + yargs-unparser "2.0.0" mocha@^9.0.2: version "9.2.2" @@ -8123,7 +8410,7 @@ ms@2.1.2: resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.2.tgz#d09d1f357b443f493382a8eb3ccd183872ae6009" integrity sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w== -ms@2.1.3, ms@^2.1.1, ms@^2.1.3: +ms@2.1.3, ms@^2.1.1: version "2.1.3" resolved "https://registry.yarnpkg.com/ms/-/ms-2.1.3.tgz#574c8138ce1d2b5861f0b44579dbadd60c6615b2" integrity sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA== @@ -8133,10 +8420,10 @@ mute-stream@0.0.7: resolved "https://registry.yarnpkg.com/mute-stream/-/mute-stream-0.0.7.tgz#3075ce93bc21b8fab43e1bc4da7e8115ed1e7bab" integrity sha512-r65nCZhrbXXb6dXOACihYApHw2Q6pV0M3V0PSxd74N0+D8nzAdEAITq2oAjA1jVnKI+tGvEBUpqiMh0+rW6zDQ== -nan@^2.18.0, nan@^2.19.0: - version "2.20.0" - resolved "https://registry.yarnpkg.com/nan/-/nan-2.20.0.tgz#08c5ea813dd54ed16e5bd6505bf42af4f7838ca3" - integrity sha512-bk3gXBZDGILuuo/6sKtr0DQmSThYHLtNCdSdXk9YkxD/jK6X2vmCyyXBBxyqZ4XcnzTyYEAThfX3DCEnLf6igw== +nan@^2.17.0, nan@^2.18.0: + version "2.19.0" + resolved "https://registry.yarnpkg.com/nan/-/nan-2.19.0.tgz#bb58122ad55a6c5bc973303908d5b16cfdd5a8c0" + 
integrity sha512-nO1xXxfh/RWNxfd/XPfbIfFk5vgLsAxUR9y5O0cHMJu/AW9U95JLXqthYHjEp+8gQ5p96K9jUp8nbVOxCdRbtw== nanoid@3.3.1: version "3.3.1" @@ -8173,7 +8460,7 @@ nice-try@^1.0.4: resolved "https://registry.yarnpkg.com/nice-try/-/nice-try-1.0.5.tgz#a3378a7696ce7d223e88fc9b764bd7ef1089e366" integrity sha512-1nh45deeb5olNY7eX82BkPO7SSxR5SSYJiPTrTdFUVYwAl8CKMA5N9PjTYkHiRjisVcxcQ1HXdLhx2qxxJzLNQ== -nise@^5.1.9: +nise@^5.1.5: version "5.1.9" resolved "https://registry.yarnpkg.com/nise/-/nise-5.1.9.tgz#0cb73b5e4499d738231a473cd89bd8afbb618139" integrity sha512-qOnoujW4SV6e40dYxJOb3uvuoPHtmLzIk4TFo+j0jPJoC+5Z9xja5qH5JZobEPsa8+YYphMrOSwnrshEhG2qww== @@ -8225,9 +8512,9 @@ node-gyp-build@4.4.0: integrity sha512-amJnQCcgtRVw9SvoebO3BKGESClrfXGCUTX9hSn1OuGQTQBOZmVd0Z0OlecpuRksKvbsUqALE8jls/ErClAPuQ== node-gyp-build@^4.2.0, node-gyp-build@^4.3.0: - version "4.8.1" - resolved "https://registry.yarnpkg.com/node-gyp-build/-/node-gyp-build-4.8.1.tgz#976d3ad905e71b76086f4f0b0d3637fe79b6cda5" - integrity sha512-OSs33Z9yWr148JZcbZd5WiAXhh/n9z8TxQcdMhIOlpN9AhWpLfvVFO73+m77bBABQMaY9XSvIa+qk0jlI7Gcaw== + version "4.8.0" + resolved "https://registry.yarnpkg.com/node-gyp-build/-/node-gyp-build-4.8.0.tgz#3fee9c1731df4581a3f9ead74664369ff00d26dd" + integrity sha512-u6fs2AEUljNho3EYTJNBfImO5QTo/J/1Etd+NVdCj7qWKUSN/bSLkZwhDv7I+w/MSC6qJ4cknepkAYykDdK8og== node-int64@^0.4.0: version "0.4.0" @@ -8235,9 +8522,9 @@ node-int64@^0.4.0: integrity sha512-O5lz91xSOeoXP6DulyHfllpq+Eg00MWitZIbtPfoSEvqIHdl5gfcY6hYzDWnj0qD5tz52PI08u9qUvSVeUBeHw== node-releases@^2.0.14: - version "2.0.18" - resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-2.0.18.tgz#f010e8d35e2fe8d6b2944f03f70213ecedc4ca3f" - integrity sha512-d9VeXT4SJ7ZeOqGX6R5EM022wpL+eWPooLI+5UpWn2jCT1aosUQEhQP214x33Wkwx3JQMvIm+tIoVOdodFS40g== + version "2.0.14" + resolved "https://registry.yarnpkg.com/node-releases/-/node-releases-2.0.14.tgz#2ffb053bceb8b2be8495ece1ab6ce600c4461b0b" + integrity sha512-y10wOWt8yZpqXmOgRo77WaHEmhYQYGNA6y421PKsKYWEK8aW+cqAphborZDhqfyKrbZEN92CN1X2KbafY2s7Yw== nofilter@^3.1.0: version "3.1.0" @@ -8312,9 +8599,9 @@ object-assign@^4.1.0: integrity sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg== object-inspect@^1.13.1: - version "1.13.2" - resolved "https://registry.yarnpkg.com/object-inspect/-/object-inspect-1.13.2.tgz#dea0088467fb991e67af4058147a24824a3043ff" - integrity sha512-IRZSRuzJiynemAXPYtPe5BoI/RESNYR7TYm50MC5Mqbd3Jmw5y790sErYw3V6SryFJD64b74qQQs9wn5Bg/k3g== + version "1.13.1" + resolved "https://registry.yarnpkg.com/object-inspect/-/object-inspect-1.13.1.tgz#b96c6109324ccfef6b12216a956ca4dc2ff94bc2" + integrity sha512-5qoj1RUiKOMsCCNLV1CBiPYE10sziTsnmNxkAI/rZhiD63CF7IqdFGC/XzjWjpSgLf0LxXX3bDFIh0E18f6UhQ== object-keys@^1.1.1: version "1.1.1" @@ -8403,16 +8690,16 @@ optionator@^0.8.1: word-wrap "~1.2.3" optionator@^0.9.1, optionator@^0.9.3: - version "0.9.4" - resolved "https://registry.yarnpkg.com/optionator/-/optionator-0.9.4.tgz#7ea1c1a5d91d764fb282139c88fe11e182a3a734" - integrity sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g== + version "0.9.3" + resolved "https://registry.yarnpkg.com/optionator/-/optionator-0.9.3.tgz#007397d44ed1872fdc6ed31360190f81814e2c64" + integrity sha512-JjCoypp+jKn1ttEFExxhetCKeJt9zhAgAve5FXHixTvFDW/5aEktX9bufBKLRRMdU7bNtpLfcGu94B3cdEJgjg== dependencies: + "@aashutoshrathi/word-wrap" "^1.2.3" deep-is "^0.1.3" fast-levenshtein "^2.0.6" levn "^0.4.1" prelude-ls "^1.2.1" type-check "^0.4.0" - word-wrap 
"^1.2.5" ordinal@1.0.3, ordinal@^1.0.3: version "1.0.3" @@ -8496,9 +8783,9 @@ p-try@^2.0.0: integrity sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ== package-json-from-dist@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/package-json-from-dist/-/package-json-from-dist-1.0.0.tgz#e501cd3094b278495eb4258d4c9f6d5ac3019f00" - integrity sha512-dATvCeZN/8wQsGywez1mzHtTlP22H8OEfPrVMLNr4/eGa+ijtLn/6M5f0dY8UKNrC2O9UCU6SSoG3qRKnt7STw== + version "1.0.1" + resolved "https://registry.yarnpkg.com/package-json-from-dist/-/package-json-from-dist-1.0.1.tgz#4f1471a010827a86f94cfd9b0727e36d267de505" + integrity sha512-UEZIS3/by4OC8vL3P2dTXRETpebLI2NiI5vIrjaD/5UtrkFX/tNbwjTSRAGC/+7CAo2pIcBaRgWmcBBHcsaCIw== package-json@^8.1.0: version "8.1.1" @@ -8689,9 +8976,9 @@ pg-types@^4.0.1: postgres-range "^1.1.1" pg@^8.11.3: - version "8.12.0" - resolved "https://registry.yarnpkg.com/pg/-/pg-8.12.0.tgz#9341724db571022490b657908f65aee8db91df79" - integrity sha512-A+LHUSnwnxrnL/tZ+OLfqR1SxLN3c/pgDztZ47Rpbsd4jUytsTtwQo/TLPRzPJMp/1pbhYVhH9cuSZLAajNfjQ== + version "8.11.5" + resolved "https://registry.yarnpkg.com/pg/-/pg-8.11.5.tgz#e722b0a5f1ed92931c31758ebec3ddf878dd4128" + integrity sha512-jqgNHSKL5cbDjFlHyYsCXmQDrfIX/3RsNwYqpd4N0Kt8niLuNoRNH+aazv6cOd43gPh9Y4DjQCtb+X0MH0Hvnw== dependencies: pg-connection-string "^2.6.4" pg-pool "^3.6.2" @@ -8708,10 +8995,10 @@ pgpass@1.x: dependencies: split2 "^4.1.0" -picocolors@^1.0.0, picocolors@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/picocolors/-/picocolors-1.0.1.tgz#a8ad579b571952f0e5d25892de5445bcfe25aaa1" - integrity sha512-anP1Z8qwhkbmu7MFP5iTt+wQKXgwzf7zTyGlcdzabySa9vd0Xt392U0rVmz9poOaBj0uHJKyyo9/upk0HrEQew== +picocolors@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/picocolors/-/picocolors-1.0.0.tgz#cb5bdc74ff3f51892236eaf79d68bc44564ab81c" + integrity sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ== picomatch@^2.0.4, picomatch@^2.2.1, picomatch@^2.2.3, picomatch@^2.3.1: version "2.3.1" @@ -8855,9 +9142,9 @@ prettier@^2.1.2, prettier@^2.3.1, prettier@^2.3.2, prettier@^2.8.3: integrity sha512-tdN8qQGvNjw4CHbY+XXk0JgCXn9QiF21a55rBe5LJAU+kDyC4WQn4+awm2Xfk2lQMk5fKup9XgzTZtGkjBdP9Q== prettier@^3.0.3: - version "3.3.3" - resolved "https://registry.yarnpkg.com/prettier/-/prettier-3.3.3.tgz#30c54fe0be0d8d12e6ae61dbb10109ea00d53105" - integrity sha512-i2tDNA0O5IrMO757lfrdQZCc2jPNDVntV0m/+4whiDfWaTKfMNgR7Qz0NAeGz/nRqF4m5/6CLzbP4/liHt12Ew== + version "3.2.5" + resolved "https://registry.yarnpkg.com/prettier/-/prettier-3.2.5.tgz#e52bc3090586e824964a8813b09aba6233b28368" + integrity sha512-3/GWa9aOC0YeD7LUfvOG2NiDyhOWRvt1k+rcKhOuYnMY24iiCphgneUfJDyFXd6rZCAnuLBv6UeAULtrhT/F4A== pretty-format@^29.0.0, pretty-format@^29.7.0: version "29.7.0" @@ -8921,9 +9208,9 @@ proto-list@~1.2.1: integrity sha512-vtK/94akxsTMhe0/cbfpR+syPuszcuwhqVjJq26CuNDgFGj682oRBXOP5MJpv2r7JtE8MsiepGIqvvOTBwn2vA== protobufjs@^7.2.5: - version "7.3.2" - resolved "https://registry.yarnpkg.com/protobufjs/-/protobufjs-7.3.2.tgz#60f3b7624968868f6f739430cfbc8c9370e26df4" - integrity sha512-RXyHaACeqXeqAKGLDl68rQKbmObRsTIn4TYVUUug1KfS47YWCo5MacGITEryugIgZqORCvJWEk4l449POg5Txg== + version "7.2.6" + resolved "https://registry.yarnpkg.com/protobufjs/-/protobufjs-7.2.6.tgz#4a0ccd79eb292717aacf07530a07e0ed20278215" + integrity sha512-dgJaEDDL6x8ASUZ1YqWciTRrdOuYNzoOf27oHNfdyvKqHr5i0FV7FSLU+aIeFjyFgVxrpTOtQUi0BLLBymZaBw== dependencies: "@protobufjs/aspromise" "^1.1.2" 
"@protobufjs/base64" "^1.1.2" @@ -8984,10 +9271,10 @@ pure-rand@^6.0.0: resolved "https://registry.yarnpkg.com/pure-rand/-/pure-rand-6.1.0.tgz#d173cf23258231976ccbdb05247c9787957604f2" integrity sha512-bVWawvoZoBYpp6yIoQtQXHZjmz35RSVHnUOTefl8Vcjr8snTPY1wnpSPMWekcFwbxI6gtmT7rSYPFvz71ldiOA== -qs@^6.12.3, qs@^6.4.0: - version "6.12.3" - resolved "https://registry.yarnpkg.com/qs/-/qs-6.12.3.tgz#e43ce03c8521b9c7fd7f1f13e514e5ca37727754" - integrity sha512-AWJm14H1vVaO/iNZ4/hO+HyaTehuy9nRqVdkTqlJt0HWvBiBIEXFmb4C0DGeYo3Xes9rrEW+TxHsaigCbN5ICQ== +qs@^6.11.2, qs@^6.4.0: + version "6.12.0" + resolved "https://registry.yarnpkg.com/qs/-/qs-6.12.0.tgz#edd40c3b823995946a8a0b1f208669c7a200db77" + integrity sha512-trVZiI6RMOkO476zLGaBIzszOdFPnCCXHPG9kn0yuS1uz6xdVxPfZdB3vUig9pxPFDM9BRAgz/YUIVQ1/vuiUg== dependencies: side-channel "^1.0.6" @@ -9052,9 +9339,9 @@ rc@1.2.8, rc@~1.2.7: strip-json-comments "~2.0.1" react-is@^18.0.0: - version "18.3.1" - resolved "https://registry.yarnpkg.com/react-is/-/react-is-18.3.1.tgz#e83557dc12eae63a99e003a46388b1dcbb44db7e" - integrity sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg== + version "18.2.0" + resolved "https://registry.yarnpkg.com/react-is/-/react-is-18.2.0.tgz#199431eeaaa2e09f86427efbb4f1473edb47609b" + integrity sha512-xWGDIW6x921xtzPkhiULtthJHoJvBbF3q26fzloPCK0hsvxtPVelvftw3zjbHWSkR2km9Z+4uxbDDK/6Zw9B8w== read-pkg@^3.0.0: version "3.0.0" @@ -9097,6 +9384,11 @@ readable-stream@~1.0.26-4: isarray "0.0.1" string_decoder "~0.10.x" +readdirp@^4.0.1: + version "4.0.1" + resolved "https://registry.yarnpkg.com/readdirp/-/readdirp-4.0.1.tgz#b2fe35f8dca63183cd3b86883ecc8f720ea96ae6" + integrity sha512-GkMg9uOTpIWWKbSsgwb5fA4EavTR+SG/PMPoAY8hkhHfEEY0/vqljY+XHqtDf2cr2IJtoNRDbrrEpZUiZCkYRw== + readdirp@~3.6.0: version "3.6.0" resolved "https://registry.yarnpkg.com/readdirp/-/readdirp-3.6.0.tgz#74a370bd857116e245b29cc97340cd431a02a6c7" @@ -9453,10 +9745,17 @@ semver@^6.3.0, semver@^6.3.1: resolved "https://registry.yarnpkg.com/semver/-/semver-6.3.1.tgz#556d2ef8689146e46dcea4bfdd095f3434dffcb4" integrity sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA== -semver@^7.2.1, semver@^7.3.4, semver@^7.3.5, semver@^7.3.7, semver@^7.5.1, semver@^7.5.2, semver@^7.5.3, semver@^7.5.4, semver@^7.6.2: - version "7.6.3" - resolved "https://registry.yarnpkg.com/semver/-/semver-7.6.3.tgz#980f7b5550bc175fb4dc09403085627f9eb33143" - integrity sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A== +semver@^7.2.1, semver@^7.3.4, semver@^7.3.5, semver@^7.3.7, semver@^7.5.1, semver@^7.5.2, semver@^7.5.3, semver@^7.5.4: + version "7.6.0" + resolved "https://registry.yarnpkg.com/semver/-/semver-7.6.0.tgz#1a46a4db4bffcccd97b743b5005c8325f23d4e2d" + integrity sha512-EnwXhrlwXMk9gKu5/flx5sv/an57AkRplG3hTK68W7FRDN+k+OWBj65M7719OkA82XLBxrcX0KSHj+X5COhOVg== + dependencies: + lru-cache "^6.0.0" + +semver@^7.6.2: + version "7.6.2" + resolved "https://registry.yarnpkg.com/semver/-/semver-7.6.2.tgz#1e3b34759f896e8f14d6134732ce798aeb0c6e13" + integrity sha512-FNAIBWCx9qcRhoHcgcJ0gvU7SN1lYU2ZXuSfl04bSC5OpvDHFyJCjdNHomPXxjQlCBU67YW64PzY7/VIEH7F2w== serialize-javascript@6.0.0: version "6.0.0" @@ -9465,13 +9764,6 @@ serialize-javascript@6.0.0: dependencies: randombytes "^2.1.0" -serialize-javascript@^6.0.2: - version "6.0.2" - resolved "https://registry.yarnpkg.com/serialize-javascript/-/serialize-javascript-6.0.2.tgz#defa1e055c83bf6d59ea805d8da862254eb6a6c2" - integrity 
sha512-Saa1xPByTTq2gdeFZYLLo+RFE35NHZkAbqZeWNd3BpzppeVisAqpDjcp8dyf6uIvEqJRd46jemmyA4iFIeVk8g== - dependencies: - randombytes "^2.1.0" - set-function-length@^1.2.1: version "1.2.2" resolved "https://registry.yarnpkg.com/set-function-length/-/set-function-length-1.2.2.tgz#aac72314198eaed975cf77b2c3b6b880695e5449" @@ -9589,16 +9881,16 @@ sinon-chai@^3.7.0: integrity sha512-mf5NURdUaSdnatJx3uhoBOrY9dtL19fiOtAdT1Azxg3+lNJFiuN0uzaU3xX1LeAfL17kHQhTAJgpsfhbMJMY2g== sinon@^17.0.1: - version "17.0.2" - resolved "https://registry.yarnpkg.com/sinon/-/sinon-17.0.2.tgz#470894bcc2d24b01bad539722ea46da949892405" - integrity sha512-uihLiaB9FhzesElPDFZA7hDcNABzsVHwr3YfmM9sBllVwab3l0ltGlRV1XhpNfIacNDLGD1QRZNLs5nU5+hTuA== + version "17.0.1" + resolved "https://registry.yarnpkg.com/sinon/-/sinon-17.0.1.tgz#26b8ef719261bf8df43f925924cccc96748e407a" + integrity sha512-wmwE19Lie0MLT+ZYNpDymasPHUKTaZHUH/pKEubRXIzySv9Atnlw+BUMGCzWgV7b7wO+Hw6f1TEOr0IUnmU8/g== dependencies: - "@sinonjs/commons" "^3.0.1" + "@sinonjs/commons" "^3.0.0" "@sinonjs/fake-timers" "^11.2.2" "@sinonjs/samsam" "^8.0.0" - diff "^5.2.0" - nise "^5.1.9" - supports-color "^7" + diff "^5.1.0" + nise "^5.1.5" + supports-color "^7.2.0" sinon@^18.0.0: version "18.0.0" @@ -9828,9 +10120,9 @@ spdx-expression-parse@^3.0.0: spdx-license-ids "^3.0.0" spdx-license-ids@^3.0.0: - version "3.0.18" - resolved "https://registry.yarnpkg.com/spdx-license-ids/-/spdx-license-ids-3.0.18.tgz#22aa922dcf2f2885a6494a261f2d8b75345d0326" - integrity sha512-xxRs31BqRYHwiMzudOrpSiHtZ8i/GeionCBDSilhYRj+9gIcI8wCZTlXZKu9vZIVqViP3dcp9qE5G6AlIaD+TQ== + version "3.0.17" + resolved "https://registry.yarnpkg.com/spdx-license-ids/-/spdx-license-ids-3.0.17.tgz#887da8aa73218e51a1d917502d79863161a93f9c" + integrity sha512-sh8PWc/ftMqAAdFiBu6Fy6JUOYjqDJBJvIhpfDMyHrr0Rbp5liZqd4TjtQ/RgfLjKFZb+LMx5hpml5qOWy0qvg== split-ca@^1.0.0, split-ca@^1.0.1: version "1.0.1" @@ -9914,7 +10206,7 @@ string-length@^4.0.1: char-regex "^1.0.2" strip-ansi "^6.0.0" -"string-width-cjs@npm:string-width@^4.2.0", string-width@^4.0.0, string-width@^4.1.0, string-width@^4.2.0, string-width@^4.2.2, string-width@^4.2.3: +"string-width-cjs@npm:string-width@^4.2.0": version "4.2.3" resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.3.tgz#269c7117d27b05ad2e536830a8ec895ef9c6d010" integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g== @@ -9931,6 +10223,15 @@ string-width@^2.1.0, string-width@^2.1.1: is-fullwidth-code-point "^2.0.0" strip-ansi "^4.0.0" +string-width@^4.0.0, string-width@^4.1.0, string-width@^4.2.0, string-width@^4.2.2, string-width@^4.2.3: + version "4.2.3" + resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.3.tgz#269c7117d27b05ad2e536830a8ec895ef9c6d010" + integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g== + dependencies: + emoji-regex "^8.0.0" + is-fullwidth-code-point "^3.0.0" + strip-ansi "^6.0.1" + string-width@^5.0.1, string-width@^5.1.2: version "5.1.2" resolved "https://registry.yarnpkg.com/string-width/-/string-width-5.1.2.tgz#14f8daec6d81e7221d2a357e668cab73bdbca794" @@ -9997,7 +10298,7 @@ string_decoder@~1.1.1: dependencies: safe-buffer "~5.1.0" -"strip-ansi-cjs@npm:strip-ansi@^6.0.1", strip-ansi@^6.0.0, strip-ansi@^6.0.1: +"strip-ansi-cjs@npm:strip-ansi@^6.0.1": version "6.0.1" resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.1.tgz#9e26c63d30f53443e9489495b2105d37b67a85d9" integrity 
sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A== @@ -10018,6 +10319,13 @@ strip-ansi@^5.1.0: dependencies: ansi-regex "^4.1.0" +strip-ansi@^6.0.0, strip-ansi@^6.0.1: + version "6.0.1" + resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.1.tgz#9e26c63d30f53443e9489495b2105d37b67a85d9" + integrity sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A== + dependencies: + ansi-regex "^5.0.1" + strip-ansi@^7.0.1: version "7.1.0" resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-7.1.0.tgz#d5b6568ca689d8561370b0707685d22434faff45" @@ -10057,7 +10365,7 @@ strip-json-comments@~2.0.1: resolved "https://registry.yarnpkg.com/strip-json-comments/-/strip-json-comments-2.0.1.tgz#3c531942e908c2697c0ec344858c286c7ca0a60a" integrity sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ== -supports-color@8.1.1, supports-color@^8.0.0, supports-color@^8.1.1: +supports-color@8.1.1, supports-color@^8.0.0: version "8.1.1" resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-8.1.1.tgz#cd6fc17e28500cff56c1b86c0a7fd4a54a73005c" integrity sha512-MpUEN2OodtUzxvKQl72cUF7RQ5EiHsGvSsVG0ia9c5RbWGL2CI4C7EpPS8UTBIplnlzZiNuV56w+FuNxy3ty2Q== @@ -10078,7 +10386,7 @@ supports-color@^5.3.0: dependencies: has-flag "^3.0.0" -supports-color@^7, supports-color@^7.1.0: +supports-color@^7, supports-color@^7.1.0, supports-color@^7.2.0: version "7.2.0" resolved "https://registry.yarnpkg.com/supports-color/-/supports-color-7.2.0.tgz#1b7dcdcb32b8138801b3e478ba6a51caa89648da" integrity sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw== @@ -10106,10 +10414,10 @@ sync-rpc@^1.2.1: dependencies: get-port "^3.1.0" -synckit@^0.9.1: - version "0.9.1" - resolved "https://registry.yarnpkg.com/synckit/-/synckit-0.9.1.tgz#febbfbb6649979450131f64735aa3f6c14575c88" - integrity sha512-7gr8p9TQP6RAHusBOSLs46F4564ZrjV8xFmw5zCmgmhGUcw2hxsShhJ6CEiHQMgPDwAQ1fWHPM0ypc4RMAig4A== +synckit@^0.8.6: + version "0.8.8" + resolved "https://registry.yarnpkg.com/synckit/-/synckit-0.8.8.tgz#fe7fe446518e3d3d49f5e429f443cf08b6edfcd7" + integrity sha512-HwOKAP7Wc5aRGYdKH+dw0PRRpbO841v2DENBtjnR5HFWoiNByAl7vrx3p0G/rCyYXQsrxqtX48TImFtPcIHSpQ== dependencies: "@pkgr/core" "^0.1.0" tslib "^2.6.2" @@ -10120,6 +10428,8 @@ synckit@^0.9.1: "@matterlabs/hardhat-zksync-deploy" "^0.7.0" "@matterlabs/hardhat-zksync-solc" "=1.1.4" "@matterlabs/hardhat-zksync-verify" "^1.4.3" + "@openzeppelin/contracts-upgradeable-v4" "npm:@openzeppelin/contracts-upgradeable@4.9.5" + "@openzeppelin/contracts-v4" "npm:@openzeppelin/contracts@4.9.5" commander "^9.4.1" eslint "^8.51.0" eslint-plugin-import "^2.29.0" @@ -10359,12 +10669,11 @@ ts-generator@^0.1.1: ts-essentials "^1.0.0" ts-jest@^29.0.1: - version "29.2.3" - resolved "https://registry.yarnpkg.com/ts-jest/-/ts-jest-29.2.3.tgz#3d226ac36b8b820151a38f164414f9f6b412131f" - integrity sha512-yCcfVdiBFngVz9/keHin9EnsrQtQtEu3nRykNy9RVp+FiPFFbPJ3Sg6Qg4+TkmH0vMP5qsTKgXSsk80HRwvdgQ== + version "29.1.2" + resolved "https://registry.yarnpkg.com/ts-jest/-/ts-jest-29.1.2.tgz#7613d8c81c43c8cb312c6904027257e814c40e09" + integrity sha512-br6GJoH/WUX4pu7FbZXuWGKGNDuU7b8Uj77g/Sp7puZV6EXzuByl6JrECvm0MzVzSTkSHWTihsXt+5XYER5b+g== dependencies: bs-logger "0.x" - ejs "^3.1.10" fast-json-stable-stringify "2.x" jest-util "^29.0.0" json5 "^2.2.3" @@ -10429,9 +10738,9 @@ tslib@^1.8.1, tslib@^1.9.0, tslib@^1.9.3: integrity 
sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg== tslib@^2.6.2: - version "2.6.3" - resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.6.3.tgz#0438f810ad7a9edcde7a241c3d80db693c8cbfe0" - integrity sha512-xNvxJEOUiWPGhUuUdQgAJPKOOJfGnIyKySOc09XkKsgdUV/3E2zvwZYdejjmRgPCgcym1juLH3226yA7sEFJKQ== + version "2.6.2" + resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.6.2.tgz#703ac29425e7b37cd6fd456e92404d46d1f3e4ae" + integrity sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q== tsort@0.0.1: version "0.0.1" @@ -10481,12 +10790,12 @@ type-check@~0.3.2: dependencies: prelude-ls "~1.1.2" -type-detect@4.0.8: +type-detect@4.0.8, type-detect@^4.0.0, type-detect@^4.0.8: version "4.0.8" resolved "https://registry.yarnpkg.com/type-detect/-/type-detect-4.0.8.tgz#7646fb5f18871cfbb7749e69bd39a6388eb7450c" integrity sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g== -type-detect@^4.0.0, type-detect@^4.0.8, type-detect@^4.1.0: +type-detect@^4.1.0: version "4.1.0" resolved "https://registry.yarnpkg.com/type-detect/-/type-detect-4.1.0.tgz#deb2453e8f08dcae7ae98c626b13dddb0155906c" integrity sha512-Acylog8/luQ8L7il+geoSxhEkazvkslg7PSNKOX59mbB9cOveP5aq9h74Y7YU8yDpJwetzQQrfIwtf4Wp4LKcw== @@ -10590,9 +10899,9 @@ typescript@^4.3.5, typescript@^4.5.5, typescript@^4.6.4: integrity sha512-1FXk9E2Hm+QzZQ7z+McJiHL4NW1F2EzMu9Nq9i3zAaGqibafqYwCVU6WyWAuyQRRzOlxou8xZSyXLEN8oKj24g== typescript@^5.2.2: - version "5.5.4" - resolved "https://registry.yarnpkg.com/typescript/-/typescript-5.5.4.tgz#d9852d6c82bad2d2eda4fd74a5762a8f5909e9ba" - integrity sha512-Mtq29sKDAEYP7aljRgtPOpTvOfbwRWlS6dPRzwjdE+C0R4brX/GUyhHSecbHMFLNBLcJIPt9nl9yG5TZ1weH+Q== + version "5.4.4" + resolved "https://registry.yarnpkg.com/typescript/-/typescript-5.4.4.tgz#eb2471e7b0a5f1377523700a21669dce30c2d952" + integrity sha512-dGE2Vv8cpVvw28v8HCPqyb08EzbBURxDpuhJvTrusShUfGnhHBafDsLdS1EhhxyL6BJQE+2cT3dDPAv+MQ6oLw== typical@^2.6.0, typical@^2.6.1: version "2.6.1" @@ -10615,9 +10924,9 @@ uc.micro@^1.0.1, uc.micro@^1.0.5: integrity sha512-8Y75pvTYkLJW2hWQHXxoqRgV7qb9B+9vFEtidML+7koHUFapnVJAZ6cKs+Qjz5Aw3aZWHMC6u0wJE3At+nSGwA== uglify-js@^3.1.4: - version "3.19.1" - resolved "https://registry.yarnpkg.com/uglify-js/-/uglify-js-3.19.1.tgz#2d5df6a0872c43da43187968308d7741d44b8056" - integrity sha512-y/2wiW+ceTYR2TSSptAhfnEtpLaQ4Ups5zrjB2d3kuVxHj16j/QJwPl5PvuGy9uARb39J0+iKxcRPvtpsx4A4A== + version "3.17.4" + resolved "https://registry.yarnpkg.com/uglify-js/-/uglify-js-3.17.4.tgz#61678cf5fa3f5b7eb789bb345df29afb8257c22c" + integrity sha512-T9q82TJI9e/C1TAxYvfb16xO120tMVFZrGA3f9/P4424DNu6ypK103y0GPFVa17yotwSyZW5iYXgjYHkGrJW/g== unbox-primitive@^1.0.2: version "1.0.2" @@ -10634,11 +10943,6 @@ undici-types@~5.26.4: resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-5.26.5.tgz#bcd539893d00b56e964fd2657a4866b221a65617" integrity sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA== -undici-types@~6.11.1: - version "6.11.1" - resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-6.11.1.tgz#432ea6e8efd54a48569705a699e62d8f4981b197" - integrity sha512-mIDEX2ek50x0OlRgxryxsenE5XaQD4on5U2inY7RApK3SOJpofyw7uW2AyfMKkhAxXIceo2DeWGVGwyvng1GNQ== - undici@^5.14.0: version "5.28.4" resolved "https://registry.yarnpkg.com/undici/-/undici-5.28.4.tgz#6b280408edb6a1a604a9b20340f45b422e373068" @@ -10647,9 +10951,9 @@ undici@^5.14.0: "@fastify/busboy" "^2.0.0" undici@^6.18.2: - version 
"6.19.4" - resolved "https://registry.yarnpkg.com/undici/-/undici-6.19.4.tgz#5ec3b191699a1678ee0aa9ed14e443a682d0f7a8" - integrity sha512-i3uaEUwNdkRq2qtTRRJb13moW5HWqviu7Vl7oYRYz++uPtGHJj+x7TGjcEuwS5Mt2P4nA0U9dhIX3DdB6JGY0g== + version "6.19.2" + resolved "https://registry.yarnpkg.com/undici/-/undici-6.19.2.tgz#231bc5de78d0dafb6260cf454b294576c2f3cd31" + integrity sha512-JfjKqIauur3Q6biAtHJ564e3bWa8VvT+7cSiOJHFbX4Erv6CLGDpg8z+Fmg/1OI/47RA+GI2QZaF48SSaLvyBA== universalify@^0.1.0: version "0.1.2" @@ -10671,13 +10975,13 @@ untildify@^3.0.3: resolved "https://registry.yarnpkg.com/untildify/-/untildify-3.0.3.tgz#1e7b42b140bcfd922b22e70ca1265bfe3634c7c9" integrity sha512-iSk/J8efr8uPT/Z4eSUywnqyrQU7DSdMfdqK4iWEaUVVmcP5JcnpRqmVMwcwcnmI1ATFNgC5V90u09tBynNFKA== -update-browserslist-db@^1.1.0: - version "1.1.0" - resolved "https://registry.yarnpkg.com/update-browserslist-db/-/update-browserslist-db-1.1.0.tgz#7ca61c0d8650766090728046e416a8cde682859e" - integrity sha512-EdRAaAyk2cUE1wOf2DkEhzxqOQvFOoRJFNS6NeyJ01Gp2beMRpBAINjM2iDXE3KCuKhwnvHIQCJm6ThL2Z+HzQ== +update-browserslist-db@^1.0.13: + version "1.0.13" + resolved "https://registry.yarnpkg.com/update-browserslist-db/-/update-browserslist-db-1.0.13.tgz#3c5e4f5c083661bd38ef64b6328c26ed6c8248c4" + integrity sha512-xebP81SNcPuNpPP3uzeW1NYXxI3rxyJzF3pD6sH4jE7o/IX+WtSpwnVU+qIsDPyk0d3hmFQ7mjqc6AtV604hbg== dependencies: - escalade "^3.1.2" - picocolors "^1.0.1" + escalade "^3.1.1" + picocolors "^1.0.0" uri-js@^4.2.2: version "4.4.1" @@ -10687,12 +10991,12 @@ uri-js@^4.2.2: punycode "^2.1.0" url@^0.11.0: - version "0.11.4" - resolved "https://registry.yarnpkg.com/url/-/url-0.11.4.tgz#adca77b3562d56b72746e76b330b7f27b6721f3c" - integrity sha512-oCwdVC7mTuWiPyjLUz/COz5TLk6wgp0RCsN+wHZ2Ekneac9w8uuV0njcbbie2ME+Vs+d6duwmYuR3HgQXs1fOg== + version "0.11.3" + resolved "https://registry.yarnpkg.com/url/-/url-0.11.3.tgz#6f495f4b935de40ce4a0a52faee8954244f3d3ad" + integrity sha512-6hxOLGfZASQK/cijlZnZJTq8OXAkt/3YGfQX45vvMYXpZoo8NdWZcY73K108Jf759lS1Bv/8wXnHDTSz17dSRw== dependencies: punycode "^1.4.1" - qs "^6.12.3" + qs "^6.11.2" utf-8-validate@5.0.7: version "5.0.7" @@ -10739,9 +11043,9 @@ v8-compile-cache@^2.0.3: integrity sha512-ocyWc3bAHBB/guyqJQVI5o4BZkPhznPYUG2ea80Gond/BgNWpap8TOmLSeeQG7bnh2KMISxskdADG59j7zruhw== v8-to-istanbul@^9.0.1: - version "9.3.0" - resolved "https://registry.yarnpkg.com/v8-to-istanbul/-/v8-to-istanbul-9.3.0.tgz#b9572abfa62bd556c16d75fdebc1a411d5ff3175" - integrity sha512-kiGUalWN+rgBJ/1OHZsBtU4rXZOfj/7rKQxULKlIzwzQSvMJUUNgPwJEEh7gU6xEVxC0ahoOBvN2YI8GH6FNgA== + version "9.2.0" + resolved "https://registry.yarnpkg.com/v8-to-istanbul/-/v8-to-istanbul-9.2.0.tgz#2ed7644a245cddd83d4e087b9b33b3e62dfd10ad" + integrity sha512-/EH/sDgxU2eGxajKdwLCDmQ4FWq+kpi3uCmBGpw1xJtnAxEjlD8j8PEiGWpCIMIs3ciNAgH0d3TTJiUkYzyZjA== dependencies: "@jridgewell/trace-mapping" "^0.3.12" "@types/istanbul-lib-coverage" "^2.0.1" @@ -10846,7 +11150,7 @@ widest-line@^3.1.0: dependencies: string-width "^4.0.0" -word-wrap@^1.2.5, word-wrap@~1.2.3: +word-wrap@~1.2.3: version "1.2.5" resolved "https://registry.yarnpkg.com/word-wrap/-/word-wrap-1.2.5.tgz#d2c45c6dd4fbce621a66f136cbe328afd0410b34" integrity sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA== @@ -10869,12 +11173,21 @@ workerpool@6.2.0: resolved "https://registry.yarnpkg.com/workerpool/-/workerpool-6.2.0.tgz#827d93c9ba23ee2019c3ffaff5c27fccea289e8b" integrity sha512-Rsk5qQHJ9eowMH28Jwhe8HEbmdYDX4lwoMWshiCXugjtHqMD9ZbiqSDLxcsfdqsETPzVUtX5s1Z5kStiIM6l4A== 
-workerpool@^6.5.1: - version "6.5.1" - resolved "https://registry.yarnpkg.com/workerpool/-/workerpool-6.5.1.tgz#060f73b39d0caf97c6db64da004cd01b4c099544" - integrity sha512-Fs4dNYcsdpYSAfVxhnl1L5zTksjvOJxtC5hzMNl+1t9B8hTJTdKDyZ5ju7ztgPy+ft9tBFXoOlDNiOT9WUXZlA== +workerpool@6.2.1: + version "6.2.1" + resolved "https://registry.yarnpkg.com/workerpool/-/workerpool-6.2.1.tgz#46fc150c17d826b86a008e5a4508656777e9c343" + integrity sha512-ILEIE97kDZvF9Wb9f6h5aXK4swSlKGUcOEGiIYb2OOu/IrDU9iwj0fD//SsA6E5ibwJxpEvhullJY4Sl4GcpAw== + +"wrap-ansi-cjs@npm:wrap-ansi@^7.0.0": + version "7.0.0" + resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-7.0.0.tgz#67e145cff510a6a6984bdf1152911d69d2eb9e43" + integrity sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q== + dependencies: + ansi-styles "^4.0.0" + string-width "^4.1.0" + strip-ansi "^6.0.0" -"wrap-ansi-cjs@npm:wrap-ansi@^7.0.0", wrap-ansi@^7.0.0: +wrap-ansi@^7.0.0: version "7.0.0" resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-7.0.0.tgz#67e145cff510a6a6984bdf1152911d69d2eb9e43" integrity sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q== @@ -10910,15 +11223,15 @@ ws@7.4.6: resolved "https://registry.yarnpkg.com/ws/-/ws-7.4.6.tgz#5654ca8ecdeee47c33a9a4bf6d28e2be2980377c" integrity sha512-YmhHDO4MzaDLB+M9ym/mDA5z0naX8j7SIlT8f8z+I0VtzsRbekxEutHSme7NPS2qE8StCYQNUnfWdXta/Yu85A== -ws@8.17.1: - version "8.17.1" - resolved "https://registry.yarnpkg.com/ws/-/ws-8.17.1.tgz#9293da530bb548febc95371d90f9c878727d919b" - integrity sha512-6XQFvXTkbfUOZOKKILFG1PDK2NDQs4azKQl26T0YS5CxqWLgXajbPZ+h4gZekJyRqFU8pvnbAbbs/3TgRPy+GQ== +ws@8.5.0: + version "8.5.0" + resolved "https://registry.yarnpkg.com/ws/-/ws-8.5.0.tgz#bfb4be96600757fe5382de12c670dab984a1ed4f" + integrity sha512-BWX0SWVgLPzYwF8lTzEy1egjhS4S4OEAHfsO8o65WOVsrnSRGaSiUaa9e0ggGlkMTtBlmOpEXiie9RUcBO86qg== ws@^7.4.6: - version "7.5.10" - resolved "https://registry.yarnpkg.com/ws/-/ws-7.5.10.tgz#58b5c20dc281633f6c19113f39b349bd8bd558d9" - integrity sha512-+dbF1tHwZpXcbOJdVOkzLDxZP1ailvSxM6ZweXTegylPny803bFhA+vqBYw4s31NSAk4S2Qz+AKXK9a4wkdjcQ== + version "7.5.9" + resolved "https://registry.yarnpkg.com/ws/-/ws-7.5.9.tgz#54fa7db29f4c7cec68b1ddd3a89de099942bb591" + integrity sha512-F+P9Jil7UiSKSkppIiD94dN07AwvFixvLIj1Og1Rl9GGMuNipJnV9JzjD6XuqmAeiswGvUmNLjr5cFuXwNS77Q== xhr2@0.1.3: version "0.1.3" @@ -10945,17 +11258,22 @@ yallist@^3.0.2: resolved "https://registry.yarnpkg.com/yallist/-/yallist-3.1.1.tgz#dbb7daf9bfd8bac9ab45ebf602b8cbad0d5d08fd" integrity sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g== +yallist@^4.0.0: + version "4.0.0" + resolved "https://registry.yarnpkg.com/yallist/-/yallist-4.0.0.tgz#9bb92790d9c0effec63be73519e11a35019a3a72" + integrity sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A== + yaml@^2.4.2: - version "2.5.0" - resolved "https://registry.yarnpkg.com/yaml/-/yaml-2.5.0.tgz#c6165a721cf8000e91c36490a41d7be25176cf5d" - integrity sha512-2wWLbGbYDiSqqIKoPjar3MPgB94ErzCtrNE1FdqGuaO0pi2JGjmE8aW8TDZwzU7vuxcGRdL/4gPQwQ7hD5AMSw== + version "2.4.2" + resolved "https://registry.yarnpkg.com/yaml/-/yaml-2.4.2.tgz#7a2b30f2243a5fc299e1f14ca58d475ed4bc5362" + integrity sha512-B3VqDZ+JAg1nZpaEmWtTXUlBneoGx6CPM9b0TENK6aoSu5t73dItudwdgmi6tHlIZZId4dZ9skcAQ2UbcyAeVA== yargs-parser@20.2.4: version "20.2.4" resolved 
"https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-20.2.4.tgz#b42890f14566796f85ae8e3a25290d205f154a54" integrity sha512-WOkpgNhPTlE73h4VFAFsOnomJVaovO8VqLDzy5saChRBFQFBoMYirowyW+Q9HB4HFF4Z7VZTiG3iSzJJA29yRA== -yargs-parser@^20.2.2, yargs-parser@^20.2.9: +yargs-parser@^20.2.2: version "20.2.9" resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-20.2.9.tgz#2eb7dc3b0289718fc295f362753845c41a0c94ee" integrity sha512-y11nGElTIV+CT3Zv9t7VKl+Q3hTQoT9a1Qzezhhl6Rp21gJ/IVTW7Z3y9EWXhuUBC2Shnf+DX0antecpAwSP8w== @@ -10965,7 +11283,7 @@ yargs-parser@^21.0.1, yargs-parser@^21.1.1: resolved "https://registry.yarnpkg.com/yargs-parser/-/yargs-parser-21.1.1.tgz#9096bceebf990d21bb31fa9516e0ede294a77d35" integrity sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw== -yargs-unparser@2.0.0, yargs-unparser@^2.0.0: +yargs-unparser@2.0.0: version "2.0.0" resolved "https://registry.yarnpkg.com/yargs-unparser/-/yargs-unparser-2.0.0.tgz#f131f9226911ae5d9ad38c432fe809366c2325eb" integrity sha512-7pRTIA9Qc1caZ0bZ6RYRGbHJthJWuakf+WmHK0rVeLkNrrGhfoabBNdue6kdINI6r4if7ocq9aD/n7xwKOdzOA== @@ -10975,7 +11293,7 @@ yargs-unparser@2.0.0, yargs-unparser@^2.0.0: flat "^5.0.2" is-plain-obj "^2.1.0" -yargs@16.2.0, yargs@^16.2.0: +yargs@16.2.0: version "16.2.0" resolved "https://registry.yarnpkg.com/yargs/-/yargs-16.2.0.tgz#1c82bf0f6b6a66eafce7ef30e376f49a12477f66" integrity sha512-D1mvvtDG0L5ft/jGWkLpG1+m0eQxOfaBvTNELraWj22wSVUMWxZUvYgJYcKh6jGGIkJFhH4IZPQhR4TKpc8mBw== @@ -11012,9 +11330,13 @@ yocto-queue@^0.1.0: integrity sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q== yocto-queue@^1.0.0: - version "1.1.1" - resolved "https://registry.yarnpkg.com/yocto-queue/-/yocto-queue-1.1.1.tgz#fef65ce3ac9f8a32ceac5a634f74e17e5b232110" - integrity sha512-b4JR1PFR10y1mKjhHY9LaGo6tmrgjit7hxVIeAmyMw3jegXR4dhYqLaQF5zMXZxY7tLpMyJeLjr1C4rLmkVe8g== + version "1.0.0" + resolved "https://registry.yarnpkg.com/yocto-queue/-/yocto-queue-1.0.0.tgz#7f816433fb2cbc511ec8bf7d263c3b58a1a3c251" + integrity sha512-9bnSc/HEW2uRy67wc+T8UwauLuPJVn28jb+GtJY16iiKWyvmYJRXVT4UamsAEGQfPohgr2q4Tq0sQbQlxTfi1g== + +"zksync-ethers-gw@https://github.com/zksync-sdk/zksync-ethers#kl/gateway-support": + version "6.12.1" + resolved "https://github.com/zksync-sdk/zksync-ethers#aa834387686ff8c04e41d1675b98f91d6c01847b" zksync-ethers@5.8.0-beta.5: version "5.8.0-beta.5" @@ -11034,3 +11356,7 @@ zksync-ethers@^6.9.0: version "6.9.0" resolved "https://registry.yarnpkg.com/zksync-ethers/-/zksync-ethers-6.9.0.tgz#efaff1d59e2cff837eeda84c4ba59fdca4972a91" integrity sha512-2CppwvLHtz689L7E9EhevbFtsqVukKC/lVicwdeUS2yqV46ET4iBR11rYdEfGW2oEo1h6yJuuwIBDFm2SybkIA== + +"zksync-ethers@git+https://github.com/zksync-sdk/zksync-ethers#ra/fix-l2-l1-bridging": + version "6.12.1" + resolved "git+https://github.com/zksync-sdk/zksync-ethers#d33ee6003e529adf79d9de4b19de9235da3a6da7" diff --git a/zk_toolbox/crates/common/src/term/spinner.rs b/zk_toolbox/crates/common/src/term/spinner.rs deleted file mode 100644 index b97ba075ac4..00000000000 --- a/zk_toolbox/crates/common/src/term/spinner.rs +++ /dev/null @@ -1,51 +0,0 @@ -use std::time::Instant; - -use cliclack::{spinner, ProgressBar}; - -use crate::config::global_config; - -/// Spinner is a helper struct to show a spinner while some operation is running. -pub struct Spinner { - msg: String, - pb: ProgressBar, - time: Instant, -} - -impl Spinner { - /// Create a new spinner with a message. 
- pub fn new(msg: &str) -> Self { - let pb = spinner(); - pb.start(msg); - if global_config().verbose { - pb.stop(msg); - } - Spinner { - msg: msg.to_owned(), - pb, - time: Instant::now(), - } - } - - /// Manually finish the spinner. - pub fn finish(self) { - self.pb.stop(format!( - "{} done in {} secs", - self.msg, - self.time.elapsed().as_secs_f64() - )); - } - - /// Interrupt the spinner with a failed message. - pub fn fail(self) { - self.pb.error(format!( - "{} failed in {} secs", - self.msg, - self.time.elapsed().as_secs_f64() - )); - } - - /// Freeze the spinner with current message. - pub fn freeze(self) { - self.pb.stop(self.msg); - } -} diff --git a/zk_toolbox/crates/common/src/wallets.rs b/zk_toolbox/crates/common/src/wallets.rs deleted file mode 100644 index ed5e11b3261..00000000000 --- a/zk_toolbox/crates/common/src/wallets.rs +++ /dev/null @@ -1,64 +0,0 @@ -use ethers::{ - core::rand::Rng, - signers::{coins_bip39::English, LocalWallet, MnemonicBuilder, Signer}, - types::{Address, H256}, -}; -use serde::{Deserialize, Serialize}; - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct Wallet { - pub address: Address, - pub private_key: Option<H256>, -} - -impl Wallet { - pub fn random(rng: &mut impl Rng) -> Self { - let private_key = H256::random_using(rng); - let local_wallet = LocalWallet::from_bytes(private_key.as_bytes()).unwrap(); - - Self { - address: Address::from_slice(local_wallet.address().as_bytes()), - private_key: Some(private_key), - } - } - - pub fn new_with_key(private_key: H256) -> Self { - let local_wallet = LocalWallet::from_bytes(private_key.as_bytes()).unwrap(); - Self { - address: Address::from_slice(local_wallet.address().as_bytes()), - private_key: Some(private_key), - } - } - - pub fn from_mnemonic(mnemonic: &str, base_path: &str, index: u32) -> anyhow::Result<Self> { - let wallet = MnemonicBuilder::<English>::default() - .phrase(mnemonic) - .derivation_path(&format!("{}/{}", base_path, index))? - .build()?; - let private_key = H256::from_slice(&wallet.signer().to_bytes()); - Ok(Self::new_with_key(private_key)) - } - - pub fn empty() -> Self { - Self { - address: Address::zero(), - private_key: Some(H256::zero()), - } - } -} - -#[test] -fn test_load_localhost_wallets() { - let wallet = Wallet::from_mnemonic( - "stuff slice staff easily soup parent arm payment cotton trade scatter struggle", - "m/44'/60'/0'/0", - 1, - ) - .unwrap(); - assert_eq!( - wallet.address, - Address::from_slice( - &ethers::utils::hex::decode("0xa61464658AfeAf65CccaaFD3a512b69A83B77618").unwrap() - ) - ); -} diff --git a/zk_toolbox/crates/zk_inception/build.rs b/zk_toolbox/crates/zk_inception/build.rs deleted file mode 100644 index 43c8d7a5aac..00000000000 --- a/zk_toolbox/crates/zk_inception/build.rs +++ /dev/null @@ -1,11 +0,0 @@ -use std::path::PathBuf; - -use ethers::contract::Abigen; - -fn main() -> eyre::Result<()> { - let outdir = PathBuf::from(std::env::var("OUT_DIR")?).canonicalize()?; - Abigen::new("ConsensusRegistry", "abi/ConsensusRegistry.json")? - .generate()?
- .write_to_file(outdir.join("consensus_registry_abi.rs"))?; - Ok(()) -} diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/genesis.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/genesis.rs deleted file mode 100644 index b2d92ebd104..00000000000 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/genesis.rs +++ /dev/null @@ -1,176 +0,0 @@ -use std::path::PathBuf; - -use anyhow::Context; -use common::{ - config::global_config, - db::{drop_db_if_exists, init_db, migrate_db, DatabaseConfig}, - logger, - server::{Server, ServerMode}, - spinner::Spinner, -}; -use config::{ - override_config, set_databases, set_file_artifacts, set_rocks_db_config, - traits::{FileConfigWithDefaultName, SaveConfigWithBasePath}, - ChainConfig, ContractsConfig, EcosystemConfig, FileArtifacts, GeneralConfig, GenesisConfig, - SecretsConfig, WalletsConfig, -}; -use types::ProverMode; -use xshell::Shell; -use zksync_basic_types::commitment::L1BatchCommitmentMode; - -use super::args::genesis::GenesisArgsFinal; -use crate::{ - commands::chain::args::genesis::GenesisArgs, - consts::{ - PATH_TO_ONLY_REAL_PROOFS_OVERRIDE_CONFIG, PATH_TO_VALIDIUM_OVERRIDE_CONFIG, - PROVER_MIGRATIONS, SERVER_MIGRATIONS, - }, - messages::{ - MSG_CHAIN_NOT_INITIALIZED, MSG_FAILED_TO_DROP_PROVER_DATABASE_ERR, - MSG_FAILED_TO_DROP_SERVER_DATABASE_ERR, MSG_FAILED_TO_RUN_SERVER_ERR, - MSG_GENESIS_COMPLETED, MSG_INITIALIZING_DATABASES_SPINNER, - MSG_INITIALIZING_PROVER_DATABASE, MSG_INITIALIZING_SERVER_DATABASE, - MSG_RECREATE_ROCKS_DB_ERRROR, MSG_SELECTED_CONFIG, MSG_STARTING_GENESIS, - MSG_STARTING_GENESIS_SPINNER, - }, - utils::rocks_db::{recreate_rocksdb_dirs, RocksDBDirOption}, -}; - -pub async fn run(args: GenesisArgs, shell: &Shell) -> anyhow::Result<()> { - let ecosystem_config = EcosystemConfig::from_file(shell)?; - let chain_config = ecosystem_config - .load_current_chain() - .context(MSG_CHAIN_NOT_INITIALIZED)?; - let args = args.fill_values_with_prompt(&chain_config); - - genesis(args, shell, &chain_config).await?; - logger::outro(MSG_GENESIS_COMPLETED); - - Ok(()) -} - -pub async fn genesis( - args: GenesisArgsFinal, - shell: &Shell, - config: &ChainConfig, -) -> anyhow::Result<()> { - shell.create_dir(&config.rocks_db_path)?; - - let link_to_code = config.link_to_code.clone(); - let rocks_db = recreate_rocksdb_dirs(shell, &config.rocks_db_path, RocksDBDirOption::Main) - .context(MSG_RECREATE_ROCKS_DB_ERRROR)?; - let mut general = config.get_general_config()?; - let file_artifacts = FileArtifacts::new(config.artifacts.clone()); - set_rocks_db_config(&mut general, rocks_db)?; - set_file_artifacts(&mut general, file_artifacts); - general.save_with_base_path(shell, &config.configs)?; - - if config.prover_version != ProverMode::NoProofs { - override_config( - shell, - link_to_code.join(PATH_TO_ONLY_REAL_PROOFS_OVERRIDE_CONFIG), - config, - )?; - } - - if config.l1_batch_commit_data_generator_mode == L1BatchCommitmentMode::Validium { - override_config( - shell, - link_to_code.join(PATH_TO_VALIDIUM_OVERRIDE_CONFIG), - config, - )?; - } - - let mut secrets = config.get_secrets_config()?; - set_databases(&mut secrets, &args.server_db, &args.prover_db)?; - secrets.save_with_base_path(shell, &config.configs)?; - - logger::note( - MSG_SELECTED_CONFIG, - logger::object_to_string(serde_json::json!({ - "chain_config": config, - "server_db_config": args.server_db, - "prover_db_config": args.prover_db, - })), - ); - logger::info(MSG_STARTING_GENESIS); - - let spinner = Spinner::new(MSG_INITIALIZING_DATABASES_SPINNER); - 
initialize_databases( - shell, - &args.server_db, - &args.prover_db, - config.link_to_code.clone(), - args.dont_drop, - ) - .await?; - spinner.finish(); - - let spinner = Spinner::new(MSG_STARTING_GENESIS_SPINNER); - run_server_genesis(config, shell)?; - spinner.finish(); - - Ok(()) -} - -async fn initialize_databases( - shell: &Shell, - server_db_config: &DatabaseConfig, - prover_db_config: &DatabaseConfig, - link_to_code: PathBuf, - dont_drop: bool, -) -> anyhow::Result<()> { - let path_to_server_migration = link_to_code.join(SERVER_MIGRATIONS); - - if global_config().verbose { - logger::debug(MSG_INITIALIZING_SERVER_DATABASE) - } - if !dont_drop { - drop_db_if_exists(server_db_config) - .await - .context(MSG_FAILED_TO_DROP_SERVER_DATABASE_ERR)?; - init_db(server_db_config).await?; - } - migrate_db( - shell, - path_to_server_migration, - &server_db_config.full_url(), - ) - .await?; - - if global_config().verbose { - logger::debug(MSG_INITIALIZING_PROVER_DATABASE) - } - if !dont_drop { - drop_db_if_exists(prover_db_config) - .await - .context(MSG_FAILED_TO_DROP_PROVER_DATABASE_ERR)?; - init_db(prover_db_config).await?; - } - let path_to_prover_migration = link_to_code.join(PROVER_MIGRATIONS); - migrate_db( - shell, - path_to_prover_migration, - &prover_db_config.full_url(), - ) - .await?; - - Ok(()) -} - -fn run_server_genesis(chain_config: &ChainConfig, shell: &Shell) -> anyhow::Result<()> { - let server = Server::new(None, chain_config.link_to_code.clone(), false); - server - .run( - shell, - ServerMode::Genesis, - GenesisConfig::get_path_with_base_path(&chain_config.configs), - WalletsConfig::get_path_with_base_path(&chain_config.configs), - GeneralConfig::get_path_with_base_path(&chain_config.configs), - SecretsConfig::get_path_with_base_path(&chain_config.configs), - ContractsConfig::get_path_with_base_path(&chain_config.configs), - None, - vec![], - ) - .context(MSG_FAILED_TO_RUN_SERVER_ERR) -} diff --git a/zk_toolbox/crates/zk_supervisor/Cargo.toml b/zk_toolbox/crates/zk_supervisor/Cargo.toml deleted file mode 100644 index d343e7af43e..00000000000 --- a/zk_toolbox/crates/zk_supervisor/Cargo.toml +++ /dev/null @@ -1,32 +0,0 @@ -[package] -name = "zk_supervisor" -version = "0.1.0" -edition.workspace = true -homepage.workspace = true -license.workspace = true -authors.workspace = true -exclude.workspace = true -repository.workspace = true -description.workspace = true -keywords.workspace = true - -[dependencies] -anyhow.workspace = true -clap.workspace = true -common.workspace = true -config.workspace = true -chrono.workspace = true -ethers.workspace = true -human-panic.workspace = true -strum.workspace = true -tokio.workspace = true -url.workspace = true -xshell.workspace = true -serde.workspace = true -serde_json.workspace = true -clap-markdown.workspace = true -futures.workspace = true -types.workspace = true -serde_yaml.workspace = true -zksync_basic_types.workspace = true -sqruff-lib = "0.19.0" diff --git a/zk_toolbox/crates/zk_supervisor/README.md b/zk_toolbox/crates/zk_supervisor/README.md deleted file mode 100644 index 865bd2f0d57..00000000000 --- a/zk_toolbox/crates/zk_supervisor/README.md +++ /dev/null @@ -1,386 +0,0 @@ -# Command-Line Help for `zk_supervisor` - -This document contains the help content for the `zk_supervisor` command-line program. 
- -**Command Overview:** - -- [`zk_supervisor`↴](#zk_supervisor) -- [`zk_supervisor database`↴](#zk_supervisor-database) -- [`zk_supervisor database check-sqlx-data`↴](#zk_supervisor-database-check-sqlx-data) -- [`zk_supervisor database drop`↴](#zk_supervisor-database-drop) -- [`zk_supervisor database migrate`↴](#zk_supervisor-database-migrate) -- [`zk_supervisor database new-migration`↴](#zk_supervisor-database-new-migration) -- [`zk_supervisor database prepare`↴](#zk_supervisor-database-prepare) -- [`zk_supervisor database reset`↴](#zk_supervisor-database-reset) -- [`zk_supervisor database setup`↴](#zk_supervisor-database-setup) -- [`zk_supervisor test`↴](#zk_supervisor-test) -- [`zk_supervisor test integration`↴](#zk_supervisor-test-integration) -- [`zk_supervisor test revert`↴](#zk_supervisor-test-revert) -- [`zk_supervisor test recovery`↴](#zk_supervisor-test-recovery) -- [`zk_supervisor test upgrade`↴](#zk_supervisor-test-upgrade) -- [`zk_supervisor test rust`↴](#zk_supervisor-test-rust) -- [`zk_supervisor test l1-contracts`↴](#zk_supervisor-test-l1-contracts) -- [`zk_supervisor test prover`↴](#zk_supervisor-test-prover) -- [`zk_supervisor clean`↴](#zk_supervisor-clean) -- [`zk_supervisor clean all`↴](#zk_supervisor-clean-all) -- [`zk_supervisor clean containers`↴](#zk_supervisor-clean-containers) -- [`zk_supervisor clean contracts-cache`↴](#zk_supervisor-clean-contracts-cache) -- [`zk_supervisor snapshot`↴](#zk_supervisor-snapshot) -- [`zk_supervisor snapshot create`↴](#zk_supervisor-snapshot-create) -- [`zk_supervisor lint`↴](#zk_supervisor-lint) -- [`zk_supervisor fmt`↴](#zk_supervisor-fmt) -- [`zk_supervisor fmt rustfmt`↴](#zk_supervisor-fmt-rustfmt) -- [`zk_supervisor fmt contract`↴](#zk_supervisor-fmt-contract) -- [`zk_supervisor fmt prettier`↴](#zk_supervisor-fmt-prettier) -- [`zk_supervisor prover info`↴](#zk_supervisor-prover-info) -- [`zk_supervisor prover insert-version`↴](#zk_supervisor-prover-insert-version) -- [`zk_supervisor prover insert-batch`↴](#zk_supervisor-prover-insert-batch) - -## `zk_supervisor` - -ZK Toolbox is a set of tools for working with zk stack. - -**Usage:** `zk_supervisor [OPTIONS] <COMMAND>` - -###### **Subcommands:** - -- `database` — Database related commands -- `test` — Run tests -- `clean` — Clean artifacts -- `snapshot` — Snapshots creator -- `lint` — Lint code -- `fmt` — Format code -- `prover-version` — Protocol version used by provers - -###### **Options:** - -- `-v`, `--verbose` — Verbose mode -- `--chain <CHAIN>` — Chain to use -- `--ignore-prerequisites` — Ignores prerequisites checks - -## `zk_supervisor database` - -Database related commands - -**Usage:** `zk_supervisor database <COMMAND>` - -###### **Subcommands:** - -- `check-sqlx-data` — Check sqlx-data.json is up to date. If no databases are selected, all databases will be checked. -- `drop` — Drop databases. If no databases are selected, all databases will be dropped. -- `migrate` — Migrate databases. If no databases are selected, all databases will be migrated. -- `new-migration` — Create new migration -- `prepare` — Prepare sqlx-data.json. If no databases are selected, all databases will be prepared. -- `reset` — Reset databases. If no databases are selected, all databases will be reset. -- `setup` — Setup databases. If no databases are selected, all databases will be setup. - -## `zk_supervisor database check-sqlx-data` - -Check sqlx-data.json is up to date. If no databases are selected, all databases will be checked.
- -**Usage:** `zk_supervisor database check-sqlx-data [OPTIONS]` - -###### **Options:** - -- `-p`, `--prover <PROVER>` — Prover database - - Possible values: `true`, `false` - -- `-c`, `--core <CORE>` — Core database - - Possible values: `true`, `false` - -## `zk_supervisor database drop` - -Drop databases. If no databases are selected, all databases will be dropped. - -**Usage:** `zk_supervisor database drop [OPTIONS]` - -###### **Options:** - -- `-p`, `--prover <PROVER>` — Prover database - - Possible values: `true`, `false` - -- `-c`, `--core <CORE>` — Core database - - Possible values: `true`, `false` - -## `zk_supervisor database migrate` - -Migrate databases. If no databases are selected, all databases will be migrated. - -**Usage:** `zk_supervisor database migrate [OPTIONS]` - -###### **Options:** - -- `-p`, `--prover <PROVER>` — Prover database - - Possible values: `true`, `false` - -- `-c`, `--core <CORE>` — Core database - - Possible values: `true`, `false` - -## `zk_supervisor database new-migration` - -Create new migration - -**Usage:** `zk_supervisor database new-migration [OPTIONS]` - -###### **Options:** - -- `--database <DATABASE>` — Database to create new migration for - - Possible values: `prover`, `core` - -- `--name <NAME>` — Migration name - -## `zk_supervisor database prepare` - -Prepare sqlx-data.json. If no databases are selected, all databases will be prepared. - -**Usage:** `zk_supervisor database prepare [OPTIONS]` - -###### **Options:** - -- `-p`, `--prover <PROVER>` — Prover database - - Possible values: `true`, `false` - -- `-c`, `--core <CORE>` — Core database - - Possible values: `true`, `false` - -## `zk_supervisor database reset` - -Reset databases. If no databases are selected, all databases will be reset. - -**Usage:** `zk_supervisor database reset [OPTIONS]` - -###### **Options:** - -- `-p`, `--prover <PROVER>` — Prover database - - Possible values: `true`, `false` - -- `-c`, `--core <CORE>` — Core database - - Possible values: `true`, `false` - -## `zk_supervisor database setup` - -Setup databases. If no databases are selected, all databases will be setup.
- -**Usage:** `zk_supervisor database setup [OPTIONS]` - -###### **Options:** - -- `-p`, `--prover <PROVER>` — Prover database - - Possible values: `true`, `false` - -- `-c`, `--core <CORE>` — Core database - - Possible values: `true`, `false` - -## `zk_supervisor test` - -Run tests - -**Usage:** `zk_supervisor test <COMMAND>` - -###### **Subcommands:** - -- `integration` — Run integration tests -- `revert` — Run revert tests -- `recovery` — Run recovery tests -- `upgrade` — Run upgrade tests -- `rust` — Run unit-tests, accepts optional cargo test flags -- `l1-contracts` — Run L1 contracts tests -- `prover` — Run prover tests - -## `zk_supervisor test integration` - -Run integration tests - -**Usage:** `zk_supervisor test integration [OPTIONS]` - -###### **Options:** - -- `-e`, `--external-node` — Run tests for external node - -## `zk_supervisor test revert` - -Run revert tests - -**Usage:** `zk_supervisor test revert [OPTIONS]` - -###### **Options:** - -- `--enable-consensus` — Enable consensus -- `-e`, `--external-node` — Run tests for external node - -## `zk_supervisor test recovery` - -Run recovery tests - -**Usage:** `zk_supervisor test recovery [OPTIONS]` - -###### **Options:** - -- `-s`, `--snapshot` — Run recovery from a snapshot instead of genesis - -## `zk_supervisor test upgrade` - -Run upgrade tests - -**Usage:** `zk_supervisor test upgrade` - -## `zk_supervisor test rust` - -Run unit-tests, accepts optional cargo test flags - -**Usage:** `zk_supervisor test rust [OPTIONS]` - -###### **Options:** - -- `--options <OPTIONS>` — Cargo test flags - -## `zk_supervisor test l1-contracts` - -Run L1 contracts tests - -**Usage:** `zk_supervisor test l1-contracts` - -## `zk_supervisor test prover` - -Run prover tests - -**Usage:** `zk_supervisor test prover` - -## `zk_supervisor clean` - -Clean artifacts - -**Usage:** `zk_supervisor clean <COMMAND>` - -###### **Subcommands:** - -- `all` — Remove containers and contracts cache -- `containers` — Remove containers and docker volumes -- `contracts-cache` — Remove contracts caches - -## `zk_supervisor clean all` - -Remove containers and contracts cache - -**Usage:** `zk_supervisor clean all` - -## `zk_supervisor clean containers` - -Remove containers and docker volumes - -**Usage:** `zk_supervisor clean containers` - -## `zk_supervisor clean contracts-cache` - -Remove contracts caches - -**Usage:** `zk_supervisor clean contracts-cache` - -## `zk_supervisor snapshot` - -Snapshots creator - -**Usage:** `zk_supervisor snapshot <COMMAND>` - -###### **Subcommands:** - -- `create` — - -## `zk_supervisor snapshot create` - -**Usage:** `zk_supervisor snapshot create` - -## `zk_supervisor lint` - -Lint code - -**Usage:** `zk_supervisor lint [OPTIONS]` - -###### **Options:** - -- `-c`, `--check` -- `-e`, `--extensions <EXTENSIONS>` - - Possible values: `md`, `sol`, `js`, `ts`, `rs` - -## `zk_supervisor fmt` - -Format code - -**Usage:** `zk_supervisor fmt [OPTIONS] [COMMAND]` - -###### **Subcommands:** - -- `rustfmt` — -- `contract` — -- `prettier` — - -###### **Options:** - -- `-c`, `--check` - -## `zk_supervisor fmt rustfmt` - -**Usage:** `zk_supervisor fmt rustfmt` - -## `zk_supervisor fmt contract` - -**Usage:** `zk_supervisor fmt contract` - -## `zk_supervisor fmt prettier` - -**Usage:** `zk_supervisor fmt prettier [OPTIONS]` - -###### **Options:** - -- `-e`, `--extensions <EXTENSIONS>` - - Possible values: `md`, `sol`, `js`, `ts`, `rs` - -## `zk_supervisor prover info` - -Prints prover protocol version, snark wrapper and prover database URL - -**Usage:** `zk_supervisor prover info` - -## `zk_supervisor prover 
insert-version` - -Inserts protocol version into prover database. - -**Usage:** `zk_supervisor prover insert-version [OPTIONS]` - -###### **Options:** - -- `--version <VERSION>` — Protocol version in semantic format(`x.y.z`). Major version should be 0. -- `--snark-wrapper <SNARK_WRAPPER>` — Snark wrapper hash. -- `--default` - use default values for protocol version and snark wrapper hash (the ones found in zksync-era). - -## `zk_supervisor prover insert-batch` - -Inserts batch into prover database. - -**Usage:** `zk_supervisor prover insert-batch` - -###### **Options:** - -- `--number <NUMBER>` — Number of the batch to insert. -- `--version <VERSION>` — Protocol version in semantic format(`x.y.z`). Major version should be 0. -- `--default` - use default value for protocol version (the one found in zksync-era). -
- - This document was generated automatically by -clap-markdown. diff --git a/zk_toolbox/crates/zk_supervisor/src/main.rs b/zk_toolbox/crates/zk_supervisor/src/main.rs deleted file mode 100644 index 242affd8a71..00000000000 --- a/zk_toolbox/crates/zk_supervisor/src/main.rs +++ /dev/null @@ -1,151 +0,0 @@ -use clap::{Parser, Subcommand}; -use commands::{ - config_writer::ConfigWriterArgs, contracts::ContractsArgs, database::DatabaseCommands, - lint::LintArgs, prover::ProverCommands, send_transactions::args::SendTransactionsArgs, - snapshot::SnapshotCommands, test::TestCommands, -}; -use common::{ - check_general_prerequisites, - config::{global_config, init_global_config, GlobalConfig}, - error::log_error, - init_prompt_theme, logger, - version::version_message, -}; -use config::EcosystemConfig; -use messages::{ - msg_global_chain_does_not_exist, MSG_CONFIG_WRITER_ABOUT, MSG_CONTRACTS_ABOUT, - MSG_PROVER_VERSION_ABOUT, MSG_SEND_TXNS_ABOUT, MSG_SUBCOMMAND_CLEAN, - MSG_SUBCOMMAND_DATABASE_ABOUT, MSG_SUBCOMMAND_FMT_ABOUT, MSG_SUBCOMMAND_LINT_ABOUT, - MSG_SUBCOMMAND_SNAPSHOTS_CREATOR_ABOUT, MSG_SUBCOMMAND_TESTS_ABOUT, -}; -use xshell::Shell; - -use crate::commands::{clean::CleanCommands, fmt::FmtArgs}; - -mod commands; -mod consts; -mod dals; -mod defaults; -mod messages; - -#[derive(Parser, Debug)] -#[command( - version = version_message(env!("CARGO_PKG_VERSION")), - about -)] -struct Supervisor { - #[command(subcommand)] - command: SupervisorSubcommands, - #[clap(flatten)] - global: SupervisorGlobalArgs, -} - -#[derive(Subcommand, Debug)] -enum SupervisorSubcommands { - #[command(subcommand, about = MSG_SUBCOMMAND_DATABASE_ABOUT, alias = "db")] - Database(DatabaseCommands), - #[command(subcommand, about = MSG_SUBCOMMAND_TESTS_ABOUT, alias = "t")] - Test(TestCommands), - #[command(subcommand, about = MSG_SUBCOMMAND_CLEAN)] - Clean(CleanCommands), - #[command(subcommand, about = MSG_SUBCOMMAND_SNAPSHOTS_CREATOR_ABOUT)] - Snapshot(SnapshotCommands), - #[command(about = MSG_SUBCOMMAND_LINT_ABOUT, alias = "l")] - Lint(LintArgs), - #[command(about = MSG_SUBCOMMAND_FMT_ABOUT)] - Fmt(FmtArgs), - #[command(hide = true)] - Markdown, - #[command(subcommand, about = MSG_PROVER_VERSION_ABOUT)] - Prover(ProverCommands), - #[command(about = MSG_CONTRACTS_ABOUT)] - Contracts(ContractsArgs), - #[command(about = MSG_CONFIG_WRITER_ABOUT, alias = "o")] - ConfigWriter(ConfigWriterArgs), - #[command(about = MSG_SEND_TXNS_ABOUT)] - SendTransactions(SendTransactionsArgs), -} - -#[derive(Parser, Debug)] -#[clap(next_help_heading = "Global options")] -struct SupervisorGlobalArgs { - /// Verbose mode - #[clap(short, long, global = true)] - verbose: bool, - /// Chain to use - #[clap(long, global = true)] - chain: Option<String>, - /// Ignores prerequisites checks - #[clap(long, global = true)] - ignore_prerequisites: bool, -} - -#[tokio::main] -async fn main() -> anyhow::Result<()> { - human_panic::setup_panic!(); - - // We must parse arguments before printing the intro, because some autogenerated - // Clap commands (like `--version` would look odd otherwise).
- let args = Supervisor::parse(); - - init_prompt_theme(); - - logger::new_empty_line(); - logger::intro(); - - let shell = Shell::new().unwrap(); - init_global_config_inner(&shell, &args.global)?; - - if !global_config().ignore_prerequisites { - check_general_prerequisites(&shell); - } - - match run_subcommand(args, &shell).await { - Ok(_) => {} - Err(error) => { - log_error(error); - std::process::exit(1); - } - } - - Ok(()) -} - -async fn run_subcommand(args: Supervisor, shell: &Shell) -> anyhow::Result<()> { - match args.command { - SupervisorSubcommands::Database(command) => commands::database::run(shell, command).await?, - SupervisorSubcommands::Test(command) => commands::test::run(shell, command).await?, - SupervisorSubcommands::Clean(command) => commands::clean::run(shell, command)?, - SupervisorSubcommands::Snapshot(command) => commands::snapshot::run(shell, command).await?, - SupervisorSubcommands::Markdown => { - clap_markdown::print_help_markdown::<Supervisor>(); - } - SupervisorSubcommands::Lint(args) => commands::lint::run(shell, args)?, - SupervisorSubcommands::Fmt(args) => commands::fmt::run(shell.clone(), args).await?, - SupervisorSubcommands::Prover(command) => commands::prover::run(shell, command).await?, - SupervisorSubcommands::Contracts(args) => commands::contracts::run(shell, args)?, - SupervisorSubcommands::ConfigWriter(args) => commands::config_writer::run(shell, args)?, - SupervisorSubcommands::SendTransactions(args) => { - commands::send_transactions::run(shell, args).await? - } - } - Ok(()) -} - -fn init_global_config_inner(shell: &Shell, args: &SupervisorGlobalArgs) -> anyhow::Result<()> { - if let Some(name) = &args.chain { - if let Ok(config) = EcosystemConfig::from_file(shell) { - let chains = config.list_of_chains(); - if !chains.contains(name) { - anyhow::bail!(msg_global_chain_does_not_exist(name, &chains.join(", "))); - } - } - } - - init_global_config(GlobalConfig { - verbose: args.verbose, - chain_name: args.chain.clone(), - ignore_prerequisites: args.ignore_prerequisites, - }); - Ok(()) -} diff --git a/zk_toolbox/rust-toolchain b/zk_toolbox/rust-toolchain deleted file mode 100644 index dbd41264aa9..00000000000 --- a/zk_toolbox/rust-toolchain +++ /dev/null @@ -1 +0,0 @@ -1.81.0 diff --git a/zk_toolbox/zkup/README.md b/zk_toolbox/zkup/README.md deleted file mode 100644 index d6e3e634688..00000000000 --- a/zk_toolbox/zkup/README.md +++ /dev/null @@ -1,76 +0,0 @@ -# zkup - zk_toolbox Installer - -`zkup` is a script designed to simplify the installation of -[zk_toolbox](https://github.com/matter-labs/zksync-era/tree/main/zk_toolbox). It allows you to install the tool from a -local directory or directly from a GitHub repository. - -## Getting Started - -To install `zkup`, run the following command: - -```bash -curl -L https://raw.githubusercontent.com/matter-labs/zksync-era/main/zk_toolbox/zkup/install | bash -``` - -After installing `zkup`, you can use it to install `zk_toolbox` with: - -```bash -zkup -``` - -## Usage - -The `zkup` script provides various options for installing `zk_toolbox`: - -### Options - -- `-p, --path <PATH>` - Specify a local path to install `zk_toolbox` from. This option is ignored if `--repo` is provided. - -- `-r, --repo <REPO>` - GitHub repository to install from (e.g., "matter-labs/zksync-era"). Defaults to "matter-labs/zksync-era". - -- `-b, --branch <BRANCH>` - Git branch to use when installing from a repository. Ignored if `--commit` or `--version` is provided. - -- `-c, --commit <COMMIT>` - Git commit hash to use when installing from a repository.
Ignored if `--branch` or `--version` is provided. - -- `-v, --version <VERSION>` - Git tag to use when installing from a repository. Ignored if `--branch` or `--commit` is provided. - -- `--inception` - Installs `zk_inception` from the repository. By default, `zkup` installs `zk_inception` and `zk_supervisor`. - -- `--supervisor` - Installs `zk_supervisor` from the repository. - -### Local Installation - -If you provide a local path using the `-p` or `--path` option, `zkup` will install `zk_toolbox` from that directory. -Note that repository-specific arguments (`--repo`, `--branch`, `--commit`, `--version`) will be ignored in this case to -preserve git state. - -### Repository Installation - -By default, `zkup` installs `zk_toolbox` from the "matter-labs/zksync-era" GitHub repository. You can specify a -different repository, branch, commit, or version using the respective options. If multiple arguments are provided, -`zkup` will prioritize them as follows: - -- `--version` -- `--commit` -- `--branch` - -### Examples - -**Install from a GitHub repository with a specific version:** - -```bash -zkup --repo matter-labs/zksync-era --version 0.1.1 -``` - -**Install from a local path, only installing `zk_inception`:** - -```bash -zkup --path /path/to/local/zk_toolbox --inception -``` diff --git a/zk_toolbox/zkup/install b/zk_toolbox/zkup/install deleted file mode 100755 index 4e24b03dec4..00000000000 --- a/zk_toolbox/zkup/install +++ /dev/null @@ -1,55 +0,0 @@ -#!/usr/bin/env bash -set -eo pipefail - -BASE_DIR=${XDG_CONFIG_HOME:-$HOME} -ZKT_DIR=${ZKT_DIR:-"$BASE_DIR/.zkt"} -ZKT_BIN_DIR="$ZKT_DIR/bin" - -BIN_URL="https://raw.githubusercontent.com/matter-labs/zksync-era/main/zk_toolbox/zkup/zkup" -BIN_PATH="$ZKT_BIN_DIR/zkup" - -mkdir -p "$ZKT_BIN_DIR" -curl -sSfL "$BIN_URL" -o "$BIN_PATH" -chmod +x "$BIN_PATH" - -if [[ ":$PATH:" == *":${ZKT_BIN_DIR}:"* ]]; then - echo "zkup: found ${ZKT_BIN_DIR} in PATH" - exit 0 -fi - -case $SHELL in -*/zsh) - PROFILE="${ZDOTDIR-"$HOME"}/.zshenv" - ;; -*/bash) - PROFILE="$HOME/.bashrc" - ;; -*/fish) - PROFILE="$HOME/.config/fish/config.fish" - ;; -*/ash) - PROFILE="$HOME/.profile" - ;; -*) - echo "zkup: could not detect shell, manually add ${ZKT_BIN_DIR} to your PATH." - exit 1 - ;; -esac - -if [[ ! -f "$PROFILE" ]]; then - echo "zkup: Profile file $PROFILE does not exist, creating it." - touch "$PROFILE" -fi - -if [[ "$SHELL" == *"/fish"* ]]; then - echo -e "\n# Added by zkup\nfish_add_path -a $ZKT_BIN_DIR" >>"$PROFILE" - echo "zkup: Added $ZKT_BIN_DIR to PATH in $PROFILE using fish_add_path." -else - echo -e "\n# Added by zkup\nexport PATH=\"\$PATH:$ZKT_BIN_DIR\"" >>"$PROFILE" - echo "zkup: Added $ZKT_BIN_DIR to PATH in $PROFILE." -fi - -echo -echo "Added zkup to PATH." -echo "Run 'source $PROFILE' or start a new terminal session to use zkup." -echo "Then run 'zkup' to install ZK Toolbox."
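The two file deletions above retire the zkup bootstrap path end to end: `install` dropped the `zkup` script into `$HOME/.zkt/bin` and wired that directory into the shell profile, after which `zkup` built the requested binaries with cargo. A usage sketch assembled from the deleted README and install script above (the raw URL stops resolving once this change lands):

```bash
# Historical flow of the removed installer, reconstructed from the deleted docs above.
curl -L https://raw.githubusercontent.com/matter-labs/zksync-era/main/zk_toolbox/zkup/install | bash
source ~/.bashrc   # or open a new shell, so $HOME/.zkt/bin is on PATH
zkup --repo matter-labs/zksync-era --version 0.1.1   # --version takes precedence over --commit and --branch
```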
diff --git a/zk_toolbox/zkup/zkup b/zk_toolbox/zkup/zkup deleted file mode 100755 index e6ca1748738..00000000000 --- a/zk_toolbox/zkup/zkup +++ /dev/null @@ -1,254 +0,0 @@ -#!/usr/bin/env bash -set -eo pipefail - -BASE_DIR=${XDG_CONFIG_HOME:-$HOME} -ZKT_DIR=${ZKT_DIR:-"$BASE_DIR/.zkt"} -ZKT_BIN_DIR="$ZKT_DIR/bin" - -ZKUP_INSTALL_SUPERVISOR=0 -ZKUP_INSTALL_INCEPTION=0 -ZKUP_ALIAS=0 - -BINS=() - -main() { - parse_args "$@" - - zktoolbox_banner - - check_prerequisites - mkdir -p "$ZKT_BIN_DIR" - - set_bins - - if [ -n "$ZKUP_PATH" ]; then - install_local - else - install_from_repo - fi - - zktoolbox_banner - - for bin in "${BINS[@]}"; do - success "Installed $bin to $ZKT_BIN_DIR/$bin" - done - - if [ $ZKUP_ALIAS -eq 1 ]; then - create_alias - fi -} - -PREREQUISITES=(cargo git) - -check_prerequisites() { - say "Checking prerequisites" - - failed_prerequisites=() - for prerequisite in "${PREREQUISITES[@]}"; do - if ! check_prerequisite "$prerequisite"; then - failed_prerequisites+=("$prerequisite") - fi - done - if [ ${#failed_prerequisites[@]} -gt 0 ]; then - err "The following prerequisites are missing: ${failed_prerequisites[*]}" - exit 1 - fi -} - -check_prerequisite() { - command -v "$1" &>/dev/null -} - -parse_args() { - while [[ $# -gt 0 ]]; do - case $1 in - --) - shift - break - ;; - - -p | --path) - shift - ZKUP_PATH=$1 - ;; - -r | --repo) - shift - ZKUP_REPO=$1 - ;; - -b | --branch) - shift - ZKUP_BRANCH=$1 - ;; - -c | --commit) - shift - ZKUP_COMMIT=$1 - ;; - -v | --version) - shift - ZKUP_VERSION=$1 - ;; - --inception) ZKUP_INSTALL_INCEPTION=1 ;; - --supervisor) ZKUP_INSTALL_SUPERVISOR=1 ;; - -a | --alias) ZKUP_ALIAS=1 ;; - -h | --help) - usage - exit 0 - ;; - *) - err "Unknown argument: $1" - usage - exit 1 - ;; - esac - shift - done -} - -usage() { - cat <<EOF -Usage: $(basename "$0") [OPTIONS] - -Options: - -p, --path <PATH> Specify a local path to install zk_toolbox from. Ignored if --repo is provided. - -r, --repo <REPO> GitHub repository to install from (e.g., "matter-labs/zksync-era"). Defaults to "matter-labs/zksync-era". - -b, --branch <BRANCH> Git branch to use when installing from a repository. Ignored if --commit or --version is provided. - -c, --commit <COMMIT> Git commit hash to use when installing from a repository. Ignored if --branch or --version is provided. - -v, --version <VERSION> Git tag to use when installing from a repository. Ignored if --branch or --commit is provided. - -a, --alias Create aliases zki and zks for zk_inception and zk_supervisor binaries. - --inception Installs the zk_inception binary. Default is to install both zk_inception and zk_supervisor binaries. - --supervisor Installs the zk_supervisor binary. Default is to install both zk_inception and zk_supervisor binaries. - -h, --help Show this help message and exit. - -Examples: - $(basename "$0") --repo matter-labs/zksync-era --version 0.1.1 - $(basename "$0") --path /path/to/local/zk_toolbox --inception -EOF -} - -set_bins() { - if [ $ZKUP_INSTALL_INCEPTION -eq 1 ]; then - BINS+=(zk_inception) - fi - - if [ $ZKUP_INSTALL_SUPERVISOR -eq 1 ]; then - BINS+=(zk_supervisor) - fi - - # Installs both binaries if not option is provided - if [ ${#BINS[@]} -eq 0 ]; then - BINS=(zk_inception zk_supervisor) - fi -} - -install_local() { - if [ !
-d "$ZKUP_PATH/zk_toolbox" ]; then - err "Path $ZKUP_PATH does not contain zk_toolbox" - exit 1 - fi - - if [ -n "$ZKUP_BRANCH" ] || [ -n "$ZKUP_COMMIT" ] || [ -n "$ZKUP_VERSION" ] || [ -n "$ZKUP_REPO" ]; then - warn "Ignoring --repo, --branch, --commit and --version arguments when installing from local path" - fi - - say "Installing zk_toolbox from $ZKUP_PATH" - ensure cd "$ZKUP_PATH"/zk_toolbox - - for bin in "${BINS[@]}"; do - say "Installing $bin" - ensure cargo install --root $ZKT_DIR --path ./crates/$bin --force - done -} - -install_from_repo() { - if [ -n "$ZKUP_PATH" ]; then - warn "Ignoring --path argument when installing from repository" - fi - - ZKUP_REPO=${ZKUP_REPO:-"matter-labs/zksync-era"} - - say "Installing zk_toolbox from $ZKUP_REPO" - - if [ -n "$ZKUP_VERSION" ]; then - if [ -n "$ZKUP_COMMIT" ] || [ -n "$ZKUP_BRANCH" ]; then - warn "Ignoring --commit and --branch arguments when installing by version" - fi - ensure cargo install --root $ZKT_DIR --git "https://github.com/$ZKUP_REPO" --tag "zk_toolbox-v$ZKUP_VERSION" --locked "${BINS[@]}" --force - elif [ -n "$ZKUP_COMMIT" ]; then - if [ -n "$ZKUP_BRANCH" ]; then - warn "Ignoring --branch argument when installing by commit" - fi - ensure cargo install --root $ZKT_DIR --git "https://github.com/$ZKUP_REPO" --rev "$ZKUP_COMMIT" --locked "${BINS[@]}" --force - elif [ -n "$ZKUP_BRANCH" ]; then - ensure cargo install --root $ZKT_DIR --git "https://github.com/$ZKUP_REPO" --branch "$ZKUP_BRANCH" --locked "${BINS[@]}" --force - else - ensure cargo install --root $ZKT_DIR --git "https://github.com/$ZKUP_REPO" --locked "${BINS[@]}" --force - fi -} - -create_alias() { - if [[ "${BINS[@]}" =~ "zk_inception" ]]; then - say "Creating alias 'zki' for zk_inception" - ensure ln -sf "$ZKT_BIN_DIR/zk_inception" "$ZKT_BIN_DIR/zki" - fi - - if [[ "${BINS[@]}" =~ "zk_supervisor" ]]; then - say "Creating alias 'zks' for zk_supervisor" - ensure ln -sf "$ZKT_BIN_DIR/zk_supervisor" "$ZKT_BIN_DIR/zks" - fi -} - -ensure() { - if ! 
"$@"; then - err "command failed: $*" - exit 1 - fi -} - -say() { - local action="${1%% *}" - local rest="${1#"$action" }" - - echo -e "\033[1;32m$action\033[0m $rest" -} - -success() { - echo -e "\033[1;32m$1\033[0m" -} - -warn() { - echo -e "\033[1;33mWARNING: $1\033[0m" -} - -err() { - echo -e "\033[1;31mERROR: $1\033[0m" >&2 -} - -zktoolbox_banner() { - printf ' - -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= - -███████╗██╗ ██╗ ████████╗ ██████╗ ██████╗ ██╗ ██████╗ ██████╗ ██╗ ██╗ -╚══███╔╝██║ ██╔╝ ╚══██╔══╝██╔═══██╗██╔═══██╗██║ ██╔══██╗██╔═══██╗╚██╗██╔╝ - ███╔╝ █████╔╝ ██║ ██║ ██║██║ ██║██║ ██████╔╝██║ ██║ ╚███╔╝ - ███╔╝ ██╔═██╗ ██║ ██║ ██║██║ ██║██║ ██╔══██╗██║ ██║ ██╔██╗ -███████╗██║ ██╗ ██║ ╚██████╔╝╚██████╔╝███████╗██████╔╝╚██████╔╝██╔╝ ██╗ -╚══════╝╚═╝ ╚═╝ ╚═╝ ╚═════╝ ╚═════╝ ╚══════╝╚═════╝ ╚═════╝ ╚═╝ ╚═╝ - - - A Comprehensive Toolkit for Creating and Managing ZK Stack Chains - -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= - -Repo : https://github.com/matter-labs/zksync-era/ -Docs : https://docs.zksync.io/ -Contribute : https://github.com/matter-labs/zksync-era/pulls - -=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= - -' -} - -main "$@" diff --git a/zk_toolbox/CHANGELOG.md b/zkstack_cli/CHANGELOG.md similarity index 100% rename from zk_toolbox/CHANGELOG.md rename to zkstack_cli/CHANGELOG.md diff --git a/zk_toolbox/Cargo.lock b/zkstack_cli/Cargo.lock similarity index 96% rename from zk_toolbox/Cargo.lock rename to zkstack_cli/Cargo.lock index 4d985bb8b09..1427939f4ef 100644 --- a/zk_toolbox/Cargo.lock +++ b/zkstack_cli/Cargo.lock @@ -14,9 +14,9 @@ dependencies = [ [[package]] name = "addr2line" -version = "0.24.1" +version = "0.24.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5fb1d8e4442bd405fdfd1dacb42792696b0cf9cb15882e5d097b742a676d375" +checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" dependencies = [ "gimli", ] @@ -161,9 +161,9 @@ dependencies = [ [[package]] name = "async-stream" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd56dd203fef61ac097dd65721a419ddccb106b2d2b70ba60a6b529f03961a51" +checksum = "0b5a71a6f37880a80d1d7f19efd781e4b5de42c88f0722cc13bcb6cc2cfe8476" dependencies = [ "async-stream-impl", "futures-core", @@ -172,9 +172,9 @@ dependencies = [ [[package]] name = "async-stream-impl" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" +checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", @@ -584,9 +584,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.1.23" +version = "1.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3bbb537bb4a30b90362caddba8f360c0a56bc13d3a5570028e7197204cb54a17" +checksum = "2e80e3b6a3ab07840e1cae9b0666a63970dc28e8ed5ffbcdacbfc760c281bfc1" dependencies = [ "jobserver", "libc", @@ -658,9 +658,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.18" +version = "4.5.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0956a43b323ac1afaffc053ed5c4b7c1f1800bacd1683c353aabbb752515dd3" +checksum = "b97f376d85a664d5837dbae44bf546e6477a679ff6610010f17276f686d867e8" dependencies = [ "clap_builder", "clap_derive", @@ -677,9 +677,9 @@ 
dependencies = [ [[package]] name = "clap_builder" -version = "4.5.18" +version = "4.5.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d72166dd41634086d5803a47eb71ae740e61d84709c36f3c34110173db3961b" +checksum = "19bc80abd44e4bed93ca373a0704ccbd1b710dc5749406201bb018272808dc54" dependencies = [ "anstream", "anstyle", @@ -688,6 +688,15 @@ dependencies = [ "terminal_size", ] +[[package]] +name = "clap_complete" +version = "4.5.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9646e2e245bf62f45d39a0f3f36f1171ad1ea0d6967fd114bca72cb02a8fcdfb" +dependencies = [ + "clap", +] + [[package]] name = "clap_derive" version = "4.5.18" @@ -1182,6 +1191,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" dependencies = [ "powerfmt", + "serde", ] [[package]] @@ -1991,9 +2001,9 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "futures" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" +checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" dependencies = [ "futures-channel", "futures-core", @@ -2006,9 +2016,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" +checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" dependencies = [ "futures-core", "futures-sink", @@ -2016,15 +2026,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" +checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" [[package]] name = "futures-executor" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" +checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" dependencies = [ "futures-core", "futures-task", @@ -2044,9 +2054,9 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" +checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" [[package]] name = "futures-locks" @@ -2060,9 +2070,9 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" +checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", @@ -2071,15 +2081,15 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" +checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" [[package]] name = 
"futures-task" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" +checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" [[package]] name = "futures-timer" @@ -2093,9 +2103,9 @@ dependencies = [ [[package]] name = "futures-util" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" +checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" dependencies = [ "futures-channel", "futures-core", @@ -2144,9 +2154,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.31.0" +version = "0.31.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32085ea23f3234fc7846555e85283ba4de91e21016dc0455a16286d87a292d64" +checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" [[package]] name = "git_version_macro" @@ -2241,7 +2251,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.12", - "indexmap 2.5.0", + "indexmap 2.6.0", "slab", "tokio", "tokio-util", @@ -2260,7 +2270,7 @@ dependencies = [ "futures-core", "futures-sink", "http 1.1.0", - "indexmap 2.5.0", + "indexmap 2.6.0", "slab", "tokio", "tokio-util", @@ -2283,6 +2293,12 @@ dependencies = [ "allocator-api2", ] +[[package]] +name = "hashbrown" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e087f84d4f86bf4b218b927129862374b72199ae7d8657835f1e89000eea4fb" + [[package]] name = "hashers" version = "1.0.1" @@ -2415,9 +2431,9 @@ dependencies = [ [[package]] name = "httparse" -version = "1.9.4" +version = "1.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fcc0b4a115bf80b728eb8ea024ad5bd707b615bfed49e0665b6e0f86fd082d9" +checksum = "7d71d3574edd2771538b901e6549113b4006ece66150fb69c0fb6d9a2adae946" [[package]] name = "httpdate" @@ -2511,7 +2527,7 @@ dependencies = [ "hyper 1.4.1", "hyper-util", "log", - "rustls 0.23.13", + "rustls 0.23.14", "rustls-pki-types", "tokio", "tokio-rustls 0.26.0", @@ -2674,12 +2690,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68b900aa2f7301e21c36462b170ee99994de34dff39a4a6a528e80e7376d07e5" +checksum = "707907fe3c25f5424cce2cb7e1cbcafee6bdbe735ca90ef77c29e84591e5b9da" dependencies = [ "equivalent", - "hashbrown 0.14.5", + "hashbrown 0.15.0", ] [[package]] @@ -2715,9 +2731,9 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.10.0" +version = "2.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "187674a687eed5fe42285b40c6291f9a01517d415fad1c3cbc6a9f778af7fcd4" +checksum = "ddc24109865250148c2e0f3d25d4f0f479571723792d3802153c60922a4fb708" [[package]] name = "is_terminal_polyfill" @@ -2798,9 +2814,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.70" +version = "0.3.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a" +checksum = "0cb94a0ffd3f3ee755c20f7d8752f45cac88605a4dcf808abcff72873296ec7b" dependencies = [ "wasm-bindgen", ] @@ -2834,7 +2850,7 @@ dependencies = [ "http 1.1.0", "jsonrpsee-core", "pin-project", - "rustls 0.23.13", + "rustls 0.23.14", "rustls-pki-types", "rustls-platform-verifier", 
"soketto", @@ -2887,7 +2903,7 @@ dependencies = [ "hyper-util", "jsonrpsee-core", "jsonrpsee-types", - "rustls 0.23.13", + "rustls 0.23.14", "rustls-platform-verifier", "serde", "serde_json", @@ -3485,21 +3501,18 @@ checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" [[package]] name = "object" -version = "0.36.4" +version = "0.36.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "084f1a5821ac4c651660a94a7153d27ac9d8a53736203f58b31945ded098070a" +checksum = "aedf0a2d09c573ed1d8d85b30c119153926a2b36dce0ab28322c09a117a4683e" dependencies = [ "memchr", ] [[package]] name = "once_cell" -version = "1.20.1" +version = "1.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82881c4be219ab5faaf2ad5e5e5ecdff8c66bd7402ca3160975c93b24961afd1" -dependencies = [ - "portable-atomic", -] +checksum = "1261fe7e33c73b354eab43b1273a57c8f967d0391e80353e51f764ac02cf6775" [[package]] name = "open-fastrlp" @@ -3606,7 +3619,7 @@ dependencies = [ "bytes", "http 1.1.0", "opentelemetry", - "reqwest 0.12.7", + "reqwest 0.12.8", ] [[package]] @@ -3623,7 +3636,7 @@ dependencies = [ "opentelemetry-proto", "opentelemetry_sdk", "prost 0.13.3", - "reqwest 0.12.7", + "reqwest 0.12.8", "thiserror", "tokio", "tonic", @@ -3831,7 +3844,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" dependencies = [ "fixedbitset", - "indexmap 2.5.0", + "indexmap 2.6.0", ] [[package]] @@ -3899,18 +3912,18 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.1.5" +version = "1.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" +checksum = "baf123a161dde1e524adf36f90bc5d8d3462824a9c43553ad07a8183161189ec" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.5" +version = "1.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" +checksum = "a4502d8515ca9f32f1fb543d987f63d95a14934883db45bdb48060b6b69257f8" dependencies = [ "proc-macro2", "quote", @@ -4048,9 +4061,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.86" +version = "1.0.87" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e719e8df665df0d1c8fbfd238015744736151d4445ec0836b8e628aae103b77" +checksum = "b3e4daa0dcf6feba26f985457cdf104d4b4256fc5a09547140f3631bb076b19a" dependencies = [ "unicode-ident", ] @@ -4406,9 +4419,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.12.7" +version = "0.12.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8f4955649ef5c38cc7f9e8aa41761d48fb9677197daea9984dc54f56aad5e63" +checksum = "f713147fbe92361e52392c73b8c9e48c04c6625bce969ef54dc901e58e042a7b" dependencies = [ "base64 0.22.1", "bytes", @@ -4432,7 +4445,7 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite", - "rustls-pemfile 2.1.3", + "rustls-pemfile 2.2.0", "serde", "serde_json", "serde_urlencoded", @@ -4610,9 +4623,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.13" +version = "0.23.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2dabaac7466917e566adb06783a81ca48944c6898a1b08b9374106dd671f4c8" +checksum = "415d9944693cb90382053259f89fbb077ea730ad7273047ec63b19bc9b160ba8" dependencies = [ "aws-lc-rs", 
"log", @@ -4631,7 +4644,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5bfb394eeed242e909609f56089eecfe5fda225042e8b171791b9c95f5931e5" dependencies = [ "openssl-probe", - "rustls-pemfile 2.1.3", + "rustls-pemfile 2.2.0", "rustls-pki-types", "schannel", "security-framework", @@ -4648,11 +4661,10 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "2.1.3" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "196fe16b00e106300d3e45ecfcb764fa292a535d7326a29a5875c579c7417425" +checksum = "dce314e5fee3f39953d46bb63bb8a46d40c2f8fb7cc5a3b6cab2bde9721d6e50" dependencies = [ - "base64 0.22.1", "rustls-pki-types", ] @@ -4673,7 +4685,7 @@ dependencies = [ "jni", "log", "once_cell", - "rustls 0.23.13", + "rustls 0.23.14", "rustls-native-certs", "rustls-platform-verifier-android", "rustls-webpki 0.102.8", @@ -4767,9 +4779,9 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.24" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9aaafd5a2b6e3d657ff009d82fbd630b6bd54dd4eb06f21693925cdf80f9b8b" +checksum = "01227be5826fa0690321a2ba6c5cd57a19cf3f6a09e76973b58e61de6ab9d1c1" dependencies = [ "windows-sys 0.59.0", ] @@ -5102,7 +5114,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.5.0", + "indexmap 2.6.0", "itoa", "ryu", "serde", @@ -5398,7 +5410,7 @@ dependencies = [ "hashbrown 0.14.5", "hashlink", "hex", - "indexmap 2.5.0", + "indexmap 2.6.0", "log", "memchr", "once_cell", @@ -5560,9 +5572,9 @@ dependencies = [ [[package]] name = "sqruff-lib" -version = "0.19.1" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd3d7d11b58d658bf0e33d6729a92a81790ffb757440828a7b01869a40314b5f" +checksum = "676775189e83a98fc603d59fc6d760a66895d511502a538081dac993fde1a09a" dependencies = [ "ahash", "anstyle", @@ -5573,7 +5585,7 @@ dependencies = [ "enum_dispatch", "fancy-regex", "getrandom", - "indexmap 2.5.0", + "indexmap 2.6.0", "itertools 0.13.0", "lazy-regex", "nohash-hasher", @@ -5595,14 +5607,14 @@ dependencies = [ [[package]] name = "sqruff-lib-core" -version = "0.19.1" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03b19ebfd19c2bb1fdf8ca626f451645d89b74fa696f3cc1286989e58436f791" +checksum = "48ec5ba65376ae9ba3e3dda153668dcb6452a7212ee7b4c9d48e053eb4f0f3fa" dependencies = [ "ahash", "enum_dispatch", "fancy-regex", - "indexmap 2.5.0", + "indexmap 2.6.0", "itertools 0.13.0", "nohash-hasher", "pretty_assertions", @@ -5616,9 +5628,9 @@ dependencies = [ [[package]] name = "sqruff-lib-dialects" -version = "0.19.1" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "60dc004661c65d9163edaa876e6bb2fbe7a0bcf7f00cb0e13428cd0b4ab4b27f" +checksum = "00fa1cd168dad593f8f6996d805acc1fd52c6d0ad0f6f5847a9cc22a6198cfc2" dependencies = [ "ahash", "itertools 0.13.0", @@ -5834,12 +5846,12 @@ dependencies = [ [[package]] name = "terminal_size" -version = "0.3.0" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21bebf2b7c9e0a515f6e0f8c51dc0f8e4696391e6f1ff30379559f8365fb0df7" +checksum = "4f599bd7ca042cfdf8f4512b277c02ba102247820f9d9d4a9f521f496751a6ef" dependencies = [ "rustix", - "windows-sys 0.48.0", + "windows-sys 0.59.0", ] [[package]] 
@@ -6022,7 +6034,7 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.23.13", + "rustls 0.23.14", "rustls-pki-types", "tokio", ] @@ -6094,7 +6106,7 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.5.0", + "indexmap 2.6.0", "toml_datetime", "winnow 0.5.40", ] @@ -6105,7 +6117,7 @@ version = "0.22.22" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ - "indexmap 2.5.0", + "indexmap 2.6.0", "serde", "serde_spanned", "toml_datetime", @@ -6374,9 +6386,9 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.15" +version = "0.3.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" +checksum = "5ab17db44d7388991a428b2ee655ce0c212e862eff1768a455c58f9aad6e7893" [[package]] name = "unicode-ident" @@ -6401,9 +6413,9 @@ dependencies = [ [[package]] name = "unicode-properties" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52ea75f83c0137a9b98608359a5f1af8144876eb67bcb1ce837368e906a9f524" +checksum = "e70f2a8b45122e719eb623c01822704c4e0907e7e426a05927e1a1cfff5b75d0" [[package]] name = "unicode-width" @@ -6587,9 +6599,9 @@ checksum = "b8dad83b4f25e74f184f64c43b150b91efe7647395b42289f38e50566d82855b" [[package]] name = "wasm-bindgen" -version = "0.2.93" +version = "0.2.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5" +checksum = "ef073ced962d62984fb38a36e5fdc1a2b23c9e0e1fa0689bb97afa4202ef6887" dependencies = [ "cfg-if", "once_cell", @@ -6598,9 +6610,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.93" +version = "0.2.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b" +checksum = "c4bfab14ef75323f4eb75fa52ee0a3fb59611977fd3240da19b2cf36ff85030e" dependencies = [ "bumpalo", "log", @@ -6613,9 +6625,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.43" +version = "0.4.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61e9300f63a621e96ed275155c108eb6f843b6a26d053f122ab69724559dc8ed" +checksum = "65471f79c1022ffa5291d33520cbbb53b7687b01c2f8e83b57d102eed7ed479d" dependencies = [ "cfg-if", "js-sys", @@ -6625,9 +6637,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.93" +version = "0.2.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf" +checksum = "a7bec9830f60924d9ceb3ef99d55c155be8afa76954edffbb5936ff4509474e7" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -6635,9 +6647,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.93" +version = "0.2.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" +checksum = "4c74f6e152a76a2ad448e223b0fc0b6b5747649c3d769cc6bf45737bf97d0ed6" dependencies = [ 
"proc-macro2", "quote", @@ -6648,15 +6660,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.93" +version = "0.2.94" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484" +checksum = "a42f6c679374623f295a8623adfe63d9284091245c3504bde47c17a3ce2777d9" [[package]] name = "web-sys" -version = "0.3.70" +version = "0.3.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26fdeaafd9bd129f65e7c031593c24d62186301e0c72c8978fa1678be7d532c0" +checksum = "44188d185b5bdcae1052d08bcbcf9091a5524038d4572cc4f4f2bb9d5554ddd9" dependencies = [ "js-sys", "wasm-bindgen", @@ -7101,24 +7113,46 @@ dependencies = [ ] [[package]] -name = "zk_inception" +name = "zkevm_opcode_defs" +version = "0.132.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0769f7b27d8fb06e715da3290c575cac5d04d10a557faef180e847afce50ac4" +dependencies = [ + "bitflags 2.6.0", + "blake2", + "ethereum-types", + "k256 0.11.6", + "lazy_static", + "sha2_ce", + "sha3_ce", +] + +[[package]] +name = "zkstack" version = "0.1.0" dependencies = [ "anyhow", + "chrono", "clap", "clap-markdown", + "clap_complete", "cliclack", "common", "config", + "dirs", "ethers", - "eyre", + "futures", "human-panic", "lazy_static", + "prost 0.12.6", + "rand", + "reqwest 0.12.8", "secrecy", "serde", "serde_json", "serde_yaml", "slugify-rs", + "sqruff-lib", "strum", "thiserror", "tokio", @@ -7130,51 +7164,15 @@ dependencies = [ "zksync_config", "zksync_consensus_crypto", "zksync_consensus_roles", + "zksync_consensus_utils", + "zksync_protobuf", + "zksync_protobuf_build", + "zksync_protobuf_config", "zksync_system_constants", "zksync_types", "zksync_web3_decl", ] -[[package]] -name = "zk_supervisor" -version = "0.1.0" -dependencies = [ - "anyhow", - "chrono", - "clap", - "clap-markdown", - "common", - "config", - "ethers", - "futures", - "human-panic", - "serde", - "serde_json", - "serde_yaml", - "sqruff-lib", - "strum", - "tokio", - "types", - "url", - "xshell", - "zksync_basic_types", -] - -[[package]] -name = "zkevm_opcode_defs" -version = "0.132.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0769f7b27d8fb06e715da3290c575cac5d04d10a557faef180e847afce50ac4" -dependencies = [ - "bitflags 2.6.0", - "blake2", - "ethereum-types", - "k256 0.11.6", - "lazy_static", - "sha2_ce", - "sha3_ce", -] - [[package]] name = "zksync_basic_types" version = "0.1.0" @@ -7196,9 +7194,9 @@ dependencies = [ [[package]] name = "zksync_concurrency" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4724d51934e475c846ba9e6ed169e25587385188b928a9ecfbbf616092a1c17" +checksum = "035269d811b3770debca372141ab64cad067dce8e58cb39a48cb7617d30c626b" dependencies = [ "anyhow", "once_cell", @@ -7221,7 +7219,11 @@ dependencies = [ "rand", "secrecy", "serde", + "strum", + "strum_macros", + "time", "url", + "vise", "zksync_basic_types", "zksync_concurrency", "zksync_consensus_utils", @@ -7230,9 +7232,9 @@ dependencies = [ [[package]] name = "zksync_consensus_crypto" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7760e7a140f16f0435fbf2ad9a4b09feaad74568d05b553751d222f4803a42e" +checksum = "49e38d1b5ed28c66e785caff53ea4863375555d818aafa03290397192dd3e665" dependencies = [ "anyhow", "blst", @@ -7251,9 +7253,9 @@ dependencies = [ [[package]] name = 
"zksync_consensus_roles" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96f903187836210602beba27655e111e22efb229ef90bd2a95a3d6799b31685c" +checksum = "e49fbd4e69b276058f3dfc06cf6ada0e8caa6ed826e81289e4d596da95a0f17a" dependencies = [ "anyhow", "bit-vec", @@ -7273,9 +7275,9 @@ dependencies = [ [[package]] name = "zksync_consensus_utils" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1020308512c01ab80327fb874b5b61c6fd513a6b26c8a5fce3e077600da04e4b" +checksum = "10bac8f471b182d4fa3d40cf158aac3624fe636a1ff0b4cf3fe26a0e20c68a42" dependencies = [ "anyhow", "rand", @@ -7325,9 +7327,9 @@ dependencies = [ [[package]] name = "zksync_protobuf" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d2d9ce9b9697daae6023c8da5cfe8764690a9d9c91ff32b8e1e54a7c8301fb3" +checksum = "abd55c64f54cb10967a435422f66ff5880ae14a232b245517c7ce38da32e0cab" dependencies = [ "anyhow", "bit-vec", @@ -7346,9 +7348,9 @@ dependencies = [ [[package]] name = "zksync_protobuf_build" -version = "0.3.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "903c23a12e160a703f9b68d0dd961daa24156af912ca1bc9efb74969f3acc645" +checksum = "4121952bcaf711005dd554612fc6e2de9b30cb58088508df87f1d38046ce8ac8" dependencies = [ "anyhow", "heck", @@ -7372,6 +7374,7 @@ dependencies = [ "secrecy", "serde_json", "serde_yaml", + "time", "tracing", "zksync_basic_types", "zksync_config", @@ -7406,7 +7409,6 @@ dependencies = [ "once_cell", "prost 0.12.6", "rlp", - "secp256k1", "serde", "serde_json", "serde_with", @@ -7414,7 +7416,6 @@ dependencies = [ "thiserror", "tracing", "zksync_basic_types", - "zksync_config", "zksync_contracts", "zksync_crypto_primitives", "zksync_mini_merkle_tree", @@ -7434,7 +7435,7 @@ dependencies = [ "hex", "num", "once_cell", - "reqwest 0.12.7", + "reqwest 0.12.8", "serde", "serde_json", "thiserror", @@ -7480,7 +7481,7 @@ dependencies = [ "jsonrpsee", "pin-project-lite", "rlp", - "rustls 0.23.13", + "rustls 0.23.14", "serde", "serde_json", "thiserror", diff --git a/zk_toolbox/Cargo.toml b/zkstack_cli/Cargo.toml similarity index 81% rename from zk_toolbox/Cargo.toml rename to zkstack_cli/Cargo.toml index 0c447f18f07..b89ef9e62b3 100644 --- a/zk_toolbox/Cargo.toml +++ b/zkstack_cli/Cargo.toml @@ -3,8 +3,7 @@ members = [ "crates/common", "crates/config", "crates/types", - "crates/zk_inception", - "crates/zk_supervisor", + "crates/zkstack", "crates/git_version_macro", ] resolver = "2" @@ -16,8 +15,8 @@ homepage = "https://zksync.io/" license = "MIT OR Apache-2.0" authors = ["The Matter Labs Team "] exclude = ["./github"] -repository = "https://github.com/matter-labs/zk_toolbox/" -description = "ZK Toolbox is a set of tools for working with zk stack." +repository = "https://github.com/matter-labs/zksync-era/tree/main/zkstack_cli/" +description = "ZK Stack CLI is a set of tools for working with zk stack." 
keywords = ["zk", "cryptography", "blockchain", "ZKStack", "ZKsync"] @@ -33,25 +32,29 @@ zksync_config = { path = "../core/lib/config" } zksync_protobuf_config = { path = "../core/lib/protobuf_config" } zksync_basic_types = { path = "../core/lib/basic_types" } zksync_system_constants = { path = "../core/lib/constants" } -zksync_consensus_roles = "=0.3.0" -zksync_consensus_crypto = "=0.3.0" -zksync_protobuf = "=0.3.0" +zksync_consensus_roles = "=0.5.0" +zksync_consensus_crypto = "=0.5.0" +zksync_consensus_utils = "=0.5.0" +zksync_protobuf = "=0.5.0" +zksync_protobuf_build = "=0.5.0" zksync_types = { path = "../core/lib/types" } zksync_web3_decl = { path = "../core/lib/web3_decl" } # External dependencies anyhow = "1.0.82" clap = { version = "4.4", features = ["derive", "wrap_help", "string"] } +clap_complete = "4.5.33" +dirs = "5.0.1" slugify-rs = "0.0.3" cliclack = "0.2.5" console = "0.15.8" chrono = "0.4.38" -eyre = "0.6.12" ethers = "2.0" futures = "0.3.30" human-panic = "2.0" lazy_static = "1.4.0" once_cell = "1.19.0" +prost = "0.12.1" rand = "0.8.5" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" diff --git a/zk_toolbox/README.md b/zkstack_cli/README.md similarity index 84% rename from zk_toolbox/README.md rename to zkstack_cli/README.md index a3b44fa98b3..e8116508821 100644 --- a/zk_toolbox/README.md +++ b/zkstack_cli/README.md @@ -1,11 +1,7 @@ -# zk_toolbox +# ZK Stack CLI -Toolkit for creating and managing ZK Stack chains. - -## ZK Inception - -`ZK Inception` facilitates the creation and management of ZK Stacks. Commands are interactive but can also accept -arguments via the command line. +Toolkit for creating and managing ZK Stack chains. `ZK Stack CLI` facilitates the creation and management of ZK Stacks. +Commands are interactive but can also accept arguments via the command line. ### Dependencies @@ -14,19 +10,35 @@ dependencies on your machine. Ignore the Environment section for now. ### Installation -Install `zk_inception` from Git: +You can use `zkstackup` to install and manage `zkstack`: + +```bash +curl -L https://raw.githubusercontent.com/matter-labs/zksync-era/main/zkstack_cli/zkstackup/install | bash +``` + +Then install the most recent version with: ```bash -cargo install --git https://github.com/matter-labs/zksync-era/ --locked zk_inception zk_supervisor --force +zkstackup ``` Or manually build from a local copy of the [ZKsync](https://github.com/matter-labs/zksync-era/) repository: ```bash -./bin/zkt +zkstackup --local +``` + +This command installs `zkstack` from the current repository. + +#### Manual installation + +Run from the repository root: + +```bash +cargo install --path zkstack_cli/crates/zkstack --force --locked ``` -This command installs `zk_inception` and `zk_supervisor` from the current repository. +And make sure that `.cargo/bin` is included in `PATH`. ### Foundry Integration @@ -51,13 +63,13 @@ BridgeHub, shared bridges, and state transition managers.
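As context for the Foundry Integration section above: the CLI drives `forge` by shelling out rather than linking it as a library. A minimal sketch of that pattern, assuming only the `xshell` and `anyhow` crates — the real code in `zkstack_cli/crates/common` wraps commands in its own `Cmd` type for logging and error handling (see the `contracts.rs` and `prerequisites.rs` hunks later in this diff):

```rust
// A hedged sketch, not the actual zkstack source.
use xshell::{cmd, Shell};

fn build_l1_contracts(link_to_code: &std::path::Path) -> anyhow::Result<()> {
    let shell = Shell::new()?;
    // Enter the contracts directory; the guard restores the old cwd on drop.
    let _dir_guard = shell.push_dir(link_to_code.join("contracts/l1-contracts"));
    // `cmd!` builds the invocation; `run` inherits stdio and errors on a
    // non-zero exit status.
    cmd!(shell, "forge build").run()?;
    Ok(())
}
```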
To create a ZK Stack project, start by creating an ecosystem: ```bash -zk_inception ecosystem create +zkstack ecosystem create ``` If you choose not to start database & L1 containers after creating the ecosystem, you can later run: ```bash -zk_inception containers +zkstack containers ``` Execute subsequent commands from within the created ecosystem folder: @@ -71,14 +83,14 @@ cd path/to/ecosystem/name If the ecosystem has never been deployed before, initialize it: ```bash -zk_inception ecosystem init +zkstack ecosystem init ``` This initializes the first ZK chain, which becomes the default. Override with `--chain ` if needed. For default params, use: ```bash -zk_inception ecosystem init --dev +zkstack ecosystem init --dev ``` If the process gets stuck, resume it with `--resume`. This flag keeps track of already sent transactions and sends new @@ -98,7 +110,7 @@ To verify contracts, use the `--verify` flag. To change the default ZK chain: ```bash -zk_inception ecosystem change-default-chain +zkstack ecosystem change-default-chain ``` IMPORTANT: Currently, you cannot use an existing ecosystem to register a new chain. This feature will be added in the @@ -109,19 +121,19 @@ future. To setup [era-observability](https://github.com/matter-labs/era-observability): ```bash -zk_inception ecosystem setup-observability +zkstack ecosystem setup-observability ``` Or run: ```bash -zk_inception ecosystem init --observability +zkstack ecosystem init --observability ``` To start observability containers: ```bash -zk_inception containers --observability +zkstack containers --observability ``` ### ZK Chain @@ -131,7 +143,7 @@ zk_inception containers --observability The first ZK chain is generated upon ecosystem creation. Create additional chains and switch between them: ```bash -zk_inception chain create +zkstack chain create ``` #### Init @@ -139,7 +151,7 @@ zk_inception chain create Deploy contracts and initialize Zk Chain: ```bash -zk_inception chain init +zkstack chain init ``` This registers the chain in the BridgeHub and deploys all necessary contracts. Manual initialization steps: @@ -154,7 +166,7 @@ by a third party). To run the chain: ```bash -zk_inception server +zkstack server ``` You can specify the component you want to run using `--components` flag @@ -180,19 +192,13 @@ information. Initialize the prover: ```bash -zk_inception prover init -``` - -Generate setup keys: - -```bash -zk_inception prover generate-sk +zkstack prover init ``` Run the prover: ```bash -zk_inception prover run +zkstack prover run ``` Specify the prover component with `--component `. Components: @@ -208,13 +214,13 @@ For `witness-generator`, specify the round with `--round `. Rounds: Download required binaries (`solc`, `zksolc`, `vyper`, `zkvyper`): ```bash -zk_inception contract-verifier init +zkstack contract-verifier init ``` Run the contract verifier: ```bash -zk_inception contract-verifier run +zkstack contract-verifier run ``` ### External Node @@ -226,7 +232,7 @@ Commands for running an external node: Prepare configs: ```bash -zk_inception en configs +zkstack en configs ``` This ensures no port conflicts with the main node. @@ -236,7 +242,7 @@ This ensures no port conflicts with the main node. 
Prepare the databases: ```bash -zk_inception en init +zkstack en init ``` #### Run @@ -244,7 +250,7 @@ zk_inception en init Run the external node: ```bash -zk_inception en run +zkstack en run ``` ### Portal @@ -253,7 +259,7 @@ Once you have at least one chain initialized, you can run the [portal](https://g web-app to bridge tokens between L1 and L2 and more: ```bash -zk_inception portal +zkstack portal ``` This command will start the dockerized portal app using configuration from `apps/portal.config.json` file inside your @@ -269,7 +275,7 @@ contracts and more. First, each chain should be initialized: ```bash -zk_inception explorer init +zkstack explorer init ``` This command creates a database to store explorer data and generates docker compose file with explorer services @@ -278,7 +284,7 @@ This command creates a database to store explorer data and generates docker compo Next, for each chain you want to have an explorer, you need to start its backend services: ```bash -zk_inception explorer backend --chain  +zkstack explorer backend --chain  ``` This command uses the previously created docker compose file to start the services (api, data fetcher, worker) required for @@ -287,7 +293,7 @@ the explorer. Finally, you can run the explorer app: ```bash -zk_inception explorer run +zkstack explorer run ``` This command will start the dockerized explorer app using configuration from `apps/explorer.config.json` file inside @@ -299,22 +305,22 @@ your ecosystem directory. You can edit this file to configure the app if needed. To update your node: ```bash -zk_inception update +zkstack update ``` This command pulls the latest changes, syncs the general config for all chains, and raises a warning if L1 upgrades are needed. -## ZK Supervisor +## Dev -Tools for developing ZKsync. +The subcommand `zkstack dev` offers tools for developing ZKsync. ### Database Commands for database manipulation: ```bash -zk_supervisor db +zkstack dev db ``` Possible commands: @@ -332,7 +338,7 @@ Possible commands: Clean artifacts: ```bash -zk_supervisor clean +zkstack dev clean ``` Possible commands: @@ -346,7 +352,7 @@ Possible commands: Run ZKsync tests: ```bash -zk_supervisor test +zkstack dev test ``` Possible commands: @@ -364,7 +370,7 @@ Possible commands: Create a snapshot of the current chain: ```bash -zks snapshot create +zkstack dev snapshot create ``` ### Contracts @@ -372,7 +378,7 @@ zks snapshot create Build contracts: ```bash -zks contracts +zkstack dev contracts ``` ### Format @@ -380,7 +386,7 @@ zks contracts Format code: ```bash -zks fmt +zkstack dev fmt ``` By default, this command runs all formatters. To run a specific formatter use the following subcommands: @@ -394,7 +400,7 @@ By default, this command runs all formatters. To run a specific formatter use the ### Lint Lint code: ```bash -zks lint +zkstack dev lint ``` By default, this command runs the linter on all files. To target specific file types, use the `--target` option.
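For orientation before the source hunks below: a hedged sketch — not the actual `zkstack` source — of how the former `zk_supervisor` commands can hang off a single binary as `zkstack dev`, using the clap 4 derive API that the workspace's Cargo.toml above enables. Names and the two subcommands shown are illustrative:

```rust
// Requires clap = { version = "4", features = ["derive"] }.
use clap::{Parser, Subcommand};

#[derive(Parser)]
#[command(name = "zkstack")]
struct Cli {
    #[command(subcommand)]
    command: Command,
}

#[derive(Subcommand)]
enum Command {
    /// Developer tools, previously the separate `zk_supervisor` binary.
    #[command(subcommand)]
    Dev(DevCommand),
}

#[derive(Subcommand)]
enum DevCommand {
    /// `zkstack dev fmt` — run all formatters.
    Fmt,
    /// `zkstack dev lint` — run linters, optionally restricted via --target.
    Lint,
}

fn main() {
    match Cli::parse().command {
        Command::Dev(DevCommand::Fmt) => println!("would run formatters"),
        Command::Dev(DevCommand::Lint) => println!("would run linters"),
    }
}
```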
diff --git a/zk_toolbox/crates/common/Cargo.toml b/zkstack_cli/crates/common/Cargo.toml similarity index 100% rename from zk_toolbox/crates/common/Cargo.toml rename to zkstack_cli/crates/common/Cargo.toml diff --git a/zk_toolbox/crates/common/src/cmd.rs b/zkstack_cli/crates/common/src/cmd.rs similarity index 100% rename from zk_toolbox/crates/common/src/cmd.rs rename to zkstack_cli/crates/common/src/cmd.rs diff --git a/zk_toolbox/crates/common/src/config.rs b/zkstack_cli/crates/common/src/config.rs similarity index 100% rename from zk_toolbox/crates/common/src/config.rs rename to zkstack_cli/crates/common/src/config.rs diff --git a/zkstack_cli/crates/common/src/contracts.rs b/zkstack_cli/crates/common/src/contracts.rs new file mode 100644 index 00000000000..0f771bb9dad --- /dev/null +++ b/zkstack_cli/crates/common/src/contracts.rs @@ -0,0 +1,52 @@ +use std::path::PathBuf; + +use xshell::{cmd, Shell}; + +use crate::cmd::Cmd; + +pub fn build_test_contracts(shell: Shell, link_to_code: PathBuf) -> anyhow::Result<()> { + let _dir_guard = shell.push_dir(link_to_code.join("etc/contracts-test-data")); + Cmd::new(cmd!(shell, "yarn install")).run()?; + Ok(Cmd::new(cmd!(shell, "yarn build")).run()?) +} + +pub fn build_l1_contracts(shell: Shell, link_to_code: PathBuf) -> anyhow::Result<()> { + let _dir_guard = shell.push_dir(link_to_code.join("contracts/l1-contracts")); + Ok(Cmd::new(cmd!(shell, "yarn build")).run()?) +} + +pub fn build_l1_da_contracts(shell: Shell, link_to_code: PathBuf) -> anyhow::Result<()> { + let _dir_guard = shell.push_dir(link_to_code.join("contracts/da-contracts")); + Ok(Cmd::new(cmd!(shell, "forge build")).run()?) +} + +pub fn build_l2_contracts(shell: Shell, link_to_code: PathBuf) -> anyhow::Result<()> { + let _dir_guard = shell.push_dir(link_to_code.join("contracts/l2-contracts")); + // Ok(Cmd::new(cmd!( + // shell, + // "forge build --zksync --zk-enable-eravm-extensions" + // )) + // .run()?) + Cmd::new(cmd!(shell, "yarn build")).run()?; + Ok(()) +} + +pub fn build_system_contracts(shell: Shell, link_to_code: PathBuf) -> anyhow::Result<()> { + let _dir_guard = shell.push_dir(link_to_code.join("contracts/system-contracts")); + // Do not update era-contract's lockfile to avoid dirty submodule + Cmd::new(cmd!(shell, "yarn install --frozen-lockfile")).run()?; + Cmd::new(cmd!(shell, "yarn build")).run()?; + Ok(()) + // Cmd::new(cmd!(shell, "yarn preprocess:system-contracts")).run()?; + // Cmd::new(cmd!( + // shell, + // "forge build --zksync --zk-enable-eravm-extensions" + // )) + // .run()?; + // Cmd::new(cmd!(shell, "yarn preprocess:bootloader")).run()?; + // Ok(Cmd::new(cmd!( + // shell, + // "forge build --zksync --zk-enable-eravm-extensions" + // )) + // .run()?) +} diff --git a/zk_toolbox/crates/common/src/db.rs b/zkstack_cli/crates/common/src/db.rs similarity index 100% rename from zk_toolbox/crates/common/src/db.rs rename to zkstack_cli/crates/common/src/db.rs diff --git a/zk_toolbox/crates/common/src/docker.rs b/zkstack_cli/crates/common/src/docker.rs similarity index 89% rename from zk_toolbox/crates/common/src/docker.rs rename to zkstack_cli/crates/common/src/docker.rs index a5731808814..71e2040ee31 100644 --- a/zk_toolbox/crates/common/src/docker.rs +++ b/zkstack_cli/crates/common/src/docker.rs @@ -14,7 +14,11 @@ pub fn up(shell: &Shell, docker_compose_file: &str, detach: bool) -> anyhow::Res } pub fn down(shell: &Shell, docker_compose_file: &str) -> anyhow::Result<()> { - Ok(Cmd::new(cmd!(shell, "docker compose -f {docker_compose_file} down")).run()?) 
+ Ok(Cmd::new(cmd!( + shell, + "docker compose -f {docker_compose_file} down -v" + )) + .run()?) } pub fn run(shell: &Shell, docker_image: &str, docker_args: Vec<String>) -> anyhow::Result<()> { diff --git a/zk_toolbox/crates/common/src/ethereum.rs b/zkstack_cli/crates/common/src/ethereum.rs similarity index 96% rename from zk_toolbox/crates/common/src/ethereum.rs rename to zkstack_cli/crates/common/src/ethereum.rs index 33caaad9789..2100746fecf 100644 --- a/zk_toolbox/crates/common/src/ethereum.rs +++ b/zkstack_cli/crates/common/src/ethereum.rs @@ -6,18 +6,17 @@ use ethers::{ middleware::MiddlewareBuilder, prelude::{Http, LocalWallet, Provider, Signer, SignerMiddleware}, providers::Middleware, - types::{Address, TransactionRequest, H256}, + types::{Address, TransactionRequest}, }; use types::TokenInfo; use crate::{logger, wallets::Wallet}; pub fn create_ethers_client( - private_key: H256, + mut wallet: LocalWallet, l1_rpc: String, chain_id: Option<u64>, ) -> anyhow::Result<SignerMiddleware<Provider<Http>, ethers::prelude::Wallet<ethers::core::k256::ecdsa::SigningKey>>> { - let mut wallet = LocalWallet::from_bytes(private_key.as_bytes())?; if let Some(chain_id) = chain_id { wallet = wallet.with_chain_id(chain_id); } diff --git a/zk_toolbox/crates/common/src/external_node.rs b/zkstack_cli/crates/common/src/external_node.rs similarity index 100% rename from zk_toolbox/crates/common/src/external_node.rs rename to zkstack_cli/crates/common/src/external_node.rs diff --git a/zk_toolbox/crates/common/src/files.rs b/zkstack_cli/crates/common/src/files.rs similarity index 100% rename from zk_toolbox/crates/common/src/files.rs rename to zkstack_cli/crates/common/src/files.rs diff --git a/zk_toolbox/crates/common/src/forge.rs b/zkstack_cli/crates/common/src/forge.rs similarity index 97% rename from zk_toolbox/crates/common/src/forge.rs rename to zkstack_cli/crates/common/src/forge.rs index 846685ab29a..a03795facfa 100644 --- a/zk_toolbox/crates/common/src/forge.rs +++ b/zkstack_cli/crates/common/src/forge.rs @@ -159,10 +159,12 @@ impl ForgeScript { } // Do not start the script if balance is not enough - pub fn private_key(&self) -> Option<H256> { + pub fn private_key(&self) -> Option<LocalWallet> { self.args.args.iter().find_map(|a| { if let ForgeScriptArg::PrivateKey { private_key } = a { - Some(H256::from_str(private_key).unwrap()) + let key = H256::from_str(private_key).unwrap(); + let key = LocalWallet::from_bytes(key.as_bytes()).unwrap(); + Some(key) } else { None } @@ -180,11 +182,7 @@ } pub fn address(&self) -> Option<Address>
{ - self.private_key().and_then(|a| { - LocalWallet::from_bytes(a.as_bytes()) - .ok() - .map(|a| Address::from_slice(a.address().as_bytes())) - }) + self.private_key().map(|k| k.address()) } pub async fn get_the_balance(&self) -> anyhow::Result<Option<U256>> { @@ -299,7 +297,7 @@ pub struct ForgeScriptArgs { pub zksync: bool, /// List of additional arguments that can be passed through the CLI. /// - /// e.g.: `zk_inception init -a --private-key=` + /// e.g.: `zkstack init -a --private-key=` #[clap(long, short)] #[arg(trailing_var_arg = true, allow_hyphen_values = true, hide = false)] additional_args: Vec<String>, diff --git a/zk_toolbox/crates/common/src/git.rs b/zkstack_cli/crates/common/src/git.rs similarity index 100% rename from zk_toolbox/crates/common/src/git.rs rename to zkstack_cli/crates/common/src/git.rs diff --git a/zk_toolbox/crates/common/src/hardhat.rs b/zkstack_cli/crates/common/src/hardhat.rs similarity index 100% rename from zk_toolbox/crates/common/src/hardhat.rs rename to zkstack_cli/crates/common/src/hardhat.rs diff --git a/zk_toolbox/crates/common/src/lib.rs b/zkstack_cli/crates/common/src/lib.rs similarity index 91% rename from zk_toolbox/crates/common/src/lib.rs rename to zkstack_cli/crates/common/src/lib.rs index 6dc26bbba9f..91804bfe070 100644 --- a/zk_toolbox/crates/common/src/lib.rs +++ b/zkstack_cli/crates/common/src/lib.rs @@ -4,6 +4,7 @@ mod term; pub mod cmd; pub mod config; +pub mod contracts; pub mod db; pub mod docker; pub mod ethereum; @@ -20,7 +21,7 @@ pub mod yaml; pub use prerequisites::{ check_general_prerequisites, check_prerequisites, GCLOUD_PREREQUISITE, GPU_PREREQUISITES, - PROVER_CLI_PREREQUISITE, WGET_PREREQUISITE, + PROVER_CLI_PREREQUISITE, }; pub use prompt::{init_prompt_theme, Prompt, PromptConfirm, PromptSelect}; pub use term::{error, logger, spinner}; diff --git a/zk_toolbox/crates/common/src/prerequisites.rs b/zkstack_cli/crates/common/src/prerequisites.rs similarity index 52% rename from zk_toolbox/crates/common/src/prerequisites.rs rename to zkstack_cli/crates/common/src/prerequisites.rs index 665096d8486..72d3c7d8041 100644 --- a/zk_toolbox/crates/common/src/prerequisites.rs +++ b/zkstack_cli/crates/common/src/prerequisites.rs @@ -2,79 +2,99 @@ use xshell::{cmd, Shell}; use crate::{cmd::Cmd, logger}; -const PREREQUISITES: [Prerequisite; 5] = [ - Prerequisite { - name: "git", - download_link: "https://git-scm.com/book/en/v2/Getting-Started-Installing-Git", - }, - Prerequisite { - name: "docker", - download_link: "https://docs.docker.com/get-docker/", - }, - Prerequisite { - name: "forge", - download_link: "https://book.getfoundry.sh/getting-started/installation", - }, - Prerequisite { - name: "cargo", - download_link: "https://doc.rust-lang.org/cargo/getting-started/installation.html", - }, - Prerequisite { - name: "yarn", - download_link: "https://yarnpkg.com/getting-started/install", - }, -]; +fn prerequisites() -> [Prerequisite; 5] { + [ + Prerequisite { + name: "git", + download_link: "https://git-scm.com/book/en/v2/Getting-Started-Installing-Git", + custom_validator: None, + }, + Prerequisite { + name: "docker", + download_link: "https://docs.docker.com/get-docker/", + custom_validator: None, + }, + Prerequisite { + name: "forge", + download_link: + "https://github.com/matter-labs/foundry-zksync?tab=readme-ov-file#quick-install", + custom_validator: Some(Box::new(|| { + let shell = Shell::new().unwrap(); + let Ok(result) = Cmd::new(cmd!(shell, "forge build --help")).run_with_output() + else { + return false; + }; + let Ok(stdout) =
String::from_utf8(result.stdout) else { + return false; + }; + stdout.contains("ZKSync configuration") + })), + }, + Prerequisite { + name: "cargo", + download_link: "https://doc.rust-lang.org/cargo/getting-started/installation.html", + custom_validator: None, + }, + Prerequisite { + name: "yarn", + download_link: "https://yarnpkg.com/getting-started/install", + custom_validator: None, + }, + ] +} const DOCKER_COMPOSE_PREREQUISITE: Prerequisite = Prerequisite { name: "docker compose", download_link: "https://docs.docker.com/compose/install/", + custom_validator: None, }; pub const GPU_PREREQUISITES: [Prerequisite; 3] = [ Prerequisite { name: "cmake", download_link: "https://cmake.org/download/", + custom_validator: None, }, Prerequisite { name: "nvcc", download_link: "https://developer.nvidia.com/cuda-downloads", + custom_validator: None, }, // CUDA toolkit Prerequisite { name: "nvidia-smi", download_link: "https://developer.nvidia.com/cuda-downloads", + custom_validator: None, }, // CUDA GPU driver ]; -pub const WGET_PREREQUISITE: [Prerequisite; 1] = [Prerequisite { - name: "wget", - download_link: "https://www.gnu.org/software/wget/", -}]; - pub const GCLOUD_PREREQUISITE: [Prerequisite; 1] = [Prerequisite { name: "gcloud", download_link: "https://cloud.google.com/sdk/docs/install", + custom_validator: None, }]; pub const PROVER_CLI_PREREQUISITE: [Prerequisite; 1] = [Prerequisite { name: "prover_cli", download_link: "https://github.com/matter-labs/zksync-era/tree/main/prover/crates/bin/prover_cli", + custom_validator: None, }]; pub struct Prerequisite { name: &'static str, download_link: &'static str, + custom_validator: Option<Box<dyn Fn() -> bool>>, } pub fn check_general_prerequisites(shell: &Shell) { - check_prerequisites(shell, &PREREQUISITES, true); + check_prerequisites(shell, &prerequisites(), true); } pub fn check_prerequisites(shell: &Shell, prerequisites: &[Prerequisite], check_compose: bool) { let mut missing_prerequisites = vec![]; for prerequisite in prerequisites { - if !check_prerequisite(shell, prerequisite.name) { + if !check_prerequisite(shell, prerequisite) { missing_prerequisites.push(prerequisite); } } @@ -100,8 +120,15 @@ pub fn check_prerequisites(shell: &Shell, prerequisites: &[Prerequisite], check_ } } -fn check_prerequisite(shell: &Shell, name: &str) -> bool { - Cmd::new(cmd!(shell, "which {name}")).run().is_ok() +fn check_prerequisite(shell: &Shell, prerequisite: &Prerequisite) -> bool { + let name = prerequisite.name; + if Cmd::new(cmd!(shell, "which {name}")).run().is_err() { + return false; + } + let Some(custom) = &prerequisite.custom_validator else { + return true; + }; + custom() } fn check_docker_compose_prerequisite(shell: &Shell) -> bool { diff --git a/zk_toolbox/crates/common/src/prompt/confirm.rs b/zkstack_cli/crates/common/src/prompt/confirm.rs similarity index 100% rename from zk_toolbox/crates/common/src/prompt/confirm.rs rename to zkstack_cli/crates/common/src/prompt/confirm.rs diff --git a/zk_toolbox/crates/common/src/prompt/input.rs b/zkstack_cli/crates/common/src/prompt/input.rs similarity index 100% rename from zk_toolbox/crates/common/src/prompt/input.rs rename to zkstack_cli/crates/common/src/prompt/input.rs diff --git a/zk_toolbox/crates/common/src/prompt/mod.rs b/zkstack_cli/crates/common/src/prompt/mod.rs similarity index 100% rename from zk_toolbox/crates/common/src/prompt/mod.rs rename to zkstack_cli/crates/common/src/prompt/mod.rs diff --git a/zk_toolbox/crates/common/src/prompt/select.rs b/zkstack_cli/crates/common/src/prompt/select.rs similarity
index 100% rename from zk_toolbox/crates/common/src/prompt/select.rs rename to zkstack_cli/crates/common/src/prompt/select.rs diff --git a/zk_toolbox/crates/common/src/server.rs b/zkstack_cli/crates/common/src/server.rs similarity index 100% rename from zk_toolbox/crates/common/src/server.rs rename to zkstack_cli/crates/common/src/server.rs diff --git a/zk_toolbox/crates/common/src/term/error.rs b/zkstack_cli/crates/common/src/term/error.rs similarity index 100% rename from zk_toolbox/crates/common/src/term/error.rs rename to zkstack_cli/crates/common/src/term/error.rs diff --git a/zk_toolbox/crates/common/src/term/logger.rs b/zkstack_cli/crates/common/src/term/logger.rs similarity index 97% rename from zk_toolbox/crates/common/src/term/logger.rs rename to zkstack_cli/crates/common/src/term/logger.rs index 17e518d9ad9..035e81dc135 100644 --- a/zk_toolbox/crates/common/src/term/logger.rs +++ b/zkstack_cli/crates/common/src/term/logger.rs @@ -14,7 +14,7 @@ fn term_write(msg: impl Display) { } pub fn intro() { - cliclak_intro(style(" ZKsync toolbox ").on_cyan().black()).unwrap(); + cliclak_intro(style(" ZK Stack CLI ").on_cyan().black()).unwrap(); } pub fn outro(msg: impl Display) { diff --git a/zk_toolbox/crates/common/src/term/mod.rs b/zkstack_cli/crates/common/src/term/mod.rs similarity index 100% rename from zk_toolbox/crates/common/src/term/mod.rs rename to zkstack_cli/crates/common/src/term/mod.rs diff --git a/zkstack_cli/crates/common/src/term/spinner.rs b/zkstack_cli/crates/common/src/term/spinner.rs new file mode 100644 index 00000000000..3ec2631804a --- /dev/null +++ b/zkstack_cli/crates/common/src/term/spinner.rs @@ -0,0 +1,84 @@ +use std::{fmt::Display, io::IsTerminal, time::Instant}; + +use cliclack::{spinner, ProgressBar}; + +use crate::{config::global_config, logger}; + +/// Spinner is a helper struct to show a spinner while some operation is running. +pub struct Spinner { + msg: String, + output: SpinnerOutput, + time: Instant, +} + +impl Spinner { + /// Create a new spinner with a message. + pub fn new(msg: &str) -> Self { + let output = if std::io::stdout().is_terminal() { + let pb = spinner(); + pb.start(msg); + if global_config().verbose { + pb.stop(msg); + } + SpinnerOutput::Progress(pb) + } else { + logger::info(msg); + SpinnerOutput::Plain() + }; + Spinner { + msg: msg.to_owned(), + output, + time: Instant::now(), + } + } + + /// Manually finish the spinner. + pub fn finish(self) { + self.output.stop(format!( + "{} done in {} secs", + self.msg, + self.time.elapsed().as_secs_f64() + )); + } + + /// Interrupt the spinner with a failed message. + pub fn fail(self) { + self.output.error(format!( + "{} failed in {} secs", + self.msg, + self.time.elapsed().as_secs_f64() + )); + } + + /// Freeze the spinner with current message. + pub fn freeze(self) { + self.output.stop(self.msg); + } +} + +/// An abstraction that makes interactive progress bar optional in environments where virtual +/// terminal is not available. +/// +/// Uses plain `logger::{info,error}` as the fallback. +/// +/// See https://github.com/console-rs/indicatif/issues/530 for more details. 
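Before the fallback enum itself, a self-contained illustration of the terminal check the spinner relies on (std only, Rust 1.70+). The check mirrors `Spinner::new` in the new `spinner.rs` above, but the snippet is illustrative and not part of the diff:

```rust
use std::io::IsTerminal;

fn main() {
    // Animated progress bars are only safe on an interactive terminal;
    // otherwise (CI, piped output) plain log lines are emitted instead.
    if std::io::stdout().is_terminal() {
        println!("interactive terminal: render a spinner");
    } else {
        println!("not a terminal: fall back to plain logging");
    }
}
```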
+enum SpinnerOutput { + Progress(ProgressBar), + Plain(), +} + +impl SpinnerOutput { + fn error(&self, msg: impl Display) { + match self { + SpinnerOutput::Progress(pb) => pb.error(msg), + SpinnerOutput::Plain() => logger::error(msg), + } + } + + fn stop(self, msg: impl Display) { + match self { + SpinnerOutput::Progress(pb) => pb.stop(msg), + SpinnerOutput::Plain() => logger::info(msg), + } + } +} diff --git a/zk_toolbox/crates/common/src/version.rs b/zkstack_cli/crates/common/src/version.rs similarity index 100% rename from zk_toolbox/crates/common/src/version.rs rename to zkstack_cli/crates/common/src/version.rs diff --git a/zkstack_cli/crates/common/src/wallets.rs b/zkstack_cli/crates/common/src/wallets.rs new file mode 100644 index 00000000000..43a9864474c --- /dev/null +++ b/zkstack_cli/crates/common/src/wallets.rs @@ -0,0 +1,102 @@ +use ethers::{ + core::rand::{CryptoRng, Rng}, + signers::{coins_bip39::English, LocalWallet, MnemonicBuilder, Signer}, + types::{Address, H256}, +}; +use serde::{Deserialize, Serialize}; +use types::parse_h256; + +#[derive(Serialize, Deserialize)] +struct WalletSerde { + pub address: Address, + pub private_key: Option<H256>, +} + +#[derive(Debug, Clone)] +pub struct Wallet { + pub address: Address, + pub private_key: Option<LocalWallet>, +} + +impl<'de> Deserialize<'de> for Wallet { + fn deserialize<D: serde::Deserializer<'de>>(d: D) -> Result<Self, D::Error> { + let x = WalletSerde::deserialize(d)?; + Ok(match x.private_key { + None => Self { + address: x.address, + private_key: None, + }, + Some(k) => { + let k = LocalWallet::from_bytes(k.as_bytes()).map_err(serde::de::Error::custom)?; + if k.address() != x.address { + return Err(serde::de::Error::custom(format!( + "address does not match private key: got address {:#x}, want {:#x}", + x.address, + k.address(), + ))); + } + Self::new(k) + } + }) + } +} + +impl Serialize for Wallet { + fn serialize<S: serde::Serializer>(&self, s: S) -> Result<S::Ok, S::Error> { + WalletSerde { + address: self.address, + private_key: self.private_key_h256(), + } + .serialize(s) + } +} + +impl Wallet { + pub fn private_key_h256(&self) -> Option<H256> { + self.private_key + .as_ref() + .map(|k| parse_h256(k.signer().to_bytes().as_slice()).unwrap()) + } + + pub fn random(rng: &mut (impl Rng + CryptoRng)) -> Self { + Self::new(LocalWallet::new(rng)) + } + + pub fn new(private_key: LocalWallet) -> Self { + Self { + address: private_key.address(), + private_key: Some(private_key), + } + } + + pub fn from_mnemonic(mnemonic: &str, base_path: &str, index: u32) -> anyhow::Result<Self> { + let wallet = MnemonicBuilder::<English>::default() + .phrase(mnemonic) + .derivation_path(&format!("{}/{}", base_path, index))?
+ .build()?; + Ok(Self::new(wallet)) + } + + pub fn empty() -> Self { + Self { + address: Address::zero(), + private_key: None, + } + } +} + +#[test] +fn test_load_localhost_wallets() { + let wallet = Wallet::from_mnemonic( + "stuff slice staff easily soup parent arm payment cotton trade scatter struggle", + "m/44'/60'/0'/0", + 1, + ) + .unwrap(); + assert_eq!( + wallet.address, + Address::from_slice( + &ethers::utils::hex::decode("0xa61464658AfeAf65CccaaFD3a512b69A83B77618").unwrap() + ) + ); +} diff --git a/zk_toolbox/crates/common/src/withdraw.rs b/zkstack_cli/crates/common/src/withdraw.rs similarity index 100% rename from zk_toolbox/crates/common/src/withdraw.rs rename to zkstack_cli/crates/common/src/withdraw.rs diff --git a/zk_toolbox/crates/common/src/yaml.rs b/zkstack_cli/crates/common/src/yaml.rs similarity index 100% rename from zk_toolbox/crates/common/src/yaml.rs rename to zkstack_cli/crates/common/src/yaml.rs diff --git a/zk_toolbox/crates/config/Cargo.toml b/zkstack_cli/crates/config/Cargo.toml similarity index 100% rename from zk_toolbox/crates/config/Cargo.toml rename to zkstack_cli/crates/config/Cargo.toml diff --git a/zk_toolbox/crates/config/src/apps.rs b/zkstack_cli/crates/config/src/apps.rs similarity index 96% rename from zk_toolbox/crates/config/src/apps.rs rename to zkstack_cli/crates/config/src/apps.rs index 697b35b0851..3bd611bdc32 100644 --- a/zk_toolbox/crates/config/src/apps.rs +++ b/zkstack_cli/crates/config/src/apps.rs @@ -5,7 +5,7 @@ use xshell::Shell; use crate::{ consts::{APPS_CONFIG_FILE, DEFAULT_EXPLORER_PORT, DEFAULT_PORTAL_PORT, LOCAL_CONFIGS_PATH}, - traits::{FileConfigWithDefaultName, ReadConfig, SaveConfig, ZkToolboxConfig}, + traits::{FileConfigWithDefaultName, ReadConfig, SaveConfig, ZkStackConfig}, }; /// Ecosystem level configuration for the apps (portal and explorer). @@ -20,7 +20,7 @@ pub struct AppEcosystemConfig { pub http_port: u16, } -impl ZkToolboxConfig for AppsEcosystemConfig {} +impl ZkStackConfig for AppsEcosystemConfig {} impl FileConfigWithDefaultName for AppsEcosystemConfig { const FILE_NAME: &'static str = APPS_CONFIG_FILE; } diff --git a/zk_toolbox/crates/config/src/chain.rs b/zkstack_cli/crates/config/src/chain.rs similarity index 94% rename from zk_toolbox/crates/config/src/chain.rs rename to zkstack_cli/crates/config/src/chain.rs index 7240260b380..e6b0d4f61e7 100644 --- a/zk_toolbox/crates/config/src/chain.rs +++ b/zkstack_cli/crates/config/src/chain.rs @@ -17,7 +17,7 @@ use crate::{ create_localhost_wallets, traits::{ FileConfigWithDefaultName, ReadConfig, ReadConfigWithBasePath, SaveConfig, - SaveConfigWithBasePath, ZkToolboxConfig, + SaveConfigWithBasePath, ZkStackConfig, }, ContractsConfig, GeneralConfig, GenesisConfig, SecretsConfig, WalletsConfig, GATEWAY_FILE, }; @@ -41,6 +41,8 @@ pub struct ChainConfigInternal { pub wallet_creation: WalletCreation, #[serde(skip_serializing_if = "Option::is_none")] pub legacy_bridge: Option<bool>, + #[serde(default)] // for backward compatibility + pub evm_emulator: bool, } /// Chain configuration file.
This file is created in the chain @@ -62,6 +64,7 @@ pub struct ChainConfig { pub wallet_creation: WalletCreation, pub shell: OnceCell<Shell>, pub legacy_bridge: Option<bool>, + pub evm_emulator: bool, } impl Serialize for ChainConfig { @@ -88,8 +91,8 @@ impl ChainConfig { pub fn get_wallets_config(&self) -> anyhow::Result<WalletsConfig> { let path = self.configs.join(WALLETS_FILE); - if let Ok(wallets) = WalletsConfig::read(self.get_shell(), &path) { - return Ok(wallets); + if self.get_shell().path_exists(&path) { + return WalletsConfig::read(self.get_shell(), &path); } if self.wallet_creation == WalletCreation::Localhost { let wallets = create_localhost_wallets(self.get_shell(), &self.link_to_code, self.id)?; @@ -170,6 +173,7 @@ impl ChainConfig { base_token: self.base_token.clone(), wallet_creation: self.wallet_creation, legacy_bridge: self.legacy_bridge, + evm_emulator: self.evm_emulator, } } } @@ -178,4 +182,4 @@ impl FileConfigWithDefaultName for ChainConfigInternal { const FILE_NAME: &'static str = CONFIG_NAME; } -impl ZkToolboxConfig for ChainConfigInternal {} +impl ZkStackConfig for ChainConfigInternal {} diff --git a/zk_toolbox/crates/config/src/consensus_config.rs b/zkstack_cli/crates/config/src/consensus_config.rs similarity index 100% rename from zk_toolbox/crates/config/src/consensus_config.rs rename to zkstack_cli/crates/config/src/consensus_config.rs diff --git a/zk_toolbox/crates/config/src/consensus_secrets.rs b/zkstack_cli/crates/config/src/consensus_secrets.rs similarity index 67% rename from zk_toolbox/crates/config/src/consensus_secrets.rs rename to zkstack_cli/crates/config/src/consensus_secrets.rs index 0e5c4592d2f..da551a45279 100644 --- a/zk_toolbox/crates/config/src/consensus_secrets.rs +++ b/zkstack_cli/crates/config/src/consensus_secrets.rs @@ -2,13 +2,13 @@ use std::path::Path; use xshell::Shell; use zksync_config::configs::consensus::ConsensusSecrets; -use zksync_protobuf_config::decode_yaml_repr; +use zksync_protobuf_config::read_yaml_repr; use crate::traits::ReadConfig; impl ReadConfig for ConsensusSecrets { fn read(shell: &Shell, path: impl AsRef<Path>) -> anyhow::Result<Self> { let path = shell.current_dir().join(path); - decode_yaml_repr::(&path, false) + read_yaml_repr::(&path, false) } } diff --git a/zk_toolbox/crates/config/src/consts.rs b/zkstack_cli/crates/config/src/consts.rs similarity index 97% rename from zk_toolbox/crates/config/src/consts.rs rename to zkstack_cli/crates/config/src/consts.rs index 4323e3166e9..c4895b333c7 100644 --- a/zk_toolbox/crates/config/src/consts.rs +++ b/zkstack_cli/crates/config/src/consts.rs @@ -66,8 +66,6 @@ pub const DEFAULT_EXPLORER_WORKER_PORT: u16 = 3001; pub const DEFAULT_EXPLORER_API_PORT: u16 = 3002; /// Default port for the explorer data fetcher service pub const DEFAULT_EXPLORER_DATA_FETCHER_PORT: u16 = 3040; -/// Default port for consensus service -pub const DEFAULT_CONSENSUS_PORT: u16 = 3054; pub const EXPLORER_API_DOCKER_IMAGE: &str = "matterlabs/block-explorer-api"; pub const EXPLORER_DATA_FETCHER_DOCKER_IMAGE: &str = "matterlabs/block-explorer-data-fetcher"; diff --git a/zk_toolbox/crates/config/src/contracts.rs b/zkstack_cli/crates/config/src/contracts.rs similarity index 95% rename from zk_toolbox/crates/config/src/contracts.rs rename to zkstack_cli/crates/config/src/contracts.rs index 3210e4f1926..0213636c437 100644 --- a/zk_toolbox/crates/config/src/contracts.rs +++ b/zkstack_cli/crates/config/src/contracts.rs @@ -12,14 +12,14 @@ use crate::{ }, register_chain::output::RegisterChainOutput, }, - traits::{FileConfigWithDefaultName,
ZkToolboxConfig}, + traits::{FileConfigWithDefaultName, ZkStackConfig}, }; #[derive(Debug, Deserialize, Serialize, Clone, Default)] pub struct ContractsConfig { pub create2_factory_addr: Address, pub create2_factory_salt: H256, - pub ecosystem_contracts: ToolboxEcosystemContracts, + pub ecosystem_contracts: EcosystemContracts, pub bridges: BridgesContracts, pub l1: L1Contracts, pub l2: L2Contracts, @@ -113,7 +113,8 @@ impl ContractsConfig { self.l1.access_control_restriction_addr = register_chain_output.access_control_restriction_addr; self.l1.chain_proxy_admin_addr = register_chain_output.chain_proxy_admin_addr; - self.l2.legacy_shared_bridge_addr = register_chain_output.l2_legacy_shared_bridge_addr; + self.l2.legacy_shared_bridge_addr = + Some(register_chain_output.l2_legacy_shared_bridge_addr); self.user_facing_diamond_proxy = register_chain_output.diamond_proxy_addr; } @@ -159,10 +160,10 @@ impl FileConfigWithDefaultName for ContractsConfig { const FILE_NAME: &'static str = CONTRACTS_FILE; } -impl ZkToolboxConfig for ContractsConfig {} +impl ZkStackConfig for ContractsConfig {} #[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Default)] -pub struct ToolboxEcosystemContracts { +pub struct EcosystemContracts { pub bridgehub_proxy_addr: Address, pub state_transition_proxy_addr: Address, pub transparent_proxy_admin_addr: Address, @@ -173,7 +174,7 @@ pub struct ToolboxEcosystemContracts { pub native_token_vault_addr: Address, } -impl ZkToolboxConfig for ToolboxEcosystemContracts {} +impl ZkStackConfig for EcosystemContracts {} #[derive(Debug, Serialize, Deserialize, Clone, Default)] pub struct BridgesContracts { @@ -213,7 +214,7 @@ pub struct L2Contracts { pub default_l2_upgrader: Address, pub da_validator_addr: Address, pub l2_native_token_vault_proxy_addr: Address, - pub legacy_shared_bridge_addr: Address, pub consensus_registry: Option
<Address>, pub multicall3: Option<Address>, + pub legacy_shared_bridge_addr: Option<Address>, } diff --git a/zk_toolbox/crates/config/src/docker_compose.rs b/zkstack_cli/crates/config/src/docker_compose.rs similarity index 94% rename from zk_toolbox/crates/config/src/docker_compose.rs rename to zkstack_cli/crates/config/src/docker_compose.rs index 05c6e73eaea..2208c5a8654 100644 --- a/zk_toolbox/crates/config/src/docker_compose.rs +++ b/zkstack_cli/crates/config/src/docker_compose.rs @@ -2,7 +2,7 @@ use std::collections::HashMap; use serde::{Deserialize, Serialize}; -use crate::traits::ZkToolboxConfig; +use crate::traits::ZkStackConfig; #[derive(Debug, Default, Serialize, Deserialize, Clone)] pub struct DockerComposeConfig { @@ -34,7 +34,7 @@ pub struct DockerComposeService { pub other: serde_json::Value, } -impl ZkToolboxConfig for DockerComposeConfig {} +impl ZkStackConfig for DockerComposeConfig {} impl DockerComposeConfig { pub fn add_service(&mut self, name: &str, service: DockerComposeService) { diff --git a/zk_toolbox/crates/config/src/ecosystem.rs b/zkstack_cli/crates/config/src/ecosystem.rs similarity index 93% rename from zk_toolbox/crates/config/src/ecosystem.rs rename to zkstack_cli/crates/config/src/ecosystem.rs index 5369ccb3c86..5fe85b175de 100644 --- a/zk_toolbox/crates/config/src/ecosystem.rs +++ b/zkstack_cli/crates/config/src/ecosystem.rs @@ -21,7 +21,7 @@ use crate::{ input::{Erc20DeploymentConfig, InitialDeploymentConfig}, output::{ERC20Tokens, Erc20Token}, }, - traits::{FileConfigWithDefaultName, ReadConfig, SaveConfig, ZkToolboxConfig}, + traits::{FileConfigWithDefaultName, ReadConfig, SaveConfig, ZkStackConfig}, ChainConfig, ChainConfigInternal, ContractsConfig, WalletsConfig, }; @@ -94,9 +94,9 @@ impl FileConfigWithDefaultName for EcosystemConfig { const FILE_NAME: &'static str = CONFIG_NAME; } -impl ZkToolboxConfig for EcosystemConfigInternal {} +impl ZkStackConfig for EcosystemConfigInternal {} -impl ZkToolboxConfig for EcosystemConfig {} +impl ZkStackConfig for EcosystemConfig {} impl EcosystemConfig { fn get_shell(&self) -> &Shell { @@ -146,20 +146,20 @@ impl EcosystemConfig { .unwrap_or(self.default_chain.as_ref()) } - pub fn load_chain(&self, name: Option<String>) -> Option<ChainConfig> { + pub fn load_chain(&self, name: Option<String>) -> anyhow::Result<ChainConfig> { let name = name.unwrap_or(self.default_chain.clone()); self.load_chain_inner(&name) } - pub fn load_current_chain(&self) -> Option<ChainConfig> { + pub fn load_current_chain(&self) -> anyhow::Result<ChainConfig> { self.load_chain_inner(self.current_chain()) } - fn load_chain_inner(&self, name: &str) -> Option<ChainConfig> { + fn load_chain_inner(&self, name: &str) -> anyhow::Result<ChainConfig> { let path = self.chains.join(name).join(CONFIG_NAME); - let config = ChainConfigInternal::read(self.get_shell(), path.clone()).ok()?; + let config = ChainConfigInternal::read(self.get_shell(), path.clone())?; - Some(ChainConfig { + Ok(ChainConfig { id: config.id, name: config.name, chain_id: config.chain_id, @@ -178,6 +178,7 @@ impl EcosystemConfig { .artifacts_path .unwrap_or_else(|| self.get_chain_artifacts_path(name)), legacy_bridge: config.legacy_bridge, + evm_emulator: config.evm_emulator, }) } @@ -196,8 +197,8 @@ impl EcosystemConfig { pub fn get_wallets(&self) -> anyhow::Result<WalletsConfig> { let path = self.config.join(WALLETS_FILE); - if let Ok(wallets) = WalletsConfig::read(self.get_shell(), &path) { - return Ok(wallets); + if self.get_shell().path_exists(&path) { + return WalletsConfig::read(self.get_shell(), &path); } if self.wallet_creation == WalletCreation::Localhost { // Use 0 id for ecosystem wallets @@ -232,7 +233,11 @@ impl EcosystemConfig { } pub fn
get_default_configs_path(&self) -> PathBuf { - self.link_to_code.join(CONFIGS_PATH) + Self::default_configs_path(&self.link_to_code) + } + + pub fn default_configs_path(link_to_code: &Path) -> PathBuf { + link_to_code.join(CONFIGS_PATH) } /// Path to the predefined ecosystem configs diff --git a/zk_toolbox/crates/config/src/explorer.rs b/zkstack_cli/crates/config/src/explorer.rs similarity index 98% rename from zk_toolbox/crates/config/src/explorer.rs rename to zkstack_cli/crates/config/src/explorer.rs index ee7a59e5105..7ce9b986a1e 100644 --- a/zk_toolbox/crates/config/src/explorer.rs +++ b/zkstack_cli/crates/config/src/explorer.rs @@ -8,7 +8,7 @@ use crate::{ consts::{ EXPLORER_CONFIG_FILE, EXPLORER_JS_CONFIG_FILE, LOCAL_APPS_PATH, LOCAL_CONFIGS_PATH, LOCAL_GENERATED_PATH, }, - traits::{ReadConfig, SaveConfig, ZkToolboxConfig}, + traits::{ReadConfig, SaveConfig, ZkStackConfig}, }; /// Explorer JSON configuration file. This file contains configuration for the explorer app. @@ -144,4 +144,4 @@ impl Default for ExplorerConfig { } } -impl ZkToolboxConfig for ExplorerConfig {} +impl ZkStackConfig for ExplorerConfig {} diff --git a/zk_toolbox/crates/config/src/explorer_compose.rs b/zkstack_cli/crates/config/src/explorer_compose.rs similarity index 98% rename from zk_toolbox/crates/config/src/explorer_compose.rs rename to zkstack_cli/crates/config/src/explorer_compose.rs index ca9abc1e3e2..13dd665d2e3 100644 --- a/zk_toolbox/crates/config/src/explorer_compose.rs +++ b/zkstack_cli/crates/config/src/explorer_compose.rs @@ -16,7 +16,7 @@ use crate::{ EXPLORER_WORKER_DOCKER_IMAGE, LOCAL_CHAINS_PATH, LOCAL_CONFIGS_PATH, }, docker_compose::{DockerComposeConfig, DockerComposeService}, - traits::ZkToolboxConfig, + traits::ZkStackConfig, EXPLORER_BATCHES_PROCESSING_POLLING_INTERVAL, }; @@ -72,7 +72,7 @@ pub struct ExplorerBackendComposeConfig { pub docker_compose: DockerComposeConfig, } -impl ZkToolboxConfig for ExplorerBackendComposeConfig {} +impl ZkStackConfig for ExplorerBackendComposeConfig {} impl ExplorerBackendComposeConfig { const API_NAME: &'static str = "api"; diff --git a/zk_toolbox/crates/config/src/external_node.rs b/zkstack_cli/crates/config/src/external_node.rs similarity index 82% rename from zk_toolbox/crates/config/src/external_node.rs rename to zkstack_cli/crates/config/src/external_node.rs index a07ff5dc140..7d884d3e234 100644 --- a/zk_toolbox/crates/config/src/external_node.rs +++ b/zkstack_cli/crates/config/src/external_node.rs @@ -2,7 +2,7 @@ use std::path::Path; use xshell::Shell; pub use zksync_config::configs::en_config::ENConfig; -use zksync_protobuf_config::{decode_yaml_repr, encode_yaml_repr}; +use zksync_protobuf_config::{encode_yaml_repr, read_yaml_repr}; use crate::{ consts::EN_CONFIG_FILE, @@ -23,6 +23,6 @@ impl SaveConfig for ENConfig { impl ReadConfig for ENConfig { fn read(shell: &Shell, path: impl AsRef<Path>) -> anyhow::Result<Self> { let path = shell.current_dir().join(path); - decode_yaml_repr::(&path, false) + read_yaml_repr::(&path, false) } } diff --git a/zk_toolbox/crates/config/src/file_config.rs b/zkstack_cli/crates/config/src/file_config.rs similarity index 100% rename from zk_toolbox/crates/config/src/file_config.rs rename to zkstack_cli/crates/config/src/file_config.rs diff --git a/zk_toolbox/crates/config/src/forge_interface/accept_ownership/mod.rs b/zkstack_cli/crates/config/src/forge_interface/accept_ownership/mod.rs similarity index 71% rename from zk_toolbox/crates/config/src/forge_interface/accept_ownership/mod.rs rename to
zkstack_cli/crates/config/src/forge_interface/accept_ownership/mod.rs index 636cffc49f8..4f73483b393 100644 --- a/zk_toolbox/crates/config/src/forge_interface/accept_ownership/mod.rs +++ b/zkstack_cli/crates/config/src/forge_interface/accept_ownership/mod.rs @@ -1,9 +1,9 @@ use ethers::types::Address; use serde::{Deserialize, Serialize}; -use crate::traits::ZkToolboxConfig; +use crate::traits::ZkStackConfig; -impl ZkToolboxConfig for AcceptOwnershipInput {} +impl ZkStackConfig for AcceptOwnershipInput {} #[derive(Debug, Deserialize, Serialize, Clone)] pub struct AcceptOwnershipInput { diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/input.rs b/zkstack_cli/crates/config/src/forge_interface/deploy_ecosystem/input.rs similarity index 96% rename from zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/input.rs rename to zkstack_cli/crates/config/src/forge_interface/deploy_ecosystem/input.rs index 21c67cde5f2..17b2bac38a3 100644 --- a/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/input.rs +++ b/zkstack_cli/crates/config/src/forge_interface/deploy_ecosystem/input.rs @@ -10,7 +10,7 @@ use zksync_basic_types::L2ChainId; use crate::{ consts::INITIAL_DEPLOYMENT_FILE, - traits::{FileConfigWithDefaultName, ZkToolboxConfig}, + traits::{FileConfigWithDefaultName, ZkStackConfig}, ContractsConfig, GenesisConfig, WalletsConfig, ERC20_DEPLOYMENT_FILE, }; @@ -61,7 +61,7 @@ impl FileConfigWithDefaultName for InitialDeploymentConfig { const FILE_NAME: &'static str = INITIAL_DEPLOYMENT_FILE; } -impl ZkToolboxConfig for InitialDeploymentConfig {} +impl ZkStackConfig for InitialDeploymentConfig {} #[derive(Debug, Deserialize, Serialize, Clone)] pub struct Erc20DeploymentConfig { @@ -72,7 +72,7 @@ impl FileConfigWithDefaultName for Erc20DeploymentConfig { const FILE_NAME: &'static str = ERC20_DEPLOYMENT_FILE; } -impl ZkToolboxConfig for Erc20DeploymentConfig {} +impl ZkStackConfig for Erc20DeploymentConfig {} impl Default for Erc20DeploymentConfig { fn default() -> Self { @@ -115,7 +115,7 @@ pub struct DeployL1Config { pub tokens: TokensDeployL1Config, } -impl ZkToolboxConfig for DeployL1Config {} +impl ZkStackConfig for DeployL1Config {} impl DeployL1Config { pub fn new( @@ -146,6 +146,7 @@ impl DeployL1Config { .diamond_init_minimal_l2_gas_price, bootloader_hash: genesis_config.bootloader_hash.unwrap(), default_aa_hash: genesis_config.default_aa_hash.unwrap(), + evm_emulator_hash: genesis_config.evm_emulator_hash, diamond_init_priority_tx_max_pubdata: initial_deployment_config .diamond_init_priority_tx_max_pubdata, diamond_init_pubdata_pricing_mode: initial_deployment_config @@ -194,6 +195,7 @@ pub struct ContractsDeployL1Config { pub diamond_init_minimal_l2_gas_price: u64, pub bootloader_hash: H256, pub default_aa_hash: H256, + pub evm_emulator_hash: Option<H256>, } #[derive(Debug, Deserialize, Serialize, Clone)] @@ -210,7 +212,7 @@ pub struct DeployErc20Config { pub additional_addresses_for_minting: Vec<Address>
, } -impl ZkToolboxConfig for DeployErc20Config {} +impl ZkStackConfig for DeployErc20Config {} impl DeployErc20Config { pub fn new( diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/mod.rs b/zkstack_cli/crates/config/src/forge_interface/deploy_ecosystem/mod.rs similarity index 100% rename from zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/mod.rs rename to zkstack_cli/crates/config/src/forge_interface/deploy_ecosystem/mod.rs diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/output.rs b/zkstack_cli/crates/config/src/forge_interface/deploy_ecosystem/output.rs similarity index 95% rename from zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/output.rs rename to zkstack_cli/crates/config/src/forge_interface/deploy_ecosystem/output.rs index afda8d30988..31f0ae2ddaa 100644 --- a/zk_toolbox/crates/config/src/forge_interface/deploy_ecosystem/output.rs +++ b/zkstack_cli/crates/config/src/forge_interface/deploy_ecosystem/output.rs @@ -5,7 +5,7 @@ use serde::{Deserialize, Serialize}; use crate::{ consts::ERC20_CONFIGS_FILE, - traits::{FileConfigWithDefaultName, ZkToolboxConfig}, + traits::{FileConfigWithDefaultName, ZkStackConfig}, }; #[derive(Debug, Deserialize, Serialize, Clone)] @@ -37,7 +37,7 @@ pub struct DeployL1DeployedAddressesOutput { pub native_token_vault_addr: Address, } -impl ZkToolboxConfig for DeployL1Output {} +impl ZkStackConfig for DeployL1Output {} #[derive(Debug, Deserialize, Serialize, Clone)] pub struct DeployL1ContractsConfigOutput { @@ -99,4 +99,4 @@ impl FileConfigWithDefaultName for ERC20Tokens { const FILE_NAME: &'static str = ERC20_CONFIGS_FILE; } -impl ZkToolboxConfig for ERC20Tokens {} +impl ZkStackConfig for ERC20Tokens {} diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_gateway_ctm/input.rs b/zkstack_cli/crates/config/src/forge_interface/deploy_gateway_ctm/input.rs similarity index 98% rename from zk_toolbox/crates/config/src/forge_interface/deploy_gateway_ctm/input.rs rename to zkstack_cli/crates/config/src/forge_interface/deploy_gateway_ctm/input.rs index 88db31b7563..bcc747d797c 100644 --- a/zk_toolbox/crates/config/src/forge_interface/deploy_gateway_ctm/input.rs +++ b/zkstack_cli/crates/config/src/forge_interface/deploy_gateway_ctm/input.rs @@ -5,7 +5,7 @@ use zksync_basic_types::{H256, U256}; use zksync_config::GenesisConfig; use crate::{ - forge_interface::deploy_ecosystem::input::InitialDeploymentConfig, traits::ZkToolboxConfig, + forge_interface::deploy_ecosystem::input::InitialDeploymentConfig, traits::ZkStackConfig, ChainConfig, ContractsConfig, EcosystemConfig, }; @@ -49,7 +49,7 @@ pub struct DeployGatewayCTMInput { force_deployments_data: String, } -impl ZkToolboxConfig for DeployGatewayCTMInput {} +impl ZkStackConfig for DeployGatewayCTMInput {} impl DeployGatewayCTMInput { pub fn new( diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_gateway_ctm/mod.rs b/zkstack_cli/crates/config/src/forge_interface/deploy_gateway_ctm/mod.rs similarity index 100% rename from zk_toolbox/crates/config/src/forge_interface/deploy_gateway_ctm/mod.rs rename to zkstack_cli/crates/config/src/forge_interface/deploy_gateway_ctm/mod.rs diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_gateway_ctm/output.rs b/zkstack_cli/crates/config/src/forge_interface/deploy_gateway_ctm/output.rs similarity index 92% rename from zk_toolbox/crates/config/src/forge_interface/deploy_gateway_ctm/output.rs rename to 
zkstack_cli/crates/config/src/forge_interface/deploy_gateway_ctm/output.rs index 9cbec63f0b9..33661fb6ebe 100644 --- a/zk_toolbox/crates/config/src/forge_interface/deploy_gateway_ctm/output.rs +++ b/zkstack_cli/crates/config/src/forge_interface/deploy_gateway_ctm/output.rs @@ -1,7 +1,7 @@ use ethers::abi::Address; use serde::{Deserialize, Serialize}; -use crate::traits::ZkToolboxConfig; +use crate::traits::ZkStackConfig; #[derive(Debug, Serialize, Deserialize, Clone)] pub struct DeployGatewayCTMOutput { @@ -12,7 +12,7 @@ pub struct DeployGatewayCTMOutput { pub diamond_cut_data: String, } -impl ZkToolboxConfig for DeployGatewayCTMOutput {} +impl ZkStackConfig for DeployGatewayCTMOutput {} #[derive(Debug, Serialize, Deserialize, Clone)] pub struct StateTransitionDeployedAddresses { diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/input.rs b/zkstack_cli/crates/config/src/forge_interface/deploy_l2_contracts/input.rs similarity index 92% rename from zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/input.rs rename to zkstack_cli/crates/config/src/forge_interface/deploy_l2_contracts/input.rs index 68b637c2d52..87014baa755 100644 --- a/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/input.rs +++ b/zkstack_cli/crates/config/src/forge_interface/deploy_l2_contracts/input.rs @@ -3,9 +3,9 @@ use serde::{Deserialize, Serialize}; use types::L1BatchCommitmentMode; use zksync_basic_types::L2ChainId; -use crate::{traits::ZkToolboxConfig, ChainConfig, ContractsConfig}; +use crate::{traits::ZkStackConfig, ChainConfig, ContractsConfig}; -impl ZkToolboxConfig for DeployL2ContractsInput {} +impl ZkStackConfig for DeployL2ContractsInput {} /// Fields corresponding to `contracts/l1-contracts/deploy-script-config-template/config-deploy-l2-config.toml` /// which are read by `contracts/l1-contracts/deploy-scripts/DeployL2Contracts.sol`. 
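These forge-interface structs all follow the same pattern: the CLI serializes an input struct to a TOML file at the path the Solidity deploy script expects, runs the script, and reads the script's output file back. A standalone sketch of the write side, assuming the `serde`, `toml`, and `anyhow` crates; the field names are illustrative, not the actual `DeployL2ContractsInput` layout (the real structs and the `ZkStackConfig`/`SaveConfig` traits live in `zkstack_cli/crates/config`):

```rust
use serde::Serialize;

#[derive(Serialize)]
struct DeployL2Input {
    chain_id: u64,     // illustrative field
    bridgehub: String, // illustrative field: an L1 address as a hex string
}

fn main() -> anyhow::Result<()> {
    let input = DeployL2Input {
        chain_id: 271,
        bridgehub: "0x0000000000000000000000000000000000000000".into(),
    };
    // Write the file where the deploy-script config template expects it.
    std::fs::write(
        "config-deploy-l2-config.toml",
        toml::to_string_pretty(&input)?,
    )?;
    Ok(())
}
```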
diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/mod.rs b/zkstack_cli/crates/config/src/forge_interface/deploy_l2_contracts/mod.rs similarity index 100% rename from zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/mod.rs rename to zkstack_cli/crates/config/src/forge_interface/deploy_l2_contracts/mod.rs diff --git a/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/output.rs b/zkstack_cli/crates/config/src/forge_interface/deploy_l2_contracts/output.rs similarity index 72% rename from zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/output.rs rename to zkstack_cli/crates/config/src/forge_interface/deploy_l2_contracts/output.rs index 623eb9d4d65..508e349f5ed 100644 --- a/zk_toolbox/crates/config/src/forge_interface/deploy_l2_contracts/output.rs +++ b/zkstack_cli/crates/config/src/forge_interface/deploy_l2_contracts/output.rs @@ -1,12 +1,12 @@ use ethers::types::Address; use serde::{Deserialize, Serialize}; -use crate::traits::ZkToolboxConfig; +use crate::traits::ZkStackConfig; -impl ZkToolboxConfig for InitializeBridgeOutput {} -impl ZkToolboxConfig for DefaultL2UpgradeOutput {} -impl ZkToolboxConfig for ConsensusRegistryOutput {} -impl ZkToolboxConfig for Multicall3Output {} +impl ZkStackConfig for InitializeBridgeOutput {} +impl ZkStackConfig for DefaultL2UpgradeOutput {} +impl ZkStackConfig for ConsensusRegistryOutput {} +impl ZkStackConfig for Multicall3Output {} #[derive(Debug, Clone, Serialize, Deserialize)] pub struct InitializeBridgeOutput { diff --git a/zk_toolbox/crates/config/src/forge_interface/gateway_preparation/input.rs b/zkstack_cli/crates/config/src/forge_interface/gateway_preparation/input.rs similarity index 95% rename from zk_toolbox/crates/config/src/forge_interface/gateway_preparation/input.rs rename to zkstack_cli/crates/config/src/forge_interface/gateway_preparation/input.rs index 3689c64f0ff..a958915fd9b 100644 --- a/zk_toolbox/crates/config/src/forge_interface/gateway_preparation/input.rs +++ b/zkstack_cli/crates/config/src/forge_interface/gateway_preparation/input.rs @@ -3,7 +3,7 @@ use serde::{Deserialize, Serialize}; use zksync_basic_types::{web3::Bytes, Address}; use zksync_config::configs::GatewayConfig; -use crate::{traits::ZkToolboxConfig, ChainConfig, ContractsConfig}; +use crate::{traits::ZkStackConfig, ChainConfig, ContractsConfig}; #[derive(Debug, Clone, Serialize, Deserialize)] pub struct GatewayPreparationConfig { @@ -20,7 +20,8 @@ pub struct GatewayPreparationConfig { pub access_control_restriction: Address, pub l1_nullifier_proxy_addr: Address, } -impl ZkToolboxConfig for GatewayPreparationConfig {} + +impl ZkStackConfig for GatewayPreparationConfig {} impl GatewayPreparationConfig { pub fn new( diff --git a/zk_toolbox/crates/config/src/forge_interface/gateway_preparation/mod.rs b/zkstack_cli/crates/config/src/forge_interface/gateway_preparation/mod.rs similarity index 100% rename from zk_toolbox/crates/config/src/forge_interface/gateway_preparation/mod.rs rename to zkstack_cli/crates/config/src/forge_interface/gateway_preparation/mod.rs diff --git a/zk_toolbox/crates/config/src/forge_interface/gateway_preparation/output.rs b/zkstack_cli/crates/config/src/forge_interface/gateway_preparation/output.rs similarity index 78% rename from zk_toolbox/crates/config/src/forge_interface/gateway_preparation/output.rs rename to zkstack_cli/crates/config/src/forge_interface/gateway_preparation/output.rs index 72373eebc53..7160a0af4c8 100644 --- 
a/zk_toolbox/crates/config/src/forge_interface/gateway_preparation/output.rs +++ b/zkstack_cli/crates/config/src/forge_interface/gateway_preparation/output.rs @@ -1,7 +1,7 @@ use serde::{Deserialize, Serialize}; use zksync_basic_types::{Address, H256}; -use crate::traits::ZkToolboxConfig; +use crate::traits::ZkStackConfig; #[derive(Debug, Clone, Serialize, Deserialize)] pub struct GatewayPreparationOutput { @@ -10,4 +10,4 @@ pub struct GatewayPreparationOutput { pub gateway_transaction_filterer_proxy: Address, } -impl ZkToolboxConfig for GatewayPreparationOutput {} +impl ZkStackConfig for GatewayPreparationOutput {} diff --git a/zk_toolbox/crates/config/src/forge_interface/mod.rs b/zkstack_cli/crates/config/src/forge_interface/mod.rs similarity index 100% rename from zk_toolbox/crates/config/src/forge_interface/mod.rs rename to zkstack_cli/crates/config/src/forge_interface/mod.rs diff --git a/zk_toolbox/crates/config/src/forge_interface/paymaster/mod.rs b/zkstack_cli/crates/config/src/forge_interface/paymaster/mod.rs similarity index 83% rename from zk_toolbox/crates/config/src/forge_interface/paymaster/mod.rs rename to zkstack_cli/crates/config/src/forge_interface/paymaster/mod.rs index 9631fe74318..2af7502e0b7 100644 --- a/zk_toolbox/crates/config/src/forge_interface/paymaster/mod.rs +++ b/zkstack_cli/crates/config/src/forge_interface/paymaster/mod.rs @@ -2,7 +2,7 @@ use ethers::types::Address; use serde::{Deserialize, Serialize}; use zksync_basic_types::L2ChainId; -use crate::{traits::ZkToolboxConfig, ChainConfig}; +use crate::{traits::ZkStackConfig, ChainConfig}; #[derive(Debug, Serialize, Deserialize, Clone)] pub struct DeployPaymasterInput { @@ -22,11 +22,11 @@ impl DeployPaymasterInput { } } -impl ZkToolboxConfig for DeployPaymasterInput {} +impl ZkStackConfig for DeployPaymasterInput {} #[derive(Debug, Serialize, Deserialize, Clone)] pub struct DeployPaymasterOutput { pub paymaster: Address, } -impl ZkToolboxConfig for DeployPaymasterOutput {} +impl ZkStackConfig for DeployPaymasterOutput {} diff --git a/zk_toolbox/crates/config/src/forge_interface/register_chain/input.rs b/zkstack_cli/crates/config/src/forge_interface/register_chain/input.rs similarity index 97% rename from zk_toolbox/crates/config/src/forge_interface/register_chain/input.rs rename to zkstack_cli/crates/config/src/forge_interface/register_chain/input.rs index bb6d61c6f8d..8689bb496c6 100644 --- a/zk_toolbox/crates/config/src/forge_interface/register_chain/input.rs +++ b/zkstack_cli/crates/config/src/forge_interface/register_chain/input.rs @@ -4,7 +4,7 @@ use serde::{Deserialize, Serialize}; use types::L1BatchCommitmentMode; use zksync_basic_types::L2ChainId; -use crate::{traits::ZkToolboxConfig, ChainConfig, ContractsConfig}; +use crate::{traits::ZkStackConfig, ChainConfig, ContractsConfig}; #[derive(Debug, Deserialize, Serialize, Clone)] pub struct RegisterChainL1Config { @@ -59,7 +59,7 @@ pub struct ChainL1Config { pub governance_min_delay: u64, } -impl ZkToolboxConfig for RegisterChainL1Config {} +impl ZkStackConfig for RegisterChainL1Config {} impl RegisterChainL1Config { pub fn new(chain_config: &ChainConfig, contracts: &ContractsConfig) -> anyhow::Result { diff --git a/zk_toolbox/crates/config/src/forge_interface/register_chain/mod.rs b/zkstack_cli/crates/config/src/forge_interface/register_chain/mod.rs similarity index 100% rename from zk_toolbox/crates/config/src/forge_interface/register_chain/mod.rs rename to zkstack_cli/crates/config/src/forge_interface/register_chain/mod.rs diff --git 
a/zk_toolbox/crates/config/src/forge_interface/register_chain/output.rs b/zkstack_cli/crates/config/src/forge_interface/register_chain/output.rs similarity index 82% rename from zk_toolbox/crates/config/src/forge_interface/register_chain/output.rs rename to zkstack_cli/crates/config/src/forge_interface/register_chain/output.rs index 2281e8fc2d5..9d399ce3c25 100644 --- a/zk_toolbox/crates/config/src/forge_interface/register_chain/output.rs +++ b/zkstack_cli/crates/config/src/forge_interface/register_chain/output.rs @@ -1,7 +1,7 @@ use ethers::types::Address; use serde::{Deserialize, Serialize}; -use crate::traits::ZkToolboxConfig; +use crate::traits::ZkStackConfig; #[derive(Debug, Deserialize, Serialize, Clone)] pub struct RegisterChainOutput { @@ -13,4 +13,4 @@ pub struct RegisterChainOutput { pub chain_proxy_admin_addr: Address, } -impl ZkToolboxConfig for RegisterChainOutput {} +impl ZkStackConfig for RegisterChainOutput {} diff --git a/zk_toolbox/crates/config/src/forge_interface/script_params.rs b/zkstack_cli/crates/config/src/forge_interface/script_params.rs similarity index 100% rename from zk_toolbox/crates/config/src/forge_interface/script_params.rs rename to zkstack_cli/crates/config/src/forge_interface/script_params.rs diff --git a/zk_toolbox/crates/config/src/forge_interface/setup_legacy_bridge/mod.rs b/zkstack_cli/crates/config/src/forge_interface/setup_legacy_bridge/mod.rs similarity index 86% rename from zk_toolbox/crates/config/src/forge_interface/setup_legacy_bridge/mod.rs rename to zkstack_cli/crates/config/src/forge_interface/setup_legacy_bridge/mod.rs index e8189c521fb..201cf86b734 100644 --- a/zk_toolbox/crates/config/src/forge_interface/setup_legacy_bridge/mod.rs +++ b/zkstack_cli/crates/config/src/forge_interface/setup_legacy_bridge/mod.rs @@ -1,7 +1,7 @@ use serde::{Deserialize, Serialize}; use zksync_basic_types::{Address, L2ChainId, H256}; -use crate::traits::ZkToolboxConfig; +use crate::traits::ZkStackConfig; #[derive(Debug, Clone, Serialize, Deserialize)] pub struct SetupLegacyBridgeInput { @@ -17,4 +17,4 @@ pub struct SetupLegacyBridgeInput { pub create2factory_addr: Address, } -impl ZkToolboxConfig for SetupLegacyBridgeInput {} +impl ZkStackConfig for SetupLegacyBridgeInput {} diff --git a/zk_toolbox/crates/config/src/gateway.rs b/zkstack_cli/crates/config/src/gateway.rs similarity index 92% rename from zk_toolbox/crates/config/src/gateway.rs rename to zkstack_cli/crates/config/src/gateway.rs index 37410b9a5e6..67b5ad327cc 100644 --- a/zk_toolbox/crates/config/src/gateway.rs +++ b/zkstack_cli/crates/config/src/gateway.rs @@ -3,7 +3,7 @@ use zksync_config::configs::{gateway::GatewayChainConfig, GatewayConfig}; use crate::{ forge_interface::deploy_gateway_ctm::output::DeployGatewayCTMOutput, - traits::{FileConfigWithDefaultName, ZkToolboxConfig}, + traits::{FileConfigWithDefaultName, ZkStackConfig}, GATEWAY_CHAIN_FILE, GATEWAY_FILE, }; @@ -11,7 +11,7 @@ impl FileConfigWithDefaultName for GatewayConfig { const FILE_NAME: &'static str = GATEWAY_FILE; } -impl ZkToolboxConfig for GatewayConfig {} +impl ZkStackConfig for GatewayConfig {} impl From for GatewayConfig { fn from(output: DeployGatewayCTMOutput) -> Self { @@ -43,4 +43,4 @@ impl FileConfigWithDefaultName for GatewayChainConfig { const FILE_NAME: &'static str = GATEWAY_CHAIN_FILE; } -impl ZkToolboxConfig for GatewayChainConfig {} +impl ZkStackConfig for GatewayChainConfig {} diff --git a/zk_toolbox/crates/config/src/general.rs b/zkstack_cli/crates/config/src/general.rs similarity index 96% rename from 
zk_toolbox/crates/config/src/general.rs rename to zkstack_cli/crates/config/src/general.rs index a8e7407edd0..0079105b66c 100644 --- a/zk_toolbox/crates/config/src/general.rs +++ b/zkstack_cli/crates/config/src/general.rs @@ -6,7 +6,7 @@ use url::Url; use xshell::Shell; use zksync_config::configs::object_store::ObjectStoreMode; pub use zksync_config::configs::GeneralConfig; -use zksync_protobuf_config::{decode_yaml_repr, encode_yaml_repr}; +use zksync_protobuf_config::{encode_yaml_repr, read_yaml_repr}; use crate::{ consts::GENERAL_FILE, @@ -137,7 +137,7 @@ impl SaveConfig for GeneralConfig { impl ReadConfig for GeneralConfig { fn read(shell: &Shell, path: impl AsRef) -> anyhow::Result { let path = shell.current_dir().join(path); - decode_yaml_repr::(&path, false) + read_yaml_repr::(&path, false) } } diff --git a/zk_toolbox/crates/config/src/genesis.rs b/zkstack_cli/crates/config/src/genesis.rs similarity index 64% rename from zk_toolbox/crates/config/src/genesis.rs rename to zkstack_cli/crates/config/src/genesis.rs index a6469893fed..2d9ac7fcdc6 100644 --- a/zk_toolbox/crates/config/src/genesis.rs +++ b/zkstack_cli/crates/config/src/genesis.rs @@ -1,9 +1,10 @@ use std::path::Path; +use anyhow::Context as _; use xshell::Shell; use zksync_basic_types::L1ChainId; pub use zksync_config::GenesisConfig; -use zksync_protobuf_config::{decode_yaml_repr, encode_yaml_repr}; +use zksync_protobuf_config::{encode_yaml_repr, read_yaml_repr}; use crate::{ consts::GENESIS_FILE, @@ -11,11 +12,23 @@ use crate::{ ChainConfig, }; -pub fn update_from_chain_config(genesis: &mut GenesisConfig, config: &ChainConfig) { +pub fn update_from_chain_config( + genesis: &mut GenesisConfig, + config: &ChainConfig, +) -> anyhow::Result<()> { genesis.l2_chain_id = config.chain_id; // TODO(EVM-676): for now, the settlement layer is always the same as the L1 network genesis.l1_chain_id = L1ChainId(config.l1_network.chain_id()); genesis.l1_batch_commit_data_generator_mode = config.l1_batch_commit_data_generator_mode; + genesis.evm_emulator_hash = if config.evm_emulator { + Some(genesis.evm_emulator_hash.context( + "impossible to initialize a chain with EVM emulator: the template genesis config \ + does not contain EVM emulator hash", + )?) 
+ } else { + None + }; + Ok(()) } impl FileConfigWithDefaultName for GenesisConfig { @@ -32,6 +45,6 @@ impl SaveConfig for GenesisConfig { impl ReadConfig for GenesisConfig { fn read(shell: &Shell, path: impl AsRef) -> anyhow::Result { let path = shell.current_dir().join(path); - decode_yaml_repr::(&path, false) + read_yaml_repr::(&path, false) } }
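With this change, `update_from_chain_config` becomes fallible: enabling the EVM emulator on a chain whose genesis template carries no `evm_emulator_hash` is reported as an error instead of passing through silently. A minimal caller sketch under that assumption — `prepare_genesis`, the path argument, and the `ChainConfig` value are illustrative, not taken from this diff, and imports from the config crate are elided:

```rust
use std::path::Path;

use anyhow::Context as _;
use xshell::Shell;

// Hypothetical call site: read a genesis template, apply the chain's
// overrides (now fallible because of the EVM emulator hash check), save.
fn prepare_genesis(shell: &Shell, chain: &ChainConfig, path: &Path) -> anyhow::Result<()> {
    let mut genesis = GenesisConfig::read(shell, path)?;
    update_from_chain_config(&mut genesis, chain)
        .context("failed to apply chain overrides to the genesis template")?;
    genesis.save(shell, path)
}
```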
diff --git a/zk_toolbox/crates/config/src/lib.rs b/zkstack_cli/crates/config/src/lib.rs similarity index 90% rename from zk_toolbox/crates/config/src/lib.rs rename to zkstack_cli/crates/config/src/lib.rs index 53ac423b823..4d4fb8da61d 100644 --- a/zk_toolbox/crates/config/src/lib.rs +++ b/zkstack_cli/crates/config/src/lib.rs @@ -10,7 +10,7 @@ pub use manipulations::*; pub use secrets::*; pub use wallet_creation::*; pub use wallets::*; -pub use zksync_protobuf_config::{decode_yaml_repr, encode_yaml_repr}; +pub use zksync_protobuf_config::{encode_yaml_repr, read_yaml_repr}; mod apps; mod chain; diff --git a/zk_toolbox/crates/config/src/manipulations.rs b/zkstack_cli/crates/config/src/manipulations.rs similarity index 100% rename from zk_toolbox/crates/config/src/manipulations.rs rename to zkstack_cli/crates/config/src/manipulations.rs diff --git a/zk_toolbox/crates/config/src/portal.rs b/zkstack_cli/crates/config/src/portal.rs similarity index 98% rename from zk_toolbox/crates/config/src/portal.rs rename to zkstack_cli/crates/config/src/portal.rs index c787c6cc702..2b6f0ffd515 100644 --- a/zk_toolbox/crates/config/src/portal.rs +++ b/zkstack_cli/crates/config/src/portal.rs @@ -9,7 +9,7 @@ use crate::{ LOCAL_APPS_PATH, LOCAL_CONFIGS_PATH, LOCAL_GENERATED_PATH, PORTAL_CONFIG_FILE, PORTAL_JS_CONFIG_FILE, }, - traits::{ReadConfig, SaveConfig, ZkToolboxConfig}, + traits::{ReadConfig, SaveConfig, ZkStackConfig}, }; /// Portal JSON configuration file. This file contains configuration for the portal app. @@ -172,4 +172,4 @@ impl Default for PortalConfig { } } -impl ZkToolboxConfig for PortalConfig {} +impl ZkStackConfig for PortalConfig {} diff --git a/zk_toolbox/crates/config/src/secrets.rs b/zkstack_cli/crates/config/src/secrets.rs similarity index 80% rename from zk_toolbox/crates/config/src/secrets.rs rename to zkstack_cli/crates/config/src/secrets.rs index f0a39148b03..cf0a9927c56 100644 --- a/zk_toolbox/crates/config/src/secrets.rs +++ b/zkstack_cli/crates/config/src/secrets.rs @@ -5,24 +5,22 @@ use common::db::DatabaseConfig; use xshell::Shell; use zksync_basic_types::url::SensitiveUrl; pub use zksync_config::configs::Secrets as SecretsConfig; -use zksync_protobuf_config::{decode_yaml_repr, encode_yaml_repr}; +use zksync_protobuf_config::{encode_yaml_repr, read_yaml_repr}; use crate::{ consts::SECRETS_FILE, traits::{FileConfigWithDefaultName, ReadConfig, SaveConfig}, }; -pub fn set_databases( +pub fn set_server_database( secrets: &mut SecretsConfig, server_db_config: &DatabaseConfig, - prover_db_config: &DatabaseConfig, ) -> anyhow::Result<()> { let database = secrets .database .as_mut() - .context("Databases must be presented")?; + .context("Server database must be present")?; database.server_url = Some(SensitiveUrl::from(server_db_config.full_url())); - database.prover_url = Some(SensitiveUrl::from(prover_db_config.full_url())); Ok(()) } @@ -33,7 +31,7 @@ pub fn set_prover_database( let database = secrets .database .as_mut() - .context("Databases must be presented")?; + .context("Prover database must be present")?; database.prover_url = Some(SensitiveUrl::from(prover_db_config.full_url())); Ok(()) } @@ -61,6 +59,6 @@ impl SaveConfig for SecretsConfig { impl ReadConfig for SecretsConfig { fn read(shell: &Shell, path: impl AsRef) -> anyhow::Result { let path = shell.current_dir().join(path); - decode_yaml_repr::(&path, false) + read_yaml_repr::(&path, false) } }
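Splitting the old `set_databases` helper into `set_server_database` and `set_prover_database` lets a caller configure only the databases the chain actually uses; the prover URL no longer has to be supplied when there is no prover. A hedged sketch of the resulting call pattern — the wrapper function and its arguments are illustrative, and imports from the config crate are elided:

```rust
use std::path::Path;

use common::db::DatabaseConfig;
use xshell::Shell;

// Hypothetical call site: the server database is always configured,
// the prover database only when the chain runs a prover.
fn configure_secrets(
    shell: &Shell,
    path: &Path,
    server_db: &DatabaseConfig,
    prover_db: Option<&DatabaseConfig>,
) -> anyhow::Result<()> {
    let mut secrets = SecretsConfig::read(shell, path)?;
    set_server_database(&mut secrets, server_db)?;
    if let Some(prover_db) = prover_db {
        set_prover_database(&mut secrets, prover_db)?;
    }
    secrets.save(shell, path)
}
```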
diff --git a/zk_toolbox/crates/config/src/traits.rs b/zkstack_cli/crates/config/src/traits.rs similarity index 95% rename from zk_toolbox/crates/config/src/traits.rs rename to zkstack_cli/crates/config/src/traits.rs index bb0722762e3..a4a4ad22c61 100644 --- a/zk_toolbox/crates/config/src/traits.rs +++ b/zkstack_cli/crates/config/src/traits.rs @@ -8,8 +8,8 @@ use serde::{de::DeserializeOwned, Serialize}; use url::Url; use xshell::Shell; -// Configs that we use only inside zk toolbox, we don't have protobuf implementation for them. -pub trait ZkToolboxConfig {} +// Configs that we use only inside ZK Stack CLI; we don't have a protobuf implementation for them. +pub trait ZkStackConfig {} pub trait FileConfigWithDefaultName { const FILE_NAME: &'static str; @@ -19,7 +19,7 @@ } } -impl SaveConfig for T { +impl SaveConfig for T { fn save(&self, shell: &Shell, path: impl AsRef) -> anyhow::Result<()> { save_with_comment(shell, path, self, "") } @@ -49,7 +49,7 @@ pub trait ReadConfig: Sized { impl ReadConfig for T where - T: DeserializeOwned + Clone + ZkToolboxConfig, + T: DeserializeOwned + Clone + ZkStackConfig, { fn read(shell: &Shell, path: impl AsRef) -> anyhow::Result { let error_context = || format!("Failed to parse config file {:?}.", path.as_ref());
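The renamed marker trait stays the single opt-in point for plain serde-backed configs: implementing `ZkStackConfig` on a `Serialize`/`DeserializeOwned` type picks up the blanket `SaveConfig` and `ReadConfig` impls above. A sketch under that assumption — `DeployFooInput` is a made-up type, not part of this diff, and the `ZkStackConfig` import from the config crate is elided:

```rust
use serde::{Deserialize, Serialize};

// Hypothetical config for a forge script; the blanket impls in traits.rs
// then provide `read`/`save` for it with no per-type boilerplate.
#[derive(Debug, Clone, Serialize, Deserialize)]
struct DeployFooInput {
    pub chain_id: u64,
    pub owner: String,
}

impl ZkStackConfig for DeployFooInput {}

// Usage (shell and path are illustrative):
// let input = DeployFooInput::read(&shell, "deploy-foo-input.toml")?;
// input.save(&shell, "deploy-foo-input.toml")?;
```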
diff --git a/zk_toolbox/crates/config/src/wallet_creation.rs b/zkstack_cli/crates/config/src/wallet_creation.rs similarity index 100% rename from zk_toolbox/crates/config/src/wallet_creation.rs rename to zkstack_cli/crates/config/src/wallet_creation.rs diff --git a/zk_toolbox/crates/config/src/wallets.rs b/zkstack_cli/crates/config/src/wallets.rs similarity index 75% rename from zk_toolbox/crates/config/src/wallets.rs rename to zkstack_cli/crates/config/src/wallets.rs index 9c87453954e..735848f6e34 100644 --- a/zk_toolbox/crates/config/src/wallets.rs +++ b/zkstack_cli/crates/config/src/wallets.rs @@ -1,11 +1,10 @@ use common::wallets::Wallet; -use ethers::types::H256; -use rand::Rng; +use rand::{CryptoRng, Rng}; use serde::{Deserialize, Serialize}; use crate::{ consts::WALLETS_FILE, - traits::{FileConfigWithDefaultName, ZkToolboxConfig}, + traits::{FileConfigWithDefaultName, ZkStackConfig}, }; #[derive(Debug, Clone, Serialize, Deserialize)] @@ -20,7 +19,7 @@ pub struct WalletsConfig { impl WalletsConfig { /// Generate random wallets - pub fn random(rng: &mut impl Rng) -> Self { + pub fn random(rng: &mut (impl CryptoRng + Rng)) -> Self { Self { deployer: Some(Wallet::random(rng)), operator: Wallet::random(rng), @@ -42,13 +41,6 @@ impl WalletsConfig { token_multiplier_setter: Some(Wallet::empty()), } } - pub fn deployer_private_key(&self) -> Option { self.deployer.as_ref().and_then(|wallet| wallet.private_key) } - - pub fn governor_private_key(&self) -> Option { self.governor.private_key } } impl FileConfigWithDefaultName for WalletsConfig { @@ -63,6 +55,6 @@ pub(crate) struct EthMnemonicConfig { pub(crate) base_path: String, } -impl ZkToolboxConfig for EthMnemonicConfig {} +impl ZkStackConfig for EthMnemonicConfig {} -impl ZkToolboxConfig for WalletsConfig {} +impl ZkStackConfig for WalletsConfig {}
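Tightening the bound on `WalletsConfig::random` from `impl Rng` to `impl CryptoRng + Rng` makes the compiler reject non-cryptographic generators for wallet creation. A small illustration, assuming the rand 0.8 API used elsewhere in the workspace (the `WalletsConfig` import is elided):

```rust
use rand::rngs::OsRng;

fn generate_wallets() -> WalletsConfig {
    // OsRng is a CSPRNG (it implements CryptoRng), so it satisfies the bound.
    WalletsConfig::random(&mut OsRng)

    // A deterministic generator such as rand::rngs::mock::StepRng does not
    // implement CryptoRng, so this would no longer compile:
    // WalletsConfig::random(&mut rand::rngs::mock::StepRng::new(0, 1))
}
```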
diff --git a/zk_toolbox/crates/git_version_macro/Cargo.toml b/zkstack_cli/crates/git_version_macro/Cargo.toml similarity index 100% rename from zk_toolbox/crates/git_version_macro/Cargo.toml rename to zkstack_cli/crates/git_version_macro/Cargo.toml diff --git a/zk_toolbox/crates/git_version_macro/src/lib.rs b/zkstack_cli/crates/git_version_macro/src/lib.rs similarity index 100% rename from zk_toolbox/crates/git_version_macro/src/lib.rs rename to zkstack_cli/crates/git_version_macro/src/lib.rs diff --git a/zk_toolbox/crates/types/Cargo.toml b/zkstack_cli/crates/types/Cargo.toml similarity index 100% rename from zk_toolbox/crates/types/Cargo.toml rename to zkstack_cli/crates/types/Cargo.toml diff --git a/zk_toolbox/crates/types/src/base_token.rs b/zkstack_cli/crates/types/src/base_token.rs similarity index 100% rename from zk_toolbox/crates/types/src/base_token.rs rename to zkstack_cli/crates/types/src/base_token.rs diff --git a/zk_toolbox/crates/types/src/l1_network.rs b/zkstack_cli/crates/types/src/l1_network.rs similarity index 100% rename from zk_toolbox/crates/types/src/l1_network.rs rename to zkstack_cli/crates/types/src/l1_network.rs diff --git a/zk_toolbox/crates/types/src/lib.rs b/zkstack_cli/crates/types/src/lib.rs similarity index 71% rename from zk_toolbox/crates/types/src/lib.rs rename to zkstack_cli/crates/types/src/lib.rs index 8b647057105..075e39345bc 100644 --- a/zk_toolbox/crates/types/src/lib.rs +++ b/zkstack_cli/crates/types/src/lib.rs @@ -10,5 +10,5 @@ pub use prover_mode::*; pub use token_info::*; pub use wallet_creation::*; pub use zksync_basic_types::{ - commitment::L1BatchCommitmentMode, protocol_version::ProtocolSemanticVersion, + commitment::L1BatchCommitmentMode, parse_h256, protocol_version::ProtocolSemanticVersion, }; diff --git a/zk_toolbox/crates/types/src/prover_mode.rs b/zkstack_cli/crates/types/src/prover_mode.rs similarity index 100% rename from zk_toolbox/crates/types/src/prover_mode.rs rename to zkstack_cli/crates/types/src/prover_mode.rs diff --git a/zk_toolbox/crates/types/src/token_info.rs b/zkstack_cli/crates/types/src/token_info.rs similarity index 100% rename from zk_toolbox/crates/types/src/token_info.rs rename to zkstack_cli/crates/types/src/token_info.rs diff --git a/zk_toolbox/crates/types/src/wallet_creation.rs b/zkstack_cli/crates/types/src/wallet_creation.rs similarity index 100% rename from zk_toolbox/crates/types/src/wallet_creation.rs rename to zkstack_cli/crates/types/src/wallet_creation.rs diff --git a/zk_toolbox/crates/zk_inception/Cargo.toml b/zkstack_cli/crates/zkstack/Cargo.toml similarity index 69% rename from zk_toolbox/crates/zk_inception/Cargo.toml rename to zkstack_cli/crates/zkstack/Cargo.toml index c95b2256f58..0a66036854e 100644 --- a/zk_toolbox/crates/zk_inception/Cargo.toml +++ b/zkstack_cli/crates/zkstack/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "zk_inception" +name = "zkstack" version = "0.1.0" edition.workspace = true homepage.workspace = true @@ -12,34 +12,51 @@ keywords.workspace = true [dependencies] anyhow.workspace = true +chrono.workspace = true clap.workspace = true +clap_complete.workspace = true +clap-markdown.workspace = true cliclack.workspace = true +common.workspace = true config.workspace = true +dirs.workspace = true +ethers.workspace = true +futures.workspace = true human-panic.workspace = true lazy_static.workspace = true -serde_yaml.workspace = true +secrecy.workspace = true serde.workspace = true serde_json.workspace = true -xshell.workspace = true -ethers.workspace = true -common.workspace = true -tokio.workspace = true -types.workspace = true +serde_yaml.workspace = true +slugify-rs.workspace = true strum.workspace = true +sqruff-lib = "0.19.0" +thiserror.workspace = true +tokio.workspace = true toml.workspace = true +types.workspace = true url.workspace = true -thiserror.workspace = true -zksync_config.workspace = true -zksync_system_constants.workspace = true -slugify-rs.workspace = true +xshell.workspace = true zksync_basic_types.workspace = true -clap-markdown.workspace = true +zksync_config.workspace = true zksync_consensus_roles.workspace = true zksync_consensus_crypto.workspace = true +zksync_protobuf.workspace = true +zksync_protobuf_config.workspace = true zksync_types.workspace = true zksync_web3_decl.workspace = true -secrecy.workspace = true +zksync_system_constants.workspace = true +prost.workspace = true +reqwest = "0.12.8" + +[dev-dependencies] +rand.workspace = true +zksync_consensus_utils.workspace = true [build-dependencies] -eyre.workspace = true +anyhow.workspace = true +clap_complete.workspace = true +dirs.workspace = true ethers.workspace = true +xshell.workspace = true +zksync_protobuf_build.workspace = true diff --git a/zk_toolbox/crates/zk_inception/README.md b/zkstack_cli/crates/zkstack/README.md similarity index 89% rename from zk_toolbox/crates/zk_inception/README.md rename to zkstack_cli/crates/zkstack/README.md index 904b1421e3a..f352d96fec4 100644 --- a/zk_toolbox/crates/zk_inception/README.md +++ b/zkstack_cli/crates/zkstack/README.md @@ -17,8 +17,12 @@ This document contains the help content for the `zk_inception` command-line prog - [`zk_inception chain initialize-bridges`↴](#zk_inception-chain-initialize-bridges) - [`zk_inception chain deploy-l2-contracts`↴](#zk_inception-chain-deploy-l2-contracts) - [`zk_inception chain upgrader`↴](#zk_inception-chain-upgrader) +- [`zk_inception chain deploy-consensus-registry`↴](#zk_inception-chain-deploy-consensus-registry) +- [`zk_inception chain deploy-multicall3`↴](#zk_inception-chain-deploy-multicall3) - [`zk_inception chain deploy-paymaster`↴](#zk_inception-chain-deploy-paymaster) - [`zk_inception chain update-token-multiplier-setter`↴](#zk_inception-chain-update-token-multiplier-setter) +- [`zk_inception consensus set-attester-committee`↴](#zk_inception-consensus-set-attester-committee) +- [`zk_inception consensus get-attester-committee`↴](#zk_inception-consensus-get-attester-committee) - [`zk_inception prover`↴](#zk_inception-prover) - [`zk_inception prover init`↴](#zk_inception-prover-init) - [`zk_inception prover setup-keys`↴](#zk_inception-prover-setup-keys) @@ -38,7 +42,7 @@ This document contains the help content for the `zk_inception` command-line prog ## `zk_inception` -ZK Toolbox is a set of tools for working with zk stack. +ZK Stack CLI is a set of tools for working with zk stack. **Usage:** `zk_inception [OPTIONS] ` @@ -364,6 +368,18 @@ Deploy Default Upgrader e.g.: `zk_inception init -a --private-key=` +## `zk_inception chain deploy-consensus-registry` + +Deploy Consensus Registry smart contract + +**Usage:** `zk_inception chain deploy-consensus-registry` + +## `zk_inception chain deploy-multicall3` + +Deploy Multicall3 smart contract + +**Usage:** `zk_inception chain deploy-multicall3` + ## `zk_inception chain deploy-paymaster` Deploy paymaster smart contract @@ -414,6 +430,47 @@ Update Token Multiplier Setter address on L1 e.g.: `zk_inception init -a --private-key=` +## `zk_inception consensus` + +Consensus related commands + +**Usage:** `zk_inception consensus ` + +###### **Subcommands:** + +- `set-attester-committee` — Set attester committee +- `get-attester-committee` — Get attester committee + +## `zk_inception consensus set-attester-committee` + +Set attester committee in the consensus registry smart contract. Requires `consensus_registry` and `multicall3` +contracts to be deployed. + +**Usage:** `zk_inception consensus set-attester-committee [OPTIONS]` + +###### **Options:** + +- `--from-genesis` — Set attester committee to `consensus.genesis_spec.attesters` in general.yaml. Mutually exclusive + with `--from-file`. +- `--from-file ` — Set attester committee to committee specified in yaml file at `PATH`. + Mutually exclusive with `--from-genesis`. File format is specified in + `zk_inception/src/commands/consensus/proto/mod.proto`.
Example: + + ```yaml + attesters: + - key: attester:public:secp256k1:0339d4b0cdd9896d3929631a4e5e9a5b4919f52592bec571d70bb0e50a3a824714 + weight: 1 + - key: attester:public:secp256k1:024897d8c10d7a57d108cfe2a724d7824c657f219ef5d9f7674810a6746c19fa7b + weight: 1 + ``` + +## `zk_inception consensus get-attester-committee` + +Requires `consensus_registry` and `multicall3` contracts to be deployed. Fetches attester committee from the consensus +registry contract and prints it. + +**Usage:** `zk_inception consensus get-attester-committee` + ## `zk_inception prover` Prover related commands @@ -423,7 +480,6 @@ Prover related commands ###### **Subcommands:** - `init` — Initialize prover -- `generate-sk` — Generate setup keys - `run` — Run prover - `init-bellman-cuda` — Initialize bellman-cuda @@ -452,7 +508,11 @@ Initialize prover - `--public-location ` - `--public-project-id ` - `--bellman-cuda-dir ` -- `--download-key ` +- `--bellman-cuda` + + Possible values: `true`, `false` + +- `--setup-compressor-key ` Possible values: `true`, `false` @@ -508,6 +568,10 @@ Run prover Possible values: `true`, `false` +- `--tag` - Tag of the Docker image to run. + + Default value is `latest2.0`, but you can specify your preferred one. + - `--round ` Possible values: `all-rounds`, `basic-circuits`, `leaf-aggregation`, `node-aggregation`, `recursion-tip`, `scheduler` diff --git a/zk_toolbox/crates/zk_inception/abi/ConsensusRegistry.json b/zkstack_cli/crates/zkstack/abi/ConsensusRegistry.json similarity index 100% rename from zk_toolbox/crates/zk_inception/abi/ConsensusRegistry.json rename to zkstack_cli/crates/zkstack/abi/ConsensusRegistry.json diff --git a/zkstack_cli/crates/zkstack/build.rs b/zkstack_cli/crates/zkstack/build.rs new file mode 100644 index 00000000000..e52e952bf73 --- /dev/null +++ b/zkstack_cli/crates/zkstack/build.rs @@ -0,0 +1,149 @@ +use std::path::{Path, PathBuf}; + +use anyhow::{anyhow, Context}; +use ethers::contract::Abigen; +use xshell::{cmd, Shell}; + +const COMPLETION_DIR: &str = "completion"; + +fn main() -> anyhow::Result<()> { + let outdir = PathBuf::from(std::env::var("OUT_DIR")?).canonicalize()?; + Abigen::new("ConsensusRegistry", "abi/ConsensusRegistry.json") + .map_err(|_| anyhow!("Failed ABI deserialization"))? + .generate() + .map_err(|_| anyhow!("Failed ABI generation"))? + .write_to_file(outdir.join("consensus_registry_abi.rs")) + .context("Failed to write ABI to file")?; + + if let Err(e) = build_dependencies() { + println!("cargo:error=It was not possible to install the project's dependencies"); + println!("cargo:error={}", e); + } + + if let Err(e) = configure_shell_autocompletion() { + println!("cargo:warning=It was not possible to install autocomplete scripts. Please generate them manually with `zkstack autocomplete`"); + println!("cargo:error={}", e); + }; + + zksync_protobuf_build::Config { + input_root: "src/commands/consensus/proto".into(), + proto_root: "zksync/toolbox/consensus".into(), + dependencies: vec!["::zksync_protobuf_config::proto".parse().unwrap()], + protobuf_crate: "::zksync_protobuf".parse().unwrap(), + is_public: false, + } + .generate() + .unwrap(); + Ok(()) +} + +fn configure_shell_autocompletion() -> anyhow::Result<()> { + // Array of supported shells + let shells = [ + clap_complete::Shell::Bash, + clap_complete::Shell::Fish, + clap_complete::Shell::Zsh, + ]; + + for shell in shells { + std::fs::create_dir_all(&shell.autocomplete_folder()?)
+ .context("it was impossible to create the configuration directory")?; + + let src = Path::new(COMPLETION_DIR).join(shell.autocomplete_file_name()?); + let dst = shell + .autocomplete_folder()? + .join(shell.autocomplete_file_name()?); + + std::fs::copy(src, dst)?; + + shell + .configure_autocomplete() + .context("failed to run extra configuration requirements")?; + } + + Ok(()) +} + +pub trait ShellAutocomplete { + fn autocomplete_folder(&self) -> anyhow::Result; + fn autocomplete_file_name(&self) -> anyhow::Result; + /// Extra steps required for shells enable command autocomplete. + fn configure_autocomplete(&self) -> anyhow::Result<()>; +} + +impl ShellAutocomplete for clap_complete::Shell { + fn autocomplete_folder(&self) -> anyhow::Result { + let home_dir = dirs::home_dir().context("missing home folder")?; + + match self { + clap_complete::Shell::Bash => Ok(home_dir.join(".bash_completion.d")), + clap_complete::Shell::Fish => Ok(home_dir.join(".config/fish/completions")), + clap_complete::Shell::Zsh => Ok(home_dir.join(".zsh/completion")), + _ => anyhow::bail!("unsupported shell"), + } + } + + fn autocomplete_file_name(&self) -> anyhow::Result { + let crate_name = env!("CARGO_PKG_NAME"); + + match self { + clap_complete::Shell::Bash => Ok(format!("{}.sh", crate_name)), + clap_complete::Shell::Fish => Ok(format!("{}.fish", crate_name)), + clap_complete::Shell::Zsh => Ok(format!("_{}.zsh", crate_name)), + _ => anyhow::bail!("unsupported shell"), + } + } + + fn configure_autocomplete(&self) -> anyhow::Result<()> { + match self { + clap_complete::Shell::Bash | clap_complete::Shell::Zsh => { + let shell = &self.to_string().to_lowercase(); + let completion_file = self + .autocomplete_folder()? + .join(self.autocomplete_file_name()?); + + // Source the completion file inside .{shell}rc + let shell_rc = dirs::home_dir() + .context("missing home directory")? 
+ .join(format!(".{}rc", shell)); + + if shell_rc.exists() { + let shell_rc_content = std::fs::read_to_string(&shell_rc) + .context(format!("could not read .{}rc", shell))?; + + if !shell_rc_content.contains("# zkstack completion") { + std::fs::write( + shell_rc, + format!( + "{}\n# zkstack completion\nsource \"{}\"\n", + shell_rc_content, + completion_file.to_str().unwrap() + ), + ) + .context(format!("could not write .{}rc", shell))?; + } + } else { + println!( + "cargo:warning=Please add the following line to your .{}rc:", + shell + ); + println!("cargo:warning=source {}", completion_file.to_str().unwrap()); + } + } + _ => (), + } + + Ok(()) + } +} + +fn build_dependencies() -> anyhow::Result<()> { + let shell = Shell::new()?; + let code_dir = Path::new("../"); + + let _dir_guard = shell.push_dir(code_dir); + + cmd!(shell, "yarn install") + .run() + .context("Failed to install dependencies") +} diff --git a/zkstack_cli/crates/zkstack/completion/_zkstack.zsh b/zkstack_cli/crates/zkstack/completion/_zkstack.zsh new file mode 100644 index 00000000000..3ea3980e68f --- /dev/null +++ b/zkstack_cli/crates/zkstack/completion/_zkstack.zsh @@ -0,0 +1,5151 @@ +#compdef zkstack + +autoload -U is-at-least + +_zkstack() { + typeset -A opt_args + typeset -a _arguments_options + local ret=1 + + if is-at-least 5.2; then + _arguments_options=(-s -S -C) + else + _arguments_options=(-s -C) + fi + + local context curcontext="$curcontext" state line + _arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +'-V[Print version]' \ +'--version[Print version]' \ +":: :_zkstack_commands" \ +"*::: :->zkstack" \ +&& ret=0 + case $state in + (zkstack) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-command-$line[1]:" + case $line[1] in + (autocomplete) +_arguments "${_arguments_options[@]}" : \ +'--generate=[The shell to generate the autocomplete script for]:GENERATOR:(bash elvish fish powershell zsh)' \ +'-o+[The out directory to write the autocomplete script to]:OUT:_files' \ +'--out=[The out directory to write the autocomplete script to]:OUT:_files' \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(ecosystem) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +":: :_zkstack__ecosystem_commands" \ +"*::: :->ecosystem" \ +&& ret=0 + + case $state in + (ecosystem) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-ecosystem-command-$line[1]:" + case $line[1] in + (create) +_arguments "${_arguments_options[@]}" : \ +'--ecosystem-name=[]:ECOSYSTEM_NAME: ' \ +'--l1-network=[L1 Network]:L1_NETWORK:(localhost sepolia holesky mainnet)' \ +'--link-to-code=[Code link]:LINK_TO_CODE:_files -/' \ +'--chain-name=[]:CHAIN_NAME: ' \ +'--chain-id=[Chain ID]:CHAIN_ID: ' \ +'--prover-mode=[Prover options]:PROVER_MODE:(no-proofs gpu)' \ +'--wallet-creation=[Wallet options]:WALLET_CREATION:((localhost\:"Load wallets from localhost mnemonic, they are funded for localhost env" +random\:"Generate random wallets" +empty\:"Generate placeholder 
wallets" +in-file\:"Specify file with wallets"))' \ +'--wallet-path=[Wallet path]:WALLET_PATH:_files' \ +'--l1-batch-commit-data-generator-mode=[Commit data generation mode]:L1_BATCH_COMMIT_DATA_GENERATOR_MODE:(rollup validium)' \ +'--base-token-address=[Base token address]:BASE_TOKEN_ADDRESS: ' \ +'--base-token-price-nominator=[Base token nominator]:BASE_TOKEN_PRICE_NOMINATOR: ' \ +'--base-token-price-denominator=[Base token denominator]:BASE_TOKEN_PRICE_DENOMINATOR: ' \ +'--set-as-default=[Set as default chain]' \ +'--evm-emulator=[Enable EVM emulator]' \ +'--start-containers=[Start reth and postgres containers after creation]' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--legacy-bridge[]' \ +'--skip-submodules-checkout[Skip submodules checkout]' \ +'--skip-contract-compilation-override[Skip contract compilation override]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(build-transactions) +_arguments "${_arguments_options[@]}" : \ +'--sender=[Address of the transaction sender]:SENDER: ' \ +'--l1-rpc-url=[L1 RPC URL]:L1_RPC_URL: ' \ +'-o+[Output directory for the generated files]:OUT:_files' \ +'--out=[Output directory for the generated files]:OUT:_files' \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--resume[]' \ +'--zksync[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(init) +_arguments "${_arguments_options[@]}" : \ +'--deploy-erc20=[Deploy ERC20 contracts]' \ +'--deploy-ecosystem=[Deploy ecosystem contracts]' \ +'--ecosystem-contracts-path=[Path to ecosystem contracts]:ECOSYSTEM_CONTRACTS_PATH:_files' \ +'--l1-rpc-url=[L1 RPC URL]:L1_RPC_URL: ' \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'--deploy-paymaster=[Deploy Paymaster contract]' \ +'--server-db-url=[Server database url without database name]:SERVER_DB_URL: ' \ +'--server-db-name=[Server database name]:SERVER_DB_NAME: ' \ +'-o+[Enable Grafana]' \ +'--observability=[Enable Grafana]' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--resume[]' \ +'--zksync[]' \ +'-d[]' \ +'--dont-drop[]' \ +'--ecosystem-only[Initialize ecosystem only and skip chain initialization (chain can be initialized later with \`chain init\` subcommand)]' \ +'--dev[Use defaults for all options and flags. 
Suitable for local development]' \ +'--no-port-reallocation[Do not reallocate ports]' \ +'--skip-submodules-checkout[Skip submodules checkout]' \ +'--skip-contract-compilation-override[Skip contract compilation override]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(change-default-chain) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +'::name:' \ +&& ret=0 +;; +(setup-observability) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__ecosystem__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-ecosystem-help-command-$line[1]:" + case $line[1] in + (create) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(build-transactions) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(init) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(change-default-chain) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(setup-observability) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(chain) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +":: :_zkstack__chain_commands" \ +"*::: :->chain" \ +&& ret=0 + + case $state in + (chain) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-chain-command-$line[1]:" + case $line[1] in + (create) +_arguments "${_arguments_options[@]}" : \ +'--chain-name=[]:CHAIN_NAME: ' \ +'--chain-id=[Chain ID]:CHAIN_ID: ' \ +'--prover-mode=[Prover options]:PROVER_MODE:(no-proofs gpu)' \ +'--wallet-creation=[Wallet options]:WALLET_CREATION:((localhost\:"Load wallets from localhost mnemonic, they are funded for localhost env" +random\:"Generate random wallets" +empty\:"Generate placeholder wallets" +in-file\:"Specify file with wallets"))' \ +'--wallet-path=[Wallet path]:WALLET_PATH:_files' \ +'--l1-batch-commit-data-generator-mode=[Commit data generation mode]:L1_BATCH_COMMIT_DATA_GENERATOR_MODE:(rollup validium)' \ +'--base-token-address=[Base token address]:BASE_TOKEN_ADDRESS: ' \ +'--base-token-price-nominator=[Base token nominator]:BASE_TOKEN_PRICE_NOMINATOR: ' \ +'--base-token-price-denominator=[Base token denominator]:BASE_TOKEN_PRICE_DENOMINATOR: ' \ +'--set-as-default=[Set as default chain]' \ +'--evm-emulator=[Enable EVM emulator]' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--legacy-bridge[]' \ +'--skip-submodules-checkout[Skip submodules checkout]' \ +'--skip-contract-compilation-override[Skip contract compilation override]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with 
'\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(build-transactions) +_arguments "${_arguments_options[@]}" : \ +'-o+[Output directory for the generated files]:OUT:_files' \ +'--out=[Output directory for the generated files]:OUT:_files' \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'--l1-rpc-url=[L1 RPC URL]:L1_RPC_URL: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--resume[]' \ +'--zksync[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(init) +_arguments "${_arguments_options[@]}" : \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'--server-db-url=[Server database url without database name]:SERVER_DB_URL: ' \ +'--server-db-name=[Server database name]:SERVER_DB_NAME: ' \ +'--deploy-paymaster=[]' \ +'--l1-rpc-url=[L1 RPC URL]:L1_RPC_URL: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--resume[]' \ +'--zksync[]' \ +'-d[]' \ +'--dont-drop[]' \ +'--no-port-reallocation[Do not reallocate ports]' \ +'--dev[Use defaults for all options and flags. 
Suitable for local development]' \ +'--skip-submodules-checkout[Skip submodules checkout]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +":: :_zkstack__chain__init_commands" \ +"*::: :->init" \ +&& ret=0 + + case $state in + (init) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-chain-init-command-$line[1]:" + case $line[1] in + (configs) +_arguments "${_arguments_options[@]}" : \ +'--server-db-url=[Server database url without database name]:SERVER_DB_URL: ' \ +'--server-db-name=[Server database name]:SERVER_DB_NAME: ' \ +'--l1-rpc-url=[L1 RPC URL]:L1_RPC_URL: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'-d[Use default database urls and names]' \ +'--dev[Use default database urls and names]' \ +'-d[]' \ +'--dont-drop[]' \ +'--no-port-reallocation[Do not reallocate ports]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__chain__init__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-chain-init-help-command-$line[1]:" + case $line[1] in + (configs) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(genesis) +_arguments "${_arguments_options[@]}" : \ +'--server-db-url=[Server database url without database name]:SERVER_DB_URL: ' \ +'--server-db-name=[Server database name]:SERVER_DB_NAME: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'-d[Use default database urls and names]' \ +'--dev[Use default database urls and names]' \ +'-d[]' \ +'--dont-drop[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +":: :_zkstack__chain__genesis_commands" \ +"*::: :->genesis" \ +&& ret=0 + + case $state in + (genesis) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-chain-genesis-command-$line[1]:" + case $line[1] in + (init-database) +_arguments "${_arguments_options[@]}" : \ +'--server-db-url=[Server database url without database name]:SERVER_DB_URL: ' \ +'--server-db-name=[Server database name]:SERVER_DB_NAME: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'-d[Use default database urls and names]' \ +'--dev[Use default database urls and names]' \ +'-d[]' \ +'--dont-drop[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(server) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__chain__genesis__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-chain-genesis-help-command-$line[1]:" + case $line[1] in + (init-database) +_arguments 
"${_arguments_options[@]}" : \ +&& ret=0 +;; +(server) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(register-chain) +_arguments "${_arguments_options[@]}" : \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--resume[]' \ +'--zksync[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(deploy-l2-contracts) +_arguments "${_arguments_options[@]}" : \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--resume[]' \ +'--zksync[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(accept-chain-ownership) +_arguments "${_arguments_options[@]}" : \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--resume[]' \ +'--zksync[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(initialize-bridges) +_arguments "${_arguments_options[@]}" : \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--resume[]' \ +'--zksync[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; 
+(deploy-consensus-registry) +_arguments "${_arguments_options[@]}" : \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--resume[]' \ +'--zksync[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(deploy-multicall3) +_arguments "${_arguments_options[@]}" : \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--resume[]' \ +'--zksync[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(deploy-upgrader) +_arguments "${_arguments_options[@]}" : \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--resume[]' \ +'--zksync[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(deploy-paymaster) +_arguments "${_arguments_options[@]}" : \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--resume[]' \ +'--zksync[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(update-token-multiplier-setter) +_arguments "${_arguments_options[@]}" : \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a 
custom provider]:VERIFIER_URL: ' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--resume[]' \ +'--zksync[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(convert-to-gateway) +_arguments "${_arguments_options[@]}" : \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--resume[]' \ +'--zksync[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(migrate-to-gateway) +_arguments "${_arguments_options[@]}" : \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'--gateway-chain-name=[]:GATEWAY_CHAIN_NAME: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--resume[]' \ +'--zksync[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(migrate-from-gateway) +_arguments "${_arguments_options[@]}" : \ +'--verify=[Verify deployed contracts]' \ +'--verifier=[Verifier to use]:VERIFIER:(etherscan sourcify blockscout oklink)' \ +'--verifier-url=[Verifier URL, if using a custom provider]:VERIFIER_URL: ' \ +'--verifier-api-key=[Verifier API key]:VERIFIER_API_KEY: ' \ +'*-a+[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'*--additional-args=[List of additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \ +'--gateway-chain-name=[]:GATEWAY_CHAIN_NAME: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--resume[]' \ +'--zksync[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help (see more with '\''--help'\'')]' \ +'--help[Print help (see more with '\''--help'\'')]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__chain__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-chain-help-command-$line[1]:" + case $line[1] in + (create) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; 
+(build-transactions)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(init)
+_arguments "${_arguments_options[@]}" : \
+":: :_zkstack__chain__help__init_commands" \
+"*::: :->init" \
+&& ret=0
+
+    case $state in
+    (init)
+        words=($line[1] "${words[@]}")
+        (( CURRENT += 1 ))
+        curcontext="${curcontext%:*:*}:zkstack-chain-help-init-command-$line[1]:"
+        case $line[1] in
+        (configs)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+        esac
+    ;;
+esac
+;;
+(genesis)
+_arguments "${_arguments_options[@]}" : \
+":: :_zkstack__chain__help__genesis_commands" \
+"*::: :->genesis" \
+&& ret=0
+
+    case $state in
+    (genesis)
+        words=($line[1] "${words[@]}")
+        (( CURRENT += 1 ))
+        curcontext="${curcontext%:*:*}:zkstack-chain-help-genesis-command-$line[1]:"
+        case $line[1] in
+        (init-database)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(server)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+        esac
+    ;;
+esac
+;;
+(register-chain)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(deploy-l2-contracts)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(accept-chain-ownership)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(initialize-bridges)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(deploy-consensus-registry)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(deploy-multicall3)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(deploy-upgrader)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(deploy-paymaster)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(update-token-multiplier-setter)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(convert-to-gateway)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(migrate-to-gateway)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(migrate-from-gateway)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(help)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+        esac
+    ;;
+esac
+;;
+        esac
+    ;;
+esac
+;;
+(dev)
+_arguments "${_arguments_options[@]}" : \
+'--chain=[Chain to use]:CHAIN: ' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+":: :_zkstack__dev_commands" \
+"*::: :->dev" \
+&& ret=0
+
+    case $state in
+    (dev)
+        words=($line[1] "${words[@]}")
+        (( CURRENT += 1 ))
+        curcontext="${curcontext%:*:*}:zkstack-dev-command-$line[1]:"
+        case $line[1] in
+        (database)
+_arguments "${_arguments_options[@]}" : \
+'--chain=[Chain to use]:CHAIN: ' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+":: :_zkstack__dev__database_commands" \
+"*::: :->database" \
+&& ret=0
+
+    case $state in
+    (database)
+        words=($line[1] "${words[@]}")
+        (( CURRENT += 1 ))
+        curcontext="${curcontext%:*:*}:zkstack-dev-database-command-$line[1]:"
+        case $line[1] in
+        (check-sqlx-data)
+_arguments "${_arguments_options[@]}" : \
+'-p+[Prover database]' \
+'--prover=[Prover database]' \
+'--prover-url=[URL of the Prover database. If not specified, it is used from the current chain'\''s secrets]:PROVER_URL: ' \
+'-c+[Core database]' \
+'--core=[Core database]' \
+'--core-url=[URL of the Core database. If not specified, it is used from the current chain'\''s secrets.]:CORE_URL: ' \
+'--chain=[Chain to use]:CHAIN: ' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+&& ret=0
+;;
+(drop)
+_arguments "${_arguments_options[@]}" : \
+'-p+[Prover database]' \
+'--prover=[Prover database]' \
+'--prover-url=[URL of the Prover database. If not specified, it is used from the current chain'\''s secrets]:PROVER_URL: ' \
+'-c+[Core database]' \
+'--core=[Core database]' \
+'--core-url=[URL of the Core database. If not specified, it is used from the current chain'\''s secrets.]:CORE_URL: ' \
+'--chain=[Chain to use]:CHAIN: ' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+&& ret=0
+;;
+(migrate)
+_arguments "${_arguments_options[@]}" : \
+'-p+[Prover database]' \
+'--prover=[Prover database]' \
+'--prover-url=[URL of the Prover database. If not specified, it is used from the current chain'\''s secrets]:PROVER_URL: ' \
+'-c+[Core database]' \
+'--core=[Core database]' \
+'--core-url=[URL of the Core database. If not specified, it is used from the current chain'\''s secrets.]:CORE_URL: ' \
+'--chain=[Chain to use]:CHAIN: ' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+&& ret=0
+;;
+(new-migration)
+_arguments "${_arguments_options[@]}" : \
+'--database=[Database to create new migration for]:DATABASE:(prover core)' \
+'--name=[Migration name]:NAME: ' \
+'--chain=[Chain to use]:CHAIN: ' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+&& ret=0
+;;
+(prepare)
+_arguments "${_arguments_options[@]}" : \
+'-p+[Prover database]' \
+'--prover=[Prover database]' \
+'--prover-url=[URL of the Prover database. If not specified, it is used from the current chain'\''s secrets]:PROVER_URL: ' \
+'-c+[Core database]' \
+'--core=[Core database]' \
+'--core-url=[URL of the Core database. If not specified, it is used from the current chain'\''s secrets.]:CORE_URL: ' \
+'--chain=[Chain to use]:CHAIN: ' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+&& ret=0
+;;
+(reset)
+_arguments "${_arguments_options[@]}" : \
+'-p+[Prover database]' \
+'--prover=[Prover database]' \
+'--prover-url=[URL of the Prover database. If not specified, it is used from the current chain'\''s secrets]:PROVER_URL: ' \
+'-c+[Core database]' \
+'--core=[Core database]' \
+'--core-url=[URL of the Core database. If not specified, it is used from the current chain'\''s secrets.]:CORE_URL: ' \
+'--chain=[Chain to use]:CHAIN: ' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+&& ret=0
+;;
+(setup)
+_arguments "${_arguments_options[@]}" : \
+'-p+[Prover database]' \
+'--prover=[Prover database]' \
+'--prover-url=[URL of the Prover database. If not specified, it is used from the current chain'\''s secrets]:PROVER_URL: ' \
+'-c+[Core database]' \
+'--core=[Core database]' \
+'--core-url=[URL of the Core database. If not specified, it is used from the current chain'\''s secrets.]:CORE_URL: ' \
+'--chain=[Chain to use]:CHAIN: ' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+&& ret=0
+;;
+(help)
+_arguments "${_arguments_options[@]}" : \
+":: :_zkstack__dev__database__help_commands" \
+"*::: :->help" \
+&& ret=0
+
+    case $state in
+    (help)
+        words=($line[1] "${words[@]}")
+        (( CURRENT += 1 ))
+        curcontext="${curcontext%:*:*}:zkstack-dev-database-help-command-$line[1]:"
+        case $line[1] in
+        (check-sqlx-data)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(drop)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(migrate)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(new-migration)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(prepare)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(reset)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(setup)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(help)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+        esac
+    ;;
+esac
+;;
+        esac
+    ;;
+esac
+;;
+(test)
+_arguments "${_arguments_options[@]}" : \
+'--chain=[Chain to use]:CHAIN: ' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+":: :_zkstack__dev__test_commands" \
+"*::: :->test" \
+&& ret=0
+
+    case $state in
+    (test)
+        words=($line[1] "${words[@]}")
+        (( CURRENT += 1 ))
+        curcontext="${curcontext%:*:*}:zkstack-dev-test-command-$line[1]:"
+        case $line[1] in
+        (integration)
+_arguments "${_arguments_options[@]}" : \
+'-t+[Run just the tests matching a pattern. Same as the -t flag on jest.]:TEST_PATTERN: ' \
+'--test-pattern=[Run just the tests matching a pattern. Same as the -t flag on jest.]:TEST_PATTERN: ' \
+'--chain=[Chain to use]:CHAIN: ' \
+'-e[Run tests for external node]' \
+'--external-node[Run tests for external node]' \
+'-n[Do not install or build dependencies]' \
+'--no-deps[Do not install or build dependencies]' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+&& ret=0
+;;
+(fees)
+_arguments "${_arguments_options[@]}" : \
+'--chain=[Chain to use]:CHAIN: ' \
+'-n[Do not install or build dependencies]' \
+'--no-deps[Do not install or build dependencies]' \
+'--no-kill[The test will not kill all the nodes during execution]' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+&& ret=0
+;;
+(revert)
+_arguments "${_arguments_options[@]}" : \
+'--chain=[Chain to use]:CHAIN: ' \
+'--enable-consensus[Enable consensus]' \
+'-e[Run tests for external node]' \
+'--external-node[Run tests for external node]' \
+'-n[Do not install or build dependencies]' \
+'--no-deps[Do not install or build dependencies]' \
+'--no-kill[The test will not kill all the nodes during execution]' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+&& ret=0
+;;
+(recovery)
+_arguments "${_arguments_options[@]}" : \
+'--chain=[Chain to use]:CHAIN: ' \
+'-s[Run recovery from a snapshot instead of genesis]' \
+'--snapshot[Run recovery from a snapshot instead of genesis]' \
+'-n[Do not install or build dependencies]' \
+'--no-deps[Do not install or build dependencies]' \
+'--no-kill[The test will not kill all the nodes during execution]' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+&& ret=0
+;;
+(upgrade)
+_arguments "${_arguments_options[@]}" : \
+'--chain=[Chain to use]:CHAIN: ' \
+'-n[Do not install or build dependencies]' \
+'--no-deps[Do not install or build dependencies]' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+&& ret=0
+;;
+(build)
+_arguments "${_arguments_options[@]}" : \
+'--chain=[Chain to use]:CHAIN: ' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+&& ret=0
+;;
+(rust)
+_arguments "${_arguments_options[@]}" : \
+'--options=[Cargo test flags]:OPTIONS: ' \
+'--chain=[Chain to use]:CHAIN: ' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+&& ret=0
+;;
+(l1-contracts)
+_arguments "${_arguments_options[@]}" : \
+'--chain=[Chain to use]:CHAIN: ' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+&& ret=0
+;;
+(prover)
+_arguments "${_arguments_options[@]}" : \
+'--chain=[Chain to use]:CHAIN: ' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+&& ret=0
+;;
+(wallet)
+_arguments "${_arguments_options[@]}" : \
+'--chain=[Chain to use]:CHAIN: ' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+&& ret=0
+;;
+(loadtest)
+_arguments "${_arguments_options[@]}" : \
+'--chain=[Chain to use]:CHAIN: ' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+&& ret=0
+;;
+(help)
+_arguments "${_arguments_options[@]}" : \
+":: :_zkstack__dev__test__help_commands" \
+"*::: :->help" \
+&& ret=0
+
+    case $state in
+    (help)
+        words=($line[1] "${words[@]}")
+        (( CURRENT += 1 ))
+        curcontext="${curcontext%:*:*}:zkstack-dev-test-help-command-$line[1]:"
+        case $line[1] in
+        (integration)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(fees)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(revert)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(recovery)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(upgrade)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(build)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(rust)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(l1-contracts)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(prover)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(wallet)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(loadtest)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(help)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+        esac
+    ;;
+esac
+;;
+        esac
+    ;;
+esac
+;;
+(clean)
+_arguments "${_arguments_options[@]}" : \
+'--chain=[Chain to use]:CHAIN: ' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+":: :_zkstack__dev__clean_commands" \
+"*::: :->clean" \
+&& ret=0
+
+    case $state in
+    (clean)
+        words=($line[1] "${words[@]}")
+        (( CURRENT += 1 ))
+        curcontext="${curcontext%:*:*}:zkstack-dev-clean-command-$line[1]:"
+        case $line[1] in
+        (all)
+_arguments "${_arguments_options[@]}" : \
+'--chain=[Chain to use]:CHAIN: ' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+&& ret=0
+;;
+(containers)
+_arguments "${_arguments_options[@]}" : \
+'--chain=[Chain to use]:CHAIN: ' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+&& ret=0
+;;
+(contracts-cache)
+_arguments "${_arguments_options[@]}" : \
+'--chain=[Chain to use]:CHAIN: ' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+&& ret=0
+;;
+(help)
+_arguments "${_arguments_options[@]}" : \
+":: :_zkstack__dev__clean__help_commands" \
+"*::: :->help" \
+&& ret=0
+
+    case $state in
+    (help)
+        words=($line[1] "${words[@]}")
+        (( CURRENT += 1 ))
+        curcontext="${curcontext%:*:*}:zkstack-dev-clean-help-command-$line[1]:"
+        case $line[1] in
+        (all)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(containers)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(contracts-cache)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(help)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+        esac
+    ;;
+esac
+;;
+        esac
+    ;;
+esac
+;;
+(snapshot)
+_arguments "${_arguments_options[@]}" : \
+'--chain=[Chain to use]:CHAIN: ' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+":: :_zkstack__dev__snapshot_commands" \
+"*::: :->snapshot" \
+&& ret=0
+
+    case $state in
+    (snapshot)
+        words=($line[1] "${words[@]}")
+        (( CURRENT += 1 ))
+        curcontext="${curcontext%:*:*}:zkstack-dev-snapshot-command-$line[1]:"
+        case $line[1] in
+        (create)
+_arguments "${_arguments_options[@]}" : \
+'--chain=[Chain to use]:CHAIN: ' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+&& ret=0
+;;
+(help)
+_arguments "${_arguments_options[@]}" : \
+":: :_zkstack__dev__snapshot__help_commands" \
+"*::: :->help" \
+&& ret=0
+
+    case $state in
+    (help)
+        words=($line[1] "${words[@]}")
+        (( CURRENT += 1 ))
+        curcontext="${curcontext%:*:*}:zkstack-dev-snapshot-help-command-$line[1]:"
+        case $line[1] in
+        (create)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(help)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+        esac
+    ;;
+esac
+;;
+        esac
+    ;;
+esac
+;;
+(lint)
+_arguments "${_arguments_options[@]}" : \
+'*-t+[]:TARGETS:(md sol js ts rs contracts autocompletion)' \
+'*--targets=[]:TARGETS:(md sol js ts rs contracts autocompletion)' \
+'--chain=[Chain to use]:CHAIN: ' \
+'-c[]' \
+'--check[]' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+&& ret=0
+;;
+(fmt)
+_arguments "${_arguments_options[@]}" : \
+'--chain=[Chain to use]:CHAIN: ' \
+'-c[]' \
+'--check[]' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+":: :_zkstack__dev__fmt_commands" \
+"*::: :->fmt" \
+&& ret=0
+
+    case $state in
+    (fmt)
+        words=($line[1] "${words[@]}")
+        (( CURRENT += 1 ))
+        curcontext="${curcontext%:*:*}:zkstack-dev-fmt-command-$line[1]:"
+        case $line[1] in
+        (rustfmt)
+_arguments "${_arguments_options[@]}" : \
+'--chain=[Chain to use]:CHAIN: ' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+&& ret=0
+;;
+(contract)
+_arguments "${_arguments_options[@]}" : \
+'--chain=[Chain to use]:CHAIN: ' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+&& ret=0
+;;
+(prettier)
+_arguments "${_arguments_options[@]}" : \
+'*-t+[]:TARGETS:(md sol js ts rs contracts autocompletion)' \
+'*--targets=[]:TARGETS:(md sol js ts rs contracts autocompletion)' \
+'--chain=[Chain to use]:CHAIN: ' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+&& ret=0
+;;
+(help)
+_arguments "${_arguments_options[@]}" : \
+":: :_zkstack__dev__fmt__help_commands" \
+"*::: :->help" \
+&& ret=0
+
+    case $state in
+    (help)
+        words=($line[1] "${words[@]}")
+        (( CURRENT += 1 ))
+        curcontext="${curcontext%:*:*}:zkstack-dev-fmt-help-command-$line[1]:"
+        case $line[1] in
+        (rustfmt)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(contract)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(prettier)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(help)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+        esac
+    ;;
+esac
+;;
+        esac
+    ;;
+esac
+;;
+(prover)
+_arguments "${_arguments_options[@]}" : \
"${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +":: :_zkstack__dev__prover_commands" \ +"*::: :->prover" \ +&& ret=0 + + case $state in + (prover) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-prover-command-$line[1]:" + case $line[1] in + (info) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(insert-batch) +_arguments "${_arguments_options[@]}" : \ +'--number=[]:NUMBER: ' \ +'--version=[]:VERSION: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--default[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(insert-version) +_arguments "${_arguments_options[@]}" : \ +'--version=[]:VERSION: ' \ +'--snark-wrapper=[]:SNARK_WRAPPER: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'--default[]' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__dev__prover__help_commands" \ +"*::: :->help" \ +&& ret=0 + + case $state in + (help) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-prover-help-command-$line[1]:" + case $line[1] in + (info) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(insert-batch) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(insert-version) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(contracts) +_arguments "${_arguments_options[@]}" : \ +'--l1-contracts=[Build L1 contracts]' \ +'--l1-da-contracts=[Build L1 DA contracts]' \ +'--l2-contracts=[Build L2 contracts]' \ +'--system-contracts=[Build system contracts]' \ +'--test-contracts=[Build test contracts]' \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(config-writer) +_arguments "${_arguments_options[@]}" : \ +'-p+[Path to the config file to override]:PATH: ' \ +'--path=[Path to the config file to override]:PATH: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(send-transactions) +_arguments "${_arguments_options[@]}" : \ +'--file=[]:FILE:_files' \ +'--private-key=[]:PRIVATE_KEY: ' \ +'--l1-rpc-url=[]:L1_RPC_URL: ' \ +'--confirmations=[]:CONFIRMATIONS: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +&& ret=0 +;; +(status) +_arguments "${_arguments_options[@]}" : \ +'-u+[URL of the health check endpoint]:URL: ' \ +'--url=[URL of the health check endpoint]:URL: ' \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ 
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+":: :_zkstack__dev__status_commands" \
+"*::: :->status" \
+&& ret=0
+
+    case $state in
+    (status)
+        words=($line[1] "${words[@]}")
+        (( CURRENT += 1 ))
+        curcontext="${curcontext%:*:*}:zkstack-dev-status-command-$line[1]:"
+        case $line[1] in
+        (ports)
+_arguments "${_arguments_options[@]}" : \
+'--chain=[Chain to use]:CHAIN: ' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+&& ret=0
+;;
+(help)
+_arguments "${_arguments_options[@]}" : \
+":: :_zkstack__dev__status__help_commands" \
+"*::: :->help" \
+&& ret=0
+
+    case $state in
+    (help)
+        words=($line[1] "${words[@]}")
+        (( CURRENT += 1 ))
+        curcontext="${curcontext%:*:*}:zkstack-dev-status-help-command-$line[1]:"
+        case $line[1] in
+        (ports)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(help)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+        esac
+    ;;
+esac
+;;
+        esac
+    ;;
+esac
+;;
+(generate-genesis)
+_arguments "${_arguments_options[@]}" : \
+'--chain=[Chain to use]:CHAIN: ' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+&& ret=0
+;;
+(help)
+_arguments "${_arguments_options[@]}" : \
+":: :_zkstack__dev__help_commands" \
+"*::: :->help" \
+&& ret=0
+
+    case $state in
+    (help)
+        words=($line[1] "${words[@]}")
+        (( CURRENT += 1 ))
+        curcontext="${curcontext%:*:*}:zkstack-dev-help-command-$line[1]:"
+        case $line[1] in
+        (database)
+_arguments "${_arguments_options[@]}" : \
+":: :_zkstack__dev__help__database_commands" \
+"*::: :->database" \
+&& ret=0
+
+    case $state in
+    (database)
+        words=($line[1] "${words[@]}")
+        (( CURRENT += 1 ))
+        curcontext="${curcontext%:*:*}:zkstack-dev-help-database-command-$line[1]:"
+        case $line[1] in
+        (check-sqlx-data)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(drop)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(migrate)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(new-migration)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(prepare)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(reset)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(setup)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+        esac
+    ;;
+esac
+;;
+(test)
+_arguments "${_arguments_options[@]}" : \
+":: :_zkstack__dev__help__test_commands" \
+"*::: :->test" \
+&& ret=0
+
+    case $state in
+    (test)
+        words=($line[1] "${words[@]}")
+        (( CURRENT += 1 ))
+        curcontext="${curcontext%:*:*}:zkstack-dev-help-test-command-$line[1]:"
+        case $line[1] in
+        (integration)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(fees)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(revert)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(recovery)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(upgrade)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(build)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(rust)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(l1-contracts)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(prover)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(wallet)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(loadtest)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+        esac
+    ;;
+esac
+;;
+(clean)
+_arguments "${_arguments_options[@]}" : \
"${_arguments_options[@]}" : \ +":: :_zkstack__dev__help__clean_commands" \ +"*::: :->clean" \ +&& ret=0 + + case $state in + (clean) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-help-clean-command-$line[1]:" + case $line[1] in + (all) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(containers) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(contracts-cache) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(snapshot) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__dev__help__snapshot_commands" \ +"*::: :->snapshot" \ +&& ret=0 + + case $state in + (snapshot) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-help-snapshot-command-$line[1]:" + case $line[1] in + (create) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(lint) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(fmt) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__dev__help__fmt_commands" \ +"*::: :->fmt" \ +&& ret=0 + + case $state in + (fmt) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-help-fmt-command-$line[1]:" + case $line[1] in + (rustfmt) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(contract) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(prettier) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(prover) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__dev__help__prover_commands" \ +"*::: :->prover" \ +&& ret=0 + + case $state in + (prover) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-help-prover-command-$line[1]:" + case $line[1] in + (info) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(insert-batch) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(insert-version) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(contracts) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(config-writer) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(send-transactions) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(status) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__dev__help__status_commands" \ +"*::: :->status" \ +&& ret=0 + + case $state in + (status) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-dev-help-status-command-$line[1]:" + case $line[1] in + (ports) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(generate-genesis) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +;; +(prover) +_arguments "${_arguments_options[@]}" : \ +'--chain=[Chain to use]:CHAIN: ' \ +'-v[Verbose mode]' \ +'--verbose[Verbose mode]' \ +'--ignore-prerequisites[Ignores prerequisites checks]' \ +'-h[Print help]' \ +'--help[Print help]' \ +":: :_zkstack__prover_commands" \ +"*::: :->prover" \ +&& ret=0 + + case $state in + (prover) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-prover-command-$line[1]:" + case $line[1] in + (init) +_arguments "${_arguments_options[@]}" : \ +'--proof-store-dir=[]:PROOF_STORE_DIR: ' \ +'--bucket-base-url=[]:BUCKET_BASE_URL: ' \ +'--credentials-file=[]:CREDENTIALS_FILE: ' \ +'--bucket-name=[]:BUCKET_NAME: 
+'--location=[]:LOCATION: ' \
+'--project-id=[]:PROJECT_ID: ' \
+'--shall-save-to-public-bucket=[]:SHALL_SAVE_TO_PUBLIC_BUCKET:(true false)' \
+'--public-store-dir=[]:PUBLIC_STORE_DIR: ' \
+'--public-bucket-base-url=[]:PUBLIC_BUCKET_BASE_URL: ' \
+'--public-credentials-file=[]:PUBLIC_CREDENTIALS_FILE: ' \
+'--public-bucket-name=[]:PUBLIC_BUCKET_NAME: ' \
+'--public-location=[]:PUBLIC_LOCATION: ' \
+'--public-project-id=[]:PUBLIC_PROJECT_ID: ' \
+'(--clone)--bellman-cuda-dir=[]:BELLMAN_CUDA_DIR: ' \
+'--bellman-cuda=[]' \
+'--setup-compressor-key=[]' \
+'--path=[]:PATH: ' \
+'--region=[]:REGION:(us europe asia)' \
+'--mode=[]:MODE:(download generate)' \
+'--setup-keys=[]' \
+'--setup-database=[]:SETUP_DATABASE:(true false)' \
+'--prover-db-url=[Prover database url without database name]:PROVER_DB_URL: ' \
+'--prover-db-name=[Prover database name]:PROVER_DB_NAME: ' \
+'-u+[Use default database urls and names]:USE_DEFAULT:(true false)' \
+'--use-default=[Use default database urls and names]:USE_DEFAULT:(true false)' \
+'-d+[]:DONT_DROP:(true false)' \
+'--dont-drop=[]:DONT_DROP:(true false)' \
+'--cloud-type=[]:CLOUD_TYPE:(gcp local)' \
+'--chain=[Chain to use]:CHAIN: ' \
+'--dev[]' \
+'(--bellman-cuda-dir)--clone[]' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+&& ret=0
+;;
+(setup-keys)
+_arguments "${_arguments_options[@]}" : \
+'--region=[]:REGION:(us europe asia)' \
+'--mode=[]:MODE:(download generate)' \
+'--chain=[Chain to use]:CHAIN: ' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+&& ret=0
+;;
+(run)
+_arguments "${_arguments_options[@]}" : \
+'--component=[]:COMPONENT:(gateway witness-generator witness-vector-generator prover circuit-prover compressor prover-job-monitor)' \
+'--round=[]:ROUND:(all-rounds basic-circuits leaf-aggregation node-aggregation recursion-tip scheduler)' \
+'--threads=[]:THREADS: ' \
+'--max-allocation=[Memory allocation limit in bytes (for prover component)]:MAX_ALLOCATION: ' \
+'--witness-vector-generator-count=[]:WITNESS_VECTOR_GENERATOR_COUNT: ' \
+'--max-allocation=[]:MAX_ALLOCATION: ' \
+'--docker=[]:DOCKER:(true false)' \
+'--tag=[]:TAG: ' \
+'--chain=[Chain to use]:CHAIN: ' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+&& ret=0
+;;
+(init-bellman-cuda)
+_arguments "${_arguments_options[@]}" : \
+'(--clone)--bellman-cuda-dir=[]:BELLMAN_CUDA_DIR: ' \
+'--chain=[Chain to use]:CHAIN: ' \
+'(--bellman-cuda-dir)--clone[]' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+&& ret=0
+;;
+(compressor-keys)
+_arguments "${_arguments_options[@]}" : \
+'--path=[]:PATH: ' \
+'--chain=[Chain to use]:CHAIN: ' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+&& ret=0
+;;
+(help)
+_arguments "${_arguments_options[@]}" : \
+":: :_zkstack__prover__help_commands" \
+"*::: :->help" \
+&& ret=0
+
+    case $state in
+    (help)
+        words=($line[1] "${words[@]}")
+        (( CURRENT += 1 ))
+        curcontext="${curcontext%:*:*}:zkstack-prover-help-command-$line[1]:"
+        case $line[1] in
+        (init)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(setup-keys)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(run)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(init-bellman-cuda)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(compressor-keys)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(help)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+        esac
+    ;;
+esac
+;;
+        esac
+    ;;
+esac
+;;
+(server)
+_arguments "${_arguments_options[@]}" : \
+'*--components=[Components of server to run]:COMPONENTS: ' \
+'*-a+[Additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \
+'*--additional-args=[Additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \
+'--chain=[Chain to use]:CHAIN: ' \
+'--genesis[Run server in genesis mode]' \
+'--build[Build server but don'\''t run it]' \
+'--uring[Enables uring support for RocksDB]' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+&& ret=0
+;;
+(external-node)
+_arguments "${_arguments_options[@]}" : \
+'--chain=[Chain to use]:CHAIN: ' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+":: :_zkstack__external-node_commands" \
+"*::: :->external-node" \
+&& ret=0
+
+    case $state in
+    (external-node)
+        words=($line[1] "${words[@]}")
+        (( CURRENT += 1 ))
+        curcontext="${curcontext%:*:*}:zkstack-external-node-command-$line[1]:"
+        case $line[1] in
+        (configs)
+_arguments "${_arguments_options[@]}" : \
+'--db-url=[]:DB_URL: ' \
+'--db-name=[]:DB_NAME: ' \
+'--l1-rpc-url=[]:L1_RPC_URL: ' \
+'--chain=[Chain to use]:CHAIN: ' \
+'-u[Use default database urls and names]' \
+'--use-default[Use default database urls and names]' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+&& ret=0
+;;
+(init)
+_arguments "${_arguments_options[@]}" : \
+'--chain=[Chain to use]:CHAIN: ' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+&& ret=0
+;;
+(run)
+_arguments "${_arguments_options[@]}" : \
+'*--components=[Components of server to run]:COMPONENTS: ' \
+'--enable-consensus=[Enable consensus]' \
+'*-a+[Additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \
+'*--additional-args=[Additional arguments that can be passed through the CLI]:ADDITIONAL_ARGS: ' \
+'--chain=[Chain to use]:CHAIN: ' \
+'--reinit[]' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+&& ret=0
+;;
+(help)
+_arguments "${_arguments_options[@]}" : \
+":: :_zkstack__external-node__help_commands" \
+"*::: :->help" \
+&& ret=0
+
+    case $state in
+    (help)
+        words=($line[1] "${words[@]}")
+        (( CURRENT += 1 ))
+        curcontext="${curcontext%:*:*}:zkstack-external-node-help-command-$line[1]:"
+        case $line[1] in
+        (configs)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(init)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(run)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(help)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+        esac
+    ;;
+esac
+;;
+        esac
+    ;;
+esac
+;;
+(containers)
+_arguments "${_arguments_options[@]}" : \
+'-o+[Enable Grafana]' \
+'--observability=[Enable Grafana]' \
+'--chain=[Chain to use]:CHAIN: ' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+&& ret=0
+;;
+(contract-verifier)
+_arguments "${_arguments_options[@]}" : \
+'--chain=[Chain to use]:CHAIN: ' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+":: :_zkstack__contract-verifier_commands" \
+"*::: :->contract-verifier" \
+&& ret=0
+
+    case $state in
+    (contract-verifier)
+        words=($line[1] "${words[@]}")
+        (( CURRENT += 1 ))
+        curcontext="${curcontext%:*:*}:zkstack-contract-verifier-command-$line[1]:"
+        case $line[1] in
+        (run)
+_arguments "${_arguments_options[@]}" : \
+'--chain=[Chain to use]:CHAIN: ' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+&& ret=0
+;;
+(init)
+_arguments "${_arguments_options[@]}" : \
+'--zksolc-version=[Version of zksolc to install]:ZKSOLC_VERSION: ' \
+'--zkvyper-version=[Version of zkvyper to install]:ZKVYPER_VERSION: ' \
+'--solc-version=[Version of solc to install]:SOLC_VERSION: ' \
+'--era-vm-solc-version=[Version of era vm solc to install]:ERA_VM_SOLC_VERSION: ' \
+'--vyper-version=[Version of vyper to install]:VYPER_VERSION: ' \
+'--chain=[Chain to use]:CHAIN: ' \
+'--only[Install only provided compilers]' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+&& ret=0
+;;
+(help)
+_arguments "${_arguments_options[@]}" : \
+":: :_zkstack__contract-verifier__help_commands" \
+"*::: :->help" \
+&& ret=0
+
+    case $state in
+    (help)
+        words=($line[1] "${words[@]}")
+        (( CURRENT += 1 ))
+        curcontext="${curcontext%:*:*}:zkstack-contract-verifier-help-command-$line[1]:"
+        case $line[1] in
+        (run)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(init)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(help)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+        esac
+    ;;
+esac
+;;
+        esac
+    ;;
+esac
+;;
+(portal)
+_arguments "${_arguments_options[@]}" : \
+'--chain=[Chain to use]:CHAIN: ' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+&& ret=0
+;;
+(explorer)
+_arguments "${_arguments_options[@]}" : \
+'--chain=[Chain to use]:CHAIN: ' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+":: :_zkstack__explorer_commands" \
+"*::: :->explorer" \
+&& ret=0
+
+    case $state in
+    (explorer)
+        words=($line[1] "${words[@]}")
+        (( CURRENT += 1 ))
+        curcontext="${curcontext%:*:*}:zkstack-explorer-command-$line[1]:"
+        case $line[1] in
+        (init)
+_arguments "${_arguments_options[@]}" : \
+'--chain=[Chain to use]:CHAIN: ' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+&& ret=0
+;;
+(run-backend)
+_arguments "${_arguments_options[@]}" : \
+'--chain=[Chain to use]:CHAIN: ' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+&& ret=0
+;;
+(run)
+_arguments "${_arguments_options[@]}" : \
+'--chain=[Chain to use]:CHAIN: ' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+&& ret=0
+;;
+(help)
+_arguments "${_arguments_options[@]}" : \
+":: :_zkstack__explorer__help_commands" \
+"*::: :->help" \
+&& ret=0
+
+    case $state in
+    (help)
+        words=($line[1] "${words[@]}")
+        (( CURRENT += 1 ))
+        curcontext="${curcontext%:*:*}:zkstack-explorer-help-command-$line[1]:"
+        case $line[1] in
+        (init)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(run-backend)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(run)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(help)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+        esac
+    ;;
+esac
+;;
+        esac
+    ;;
+esac
+;;
+(consensus)
+_arguments "${_arguments_options[@]}" : \
+'--chain=[Chain to use]:CHAIN: ' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+":: :_zkstack__consensus_commands" \
+"*::: :->consensus" \
+&& ret=0
+
+    case $state in
+    (consensus)
+        words=($line[1] "${words[@]}")
+        (( CURRENT += 1 ))
+        curcontext="${curcontext%:*:*}:zkstack-consensus-command-$line[1]:"
+        case $line[1] in
+        (set-attester-committee)
+_arguments "${_arguments_options[@]}" : \
+'--from-file=[Sets the attester committee in the consensus registry contract to the committee in the yaml file. File format is definied in \`commands/consensus/proto/mod.proto\`]:FROM_FILE:_files' \
+'--chain=[Chain to use]:CHAIN: ' \
+'--from-genesis[Sets the attester committee in the consensus registry contract to \`consensus.genesis_spec.attesters\` in general.yaml]' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+&& ret=0
+;;
+(get-attester-committee)
+_arguments "${_arguments_options[@]}" : \
+'--chain=[Chain to use]:CHAIN: ' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+&& ret=0
+;;
+(help)
+_arguments "${_arguments_options[@]}" : \
+":: :_zkstack__consensus__help_commands" \
+"*::: :->help" \
+&& ret=0
+
+    case $state in
+    (help)
+        words=($line[1] "${words[@]}")
+        (( CURRENT += 1 ))
+        curcontext="${curcontext%:*:*}:zkstack-consensus-help-command-$line[1]:"
+        case $line[1] in
+        (set-attester-committee)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(get-attester-committee)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(help)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+        esac
+    ;;
+esac
+;;
+        esac
+    ;;
+esac
+;;
+(update)
+_arguments "${_arguments_options[@]}" : \
+'--chain=[Chain to use]:CHAIN: ' \
+'-c[Update only the config files]' \
+'--only-config[Update only the config files]' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+&& ret=0
+;;
+(markdown)
+_arguments "${_arguments_options[@]}" : \
+'--chain=[Chain to use]:CHAIN: ' \
+'-v[Verbose mode]' \
+'--verbose[Verbose mode]' \
+'--ignore-prerequisites[Ignores prerequisites checks]' \
+'-h[Print help]' \
+'--help[Print help]' \
+&& ret=0
+;;
+(help)
+_arguments "${_arguments_options[@]}" : \
+":: :_zkstack__help_commands" \
+"*::: :->help" \
+&& ret=0
+
+    case $state in
+    (help)
+        words=($line[1] "${words[@]}")
+        (( CURRENT += 1 ))
+        curcontext="${curcontext%:*:*}:zkstack-help-command-$line[1]:"
+        case $line[1] in
+        (autocomplete)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(ecosystem)
+_arguments "${_arguments_options[@]}" : \
+":: :_zkstack__help__ecosystem_commands" \
+"*::: :->ecosystem" \
+&& ret=0
+
+    case $state in
+    (ecosystem)
+        words=($line[1] "${words[@]}")
+        (( CURRENT += 1 ))
+        curcontext="${curcontext%:*:*}:zkstack-help-ecosystem-command-$line[1]:"
+        case $line[1] in
+        (create)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(build-transactions)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(init)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(change-default-chain)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(setup-observability)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+        esac
+    ;;
+esac
+;;
+(chain)
+_arguments "${_arguments_options[@]}" : \
+":: :_zkstack__help__chain_commands" \
+"*::: :->chain" \
+&& ret=0
+
+    case $state in
+    (chain)
+        words=($line[1] "${words[@]}")
+        (( CURRENT += 1 ))
+        curcontext="${curcontext%:*:*}:zkstack-help-chain-command-$line[1]:"
+        case $line[1] in
+        (create)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(build-transactions)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(init)
+_arguments "${_arguments_options[@]}" : \
+":: :_zkstack__help__chain__init_commands" \
+"*::: :->init" \
+&& ret=0
+
+    case $state in
+    (init)
+        words=($line[1] "${words[@]}")
+        (( CURRENT += 1 ))
+        curcontext="${curcontext%:*:*}:zkstack-help-chain-init-command-$line[1]:"
+        case $line[1] in
+        (configs)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+        esac
+    ;;
+esac
+;;
+(genesis)
+_arguments "${_arguments_options[@]}" : \
+":: :_zkstack__help__chain__genesis_commands" \
+"*::: :->genesis" \
+&& ret=0
+
+    case $state in
+    (genesis)
+        words=($line[1] "${words[@]}")
+        (( CURRENT += 1 ))
+        curcontext="${curcontext%:*:*}:zkstack-help-chain-genesis-command-$line[1]:"
+        case $line[1] in
+        (init-database)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(server)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+        esac
+    ;;
+esac
+;;
+(register-chain)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(deploy-l2-contracts)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(accept-chain-ownership)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(initialize-bridges)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(deploy-consensus-registry)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(deploy-multicall3)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(deploy-upgrader)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(deploy-paymaster)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(update-token-multiplier-setter)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(convert-to-gateway)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(migrate-to-gateway)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(migrate-from-gateway)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+        esac
+    ;;
+esac
+;;
+(dev)
+_arguments "${_arguments_options[@]}" : \
+":: :_zkstack__help__dev_commands" \
+"*::: :->dev" \
+&& ret=0
+
+    case $state in
+    (dev)
+        words=($line[1] "${words[@]}")
+        (( CURRENT += 1 ))
+        curcontext="${curcontext%:*:*}:zkstack-help-dev-command-$line[1]:"
+        case $line[1] in
+        (database)
+_arguments "${_arguments_options[@]}" : \
+":: :_zkstack__help__dev__database_commands" \
+"*::: :->database" \ +&& ret=0 + + case $state in + (database) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-dev-database-command-$line[1]:" + case $line[1] in + (check-sqlx-data) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(drop) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(migrate) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(new-migration) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(prepare) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(reset) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(setup) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(test) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help__dev__test_commands" \ +"*::: :->test" \ +&& ret=0 + + case $state in + (test) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-dev-test-command-$line[1]:" + case $line[1] in + (integration) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(fees) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(revert) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(recovery) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(upgrade) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(build) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(rust) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(l1-contracts) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(prover) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(wallet) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(loadtest) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(clean) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help__dev__clean_commands" \ +"*::: :->clean" \ +&& ret=0 + + case $state in + (clean) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-dev-clean-command-$line[1]:" + case $line[1] in + (all) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(containers) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(contracts-cache) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(snapshot) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help__dev__snapshot_commands" \ +"*::: :->snapshot" \ +&& ret=0 + + case $state in + (snapshot) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-dev-snapshot-command-$line[1]:" + case $line[1] in + (create) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(lint) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(fmt) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help__dev__fmt_commands" \ +"*::: :->fmt" \ +&& ret=0 + + case $state in + (fmt) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-dev-fmt-command-$line[1]:" + case $line[1] in + (rustfmt) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(contract) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(prettier) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(prover) +_arguments "${_arguments_options[@]}" : \ +":: :_zkstack__help__dev__prover_commands" \ +"*::: :->prover" \ +&& ret=0 + + case $state in + (prover) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + 
+        case $line[1] in
+        (info)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(insert-batch)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(insert-version)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+        esac
+    ;;
+esac
+;;
+(contracts)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(config-writer)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(send-transactions)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(status)
+_arguments "${_arguments_options[@]}" : \
+":: :_zkstack__help__dev__status_commands" \
+"*::: :->status" \
+&& ret=0
+
+    case $state in
+    (status)
+        words=($line[1] "${words[@]}")
+        (( CURRENT += 1 ))
+        curcontext="${curcontext%:*:*}:zkstack-help-dev-status-command-$line[1]:"
+        case $line[1] in
+        (ports)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+        esac
+    ;;
+esac
+;;
+(generate-genesis)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+        esac
+    ;;
+esac
+;;
+(prover)
+_arguments "${_arguments_options[@]}" : \
+":: :_zkstack__help__prover_commands" \
+"*::: :->prover" \
+&& ret=0
+
+    case $state in
+    (prover)
+        words=($line[1] "${words[@]}")
+        (( CURRENT += 1 ))
+        curcontext="${curcontext%:*:*}:zkstack-help-prover-command-$line[1]:"
+        case $line[1] in
+        (init)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(setup-keys)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(run)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(init-bellman-cuda)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(compressor-keys)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+        esac
+    ;;
+esac
+;;
+(server)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(external-node)
+_arguments "${_arguments_options[@]}" : \
+":: :_zkstack__help__external-node_commands" \
+"*::: :->external-node" \
+&& ret=0
+
+    case $state in
+    (external-node)
+        words=($line[1] "${words[@]}")
+        (( CURRENT += 1 ))
+        curcontext="${curcontext%:*:*}:zkstack-help-external-node-command-$line[1]:"
+        case $line[1] in
+        (configs)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(init)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(run)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+        esac
+    ;;
+esac
+;;
+(containers)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(contract-verifier)
+_arguments "${_arguments_options[@]}" : \
+":: :_zkstack__help__contract-verifier_commands" \
+"*::: :->contract-verifier" \
+&& ret=0
+
+    case $state in
+    (contract-verifier)
+        words=($line[1] "${words[@]}")
+        (( CURRENT += 1 ))
+        curcontext="${curcontext%:*:*}:zkstack-help-contract-verifier-command-$line[1]:"
+        case $line[1] in
+        (run)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(init)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+        esac
+    ;;
+esac
+;;
+(portal)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(explorer)
+_arguments "${_arguments_options[@]}" : \
+":: :_zkstack__help__explorer_commands" \
+"*::: :->explorer" \
+&& ret=0
+
+    case $state in
+    (explorer)
+        words=($line[1] "${words[@]}")
+        (( CURRENT += 1 ))
+        curcontext="${curcontext%:*:*}:zkstack-help-explorer-command-$line[1]:"
+        case $line[1] in
+        (init)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(run-backend)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+(run)
+_arguments "${_arguments_options[@]}" : \
+&& ret=0
+;;
+        esac
+    ;;
+esac
+;;
+(consensus)
+_arguments "${_arguments_options[@]}" : \
"${_arguments_options[@]}" : \ +":: :_zkstack__help__consensus_commands" \ +"*::: :->consensus" \ +&& ret=0 + + case $state in + (consensus) + words=($line[1] "${words[@]}") + (( CURRENT += 1 )) + curcontext="${curcontext%:*:*}:zkstack-help-consensus-command-$line[1]:" + case $line[1] in + (set-attester-committee) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(get-attester-committee) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; +(update) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(markdown) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; +(help) +_arguments "${_arguments_options[@]}" : \ +&& ret=0 +;; + esac + ;; +esac +;; + esac + ;; +esac +} + +(( $+functions[_zkstack_commands] )) || +_zkstack_commands() { + local commands; commands=( +'autocomplete:Create shell autocompletion files' \ +'ecosystem:Ecosystem related commands' \ +'chain:Chain related commands' \ +'dev:Supervisor related commands' \ +'prover:Prover related commands' \ +'server:Run server' \ +'external-node:External Node related commands' \ +'containers:Run containers for local development' \ +'contract-verifier:Run contract verifier' \ +'portal:Run dapp-portal' \ +'explorer:Run block-explorer' \ +'consensus:Consensus utilities' \ +'update:Update ZKsync' \ +'markdown:Print markdown help' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack commands' commands "$@" +} +(( $+functions[_zkstack__autocomplete_commands] )) || +_zkstack__autocomplete_commands() { + local commands; commands=() + _describe -t commands 'zkstack autocomplete commands' commands "$@" +} +(( $+functions[_zkstack__chain_commands] )) || +_zkstack__chain_commands() { + local commands; commands=( +'create:Create a new chain, setting the necessary configurations for later initialization' \ +'build-transactions:Create unsigned transactions for chain deployment' \ +'init:Initialize chain, deploying necessary contracts and performing on-chain operations' \ +'genesis:Run server genesis' \ +'register-chain:Register a new chain on L1 (executed by L1 governor). This command deploys and configures Governance, ChainAdmin, and DiamondProxy contracts, registers chain with BridgeHub and sets pending admin for DiamondProxy. Note\: After completion, L2 governor can accept ownership by running \`accept-chain-ownership\`' \ +'deploy-l2-contracts:Deploy all L2 contracts (executed by L1 governor)' \ +'accept-chain-ownership:Accept ownership of L2 chain (executed by L2 governor). 
This command should be run after \`register-chain\` to accept ownership of newly created DiamondProxy contract' \ +'initialize-bridges:Initialize bridges on L2' \ +'deploy-consensus-registry:Deploy L2 consensus registry' \ +'deploy-multicall3:Deploy L2 multicall3' \ +'deploy-upgrader:Deploy Default Upgrader' \ +'deploy-paymaster:Deploy paymaster smart contract' \ +'update-token-multiplier-setter:Update Token Multiplier Setter address on L1' \ +'convert-to-gateway:Prepare chain to be an eligible gateway' \ +'migrate-to-gateway:Migrate chain to gateway' \ +'migrate-from-gateway:Migrate chain from gateway' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack chain commands' commands "$@" +} +(( $+functions[_zkstack__chain__accept-chain-ownership_commands] )) || +_zkstack__chain__accept-chain-ownership_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain accept-chain-ownership commands' commands "$@" +} +(( $+functions[_zkstack__chain__build-transactions_commands] )) || +_zkstack__chain__build-transactions_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain build-transactions commands' commands "$@" +} +(( $+functions[_zkstack__chain__convert-to-gateway_commands] )) || +_zkstack__chain__convert-to-gateway_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain convert-to-gateway commands' commands "$@" +} +(( $+functions[_zkstack__chain__create_commands] )) || +_zkstack__chain__create_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain create commands' commands "$@" +} +(( $+functions[_zkstack__chain__deploy-consensus-registry_commands] )) || +_zkstack__chain__deploy-consensus-registry_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain deploy-consensus-registry commands' commands "$@" +} +(( $+functions[_zkstack__chain__deploy-l2-contracts_commands] )) || +_zkstack__chain__deploy-l2-contracts_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain deploy-l2-contracts commands' commands "$@" +} +(( $+functions[_zkstack__chain__deploy-multicall3_commands] )) || +_zkstack__chain__deploy-multicall3_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain deploy-multicall3 commands' commands "$@" +} +(( $+functions[_zkstack__chain__deploy-paymaster_commands] )) || +_zkstack__chain__deploy-paymaster_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain deploy-paymaster commands' commands "$@" +} +(( $+functions[_zkstack__chain__deploy-upgrader_commands] )) || +_zkstack__chain__deploy-upgrader_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain deploy-upgrader commands' commands "$@" +} +(( $+functions[_zkstack__chain__genesis_commands] )) || +_zkstack__chain__genesis_commands() { + local commands; commands=( +'init-database:Initialize databases' \ +'server:Runs server genesis' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack chain genesis commands' commands "$@" +} +(( $+functions[_zkstack__chain__genesis__help_commands] )) || +_zkstack__chain__genesis__help_commands() { + local commands; commands=( +'init-database:Initialize databases' \ +'server:Runs server genesis' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack chain genesis help commands' commands "$@" +} +(( 
$+functions[_zkstack__chain__genesis__help__help_commands] )) || +_zkstack__chain__genesis__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain genesis help help commands' commands "$@" +} +(( $+functions[_zkstack__chain__genesis__help__init-database_commands] )) || +_zkstack__chain__genesis__help__init-database_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain genesis help init-database commands' commands "$@" +} +(( $+functions[_zkstack__chain__genesis__help__server_commands] )) || +_zkstack__chain__genesis__help__server_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain genesis help server commands' commands "$@" +} +(( $+functions[_zkstack__chain__genesis__init-database_commands] )) || +_zkstack__chain__genesis__init-database_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain genesis init-database commands' commands "$@" +} +(( $+functions[_zkstack__chain__genesis__server_commands] )) || +_zkstack__chain__genesis__server_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain genesis server commands' commands "$@" +} +(( $+functions[_zkstack__chain__help_commands] )) || +_zkstack__chain__help_commands() { + local commands; commands=( +'create:Create a new chain, setting the necessary configurations for later initialization' \ +'build-transactions:Create unsigned transactions for chain deployment' \ +'init:Initialize chain, deploying necessary contracts and performing on-chain operations' \ +'genesis:Run server genesis' \ +'register-chain:Register a new chain on L1 (executed by L1 governor). This command deploys and configures Governance, ChainAdmin, and DiamondProxy contracts, registers chain with BridgeHub and sets pending admin for DiamondProxy. Note\: After completion, L2 governor can accept ownership by running \`accept-chain-ownership\`' \ +'deploy-l2-contracts:Deploy all L2 contracts (executed by L1 governor)' \ +'accept-chain-ownership:Accept ownership of L2 chain (executed by L2 governor). 
This command should be run after \`register-chain\` to accept ownership of newly created DiamondProxy contract' \ +'initialize-bridges:Initialize bridges on L2' \ +'deploy-consensus-registry:Deploy L2 consensus registry' \ +'deploy-multicall3:Deploy L2 multicall3' \ +'deploy-upgrader:Deploy Default Upgrader' \ +'deploy-paymaster:Deploy paymaster smart contract' \ +'update-token-multiplier-setter:Update Token Multiplier Setter address on L1' \ +'convert-to-gateway:Prepare chain to be an eligible gateway' \ +'migrate-to-gateway:Migrate chain to gateway' \ +'migrate-from-gateway:Migrate chain from gateway' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack chain help commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__accept-chain-ownership_commands] )) || +_zkstack__chain__help__accept-chain-ownership_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help accept-chain-ownership commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__build-transactions_commands] )) || +_zkstack__chain__help__build-transactions_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help build-transactions commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__convert-to-gateway_commands] )) || +_zkstack__chain__help__convert-to-gateway_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help convert-to-gateway commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__create_commands] )) || +_zkstack__chain__help__create_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help create commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__deploy-consensus-registry_commands] )) || +_zkstack__chain__help__deploy-consensus-registry_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help deploy-consensus-registry commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__deploy-l2-contracts_commands] )) || +_zkstack__chain__help__deploy-l2-contracts_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help deploy-l2-contracts commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__deploy-multicall3_commands] )) || +_zkstack__chain__help__deploy-multicall3_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help deploy-multicall3 commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__deploy-paymaster_commands] )) || +_zkstack__chain__help__deploy-paymaster_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help deploy-paymaster commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__deploy-upgrader_commands] )) || +_zkstack__chain__help__deploy-upgrader_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help deploy-upgrader commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__genesis_commands] )) || +_zkstack__chain__help__genesis_commands() { + local commands; commands=( +'init-database:Initialize databases' \ +'server:Runs server genesis' \ + ) + _describe -t commands 'zkstack chain help genesis commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__genesis__init-database_commands] )) || +_zkstack__chain__help__genesis__init-database_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help genesis init-database commands' commands "$@" +} +(( 
$+functions[_zkstack__chain__help__genesis__server_commands] )) || +_zkstack__chain__help__genesis__server_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help genesis server commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__help_commands] )) || +_zkstack__chain__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help help commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__init_commands] )) || +_zkstack__chain__help__init_commands() { + local commands; commands=( +'configs:Initialize chain configs' \ + ) + _describe -t commands 'zkstack chain help init commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__init__configs_commands] )) || +_zkstack__chain__help__init__configs_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help init configs commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__initialize-bridges_commands] )) || +_zkstack__chain__help__initialize-bridges_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help initialize-bridges commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__migrate-from-gateway_commands] )) || +_zkstack__chain__help__migrate-from-gateway_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help migrate-from-gateway commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__migrate-to-gateway_commands] )) || +_zkstack__chain__help__migrate-to-gateway_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help migrate-to-gateway commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__register-chain_commands] )) || +_zkstack__chain__help__register-chain_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help register-chain commands' commands "$@" +} +(( $+functions[_zkstack__chain__help__update-token-multiplier-setter_commands] )) || +_zkstack__chain__help__update-token-multiplier-setter_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain help update-token-multiplier-setter commands' commands "$@" +} +(( $+functions[_zkstack__chain__init_commands] )) || +_zkstack__chain__init_commands() { + local commands; commands=( +'configs:Initialize chain configs' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack chain init commands' commands "$@" +} +(( $+functions[_zkstack__chain__init__configs_commands] )) || +_zkstack__chain__init__configs_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain init configs commands' commands "$@" +} +(( $+functions[_zkstack__chain__init__help_commands] )) || +_zkstack__chain__init__help_commands() { + local commands; commands=( +'configs:Initialize chain configs' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack chain init help commands' commands "$@" +} +(( $+functions[_zkstack__chain__init__help__configs_commands] )) || +_zkstack__chain__init__help__configs_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain init help configs commands' commands "$@" +} +(( $+functions[_zkstack__chain__init__help__help_commands] )) || +_zkstack__chain__init__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain init help help commands' commands "$@" +} +(( 
$+functions[_zkstack__chain__initialize-bridges_commands] )) || +_zkstack__chain__initialize-bridges_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain initialize-bridges commands' commands "$@" +} +(( $+functions[_zkstack__chain__migrate-from-gateway_commands] )) || +_zkstack__chain__migrate-from-gateway_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain migrate-from-gateway commands' commands "$@" +} +(( $+functions[_zkstack__chain__migrate-to-gateway_commands] )) || +_zkstack__chain__migrate-to-gateway_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain migrate-to-gateway commands' commands "$@" +} +(( $+functions[_zkstack__chain__register-chain_commands] )) || +_zkstack__chain__register-chain_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain register-chain commands' commands "$@" +} +(( $+functions[_zkstack__chain__update-token-multiplier-setter_commands] )) || +_zkstack__chain__update-token-multiplier-setter_commands() { + local commands; commands=() + _describe -t commands 'zkstack chain update-token-multiplier-setter commands' commands "$@" +} +(( $+functions[_zkstack__consensus_commands] )) || +_zkstack__consensus_commands() { + local commands; commands=( +'set-attester-committee:Sets the attester committee in the consensus registry contract to \`consensus.genesis_spec.attesters\` in general.yaml' \ +'get-attester-committee:Fetches the attester committee from the consensus registry contract' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack consensus commands' commands "$@" +} +(( $+functions[_zkstack__consensus__get-attester-committee_commands] )) || +_zkstack__consensus__get-attester-committee_commands() { + local commands; commands=() + _describe -t commands 'zkstack consensus get-attester-committee commands' commands "$@" +} +(( $+functions[_zkstack__consensus__help_commands] )) || +_zkstack__consensus__help_commands() { + local commands; commands=( +'set-attester-committee:Sets the attester committee in the consensus registry contract to \`consensus.genesis_spec.attesters\` in general.yaml' \ +'get-attester-committee:Fetches the attester committee from the consensus registry contract' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack consensus help commands' commands "$@" +} +(( $+functions[_zkstack__consensus__help__get-attester-committee_commands] )) || +_zkstack__consensus__help__get-attester-committee_commands() { + local commands; commands=() + _describe -t commands 'zkstack consensus help get-attester-committee commands' commands "$@" +} +(( $+functions[_zkstack__consensus__help__help_commands] )) || +_zkstack__consensus__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack consensus help help commands' commands "$@" +} +(( $+functions[_zkstack__consensus__help__set-attester-committee_commands] )) || +_zkstack__consensus__help__set-attester-committee_commands() { + local commands; commands=() + _describe -t commands 'zkstack consensus help set-attester-committee commands' commands "$@" +} +(( $+functions[_zkstack__consensus__set-attester-committee_commands] )) || +_zkstack__consensus__set-attester-committee_commands() { + local commands; commands=() + _describe -t commands 'zkstack consensus set-attester-committee commands' commands "$@" +} +(( $+functions[_zkstack__containers_commands] )) || 
+_zkstack__containers_commands() { + local commands; commands=() + _describe -t commands 'zkstack containers commands' commands "$@" +} +(( $+functions[_zkstack__contract-verifier_commands] )) || +_zkstack__contract-verifier_commands() { + local commands; commands=( +'run:Run contract verifier' \ +'init:Download required binaries for contract verifier' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack contract-verifier commands' commands "$@" +} +(( $+functions[_zkstack__contract-verifier__help_commands] )) || +_zkstack__contract-verifier__help_commands() { + local commands; commands=( +'run:Run contract verifier' \ +'init:Download required binaries for contract verifier' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack contract-verifier help commands' commands "$@" +} +(( $+functions[_zkstack__contract-verifier__help__help_commands] )) || +_zkstack__contract-verifier__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack contract-verifier help help commands' commands "$@" +} +(( $+functions[_zkstack__contract-verifier__help__init_commands] )) || +_zkstack__contract-verifier__help__init_commands() { + local commands; commands=() + _describe -t commands 'zkstack contract-verifier help init commands' commands "$@" +} +(( $+functions[_zkstack__contract-verifier__help__run_commands] )) || +_zkstack__contract-verifier__help__run_commands() { + local commands; commands=() + _describe -t commands 'zkstack contract-verifier help run commands' commands "$@" +} +(( $+functions[_zkstack__contract-verifier__init_commands] )) || +_zkstack__contract-verifier__init_commands() { + local commands; commands=() + _describe -t commands 'zkstack contract-verifier init commands' commands "$@" +} +(( $+functions[_zkstack__contract-verifier__run_commands] )) || +_zkstack__contract-verifier__run_commands() { + local commands; commands=() + _describe -t commands 'zkstack contract-verifier run commands' commands "$@" +} +(( $+functions[_zkstack__dev_commands] )) || +_zkstack__dev_commands() { + local commands; commands=( +'database:Database related commands' \ +'test:Run tests' \ +'clean:Clean artifacts' \ +'snapshot:Snapshots creator' \ +'lint:Lint code' \ +'fmt:Format code' \ +'prover:Protocol version used by provers' \ +'contracts:Build contracts' \ +'config-writer:Overwrite general config' \ +'send-transactions:Send transactions from file' \ +'status:Get status of the server' \ +'generate-genesis:Generate new genesis file based on current contracts' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack dev commands' commands "$@" +} +(( $+functions[_zkstack__dev__clean_commands] )) || +_zkstack__dev__clean_commands() { + local commands; commands=( +'all:Remove containers and contracts cache' \ +'containers:Remove containers and docker volumes' \ +'contracts-cache:Remove contracts caches' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack dev clean commands' commands "$@" +} +(( $+functions[_zkstack__dev__clean__all_commands] )) || +_zkstack__dev__clean__all_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev clean all commands' commands "$@" +} +(( $+functions[_zkstack__dev__clean__containers_commands] )) || +_zkstack__dev__clean__containers_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev clean 
containers commands' commands "$@" +} +(( $+functions[_zkstack__dev__clean__contracts-cache_commands] )) || +_zkstack__dev__clean__contracts-cache_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev clean contracts-cache commands' commands "$@" +} +(( $+functions[_zkstack__dev__clean__help_commands] )) || +_zkstack__dev__clean__help_commands() { + local commands; commands=( +'all:Remove containers and contracts cache' \ +'containers:Remove containers and docker volumes' \ +'contracts-cache:Remove contracts caches' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack dev clean help commands' commands "$@" +} +(( $+functions[_zkstack__dev__clean__help__all_commands] )) || +_zkstack__dev__clean__help__all_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev clean help all commands' commands "$@" +} +(( $+functions[_zkstack__dev__clean__help__containers_commands] )) || +_zkstack__dev__clean__help__containers_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev clean help containers commands' commands "$@" +} +(( $+functions[_zkstack__dev__clean__help__contracts-cache_commands] )) || +_zkstack__dev__clean__help__contracts-cache_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev clean help contracts-cache commands' commands "$@" +} +(( $+functions[_zkstack__dev__clean__help__help_commands] )) || +_zkstack__dev__clean__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev clean help help commands' commands "$@" +} +(( $+functions[_zkstack__dev__config-writer_commands] )) || +_zkstack__dev__config-writer_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev config-writer commands' commands "$@" +} +(( $+functions[_zkstack__dev__contracts_commands] )) || +_zkstack__dev__contracts_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev contracts commands' commands "$@" +} +(( $+functions[_zkstack__dev__database_commands] )) || +_zkstack__dev__database_commands() { + local commands; commands=( +'check-sqlx-data:Check sqlx-data.json is up to date. If no databases are selected, all databases will be checked.' \ +'drop:Drop databases. If no databases are selected, all databases will be dropped.' \ +'migrate:Migrate databases. If no databases are selected, all databases will be migrated.' \ +'new-migration:Create new migration' \ +'prepare:Prepare sqlx-data.json. If no databases are selected, all databases will be prepared.' \ +'reset:Reset databases. If no databases are selected, all databases will be reset.' \ +'setup:Setup databases. If no databases are selected, all databases will be setup.' 
\ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack dev database commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__check-sqlx-data_commands] )) || +_zkstack__dev__database__check-sqlx-data_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev database check-sqlx-data commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__drop_commands] )) || +_zkstack__dev__database__drop_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev database drop commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__help_commands] )) || +_zkstack__dev__database__help_commands() { + local commands; commands=( +'check-sqlx-data:Check sqlx-data.json is up to date. If no databases are selected, all databases will be checked.' \ +'drop:Drop databases. If no databases are selected, all databases will be dropped.' \ +'migrate:Migrate databases. If no databases are selected, all databases will be migrated.' \ +'new-migration:Create new migration' \ +'prepare:Prepare sqlx-data.json. If no databases are selected, all databases will be prepared.' \ +'reset:Reset databases. If no databases are selected, all databases will be reset.' \ +'setup:Setup databases. If no databases are selected, all databases will be setup.' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack dev database help commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__help__check-sqlx-data_commands] )) || +_zkstack__dev__database__help__check-sqlx-data_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev database help check-sqlx-data commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__help__drop_commands] )) || +_zkstack__dev__database__help__drop_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev database help drop commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__help__help_commands] )) || +_zkstack__dev__database__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev database help help commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__help__migrate_commands] )) || +_zkstack__dev__database__help__migrate_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev database help migrate commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__help__new-migration_commands] )) || +_zkstack__dev__database__help__new-migration_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev database help new-migration commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__help__prepare_commands] )) || +_zkstack__dev__database__help__prepare_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev database help prepare commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__help__reset_commands] )) || +_zkstack__dev__database__help__reset_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev database help reset commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__help__setup_commands] )) || +_zkstack__dev__database__help__setup_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev database help setup commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__migrate_commands] )) || +_zkstack__dev__database__migrate_commands() { + 
local commands; commands=() + _describe -t commands 'zkstack dev database migrate commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__new-migration_commands] )) || +_zkstack__dev__database__new-migration_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev database new-migration commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__prepare_commands] )) || +_zkstack__dev__database__prepare_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev database prepare commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__reset_commands] )) || +_zkstack__dev__database__reset_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev database reset commands' commands "$@" +} +(( $+functions[_zkstack__dev__database__setup_commands] )) || +_zkstack__dev__database__setup_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev database setup commands' commands "$@" +} +(( $+functions[_zkstack__dev__fmt_commands] )) || +_zkstack__dev__fmt_commands() { + local commands; commands=( +'rustfmt:' \ +'contract:' \ +'prettier:' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack dev fmt commands' commands "$@" +} +(( $+functions[_zkstack__dev__fmt__contract_commands] )) || +_zkstack__dev__fmt__contract_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev fmt contract commands' commands "$@" +} +(( $+functions[_zkstack__dev__fmt__help_commands] )) || +_zkstack__dev__fmt__help_commands() { + local commands; commands=( +'rustfmt:' \ +'contract:' \ +'prettier:' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack dev fmt help commands' commands "$@" +} +(( $+functions[_zkstack__dev__fmt__help__contract_commands] )) || +_zkstack__dev__fmt__help__contract_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev fmt help contract commands' commands "$@" +} +(( $+functions[_zkstack__dev__fmt__help__help_commands] )) || +_zkstack__dev__fmt__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev fmt help help commands' commands "$@" +} +(( $+functions[_zkstack__dev__fmt__help__prettier_commands] )) || +_zkstack__dev__fmt__help__prettier_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev fmt help prettier commands' commands "$@" +} +(( $+functions[_zkstack__dev__fmt__help__rustfmt_commands] )) || +_zkstack__dev__fmt__help__rustfmt_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev fmt help rustfmt commands' commands "$@" +} +(( $+functions[_zkstack__dev__fmt__prettier_commands] )) || +_zkstack__dev__fmt__prettier_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev fmt prettier commands' commands "$@" +} +(( $+functions[_zkstack__dev__fmt__rustfmt_commands] )) || +_zkstack__dev__fmt__rustfmt_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev fmt rustfmt commands' commands "$@" +} +(( $+functions[_zkstack__dev__generate-genesis_commands] )) || +_zkstack__dev__generate-genesis_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev generate-genesis commands' commands "$@" +} +(( $+functions[_zkstack__dev__help_commands] )) || +_zkstack__dev__help_commands() { + local commands; commands=( +'database:Database related commands' \ +'test:Run tests' \ +'clean:Clean 
artifacts' \ +'snapshot:Snapshots creator' \ +'lint:Lint code' \ +'fmt:Format code' \ +'prover:Protocol version used by provers' \ +'contracts:Build contracts' \ +'config-writer:Overwrite general config' \ +'send-transactions:Send transactions from file' \ +'status:Get status of the server' \ +'generate-genesis:Generate new genesis file based on current contracts' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack dev help commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__clean_commands] )) || +_zkstack__dev__help__clean_commands() { + local commands; commands=( +'all:Remove containers and contracts cache' \ +'containers:Remove containers and docker volumes' \ +'contracts-cache:Remove contracts caches' \ + ) + _describe -t commands 'zkstack dev help clean commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__clean__all_commands] )) || +_zkstack__dev__help__clean__all_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help clean all commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__clean__containers_commands] )) || +_zkstack__dev__help__clean__containers_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help clean containers commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__clean__contracts-cache_commands] )) || +_zkstack__dev__help__clean__contracts-cache_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help clean contracts-cache commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__config-writer_commands] )) || +_zkstack__dev__help__config-writer_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help config-writer commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__contracts_commands] )) || +_zkstack__dev__help__contracts_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help contracts commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__database_commands] )) || +_zkstack__dev__help__database_commands() { + local commands; commands=( +'check-sqlx-data:Check sqlx-data.json is up to date. If no databases are selected, all databases will be checked.' \ +'drop:Drop databases. If no databases are selected, all databases will be dropped.' \ +'migrate:Migrate databases. If no databases are selected, all databases will be migrated.' \ +'new-migration:Create new migration' \ +'prepare:Prepare sqlx-data.json. If no databases are selected, all databases will be prepared.' \ +'reset:Reset databases. If no databases are selected, all databases will be reset.' \ +'setup:Setup databases. If no databases are selected, all databases will be setup.' 
\ + ) + _describe -t commands 'zkstack dev help database commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__database__check-sqlx-data_commands] )) || +_zkstack__dev__help__database__check-sqlx-data_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help database check-sqlx-data commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__database__drop_commands] )) || +_zkstack__dev__help__database__drop_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help database drop commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__database__migrate_commands] )) || +_zkstack__dev__help__database__migrate_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help database migrate commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__database__new-migration_commands] )) || +_zkstack__dev__help__database__new-migration_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help database new-migration commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__database__prepare_commands] )) || +_zkstack__dev__help__database__prepare_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help database prepare commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__database__reset_commands] )) || +_zkstack__dev__help__database__reset_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help database reset commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__database__setup_commands] )) || +_zkstack__dev__help__database__setup_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help database setup commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__fmt_commands] )) || +_zkstack__dev__help__fmt_commands() { + local commands; commands=( +'rustfmt:' \ +'contract:' \ +'prettier:' \ + ) + _describe -t commands 'zkstack dev help fmt commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__fmt__contract_commands] )) || +_zkstack__dev__help__fmt__contract_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help fmt contract commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__fmt__prettier_commands] )) || +_zkstack__dev__help__fmt__prettier_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help fmt prettier commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__fmt__rustfmt_commands] )) || +_zkstack__dev__help__fmt__rustfmt_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help fmt rustfmt commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__generate-genesis_commands] )) || +_zkstack__dev__help__generate-genesis_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help generate-genesis commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__help_commands] )) || +_zkstack__dev__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help help commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__lint_commands] )) || +_zkstack__dev__help__lint_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help lint commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__prover_commands] )) || +_zkstack__dev__help__prover_commands() { + local commands; commands=( +'info:' \ +'insert-batch:' \ +'insert-version:' \ + ) + _describe -t 
commands 'zkstack dev help prover commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__prover__info_commands] )) || +_zkstack__dev__help__prover__info_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help prover info commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__prover__insert-batch_commands] )) || +_zkstack__dev__help__prover__insert-batch_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help prover insert-batch commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__prover__insert-version_commands] )) || +_zkstack__dev__help__prover__insert-version_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help prover insert-version commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__send-transactions_commands] )) || +_zkstack__dev__help__send-transactions_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help send-transactions commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__snapshot_commands] )) || +_zkstack__dev__help__snapshot_commands() { + local commands; commands=( +'create:' \ + ) + _describe -t commands 'zkstack dev help snapshot commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__snapshot__create_commands] )) || +_zkstack__dev__help__snapshot__create_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help snapshot create commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__status_commands] )) || +_zkstack__dev__help__status_commands() { + local commands; commands=( +'ports:Show used ports' \ + ) + _describe -t commands 'zkstack dev help status commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__status__ports_commands] )) || +_zkstack__dev__help__status__ports_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help status ports commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__test_commands] )) || +_zkstack__dev__help__test_commands() { + local commands; commands=( +'integration:Run integration tests' \ +'fees:Run fees test' \ +'revert:Run revert tests' \ +'recovery:Run recovery tests' \ +'upgrade:Run upgrade tests' \ +'build:Build all test dependencies' \ +'rust:Run unit-tests, accepts optional cargo test flags' \ +'l1-contracts:Run L1 contracts tests' \ +'prover:Run prover tests' \ +'wallet:Print test wallets information' \ +'loadtest:Run loadtest' \ + ) + _describe -t commands 'zkstack dev help test commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__test__build_commands] )) || +_zkstack__dev__help__test__build_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help test build commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__test__fees_commands] )) || +_zkstack__dev__help__test__fees_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help test fees commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__test__integration_commands] )) || +_zkstack__dev__help__test__integration_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help test integration commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__test__l1-contracts_commands] )) || +_zkstack__dev__help__test__l1-contracts_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help test l1-contracts commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__test__loadtest_commands] )) 
|| +_zkstack__dev__help__test__loadtest_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help test loadtest commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__test__prover_commands] )) || +_zkstack__dev__help__test__prover_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help test prover commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__test__recovery_commands] )) || +_zkstack__dev__help__test__recovery_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help test recovery commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__test__revert_commands] )) || +_zkstack__dev__help__test__revert_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help test revert commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__test__rust_commands] )) || +_zkstack__dev__help__test__rust_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help test rust commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__test__upgrade_commands] )) || +_zkstack__dev__help__test__upgrade_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help test upgrade commands' commands "$@" +} +(( $+functions[_zkstack__dev__help__test__wallet_commands] )) || +_zkstack__dev__help__test__wallet_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev help test wallet commands' commands "$@" +} +(( $+functions[_zkstack__dev__lint_commands] )) || +_zkstack__dev__lint_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev lint commands' commands "$@" +} +(( $+functions[_zkstack__dev__prover_commands] )) || +_zkstack__dev__prover_commands() { + local commands; commands=( +'info:' \ +'insert-batch:' \ +'insert-version:' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack dev prover commands' commands "$@" +} +(( $+functions[_zkstack__dev__prover__help_commands] )) || +_zkstack__dev__prover__help_commands() { + local commands; commands=( +'info:' \ +'insert-batch:' \ +'insert-version:' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack dev prover help commands' commands "$@" +} +(( $+functions[_zkstack__dev__prover__help__help_commands] )) || +_zkstack__dev__prover__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev prover help help commands' commands "$@" +} +(( $+functions[_zkstack__dev__prover__help__info_commands] )) || +_zkstack__dev__prover__help__info_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev prover help info commands' commands "$@" +} +(( $+functions[_zkstack__dev__prover__help__insert-batch_commands] )) || +_zkstack__dev__prover__help__insert-batch_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev prover help insert-batch commands' commands "$@" +} +(( $+functions[_zkstack__dev__prover__help__insert-version_commands] )) || +_zkstack__dev__prover__help__insert-version_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev prover help insert-version commands' commands "$@" +} +(( $+functions[_zkstack__dev__prover__info_commands] )) || +_zkstack__dev__prover__info_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev prover info commands' commands "$@" +} +(( 
$+functions[_zkstack__dev__prover__insert-batch_commands] )) || +_zkstack__dev__prover__insert-batch_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev prover insert-batch commands' commands "$@" +} +(( $+functions[_zkstack__dev__prover__insert-version_commands] )) || +_zkstack__dev__prover__insert-version_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev prover insert-version commands' commands "$@" +} +(( $+functions[_zkstack__dev__send-transactions_commands] )) || +_zkstack__dev__send-transactions_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev send-transactions commands' commands "$@" +} +(( $+functions[_zkstack__dev__snapshot_commands] )) || +_zkstack__dev__snapshot_commands() { + local commands; commands=( +'create:' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack dev snapshot commands' commands "$@" +} +(( $+functions[_zkstack__dev__snapshot__create_commands] )) || +_zkstack__dev__snapshot__create_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev snapshot create commands' commands "$@" +} +(( $+functions[_zkstack__dev__snapshot__help_commands] )) || +_zkstack__dev__snapshot__help_commands() { + local commands; commands=( +'create:' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack dev snapshot help commands' commands "$@" +} +(( $+functions[_zkstack__dev__snapshot__help__create_commands] )) || +_zkstack__dev__snapshot__help__create_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev snapshot help create commands' commands "$@" +} +(( $+functions[_zkstack__dev__snapshot__help__help_commands] )) || +_zkstack__dev__snapshot__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev snapshot help help commands' commands "$@" +} +(( $+functions[_zkstack__dev__status_commands] )) || +_zkstack__dev__status_commands() { + local commands; commands=( +'ports:Show used ports' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack dev status commands' commands "$@" +} +(( $+functions[_zkstack__dev__status__help_commands] )) || +_zkstack__dev__status__help_commands() { + local commands; commands=( +'ports:Show used ports' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack dev status help commands' commands "$@" +} +(( $+functions[_zkstack__dev__status__help__help_commands] )) || +_zkstack__dev__status__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev status help help commands' commands "$@" +} +(( $+functions[_zkstack__dev__status__help__ports_commands] )) || +_zkstack__dev__status__help__ports_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev status help ports commands' commands "$@" +} +(( $+functions[_zkstack__dev__status__ports_commands] )) || +_zkstack__dev__status__ports_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev status ports commands' commands "$@" +} +(( $+functions[_zkstack__dev__test_commands] )) || +_zkstack__dev__test_commands() { + local commands; commands=( +'integration:Run integration tests' \ +'fees:Run fees test' \ +'revert:Run revert tests' \ +'recovery:Run recovery tests' \ +'upgrade:Run upgrade tests' \ +'build:Build all test dependencies' \ +'rust:Run unit-tests, 
accepts optional cargo test flags' \ +'l1-contracts:Run L1 contracts tests' \ +'prover:Run prover tests' \ +'wallet:Print test wallets information' \ +'loadtest:Run loadtest' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack dev test commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__build_commands] )) || +_zkstack__dev__test__build_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test build commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__fees_commands] )) || +_zkstack__dev__test__fees_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test fees commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__help_commands] )) || +_zkstack__dev__test__help_commands() { + local commands; commands=( +'integration:Run integration tests' \ +'fees:Run fees test' \ +'revert:Run revert tests' \ +'recovery:Run recovery tests' \ +'upgrade:Run upgrade tests' \ +'build:Build all test dependencies' \ +'rust:Run unit-tests, accepts optional cargo test flags' \ +'l1-contracts:Run L1 contracts tests' \ +'prover:Run prover tests' \ +'wallet:Print test wallets information' \ +'loadtest:Run loadtest' \ +'help:Print this message or the help of the given subcommand(s)' \ + ) + _describe -t commands 'zkstack dev test help commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__help__build_commands] )) || +_zkstack__dev__test__help__build_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test help build commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__help__fees_commands] )) || +_zkstack__dev__test__help__fees_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test help fees commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__help__help_commands] )) || +_zkstack__dev__test__help__help_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test help help commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__help__integration_commands] )) || +_zkstack__dev__test__help__integration_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test help integration commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__help__l1-contracts_commands] )) || +_zkstack__dev__test__help__l1-contracts_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test help l1-contracts commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__help__loadtest_commands] )) || +_zkstack__dev__test__help__loadtest_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test help loadtest commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__help__prover_commands] )) || +_zkstack__dev__test__help__prover_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test help prover commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__help__recovery_commands] )) || +_zkstack__dev__test__help__recovery_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test help recovery commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__help__revert_commands] )) || +_zkstack__dev__test__help__revert_commands() { + local commands; commands=() + _describe -t commands 'zkstack dev test help revert commands' commands "$@" +} +(( $+functions[_zkstack__dev__test__help__rust_commands] )) || 
+_zkstack__dev__test__help__rust_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev test help rust commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__test__help__upgrade_commands] )) ||
+_zkstack__dev__test__help__upgrade_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev test help upgrade commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__test__help__wallet_commands] )) ||
+_zkstack__dev__test__help__wallet_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev test help wallet commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__test__integration_commands] )) ||
+_zkstack__dev__test__integration_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev test integration commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__test__l1-contracts_commands] )) ||
+_zkstack__dev__test__l1-contracts_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev test l1-contracts commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__test__loadtest_commands] )) ||
+_zkstack__dev__test__loadtest_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev test loadtest commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__test__prover_commands] )) ||
+_zkstack__dev__test__prover_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev test prover commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__test__recovery_commands] )) ||
+_zkstack__dev__test__recovery_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev test recovery commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__test__revert_commands] )) ||
+_zkstack__dev__test__revert_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev test revert commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__test__rust_commands] )) ||
+_zkstack__dev__test__rust_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev test rust commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__test__upgrade_commands] )) ||
+_zkstack__dev__test__upgrade_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev test upgrade commands' commands "$@"
+}
+(( $+functions[_zkstack__dev__test__wallet_commands] )) ||
+_zkstack__dev__test__wallet_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack dev test wallet commands' commands "$@"
+}
+(( $+functions[_zkstack__ecosystem_commands] )) ||
+_zkstack__ecosystem_commands() {
+    local commands; commands=(
+'create:Create a new ecosystem and chain, setting necessary configurations for later initialization' \
+'build-transactions:Create transactions to build ecosystem contracts' \
+'init:Initialize ecosystem and chain, deploying necessary contracts and performing on-chain operations' \
+'change-default-chain:Change the default chain' \
+'setup-observability:Setup observability for the ecosystem, downloading Grafana dashboards from the era-observability repo' \
+'help:Print this message or the help of the given subcommand(s)' \
+    )
+    _describe -t commands 'zkstack ecosystem commands' commands "$@"
+}
+(( $+functions[_zkstack__ecosystem__build-transactions_commands] )) ||
+_zkstack__ecosystem__build-transactions_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack ecosystem build-transactions commands' commands "$@"
+}
+(( $+functions[_zkstack__ecosystem__change-default-chain_commands] )) ||
+_zkstack__ecosystem__change-default-chain_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack ecosystem change-default-chain commands' commands "$@"
+}
+(( $+functions[_zkstack__ecosystem__create_commands] )) ||
+_zkstack__ecosystem__create_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack ecosystem create commands' commands "$@"
+}
+(( $+functions[_zkstack__ecosystem__help_commands] )) ||
+_zkstack__ecosystem__help_commands() {
+    local commands; commands=(
+'create:Create a new ecosystem and chain, setting necessary configurations for later initialization' \
+'build-transactions:Create transactions to build ecosystem contracts' \
+'init:Initialize ecosystem and chain, deploying necessary contracts and performing on-chain operations' \
+'change-default-chain:Change the default chain' \
+'setup-observability:Setup observability for the ecosystem, downloading Grafana dashboards from the era-observability repo' \
+'help:Print this message or the help of the given subcommand(s)' \
+    )
+    _describe -t commands 'zkstack ecosystem help commands' commands "$@"
+}
+(( $+functions[_zkstack__ecosystem__help__build-transactions_commands] )) ||
+_zkstack__ecosystem__help__build-transactions_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack ecosystem help build-transactions commands' commands "$@"
+}
+(( $+functions[_zkstack__ecosystem__help__change-default-chain_commands] )) ||
+_zkstack__ecosystem__help__change-default-chain_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack ecosystem help change-default-chain commands' commands "$@"
+}
+(( $+functions[_zkstack__ecosystem__help__create_commands] )) ||
+_zkstack__ecosystem__help__create_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack ecosystem help create commands' commands "$@"
+}
+(( $+functions[_zkstack__ecosystem__help__help_commands] )) ||
+_zkstack__ecosystem__help__help_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack ecosystem help help commands' commands "$@"
+}
+(( $+functions[_zkstack__ecosystem__help__init_commands] )) ||
+_zkstack__ecosystem__help__init_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack ecosystem help init commands' commands "$@"
+}
+(( $+functions[_zkstack__ecosystem__help__setup-observability_commands] )) ||
+_zkstack__ecosystem__help__setup-observability_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack ecosystem help setup-observability commands' commands "$@"
+}
+(( $+functions[_zkstack__ecosystem__init_commands] )) ||
+_zkstack__ecosystem__init_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack ecosystem init commands' commands "$@"
+}
+(( $+functions[_zkstack__ecosystem__setup-observability_commands] )) ||
+_zkstack__ecosystem__setup-observability_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack ecosystem setup-observability commands' commands "$@"
+}
+(( $+functions[_zkstack__explorer_commands] )) ||
+_zkstack__explorer_commands() {
+    local commands; commands=(
+'init:Initialize explorer (create database to store explorer data and generate docker compose file with explorer services). Runs for all chains, unless --chain is passed' \
+'run-backend:Start explorer backend services (api, data_fetcher, worker) for a given chain. Uses default chain, unless --chain is passed' \
+'run:Run explorer app' \
+'help:Print this message or the help of the given subcommand(s)' \
+    )
+    _describe -t commands 'zkstack explorer commands' commands "$@"
+}
+(( $+functions[_zkstack__explorer__help_commands] )) ||
+_zkstack__explorer__help_commands() {
+    local commands; commands=(
+'init:Initialize explorer (create database to store explorer data and generate docker compose file with explorer services). Runs for all chains, unless --chain is passed' \
+'run-backend:Start explorer backend services (api, data_fetcher, worker) for a given chain. Uses default chain, unless --chain is passed' \
+'run:Run explorer app' \
+'help:Print this message or the help of the given subcommand(s)' \
+    )
+    _describe -t commands 'zkstack explorer help commands' commands "$@"
+}
+(( $+functions[_zkstack__explorer__help__help_commands] )) ||
+_zkstack__explorer__help__help_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack explorer help help commands' commands "$@"
+}
+(( $+functions[_zkstack__explorer__help__init_commands] )) ||
+_zkstack__explorer__help__init_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack explorer help init commands' commands "$@"
+}
+(( $+functions[_zkstack__explorer__help__run_commands] )) ||
+_zkstack__explorer__help__run_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack explorer help run commands' commands "$@"
+}
+(( $+functions[_zkstack__explorer__help__run-backend_commands] )) ||
+_zkstack__explorer__help__run-backend_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack explorer help run-backend commands' commands "$@"
+}
+(( $+functions[_zkstack__explorer__init_commands] )) ||
+_zkstack__explorer__init_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack explorer init commands' commands "$@"
+}
+(( $+functions[_zkstack__explorer__run_commands] )) ||
+_zkstack__explorer__run_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack explorer run commands' commands "$@"
+}
+(( $+functions[_zkstack__explorer__run-backend_commands] )) ||
+_zkstack__explorer__run-backend_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack explorer run-backend commands' commands "$@"
+}
+(( $+functions[_zkstack__external-node_commands] )) ||
+_zkstack__external-node_commands() {
+    local commands; commands=(
+'configs:Prepare configs for EN' \
+'init:Init databases' \
+'run:Run external node' \
+'help:Print this message or the help of the given subcommand(s)' \
+    )
+    _describe -t commands 'zkstack external-node commands' commands "$@"
+}
+(( $+functions[_zkstack__external-node__configs_commands] )) ||
+_zkstack__external-node__configs_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack external-node configs commands' commands "$@"
+}
+(( $+functions[_zkstack__external-node__help_commands] )) ||
+_zkstack__external-node__help_commands() {
+    local commands; commands=(
+'configs:Prepare configs for EN' \
+'init:Init databases' \
+'run:Run external node' \
+'help:Print this message or the help of the given subcommand(s)' \
+    )
+    _describe -t commands 'zkstack external-node help commands' commands "$@"
+}
+(( $+functions[_zkstack__external-node__help__configs_commands] )) ||
+_zkstack__external-node__help__configs_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack external-node help configs commands' commands "$@"
+}
+(( $+functions[_zkstack__external-node__help__help_commands] )) ||
+_zkstack__external-node__help__help_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack external-node help help commands' commands "$@"
+}
+(( $+functions[_zkstack__external-node__help__init_commands] )) ||
+_zkstack__external-node__help__init_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack external-node help init commands' commands "$@"
+}
+(( $+functions[_zkstack__external-node__help__run_commands] )) ||
+_zkstack__external-node__help__run_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack external-node help run commands' commands "$@"
+}
+(( $+functions[_zkstack__external-node__init_commands] )) ||
+_zkstack__external-node__init_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack external-node init commands' commands "$@"
+}
+(( $+functions[_zkstack__external-node__run_commands] )) ||
+_zkstack__external-node__run_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack external-node run commands' commands "$@"
+}
+(( $+functions[_zkstack__help_commands] )) ||
+_zkstack__help_commands() {
+    local commands; commands=(
+'autocomplete:Create shell autocompletion files' \
+'ecosystem:Ecosystem related commands' \
+'chain:Chain related commands' \
+'dev:Supervisor related commands' \
+'prover:Prover related commands' \
+'server:Run server' \
+'external-node:External Node related commands' \
+'containers:Run containers for local development' \
+'contract-verifier:Run contract verifier' \
+'portal:Run dapp-portal' \
+'explorer:Run block-explorer' \
+'consensus:Consensus utilities' \
+'update:Update ZKsync' \
+'markdown:Print markdown help' \
+'help:Print this message or the help of the given subcommand(s)' \
+    )
+    _describe -t commands 'zkstack help commands' commands "$@"
+}
+(( $+functions[_zkstack__help__autocomplete_commands] )) ||
+_zkstack__help__autocomplete_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help autocomplete commands' commands "$@"
+}
+(( $+functions[_zkstack__help__chain_commands] )) ||
+_zkstack__help__chain_commands() {
+    local commands; commands=(
+'create:Create a new chain, setting the necessary configurations for later initialization' \
+'build-transactions:Create unsigned transactions for chain deployment' \
+'init:Initialize chain, deploying necessary contracts and performing on-chain operations' \
+'genesis:Run server genesis' \
+'register-chain:Register a new chain on L1 (executed by L1 governor). This command deploys and configures Governance, ChainAdmin, and DiamondProxy contracts, registers chain with BridgeHub and sets pending admin for DiamondProxy. Note\: After completion, L2 governor can accept ownership by running \`accept-chain-ownership\`' \
+'deploy-l2-contracts:Deploy all L2 contracts (executed by L1 governor)' \
+'accept-chain-ownership:Accept ownership of L2 chain (executed by L2 governor). This command should be run after \`register-chain\` to accept ownership of newly created DiamondProxy contract' \
+'initialize-bridges:Initialize bridges on L2' \
+'deploy-consensus-registry:Deploy L2 consensus registry' \
+'deploy-multicall3:Deploy L2 multicall3' \
+'deploy-upgrader:Deploy Default Upgrader' \
+'deploy-paymaster:Deploy paymaster smart contract' \
+'update-token-multiplier-setter:Update Token Multiplier Setter address on L1' \
+'convert-to-gateway:Prepare chain to be an eligible gateway' \
+'migrate-to-gateway:Migrate chain to gateway' \
+'migrate-from-gateway:Migrate chain from gateway' \
+    )
+    _describe -t commands 'zkstack help chain commands' commands "$@"
+}
+(( $+functions[_zkstack__help__chain__accept-chain-ownership_commands] )) ||
+_zkstack__help__chain__accept-chain-ownership_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help chain accept-chain-ownership commands' commands "$@"
+}
+(( $+functions[_zkstack__help__chain__build-transactions_commands] )) ||
+_zkstack__help__chain__build-transactions_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help chain build-transactions commands' commands "$@"
+}
+(( $+functions[_zkstack__help__chain__convert-to-gateway_commands] )) ||
+_zkstack__help__chain__convert-to-gateway_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help chain convert-to-gateway commands' commands "$@"
+}
+(( $+functions[_zkstack__help__chain__create_commands] )) ||
+_zkstack__help__chain__create_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help chain create commands' commands "$@"
+}
+(( $+functions[_zkstack__help__chain__deploy-consensus-registry_commands] )) ||
+_zkstack__help__chain__deploy-consensus-registry_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help chain deploy-consensus-registry commands' commands "$@"
+}
+(( $+functions[_zkstack__help__chain__deploy-l2-contracts_commands] )) ||
+_zkstack__help__chain__deploy-l2-contracts_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help chain deploy-l2-contracts commands' commands "$@"
+}
+(( $+functions[_zkstack__help__chain__deploy-multicall3_commands] )) ||
+_zkstack__help__chain__deploy-multicall3_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help chain deploy-multicall3 commands' commands "$@"
+}
+(( $+functions[_zkstack__help__chain__deploy-paymaster_commands] )) ||
+_zkstack__help__chain__deploy-paymaster_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help chain deploy-paymaster commands' commands "$@"
+}
+(( $+functions[_zkstack__help__chain__deploy-upgrader_commands] )) ||
+_zkstack__help__chain__deploy-upgrader_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help chain deploy-upgrader commands' commands "$@"
+}
+(( $+functions[_zkstack__help__chain__genesis_commands] )) ||
+_zkstack__help__chain__genesis_commands() {
+    local commands; commands=(
+'init-database:Initialize databases' \
+'server:Runs server genesis' \
+    )
+    _describe -t commands 'zkstack help chain genesis commands' commands "$@"
+}
+(( $+functions[_zkstack__help__chain__genesis__init-database_commands] )) ||
+_zkstack__help__chain__genesis__init-database_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help chain genesis init-database commands' commands "$@"
+}
+(( $+functions[_zkstack__help__chain__genesis__server_commands] )) ||
+_zkstack__help__chain__genesis__server_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help chain genesis server commands' commands "$@"
+}
+(( $+functions[_zkstack__help__chain__init_commands] )) ||
+_zkstack__help__chain__init_commands() {
+    local commands; commands=(
+'configs:Initialize chain configs' \
+    )
+    _describe -t commands 'zkstack help chain init commands' commands "$@"
+}
+(( $+functions[_zkstack__help__chain__init__configs_commands] )) ||
+_zkstack__help__chain__init__configs_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help chain init configs commands' commands "$@"
+}
+(( $+functions[_zkstack__help__chain__initialize-bridges_commands] )) ||
+_zkstack__help__chain__initialize-bridges_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help chain initialize-bridges commands' commands "$@"
+}
+(( $+functions[_zkstack__help__chain__migrate-from-gateway_commands] )) ||
+_zkstack__help__chain__migrate-from-gateway_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help chain migrate-from-gateway commands' commands "$@"
+}
+(( $+functions[_zkstack__help__chain__migrate-to-gateway_commands] )) ||
+_zkstack__help__chain__migrate-to-gateway_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help chain migrate-to-gateway commands' commands "$@"
+}
+(( $+functions[_zkstack__help__chain__register-chain_commands] )) ||
+_zkstack__help__chain__register-chain_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help chain register-chain commands' commands "$@"
+}
+(( $+functions[_zkstack__help__chain__update-token-multiplier-setter_commands] )) ||
+_zkstack__help__chain__update-token-multiplier-setter_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help chain update-token-multiplier-setter commands' commands "$@"
+}
+(( $+functions[_zkstack__help__consensus_commands] )) ||
+_zkstack__help__consensus_commands() {
+    local commands; commands=(
+'set-attester-committee:Sets the attester committee in the consensus registry contract to \`consensus.genesis_spec.attesters\` in general.yaml' \
+'get-attester-committee:Fetches the attester committee from the consensus registry contract' \
+    )
+    _describe -t commands 'zkstack help consensus commands' commands "$@"
+}
+(( $+functions[_zkstack__help__consensus__get-attester-committee_commands] )) ||
+_zkstack__help__consensus__get-attester-committee_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help consensus get-attester-committee commands' commands "$@"
+}
+(( $+functions[_zkstack__help__consensus__set-attester-committee_commands] )) ||
+_zkstack__help__consensus__set-attester-committee_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help consensus set-attester-committee commands' commands "$@"
+}
+(( $+functions[_zkstack__help__containers_commands] )) ||
+_zkstack__help__containers_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help containers commands' commands "$@"
+}
+(( $+functions[_zkstack__help__contract-verifier_commands] )) ||
+_zkstack__help__contract-verifier_commands() {
+    local commands; commands=(
+'run:Run contract verifier' \
+'init:Download required binaries for contract verifier' \
+    )
+    _describe -t commands 'zkstack help contract-verifier commands' commands "$@"
+}
+(( $+functions[_zkstack__help__contract-verifier__init_commands] )) ||
+_zkstack__help__contract-verifier__init_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help contract-verifier init commands' commands "$@"
+}
+(( $+functions[_zkstack__help__contract-verifier__run_commands] )) ||
+_zkstack__help__contract-verifier__run_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help contract-verifier run commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev_commands] )) ||
+_zkstack__help__dev_commands() {
+    local commands; commands=(
+'database:Database related commands' \
+'test:Run tests' \
+'clean:Clean artifacts' \
+'snapshot:Snapshots creator' \
+'lint:Lint code' \
+'fmt:Format code' \
+'prover:Protocol version used by provers' \
+'contracts:Build contracts' \
+'config-writer:Overwrite general config' \
+'send-transactions:Send transactions from file' \
+'status:Get status of the server' \
+'generate-genesis:Generate new genesis file based on current contracts' \
+    )
+    _describe -t commands 'zkstack help dev commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__clean_commands] )) ||
+_zkstack__help__dev__clean_commands() {
+    local commands; commands=(
+'all:Remove containers and contracts cache' \
+'containers:Remove containers and docker volumes' \
+'contracts-cache:Remove contracts caches' \
+    )
+    _describe -t commands 'zkstack help dev clean commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__clean__all_commands] )) ||
+_zkstack__help__dev__clean__all_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev clean all commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__clean__containers_commands] )) ||
+_zkstack__help__dev__clean__containers_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev clean containers commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__clean__contracts-cache_commands] )) ||
+_zkstack__help__dev__clean__contracts-cache_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev clean contracts-cache commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__config-writer_commands] )) ||
+_zkstack__help__dev__config-writer_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev config-writer commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__contracts_commands] )) ||
+_zkstack__help__dev__contracts_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev contracts commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__database_commands] )) ||
+_zkstack__help__dev__database_commands() {
+    local commands; commands=(
+'check-sqlx-data:Check sqlx-data.json is up to date. If no databases are selected, all databases will be checked.' \
+'drop:Drop databases. If no databases are selected, all databases will be dropped.' \
+'migrate:Migrate databases. If no databases are selected, all databases will be migrated.' \
+'new-migration:Create new migration' \
+'prepare:Prepare sqlx-data.json. If no databases are selected, all databases will be prepared.' \
+'reset:Reset databases. If no databases are selected, all databases will be reset.' \
+'setup:Setup databases. If no databases are selected, all databases will be setup.' \
+    )
+    _describe -t commands 'zkstack help dev database commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__database__check-sqlx-data_commands] )) ||
+_zkstack__help__dev__database__check-sqlx-data_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev database check-sqlx-data commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__database__drop_commands] )) ||
+_zkstack__help__dev__database__drop_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev database drop commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__database__migrate_commands] )) ||
+_zkstack__help__dev__database__migrate_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev database migrate commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__database__new-migration_commands] )) ||
+_zkstack__help__dev__database__new-migration_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev database new-migration commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__database__prepare_commands] )) ||
+_zkstack__help__dev__database__prepare_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev database prepare commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__database__reset_commands] )) ||
+_zkstack__help__dev__database__reset_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev database reset commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__database__setup_commands] )) ||
+_zkstack__help__dev__database__setup_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev database setup commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__fmt_commands] )) ||
+_zkstack__help__dev__fmt_commands() {
+    local commands; commands=(
+'rustfmt:' \
+'contract:' \
+'prettier:' \
+    )
+    _describe -t commands 'zkstack help dev fmt commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__fmt__contract_commands] )) ||
+_zkstack__help__dev__fmt__contract_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev fmt contract commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__fmt__prettier_commands] )) ||
+_zkstack__help__dev__fmt__prettier_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev fmt prettier commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__fmt__rustfmt_commands] )) ||
+_zkstack__help__dev__fmt__rustfmt_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev fmt rustfmt commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__generate-genesis_commands] )) ||
+_zkstack__help__dev__generate-genesis_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev generate-genesis commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__lint_commands] )) ||
+_zkstack__help__dev__lint_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev lint commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__prover_commands] )) ||
+_zkstack__help__dev__prover_commands() {
+    local commands; commands=(
+'info:' \
+'insert-batch:' \
+'insert-version:' \
+    )
+    _describe -t commands 'zkstack help dev prover commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__prover__info_commands] )) ||
+_zkstack__help__dev__prover__info_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev prover info commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__prover__insert-batch_commands] )) ||
+_zkstack__help__dev__prover__insert-batch_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev prover insert-batch commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__prover__insert-version_commands] )) ||
+_zkstack__help__dev__prover__insert-version_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev prover insert-version commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__send-transactions_commands] )) ||
+_zkstack__help__dev__send-transactions_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev send-transactions commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__snapshot_commands] )) ||
+_zkstack__help__dev__snapshot_commands() {
+    local commands; commands=(
+'create:' \
+    )
+    _describe -t commands 'zkstack help dev snapshot commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__snapshot__create_commands] )) ||
+_zkstack__help__dev__snapshot__create_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev snapshot create commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__status_commands] )) ||
+_zkstack__help__dev__status_commands() {
+    local commands; commands=(
+'ports:Show used ports' \
+    )
+    _describe -t commands 'zkstack help dev status commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__status__ports_commands] )) ||
+_zkstack__help__dev__status__ports_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev status ports commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__test_commands] )) ||
+_zkstack__help__dev__test_commands() {
+    local commands; commands=(
+'integration:Run integration tests' \
+'fees:Run fees test' \
+'revert:Run revert tests' \
+'recovery:Run recovery tests' \
+'upgrade:Run upgrade tests' \
+'build:Build all test dependencies' \
+'rust:Run unit-tests, accepts optional cargo test flags' \
+'l1-contracts:Run L1 contracts tests' \
+'prover:Run prover tests' \
+'wallet:Print test wallets information' \
+'loadtest:Run loadtest' \
+    )
+    _describe -t commands 'zkstack help dev test commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__test__build_commands] )) ||
+_zkstack__help__dev__test__build_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev test build commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__test__fees_commands] )) ||
+_zkstack__help__dev__test__fees_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev test fees commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__test__integration_commands] )) ||
+_zkstack__help__dev__test__integration_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev test integration commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__test__l1-contracts_commands] )) ||
+_zkstack__help__dev__test__l1-contracts_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev test l1-contracts commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__test__loadtest_commands] )) ||
+_zkstack__help__dev__test__loadtest_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev test loadtest commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__test__prover_commands] )) ||
+_zkstack__help__dev__test__prover_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev test prover commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__test__recovery_commands] )) ||
+_zkstack__help__dev__test__recovery_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev test recovery commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__test__revert_commands] )) ||
+_zkstack__help__dev__test__revert_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev test revert commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__test__rust_commands] )) ||
+_zkstack__help__dev__test__rust_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev test rust commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__test__upgrade_commands] )) ||
+_zkstack__help__dev__test__upgrade_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev test upgrade commands' commands "$@"
+}
+(( $+functions[_zkstack__help__dev__test__wallet_commands] )) ||
+_zkstack__help__dev__test__wallet_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help dev test wallet commands' commands "$@"
+}
+(( $+functions[_zkstack__help__ecosystem_commands] )) ||
+_zkstack__help__ecosystem_commands() {
+    local commands; commands=(
+'create:Create a new ecosystem and chain, setting necessary configurations for later initialization' \
+'build-transactions:Create transactions to build ecosystem contracts' \
+'init:Initialize ecosystem and chain, deploying necessary contracts and performing on-chain operations' \
+'change-default-chain:Change the default chain' \
+'setup-observability:Setup observability for the ecosystem, downloading Grafana dashboards from the era-observability repo' \
+    )
+    _describe -t commands 'zkstack help ecosystem commands' commands "$@"
+}
+(( $+functions[_zkstack__help__ecosystem__build-transactions_commands] )) ||
+_zkstack__help__ecosystem__build-transactions_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help ecosystem build-transactions commands' commands "$@"
+}
+(( $+functions[_zkstack__help__ecosystem__change-default-chain_commands] )) ||
+_zkstack__help__ecosystem__change-default-chain_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help ecosystem change-default-chain commands' commands "$@"
+}
+(( $+functions[_zkstack__help__ecosystem__create_commands] )) ||
+_zkstack__help__ecosystem__create_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help ecosystem create commands' commands "$@"
+}
+(( $+functions[_zkstack__help__ecosystem__init_commands] )) ||
+_zkstack__help__ecosystem__init_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help ecosystem init commands' commands "$@"
+}
+(( $+functions[_zkstack__help__ecosystem__setup-observability_commands] )) ||
+_zkstack__help__ecosystem__setup-observability_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help ecosystem setup-observability commands' commands "$@"
+}
+(( $+functions[_zkstack__help__explorer_commands] )) ||
+_zkstack__help__explorer_commands() {
+    local commands; commands=(
+'init:Initialize explorer (create database to store explorer data and generate docker compose file with explorer services). Runs for all chains, unless --chain is passed' \
+'run-backend:Start explorer backend services (api, data_fetcher, worker) for a given chain. Uses default chain, unless --chain is passed' \
+'run:Run explorer app' \
+    )
+    _describe -t commands 'zkstack help explorer commands' commands "$@"
+}
+(( $+functions[_zkstack__help__explorer__init_commands] )) ||
+_zkstack__help__explorer__init_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help explorer init commands' commands "$@"
+}
+(( $+functions[_zkstack__help__explorer__run_commands] )) ||
+_zkstack__help__explorer__run_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help explorer run commands' commands "$@"
+}
+(( $+functions[_zkstack__help__explorer__run-backend_commands] )) ||
+_zkstack__help__explorer__run-backend_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help explorer run-backend commands' commands "$@"
+}
+(( $+functions[_zkstack__help__external-node_commands] )) ||
+_zkstack__help__external-node_commands() {
+    local commands; commands=(
+'configs:Prepare configs for EN' \
+'init:Init databases' \
+'run:Run external node' \
+    )
+    _describe -t commands 'zkstack help external-node commands' commands "$@"
+}
+(( $+functions[_zkstack__help__external-node__configs_commands] )) ||
+_zkstack__help__external-node__configs_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help external-node configs commands' commands "$@"
+}
+(( $+functions[_zkstack__help__external-node__init_commands] )) ||
+_zkstack__help__external-node__init_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help external-node init commands' commands "$@"
+}
+(( $+functions[_zkstack__help__external-node__run_commands] )) ||
+_zkstack__help__external-node__run_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help external-node run commands' commands "$@"
+}
+(( $+functions[_zkstack__help__help_commands] )) ||
+_zkstack__help__help_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help help commands' commands "$@"
+}
+(( $+functions[_zkstack__help__markdown_commands] )) ||
+_zkstack__help__markdown_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help markdown commands' commands "$@"
+}
+(( $+functions[_zkstack__help__portal_commands] )) ||
+_zkstack__help__portal_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help portal commands' commands "$@"
+}
+(( $+functions[_zkstack__help__prover_commands] )) ||
+_zkstack__help__prover_commands() {
+    local commands; commands=(
+'init:Initialize prover' \
+'setup-keys:Generate setup keys' \
+'run:Run prover' \
+'init-bellman-cuda:Initialize bellman-cuda' \
+'compressor-keys:Download compressor keys' \
+    )
+    _describe -t commands 'zkstack help prover commands' commands "$@"
+}
+(( $+functions[_zkstack__help__prover__compressor-keys_commands] )) ||
+_zkstack__help__prover__compressor-keys_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help prover compressor-keys commands' commands "$@"
+}
+(( $+functions[_zkstack__help__prover__init_commands] )) ||
+_zkstack__help__prover__init_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help prover init commands' commands "$@"
+}
+(( $+functions[_zkstack__help__prover__init-bellman-cuda_commands] )) ||
+_zkstack__help__prover__init-bellman-cuda_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help prover init-bellman-cuda commands' commands "$@"
+}
+(( $+functions[_zkstack__help__prover__run_commands] )) ||
+_zkstack__help__prover__run_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help prover run commands' commands "$@"
+}
+(( $+functions[_zkstack__help__prover__setup-keys_commands] )) ||
+_zkstack__help__prover__setup-keys_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help prover setup-keys commands' commands "$@"
+}
+(( $+functions[_zkstack__help__server_commands] )) ||
+_zkstack__help__server_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help server commands' commands "$@"
+}
+(( $+functions[_zkstack__help__update_commands] )) ||
+_zkstack__help__update_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack help update commands' commands "$@"
+}
+(( $+functions[_zkstack__markdown_commands] )) ||
+_zkstack__markdown_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack markdown commands' commands "$@"
+}
+(( $+functions[_zkstack__portal_commands] )) ||
+_zkstack__portal_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack portal commands' commands "$@"
+}
+(( $+functions[_zkstack__prover_commands] )) ||
+_zkstack__prover_commands() {
+    local commands; commands=(
+'init:Initialize prover' \
+'setup-keys:Generate setup keys' \
+'run:Run prover' \
+'init-bellman-cuda:Initialize bellman-cuda' \
+'compressor-keys:Download compressor keys' \
+'help:Print this message or the help of the given subcommand(s)' \
+    )
+    _describe -t commands 'zkstack prover commands' commands "$@"
+}
+(( $+functions[_zkstack__prover__compressor-keys_commands] )) ||
+_zkstack__prover__compressor-keys_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack prover compressor-keys commands' commands "$@"
+}
+(( $+functions[_zkstack__prover__help_commands] )) ||
+_zkstack__prover__help_commands() {
+    local commands; commands=(
+'init:Initialize prover' \
+'setup-keys:Generate setup keys' \
+'run:Run prover' \
+'init-bellman-cuda:Initialize bellman-cuda' \
+'compressor-keys:Download compressor keys' \
+'help:Print this message or the help of the given subcommand(s)' \
+    )
+    _describe -t commands 'zkstack prover help commands' commands "$@"
+}
+(( $+functions[_zkstack__prover__help__compressor-keys_commands] )) ||
+_zkstack__prover__help__compressor-keys_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack prover help compressor-keys commands' commands "$@"
+}
+(( $+functions[_zkstack__prover__help__help_commands] )) ||
+_zkstack__prover__help__help_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack prover help help commands' commands "$@"
+}
+(( $+functions[_zkstack__prover__help__init_commands] )) ||
+_zkstack__prover__help__init_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack prover help init commands' commands "$@"
+}
+(( $+functions[_zkstack__prover__help__init-bellman-cuda_commands] )) ||
+_zkstack__prover__help__init-bellman-cuda_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack prover help init-bellman-cuda commands' commands "$@"
+}
+(( $+functions[_zkstack__prover__help__run_commands] )) ||
+_zkstack__prover__help__run_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack prover help run commands' commands "$@"
+}
+(( $+functions[_zkstack__prover__help__setup-keys_commands] )) ||
+_zkstack__prover__help__setup-keys_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack prover help setup-keys commands' commands "$@"
+}
+(( $+functions[_zkstack__prover__init_commands] )) ||
+_zkstack__prover__init_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack prover init commands' commands "$@"
+}
+(( $+functions[_zkstack__prover__init-bellman-cuda_commands] )) ||
+_zkstack__prover__init-bellman-cuda_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack prover init-bellman-cuda commands' commands "$@"
+}
+(( $+functions[_zkstack__prover__run_commands] )) ||
+_zkstack__prover__run_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack prover run commands' commands "$@"
+}
+(( $+functions[_zkstack__prover__setup-keys_commands] )) ||
+_zkstack__prover__setup-keys_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack prover setup-keys commands' commands "$@"
+}
+(( $+functions[_zkstack__server_commands] )) ||
+_zkstack__server_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack server commands' commands "$@"
+}
+(( $+functions[_zkstack__update_commands] )) ||
+_zkstack__update_commands() {
+    local commands; commands=()
+    _describe -t commands 'zkstack update commands' commands "$@"
+}
+
+if [ "$funcstack[1]" = "_zkstack" ]; then
+    _zkstack "$@"
+else
+    compdef _zkstack zkstack
+fi
diff --git a/zkstack_cli/crates/zkstack/completion/zkstack.fish b/zkstack_cli/crates/zkstack/completion/zkstack.fish
new file mode 100644
index 00000000000..9f449192003
--- /dev/null
+++ b/zkstack_cli/crates/zkstack/completion/zkstack.fish
@@ -0,0 +1,767 @@
+# Print an optspec for argparse to handle cmd's options that are independent of any subcommand.
+function __fish_zkstack_global_optspecs
+    string join \n v/verbose chain= ignore-prerequisites h/help V/version
+end
+
+function __fish_zkstack_needs_command
+    # Figure out if the current invocation already has a command.
+    set -l cmd (commandline -opc)
+    set -e cmd[1]
+    argparse -s (__fish_zkstack_global_optspecs) -- $cmd 2>/dev/null
+    or return
+    if set -q argv[1]
+        # Also print the command, so this can be used to figure out what it is.
+        echo $argv[1]
+        return 1
+    end
+    return 0
+end
+
+function __fish_zkstack_using_subcommand
+    set -l cmd (__fish_zkstack_needs_command)
+    test -z "$cmd"
+    and return 1
+    contains -- $cmd[1] $argv
+end
+
+complete -c zkstack -n "__fish_zkstack_needs_command" -l chain -d 'Chain to use' -r
+complete -c zkstack -n "__fish_zkstack_needs_command" -s v -l verbose -d 'Verbose mode'
+complete -c zkstack -n "__fish_zkstack_needs_command" -l ignore-prerequisites -d 'Ignores prerequisites checks'
+complete -c zkstack -n "__fish_zkstack_needs_command" -s h -l help -d 'Print help'
+complete -c zkstack -n "__fish_zkstack_needs_command" -s V -l version -d 'Print version'
+complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "autocomplete" -d 'Create shell autocompletion files'
+complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "ecosystem" -d 'Ecosystem related commands'
+complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "chain" -d 'Chain related commands'
+complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "dev" -d 'Supervisor related commands'
+complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "prover" -d 'Prover related commands'
+complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "server" -d 'Run server'
+complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "external-node" -d 'External Node related commands'
+complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "containers" -d 'Run containers for local development'
+complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "contract-verifier" -d 'Run contract verifier'
+complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "portal" -d 'Run dapp-portal'
+complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "explorer" -d 'Run block-explorer'
+complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "consensus" -d 'Consensus utilities'
+complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "update" -d 'Update ZKsync'
+complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "markdown" -d 'Print markdown help'
+complete -c zkstack -n "__fish_zkstack_needs_command" -f -a "help" -d 'Print this message or the help of the given subcommand(s)'
+complete -c zkstack -n "__fish_zkstack_using_subcommand autocomplete" -l generate -d 'The shell to generate the autocomplete script for' -r -f -a "{bash\t'',elvish\t'',fish\t'',powershell\t'',zsh\t''}"
+complete -c zkstack -n "__fish_zkstack_using_subcommand autocomplete" -s o -l out -d 'The out directory to write the autocomplete script to' -r -F
+complete -c zkstack -n "__fish_zkstack_using_subcommand autocomplete" -l chain -d 'Chain to use' -r
+complete -c zkstack -n "__fish_zkstack_using_subcommand autocomplete" -s v -l verbose -d 'Verbose mode'
+complete -c zkstack -n "__fish_zkstack_using_subcommand autocomplete" -l ignore-prerequisites -d 'Ignores prerequisites checks'
+complete -c zkstack -n "__fish_zkstack_using_subcommand autocomplete" -s h -l help -d 'Print help'
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and not __fish_seen_subcommand_from create build-transactions init change-default-chain setup-observability help" -l chain -d 'Chain to use' -r
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and not __fish_seen_subcommand_from create build-transactions init change-default-chain setup-observability help" -s v -l verbose -d 'Verbose mode'
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and not __fish_seen_subcommand_from create build-transactions init change-default-chain setup-observability help" -l ignore-prerequisites -d 'Ignores prerequisites checks'
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and not __fish_seen_subcommand_from create build-transactions init change-default-chain setup-observability help" -s h -l help -d 'Print help'
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and not __fish_seen_subcommand_from create build-transactions init change-default-chain setup-observability help" -f -a "create" -d 'Create a new ecosystem and chain, setting necessary configurations for later initialization'
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and not __fish_seen_subcommand_from create build-transactions init change-default-chain setup-observability help" -f -a "build-transactions" -d 'Create transactions to build ecosystem contracts'
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and not __fish_seen_subcommand_from create build-transactions init change-default-chain setup-observability help" -f -a "init" -d 'Initialize ecosystem and chain, deploying necessary contracts and performing on-chain operations'
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and not __fish_seen_subcommand_from create build-transactions init change-default-chain setup-observability help" -f -a "change-default-chain" -d 'Change the default chain'
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and not __fish_seen_subcommand_from create build-transactions init change-default-chain setup-observability help" -f -a "setup-observability" -d 'Setup observability for the ecosystem, downloading Grafana dashboards from the era-observability repo'
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and not __fish_seen_subcommand_from create build-transactions init change-default-chain setup-observability help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)'
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l ecosystem-name -r
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l l1-network -d 'L1 Network' -r -f -a "{localhost\t'',sepolia\t'',holesky\t'',mainnet\t''}"
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l link-to-code -d 'Code link' -r -f -a "(__fish_complete_directories)"
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l chain-name -r
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l chain-id -d 'Chain ID' -r
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l prover-mode -d 'Prover options' -r -f -a "{no-proofs\t'',gpu\t''}"
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l wallet-creation -d 'Wallet options' -r -f -a "{localhost\t'Load wallets from localhost mnemonic, they are funded for localhost env',random\t'Generate random wallets',empty\t'Generate placeholder wallets',in-file\t'Specify file with wallets'}"
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l wallet-path -d 'Wallet path' -r -F
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l l1-batch-commit-data-generator-mode -d 'Commit data generation mode' -r -f -a "{rollup\t'',validium\t''}"
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l base-token-address -d 'Base token address' -r
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l base-token-price-nominator -d 'Base token nominator' -r
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l base-token-price-denominator -d 'Base token denominator' -r
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l set-as-default -d 'Set as default chain' -r -f -a "{true\t'',false\t''}"
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l evm-emulator -d 'Enable EVM emulator' -r -f -a "{true\t'',false\t''}"
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l start-containers -d 'Start reth and postgres containers after creation' -r -f -a "{true\t'',false\t''}"
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l chain -d 'Chain to use' -r
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l legacy-bridge
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l skip-submodules-checkout -d 'Skip submodules checkout'
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l skip-contract-compilation-override -d 'Skip contract compilation override'
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -s v -l verbose -d 'Verbose mode'
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -l ignore-prerequisites -d 'Ignores prerequisites checks'
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from create" -s h -l help -d 'Print help (see more with \'--help\')'
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from build-transactions" -l sender -d 'Address of the transaction sender' -r
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from build-transactions" -l l1-rpc-url -d 'L1 RPC URL' -r
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from build-transactions" -s o -l out -d 'Output directory for the generated files' -r -F
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from build-transactions" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}"
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from build-transactions" -l verifier -d 'Verifier to use' -r -f -a "{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}"
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from build-transactions" -l verifier-url -d 'Verifier URL, if using a custom provider' -r
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from build-transactions" -l verifier-api-key -d 'Verifier API key' -r
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from build-transactions" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from build-transactions" -l chain -d 'Chain to use' -r
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from build-transactions" -l resume
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from build-transactions" -l zksync
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from build-transactions" -s v -l verbose -d 'Verbose mode'
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from build-transactions" -l ignore-prerequisites -d 'Ignores prerequisites checks'
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from build-transactions" -s h -l help -d 'Print help (see more with \'--help\')'
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l deploy-erc20 -d 'Deploy ERC20 contracts' -r -f -a "{true\t'',false\t''}"
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l deploy-ecosystem -d 'Deploy ecosystem contracts' -r -f -a "{true\t'',false\t''}"
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l ecosystem-contracts-path -d 'Path to ecosystem contracts' -r -F
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l l1-rpc-url -d 'L1 RPC URL' -r
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}"
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l verifier -d 'Verifier to use' -r -f -a "{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}"
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l verifier-url -d 'Verifier URL, if using a custom provider' -r
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l verifier-api-key -d 'Verifier API key' -r
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l deploy-paymaster -d 'Deploy Paymaster contract' -r -f -a "{true\t'',false\t''}"
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l server-db-url -d 'Server database url without database name' -r
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l server-db-name -d 'Server database name' -r
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -s o -l observability -d 'Enable Grafana' -r -f -a "{true\t'',false\t''}"
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l chain -d 'Chain to use' -r
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l resume
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l zksync
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -s d -l dont-drop
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l ecosystem-only -d 'Initialize ecosystem only and skip chain initialization (chain can be initialized later with `chain init` subcommand)'
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l dev -d 'Use defaults for all options and flags. Suitable for local development'
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l no-port-reallocation -d 'Do not reallocate ports'
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l skip-submodules-checkout -d 'Skip submodules checkout'
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l skip-contract-compilation-override -d 'Skip contract compilation override'
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -s v -l verbose -d 'Verbose mode'
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -l ignore-prerequisites -d 'Ignores prerequisites checks'
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from init" -s h -l help -d 'Print help (see more with \'--help\')'
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from change-default-chain" -l chain -d 'Chain to use' -r
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from change-default-chain" -s v -l verbose -d 'Verbose mode'
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from change-default-chain" -l ignore-prerequisites -d 'Ignores prerequisites checks'
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from change-default-chain" -s h -l help -d 'Print help'
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from setup-observability" -l chain -d 'Chain to use' -r
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from setup-observability" -s v -l verbose -d 'Verbose mode'
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from setup-observability" -l ignore-prerequisites -d 'Ignores prerequisites checks'
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from setup-observability" -s h -l help -d 'Print help'
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from help" -f -a "create" -d 'Create a new ecosystem and chain, setting necessary configurations for later initialization'
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from help" -f -a "build-transactions" -d 'Create transactions to build ecosystem contracts'
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from help" -f -a "init" -d 'Initialize ecosystem and chain, deploying necessary contracts and performing on-chain operations'
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from help" -f -a "change-default-chain" -d 'Change the default chain'
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from help" -f -a "setup-observability" -d 'Setup observability for the ecosystem, downloading Grafana dashboards from the era-observability repo'
+complete -c zkstack -n "__fish_zkstack_using_subcommand ecosystem; and __fish_seen_subcommand_from help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)'
+complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter convert-to-gateway migrate-to-gateway migrate-from-gateway help" -l chain -d 'Chain to use' -r
+complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter convert-to-gateway migrate-to-gateway migrate-from-gateway help" -s v -l verbose -d 'Verbose mode'
+complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter convert-to-gateway migrate-to-gateway migrate-from-gateway help" -l ignore-prerequisites -d 'Ignores prerequisites checks'
+complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter convert-to-gateway migrate-to-gateway migrate-from-gateway help" -s h -l help -d 'Print help'
+complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter convert-to-gateway migrate-to-gateway migrate-from-gateway help" -f -a "create" -d 'Create a new chain, setting the necessary configurations for later initialization'
+complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter convert-to-gateway migrate-to-gateway migrate-from-gateway help" -f -a "build-transactions" -d 'Create unsigned transactions for chain deployment'
+complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter convert-to-gateway migrate-to-gateway migrate-from-gateway help" -f -a "init" -d 'Initialize chain, deploying necessary contracts and performing on-chain operations'
+complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter convert-to-gateway migrate-to-gateway migrate-from-gateway help" -f -a "genesis" -d 'Run server genesis'
+complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter convert-to-gateway migrate-to-gateway migrate-from-gateway help" -f -a "register-chain" -d 'Register a new chain on L1 (executed by L1 governor). This command deploys and configures Governance, ChainAdmin, and DiamondProxy contracts, registers chain with BridgeHub and sets pending admin for DiamondProxy. Note: After completion, L2 governor can accept ownership by running `accept-chain-ownership`'
+complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter convert-to-gateway migrate-to-gateway migrate-from-gateway help" -f -a "deploy-l2-contracts" -d 'Deploy all L2 contracts (executed by L1 governor)'
+complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter convert-to-gateway migrate-to-gateway migrate-from-gateway help" -f -a "accept-chain-ownership" -d 'Accept ownership of L2 chain (executed by L2 governor). This command should be run after `register-chain` to accept ownership of newly created DiamondProxy contract'
+complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter convert-to-gateway migrate-to-gateway migrate-from-gateway help" -f -a "initialize-bridges" -d 'Initialize bridges on L2'
+complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter convert-to-gateway migrate-to-gateway migrate-from-gateway help" -f -a "deploy-consensus-registry" -d 'Deploy L2 consensus registry'
+complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter convert-to-gateway migrate-to-gateway migrate-from-gateway help" -f -a "deploy-multicall3" -d 'Deploy L2 multicall3'
+complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter convert-to-gateway migrate-to-gateway migrate-from-gateway help" -f -a "deploy-upgrader" -d 'Deploy Default Upgrader'
+complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter convert-to-gateway migrate-to-gateway migrate-from-gateway help" -f -a "deploy-paymaster" -d 'Deploy paymaster smart contract'
+complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter convert-to-gateway migrate-to-gateway migrate-from-gateway help" -f -a "update-token-multiplier-setter" -d 'Update Token Multiplier Setter address on L1'
+complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter convert-to-gateway migrate-to-gateway migrate-from-gateway help" -f -a "convert-to-gateway" -d 'Prepare chain to be an eligible gateway'
+complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3
deploy-upgrader deploy-paymaster update-token-multiplier-setter convert-to-gateway migrate-to-gateway migrate-from-gateway help" -f -a "migrate-to-gateway" -d 'Migrate chain to gateway' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter convert-to-gateway migrate-to-gateway migrate-from-gateway help" -f -a "migrate-from-gateway" -d 'Migrate chain from gateway' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and not __fish_seen_subcommand_from create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter convert-to-gateway migrate-to-gateway migrate-from-gateway help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l chain-name -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l chain-id -d 'Chain ID' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l prover-mode -d 'Prover options' -r -f -a "{no-proofs\t'',gpu\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l wallet-creation -d 'Wallet options' -r -f -a "{localhost\t'Load wallets from localhost mnemonic, they are funded for localhost env',random\t'Generate random wallets',empty\t'Generate placeholder wallets',in-file\t'Specify file with wallets'}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l wallet-path -d 'Wallet path' -r -F +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l l1-batch-commit-data-generator-mode -d 'Commit data generation mode' -r -f -a "{rollup\t'',validium\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l base-token-address -d 'Base token address' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l base-token-price-nominator -d 'Base token nominator' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l base-token-price-denominator -d 'Base token denominator' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l set-as-default -d 'Set as default chain' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l evm-emulator -d 'Enable EVM emulator' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l legacy-bridge +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l skip-submodules-checkout -d 'Skip submodules checkout' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; 
and __fish_seen_subcommand_from create" -l skip-contract-compilation-override -d 'Skip contract compilation override' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from create" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from build-transactions" -s o -l out -d 'Output directory for the generated files' -r -F +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from build-transactions" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from build-transactions" -l verifier -d 'Verifier to use' -r -f -a "{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from build-transactions" -l verifier-url -d 'Verifier URL, if using a custom provider' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from build-transactions" -l verifier-api-key -d 'Verifier API key' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from build-transactions" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from build-transactions" -l l1-rpc-url -d 'L1 RPC URL' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from build-transactions" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from build-transactions" -l resume +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from build-transactions" -l zksync +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from build-transactions" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from build-transactions" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from build-transactions" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l verifier -d 'Verifier to use' -r -f -a "{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l verifier-url -d 'Verifier URL, if using a custom provider' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l verifier-api-key -d 'Verifier API key' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -s 
a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l server-db-url -d 'Server database url without database name' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l server-db-name -d 'Server database name' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l deploy-paymaster -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l l1-rpc-url -d 'L1 RPC URL' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l resume +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l zksync +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -s d -l dont-drop +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l no-port-reallocation -d 'Do not reallocate ports' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l dev -d 'Use defaults for all options and flags. Suitable for local development' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l skip-submodules-checkout -d 'Skip submodules checkout' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -f -a "configs" -d 'Initialize chain configs' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from init" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from genesis" -l server-db-url -d 'Server database url without database name' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from genesis" -l server-db-name -d 'Server database name' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from genesis" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from genesis" -s d -l dev -d 'Use default database urls and names' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from genesis" -s d -l dont-drop +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from genesis" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from genesis" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand 
chain; and __fish_seen_subcommand_from genesis" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from genesis" -f -a "init-database" -d 'Initialize databases' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from genesis" -f -a "server" -d 'Runs server genesis' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from genesis" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from register-chain" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from register-chain" -l verifier -d 'Verifier to use' -r -f -a "{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from register-chain" -l verifier-url -d 'Verifier URL, if using a custom provider' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from register-chain" -l verifier-api-key -d 'Verifier API key' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from register-chain" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from register-chain" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from register-chain" -l resume +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from register-chain" -l zksync +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from register-chain" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from register-chain" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from register-chain" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-l2-contracts" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-l2-contracts" -l verifier -d 'Verifier to use' -r -f -a "{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-l2-contracts" -l verifier-url -d 'Verifier URL, if using a custom provider' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-l2-contracts" -l verifier-api-key -d 'Verifier API key' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-l2-contracts" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-l2-contracts" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and 
__fish_seen_subcommand_from deploy-l2-contracts" -l resume +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-l2-contracts" -l zksync +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-l2-contracts" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-l2-contracts" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-l2-contracts" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from accept-chain-ownership" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from accept-chain-ownership" -l verifier -d 'Verifier to use' -r -f -a "{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from accept-chain-ownership" -l verifier-url -d 'Verifier URL, if using a custom provider' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from accept-chain-ownership" -l verifier-api-key -d 'Verifier API key' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from accept-chain-ownership" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from accept-chain-ownership" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from accept-chain-ownership" -l resume +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from accept-chain-ownership" -l zksync +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from accept-chain-ownership" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from accept-chain-ownership" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from accept-chain-ownership" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from initialize-bridges" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from initialize-bridges" -l verifier -d 'Verifier to use' -r -f -a "{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from initialize-bridges" -l verifier-url -d 'Verifier URL, if using a custom provider' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from initialize-bridges" -l verifier-api-key -d 'Verifier API key' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from initialize-bridges" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r +complete -c 
zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from initialize-bridges" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from initialize-bridges" -l resume +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from initialize-bridges" -l zksync +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from initialize-bridges" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from initialize-bridges" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from initialize-bridges" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-consensus-registry" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-consensus-registry" -l verifier -d 'Verifier to use' -r -f -a "{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-consensus-registry" -l verifier-url -d 'Verifier URL, if using a custom provider' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-consensus-registry" -l verifier-api-key -d 'Verifier API key' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-consensus-registry" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-consensus-registry" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-consensus-registry" -l resume +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-consensus-registry" -l zksync +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-consensus-registry" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-consensus-registry" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-consensus-registry" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-multicall3" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-multicall3" -l verifier -d 'Verifier to use' -r -f -a "{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-multicall3" -l verifier-url -d 'Verifier URL, if using a custom provider' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-multicall3" -l verifier-api-key -d 'Verifier API key' -r 
+complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-multicall3" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-multicall3" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-multicall3" -l resume +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-multicall3" -l zksync +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-multicall3" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-multicall3" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-multicall3" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-upgrader" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-upgrader" -l verifier -d 'Verifier to use' -r -f -a "{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-upgrader" -l verifier-url -d 'Verifier URL, if using a custom provider' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-upgrader" -l verifier-api-key -d 'Verifier API key' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-upgrader" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-upgrader" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-upgrader" -l resume +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-upgrader" -l zksync +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-upgrader" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-upgrader" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-upgrader" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-paymaster" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-paymaster" -l verifier -d 'Verifier to use' -r -f -a "{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-paymaster" -l verifier-url -d 'Verifier URL, if using a custom provider' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand 
chain; and __fish_seen_subcommand_from deploy-paymaster" -l verifier-api-key -d 'Verifier API key' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-paymaster" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-paymaster" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-paymaster" -l resume +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-paymaster" -l zksync +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-paymaster" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-paymaster" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from deploy-paymaster" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from update-token-multiplier-setter" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from update-token-multiplier-setter" -l verifier -d 'Verifier to use' -r -f -a "{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from update-token-multiplier-setter" -l verifier-url -d 'Verifier URL, if using a custom provider' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from update-token-multiplier-setter" -l verifier-api-key -d 'Verifier API key' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from update-token-multiplier-setter" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from update-token-multiplier-setter" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from update-token-multiplier-setter" -l resume +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from update-token-multiplier-setter" -l zksync +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from update-token-multiplier-setter" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from update-token-multiplier-setter" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from update-token-multiplier-setter" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from convert-to-gateway" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from convert-to-gateway" -l verifier -d 'Verifier to use' -r -f -a 
"{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from convert-to-gateway" -l verifier-url -d 'Verifier URL, if using a custom provider' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from convert-to-gateway" -l verifier-api-key -d 'Verifier API key' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from convert-to-gateway" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from convert-to-gateway" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from convert-to-gateway" -l resume +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from convert-to-gateway" -l zksync +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from convert-to-gateway" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from convert-to-gateway" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from convert-to-gateway" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from migrate-to-gateway" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from migrate-to-gateway" -l verifier -d 'Verifier to use' -r -f -a "{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from migrate-to-gateway" -l verifier-url -d 'Verifier URL, if using a custom provider' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from migrate-to-gateway" -l verifier-api-key -d 'Verifier API key' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from migrate-to-gateway" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from migrate-to-gateway" -l gateway-chain-name -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from migrate-to-gateway" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from migrate-to-gateway" -l resume +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from migrate-to-gateway" -l zksync +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from migrate-to-gateway" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from migrate-to-gateway" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from migrate-to-gateway" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand 
chain; and __fish_seen_subcommand_from migrate-from-gateway" -l verify -d 'Verify deployed contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from migrate-from-gateway" -l verifier -d 'Verifier to use' -r -f -a "{etherscan\t'',sourcify\t'',blockscout\t'',oklink\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from migrate-from-gateway" -l verifier-url -d 'Verifier URL, if using a custom provider' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from migrate-from-gateway" -l verifier-api-key -d 'Verifier API key' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from migrate-from-gateway" -s a -l additional-args -d 'List of additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from migrate-from-gateway" -l gateway-chain-name -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from migrate-from-gateway" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from migrate-from-gateway" -l resume +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from migrate-from-gateway" -l zksync +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from migrate-from-gateway" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from migrate-from-gateway" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from migrate-from-gateway" -s h -l help -d 'Print help (see more with \'--help\')' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "create" -d 'Create a new chain, setting the necessary configurations for later initialization' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "build-transactions" -d 'Create unsigned transactions for chain deployment' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "init" -d 'Initialize chain, deploying necessary contracts and performing on-chain operations' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "genesis" -d 'Run server genesis' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "register-chain" -d 'Register a new chain on L1 (executed by L1 governor). This command deploys and configures Governance, ChainAdmin, and DiamondProxy contracts, registers chain with BridgeHub and sets pending admin for DiamondProxy. Note: After completion, L2 governor can accept ownership by running `accept-chain-ownership`' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "deploy-l2-contracts" -d 'Deploy all L2 contracts (executed by L1 governor)' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "accept-chain-ownership" -d 'Accept ownership of L2 chain (executed by L2 governor). 
This command should be run after `register-chain` to accept ownership of newly created DiamondProxy contract' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "initialize-bridges" -d 'Initialize bridges on L2' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "deploy-consensus-registry" -d 'Deploy L2 consensus registry' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "deploy-multicall3" -d 'Deploy L2 multicall3' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "deploy-upgrader" -d 'Deploy Default Upgrader' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "deploy-paymaster" -d 'Deploy paymaster smart contract' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "update-token-multiplier-setter" -d 'Update Token Multiplier Setter address on L1' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "convert-to-gateway" -d 'Prepare chain to be an eligible gateway' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "migrate-to-gateway" -d 'Migrate chain to gateway' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "migrate-from-gateway" -d 'Migrate chain from gateway' +complete -c zkstack -n "__fish_zkstack_using_subcommand chain; and __fish_seen_subcommand_from help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -f -a "database" -d 'Database related commands' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -f -a "test" -d 'Run tests' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -f -a "clean" -d 'Clean artifacts' +complete 
-c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -f -a "snapshot" -d 'Snapshots creator' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -f -a "lint" -d 'Lint code' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -f -a "fmt" -d 'Format code' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -f -a "prover" -d 'Protocol version used by provers' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -f -a "contracts" -d 'Build contracts' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -f -a "config-writer" -d 'Overwrite general config' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -f -a "send-transactions" -d 'Send transactions from file' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -f -a "status" -d 'Get status of the server' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -f -a "generate-genesis" -d 'Generate new genesis file based on current contracts' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and not __fish_seen_subcommand_from database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from database" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from database" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from database" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from database" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from database" -f -a "check-sqlx-data" -d 'Check sqlx-data.json is up to date. If no databases are selected, all databases will be checked.' 
+complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from database" -f -a "drop" -d 'Drop databases. If no databases are selected, all databases will be dropped.' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from database" -f -a "migrate" -d 'Migrate databases. If no databases are selected, all databases will be migrated.' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from database" -f -a "new-migration" -d 'Create new migration' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from database" -f -a "prepare" -d 'Prepare sqlx-data.json. If no databases are selected, all databases will be prepared.' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from database" -f -a "reset" -d 'Reset databases. If no databases are selected, all databases will be reset.' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from database" -f -a "setup" -d 'Setup databases. If no databases are selected, all databases will be setup.' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from database" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -f -a "integration" -d 'Run integration tests' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -f -a "fees" -d 'Run fees test' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -f -a "revert" -d 'Run revert tests' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -f -a "recovery" -d 'Run recovery tests' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -f -a "upgrade" -d 'Run upgrade tests' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -f -a "build" -d 'Build all test dependencies' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -f -a "rust" -d 'Run unit-tests, accepts optional cargo test flags' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -f -a "l1-contracts" -d 'Run L1 contracts tests' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -f -a "prover" -d 'Run prover tests' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -f -a "wallet" -d 'Print test wallets information' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from test" -f -a "loadtest" -d 'Run loadtest' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and 
__fish_seen_subcommand_from test" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from clean" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from clean" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from clean" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from clean" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from clean" -f -a "all" -d 'Remove containers and contracts cache' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from clean" -f -a "containers" -d 'Remove containers and docker volumes' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from clean" -f -a "contracts-cache" -d 'Remove contracts caches' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from clean" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from snapshot" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from snapshot" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from snapshot" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from snapshot" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from snapshot" -f -a "create" +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from snapshot" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from lint" -s t -l targets -r -f -a "{md\t'',sol\t'',js\t'',ts\t'',rs\t'',contracts\t'',autocompletion\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from lint" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from lint" -s c -l check +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from lint" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from lint" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from lint" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from fmt" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from fmt" -s c -l check +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from fmt" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from fmt" -l ignore-prerequisites -d 'Ignores 
prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from fmt" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from fmt" -f -a "rustfmt" +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from fmt" -f -a "contract" +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from fmt" -f -a "prettier" +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from fmt" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from prover" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from prover" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from prover" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from prover" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from prover" -f -a "info" +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from prover" -f -a "insert-batch" +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from prover" -f -a "insert-version" +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from prover" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from contracts" -l l1-contracts -d 'Build L1 contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from contracts" -l l1-da-contracts -d 'Build L1 DA contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from contracts" -l l2-contracts -d 'Build L2 contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from contracts" -l system-contracts -d 'Build system contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from contracts" -l test-contracts -d 'Build test contracts' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from contracts" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from contracts" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from contracts" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from contracts" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from config-writer" -s p -l path -d 'Path to the config file to override' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from config-writer" -l chain -d 'Chain to use' -r 
+complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from config-writer" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from config-writer" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from config-writer" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from send-transactions" -l file -r -F +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from send-transactions" -l private-key -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from send-transactions" -l l1-rpc-url -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from send-transactions" -l confirmations -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from send-transactions" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from send-transactions" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from send-transactions" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from send-transactions" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from status" -s u -l url -d 'URL of the health check endpoint' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from status" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from status" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from status" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from status" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from status" -f -a "ports" -d 'Show used ports' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from status" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from generate-genesis" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from generate-genesis" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from generate-genesis" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from generate-genesis" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from help" -f -a "database" -d 'Database related commands' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from help" -f -a "test" -d 'Run tests' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and 
__fish_seen_subcommand_from help" -f -a "clean" -d 'Clean artifacts' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from help" -f -a "snapshot" -d 'Snapshots creator' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from help" -f -a "lint" -d 'Lint code' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from help" -f -a "fmt" -d 'Format code' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from help" -f -a "prover" -d 'Protocol version used by provers' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from help" -f -a "contracts" -d 'Build contracts' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from help" -f -a "config-writer" -d 'Overwrite general config' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from help" -f -a "send-transactions" -d 'Send transactions from file' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from help" -f -a "status" -d 'Get status of the server' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from help" -f -a "generate-genesis" -d 'Generate new genesis file based on current contracts' +complete -c zkstack -n "__fish_zkstack_using_subcommand dev; and __fish_seen_subcommand_from help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and not __fish_seen_subcommand_from init setup-keys run init-bellman-cuda compressor-keys help" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and not __fish_seen_subcommand_from init setup-keys run init-bellman-cuda compressor-keys help" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and not __fish_seen_subcommand_from init setup-keys run init-bellman-cuda compressor-keys help" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and not __fish_seen_subcommand_from init setup-keys run init-bellman-cuda compressor-keys help" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and not __fish_seen_subcommand_from init setup-keys run init-bellman-cuda compressor-keys help" -f -a "init" -d 'Initialize prover' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and not __fish_seen_subcommand_from init setup-keys run init-bellman-cuda compressor-keys help" -f -a "setup-keys" -d 'Generate setup keys' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and not __fish_seen_subcommand_from init setup-keys run init-bellman-cuda compressor-keys help" -f -a "run" -d 'Run prover' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and not __fish_seen_subcommand_from init setup-keys run init-bellman-cuda compressor-keys help" -f -a "init-bellman-cuda" -d 'Initialize bellman-cuda' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and not __fish_seen_subcommand_from init setup-keys run init-bellman-cuda compressor-keys help" -f -a "compressor-keys" -d 'Download compressor keys' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and not __fish_seen_subcommand_from init setup-keys run init-bellman-cuda compressor-keys help" -f 
-a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l proof-store-dir -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l bucket-base-url -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l credentials-file -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l bucket-name -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l location -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l project-id -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l shall-save-to-public-bucket -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l public-store-dir -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l public-bucket-base-url -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l public-credentials-file -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l public-bucket-name -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l public-location -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l public-project-id -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l bellman-cuda-dir -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l bellman-cuda -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l setup-compressor-key -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l path -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l region -r -f -a "{us\t'',europe\t'',asia\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l mode -r -f -a "{download\t'',generate\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l setup-keys -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l setup-database -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l prover-db-url -d 'Prover database url without database name' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l prover-db-name -d 'Prover database name' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -s u -l use-default -d 'Use default database urls and names' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -s d -l dont-drop -r -f -a 
"{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l cloud-type -r -f -a "{gcp\t'',local\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l dev +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l clone +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from setup-keys" -l region -r -f -a "{us\t'',europe\t'',asia\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from setup-keys" -l mode -r -f -a "{download\t'',generate\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from setup-keys" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from setup-keys" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from setup-keys" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from setup-keys" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from run" -l component -r -f -a "{gateway\t'',witness-generator\t'',witness-vector-generator\t'',prover\t'',circuit-prover\t'',compressor\t'',prover-job-monitor\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from run" -l round -r -f -a "{all-rounds\t'',basic-circuits\t'',leaf-aggregation\t'',node-aggregation\t'',recursion-tip\t'',scheduler\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from run" -l threads -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from run" -l max-allocation -d 'Memory allocation limit in bytes (for prover component)' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from run" -l witness-vector-generator-count -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from run" -l max-allocation -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from run" -l docker -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from run" -l tag -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from run" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from run" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from run" -l 
ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from run" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init-bellman-cuda" -l bellman-cuda-dir -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init-bellman-cuda" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init-bellman-cuda" -l clone +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init-bellman-cuda" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init-bellman-cuda" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from init-bellman-cuda" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from compressor-keys" -l path -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from compressor-keys" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from compressor-keys" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from compressor-keys" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from compressor-keys" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from help" -f -a "init" -d 'Initialize prover' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from help" -f -a "setup-keys" -d 'Generate setup keys' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from help" -f -a "run" -d 'Run prover' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from help" -f -a "init-bellman-cuda" -d 'Initialize bellman-cuda' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from help" -f -a "compressor-keys" -d 'Download compressor keys' +complete -c zkstack -n "__fish_zkstack_using_subcommand prover; and __fish_seen_subcommand_from help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand server" -l components -d 'Components of server to run' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand server" -s a -l additional-args -d 'Additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand server" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand server" -l genesis -d 'Run server in genesis mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand server" -l build -d 'Build server but don\'t run it' +complete -c zkstack -n "__fish_zkstack_using_subcommand server" -l uring -d 'Enables uring support for RocksDB' +complete -c zkstack -n "__fish_zkstack_using_subcommand server" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n 
"__fish_zkstack_using_subcommand server" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand server" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and not __fish_seen_subcommand_from configs init run help" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and not __fish_seen_subcommand_from configs init run help" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and not __fish_seen_subcommand_from configs init run help" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and not __fish_seen_subcommand_from configs init run help" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and not __fish_seen_subcommand_from configs init run help" -f -a "configs" -d 'Prepare configs for EN' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and not __fish_seen_subcommand_from configs init run help" -f -a "init" -d 'Init databases' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and not __fish_seen_subcommand_from configs init run help" -f -a "run" -d 'Run external node' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and not __fish_seen_subcommand_from configs init run help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from configs" -l db-url -r +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from configs" -l db-name -r +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from configs" -l l1-rpc-url -r +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from configs" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from configs" -s u -l use-default -d 'Use default database urls and names' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from configs" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from configs" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from configs" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from init" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from init" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from init" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from init" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from run" -l components -d 'Components of server to run' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand 
external-node; and __fish_seen_subcommand_from run" -l enable-consensus -d 'Enable consensus' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from run" -s a -l additional-args -d 'Additional arguments that can be passed through the CLI' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from run" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from run" -l reinit +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from run" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from run" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from run" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from help" -f -a "configs" -d 'Prepare configs for EN' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from help" -f -a "init" -d 'Init databases' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from help" -f -a "run" -d 'Run external node' +complete -c zkstack -n "__fish_zkstack_using_subcommand external-node; and __fish_seen_subcommand_from help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand containers" -s o -l observability -d 'Enable Grafana' -r -f -a "{true\t'',false\t''}" +complete -c zkstack -n "__fish_zkstack_using_subcommand containers" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand containers" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand containers" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand containers" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and not __fish_seen_subcommand_from run init help" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and not __fish_seen_subcommand_from run init help" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and not __fish_seen_subcommand_from run init help" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and not __fish_seen_subcommand_from run init help" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and not __fish_seen_subcommand_from run init help" -f -a "run" -d 'Run contract verifier' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and not __fish_seen_subcommand_from run init help" -f -a "init" -d 'Download required binaries for contract verifier' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and not __fish_seen_subcommand_from run init help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and 
__fish_seen_subcommand_from run" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from run" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from run" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from run" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from init" -l zksolc-version -d 'Version of zksolc to install' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from init" -l zkvyper-version -d 'Version of zkvyper to install' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from init" -l solc-version -d 'Version of solc to install' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from init" -l era-vm-solc-version -d 'Version of era vm solc to install' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from init" -l vyper-version -d 'Version of vyper to install' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from init" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from init" -l only -d 'Install only provided compilers' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from init" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from init" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from init" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from help" -f -a "run" -d 'Run contract verifier' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from help" -f -a "init" -d 'Download required binaries for contract verifier' +complete -c zkstack -n "__fish_zkstack_using_subcommand contract-verifier; and __fish_seen_subcommand_from help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand portal" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand portal" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand portal" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand portal" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and not __fish_seen_subcommand_from init run-backend run help" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and not __fish_seen_subcommand_from init run-backend run help" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and not __fish_seen_subcommand_from init run-backend run help" -l ignore-prerequisites 
-d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and not __fish_seen_subcommand_from init run-backend run help" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and not __fish_seen_subcommand_from init run-backend run help" -f -a "init" -d 'Initialize explorer (create database to store explorer data and generate docker compose file with explorer services). Runs for all chains, unless --chain is passed' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and not __fish_seen_subcommand_from init run-backend run help" -f -a "run-backend" -d 'Start explorer backend services (api, data_fetcher, worker) for a given chain. Uses default chain, unless --chain is passed' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and not __fish_seen_subcommand_from init run-backend run help" -f -a "run" -d 'Run explorer app' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and not __fish_seen_subcommand_from init run-backend run help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from init" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from init" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from init" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from init" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from run-backend" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from run-backend" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from run-backend" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from run-backend" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from run" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from run" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from run" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from run" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from help" -f -a "init" -d 'Initialize explorer (create database to store explorer data and generate docker compose file with explorer services). Runs for all chains, unless --chain is passed' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from help" -f -a "run-backend" -d 'Start explorer backend services (api, data_fetcher, worker) for a given chain. 
Uses default chain, unless --chain is passed' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from help" -f -a "run" -d 'Run explorer app' +complete -c zkstack -n "__fish_zkstack_using_subcommand explorer; and __fish_seen_subcommand_from help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and not __fish_seen_subcommand_from set-attester-committee get-attester-committee help" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and not __fish_seen_subcommand_from set-attester-committee get-attester-committee help" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and not __fish_seen_subcommand_from set-attester-committee get-attester-committee help" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and not __fish_seen_subcommand_from set-attester-committee get-attester-committee help" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and not __fish_seen_subcommand_from set-attester-committee get-attester-committee help" -f -a "set-attester-committee" -d 'Sets the attester committee in the consensus registry contract to `consensus.genesis_spec.attesters` in general.yaml' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and not __fish_seen_subcommand_from set-attester-committee get-attester-committee help" -f -a "get-attester-committee" -d 'Fetches the attester committee from the consensus registry contract' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and not __fish_seen_subcommand_from set-attester-committee get-attester-committee help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from set-attester-committee" -l from-file -d 'Sets the attester committee in the consensus registry contract to the committee in the yaml file. 
File format is defined in `commands/consensus/proto/mod.proto`' -r -F +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from set-attester-committee" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from set-attester-committee" -l from-genesis -d 'Sets the attester committee in the consensus registry contract to `consensus.genesis_spec.attesters` in general.yaml' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from set-attester-committee" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from set-attester-committee" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from set-attester-committee" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from get-attester-committee" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from get-attester-committee" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from get-attester-committee" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from get-attester-committee" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from help" -f -a "set-attester-committee" -d 'Sets the attester committee in the consensus registry contract to `consensus.genesis_spec.attesters` in general.yaml' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from help" -f -a "get-attester-committee" -d 'Fetches the attester committee from the consensus registry contract' +complete -c zkstack -n "__fish_zkstack_using_subcommand consensus; and __fish_seen_subcommand_from help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand update" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand update" -s c -l only-config -d 'Update only the config files' +complete -c zkstack -n "__fish_zkstack_using_subcommand update" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand update" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand update" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand markdown" -l chain -d 'Chain to use' -r +complete -c zkstack -n "__fish_zkstack_using_subcommand markdown" -s v -l verbose -d 'Verbose mode' +complete -c zkstack -n "__fish_zkstack_using_subcommand markdown" -l ignore-prerequisites -d 'Ignores prerequisites checks' +complete -c zkstack -n "__fish_zkstack_using_subcommand markdown" -s h -l help -d 'Print help' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "autocomplete" -d 'Create shell autocompletion files' +complete -c zkstack -n
"__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "ecosystem" -d 'Ecosystem related commands' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "chain" -d 'Chain related commands' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "dev" -d 'Supervisor related commands' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "prover" -d 'Prover related commands' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "server" -d 'Run server' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "external-node" -d 'External Node related commands' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "containers" -d 'Run containers for local development' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "contract-verifier" -d 'Run contract verifier' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "portal" -d 'Run dapp-portal' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "explorer" -d 'Run block-explorer' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "consensus" -d 'Consensus utilities' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "update" -d 'Update ZKsync' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal 
explorer consensus update markdown help" -f -a "markdown" -d 'Print markdown help' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and not __fish_seen_subcommand_from autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" -f -a "help" -d 'Print this message or the help of the given subcommand(s)' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from ecosystem" -f -a "create" -d 'Create a new ecosystem and chain, setting necessary configurations for later initialization' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from ecosystem" -f -a "build-transactions" -d 'Create transactions to build ecosystem contracts' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from ecosystem" -f -a "init" -d 'Initialize ecosystem and chain, deploying necessary contracts and performing on-chain operations' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from ecosystem" -f -a "change-default-chain" -d 'Change the default chain' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from ecosystem" -f -a "setup-observability" -d 'Setup observability for the ecosystem, downloading Grafana dashboards from the era-observability repo' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "create" -d 'Create a new chain, setting the necessary configurations for later initialization' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "build-transactions" -d 'Create unsigned transactions for chain deployment' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "init" -d 'Initialize chain, deploying necessary contracts and performing on-chain operations' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "genesis" -d 'Run server genesis' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "register-chain" -d 'Register a new chain on L1 (executed by L1 governor). This command deploys and configures Governance, ChainAdmin, and DiamondProxy contracts, registers chain with BridgeHub and sets pending admin for DiamondProxy. Note: After completion, L2 governor can accept ownership by running `accept-chain-ownership`' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "deploy-l2-contracts" -d 'Deploy all L2 contracts (executed by L1 governor)' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "accept-chain-ownership" -d 'Accept ownership of L2 chain (executed by L2 governor). 
This command should be run after `register-chain` to accept ownership of newly created DiamondProxy contract' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "initialize-bridges" -d 'Initialize bridges on L2' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "deploy-consensus-registry" -d 'Deploy L2 consensus registry' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "deploy-multicall3" -d 'Deploy L2 multicall3' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "deploy-upgrader" -d 'Deploy Default Upgrader' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "deploy-paymaster" -d 'Deploy paymaster smart contract' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "update-token-multiplier-setter" -d 'Update Token Multiplier Setter address on L1' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "convert-to-gateway" -d 'Prepare chain to be an eligible gateway' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "migrate-to-gateway" -d 'Migrate chain to gateway' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from chain" -f -a "migrate-from-gateway" -d 'Migrate chain from gateway' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from dev" -f -a "database" -d 'Database related commands' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from dev" -f -a "test" -d 'Run tests' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from dev" -f -a "clean" -d 'Clean artifacts' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from dev" -f -a "snapshot" -d 'Snapshots creator' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from dev" -f -a "lint" -d 'Lint code' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from dev" -f -a "fmt" -d 'Format code' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from dev" -f -a "prover" -d 'Protocol version used by provers' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from dev" -f -a "contracts" -d 'Build contracts' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from dev" -f -a "config-writer" -d 'Overwrite general config' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from dev" -f -a "send-transactions" -d 'Send transactions from file' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from dev" -f -a "status" -d 'Get status of the server' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from dev" -f -a "generate-genesis" -d 'Generate new genesis file based on current contracts' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from prover" -f -a "init" -d 'Initialize prover' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and 
__fish_seen_subcommand_from prover" -f -a "setup-keys" -d 'Generate setup keys' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from prover" -f -a "run" -d 'Run prover' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from prover" -f -a "init-bellman-cuda" -d 'Initialize bellman-cuda' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from prover" -f -a "compressor-keys" -d 'Download compressor keys' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from external-node" -f -a "configs" -d 'Prepare configs for EN' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from external-node" -f -a "init" -d 'Init databases' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from external-node" -f -a "run" -d 'Run external node' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from contract-verifier" -f -a "run" -d 'Run contract verifier' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from contract-verifier" -f -a "init" -d 'Download required binaries for contract verifier' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from explorer" -f -a "init" -d 'Initialize explorer (create database to store explorer data and generate docker compose file with explorer services). Runs for all chains, unless --chain is passed' +complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from explorer" -f -a "run-backend" -d 'Start explorer backend services (api, data_fetcher, worker) for a given chain. 
Uses default chain, unless --chain is passed'
+complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from explorer" -f -a "run" -d 'Run explorer app'
+complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from consensus" -f -a "set-attester-committee" -d 'Sets the attester committee in the consensus registry contract to `consensus.genesis_spec.attesters` in general.yaml'
+complete -c zkstack -n "__fish_zkstack_using_subcommand help; and __fish_seen_subcommand_from consensus" -f -a "get-attester-committee" -d 'Fetches the attester committee from the consensus registry contract'
diff --git a/zkstack_cli/crates/zkstack/completion/zkstack.sh b/zkstack_cli/crates/zkstack/completion/zkstack.sh
new file mode 100644
index 00000000000..57294750ca4
--- /dev/null
+++ b/zkstack_cli/crates/zkstack/completion/zkstack.sh
@@ -0,0 +1,7255 @@
+_zkstack() {
+    local i cur prev opts cmd
+    COMPREPLY=()
+    cur="${COMP_WORDS[COMP_CWORD]}"
+    prev="${COMP_WORDS[COMP_CWORD-1]}"
+    cmd=""
+    opts=""
+
+    for i in ${COMP_WORDS[@]}
+    do
+        case "${cmd},${i}" in
+            ",$1")
+                cmd="zkstack"
+                ;;
+            zkstack,autocomplete)
+                cmd="zkstack__autocomplete"
+                ;;
+            zkstack,chain)
+                cmd="zkstack__chain"
+                ;;
+            zkstack,consensus)
+                cmd="zkstack__consensus"
+                ;;
+            zkstack,containers)
+                cmd="zkstack__containers"
+                ;;
+            zkstack,contract-verifier)
+                cmd="zkstack__contract__verifier"
+                ;;
+            zkstack,dev)
+                cmd="zkstack__dev"
+                ;;
+            zkstack,ecosystem)
+                cmd="zkstack__ecosystem"
+                ;;
+            zkstack,explorer)
+                cmd="zkstack__explorer"
+                ;;
+            zkstack,external-node)
+                cmd="zkstack__external__node"
+                ;;
+            zkstack,help)
+                cmd="zkstack__help"
+                ;;
+            zkstack,markdown)
+                cmd="zkstack__markdown"
+                ;;
+            zkstack,portal)
+                cmd="zkstack__portal"
+                ;;
+            zkstack,prover)
+                cmd="zkstack__prover"
+                ;;
+            zkstack,server)
+                cmd="zkstack__server"
+                ;;
+            zkstack,update)
+                cmd="zkstack__update"
+                ;;
+            zkstack__chain,accept-chain-ownership)
+                cmd="zkstack__chain__accept__chain__ownership"
+                ;;
+            zkstack__chain,build-transactions)
+                cmd="zkstack__chain__build__transactions"
+                ;;
+            zkstack__chain,convert-to-gateway)
+                cmd="zkstack__chain__convert__to__gateway"
+                ;;
+            zkstack__chain,create)
+                cmd="zkstack__chain__create"
+                ;;
+            zkstack__chain,deploy-consensus-registry)
+                cmd="zkstack__chain__deploy__consensus__registry"
+                ;;
+            zkstack__chain,deploy-l2-contracts)
+                cmd="zkstack__chain__deploy__l2__contracts"
+                ;;
+            zkstack__chain,deploy-multicall3)
+                cmd="zkstack__chain__deploy__multicall3"
+                ;;
+            zkstack__chain,deploy-paymaster)
+                cmd="zkstack__chain__deploy__paymaster"
+                ;;
+            zkstack__chain,deploy-upgrader)
+                cmd="zkstack__chain__deploy__upgrader"
+                ;;
+            zkstack__chain,genesis)
+                cmd="zkstack__chain__genesis"
+                ;;
+            zkstack__chain,help)
+                cmd="zkstack__chain__help"
+                ;;
+            zkstack__chain,init)
+                cmd="zkstack__chain__init"
+                ;;
+            zkstack__chain,initialize-bridges)
+                cmd="zkstack__chain__initialize__bridges"
+                ;;
+            zkstack__chain,migrate-from-gateway)
+                cmd="zkstack__chain__migrate__from__gateway"
+                ;;
+            zkstack__chain,migrate-to-gateway)
+                cmd="zkstack__chain__migrate__to__gateway"
+                ;;
+            zkstack__chain,register-chain)
+                cmd="zkstack__chain__register__chain"
+                ;;
+            zkstack__chain,update-token-multiplier-setter)
+                cmd="zkstack__chain__update__token__multiplier__setter"
+                ;;
+            zkstack__chain__genesis,help)
+                cmd="zkstack__chain__genesis__help"
+                ;;
+            zkstack__chain__genesis,init-database)
+                cmd="zkstack__chain__genesis__init__database"
+                ;;
+            zkstack__chain__genesis,server)
+                cmd="zkstack__chain__genesis__server"
+                ;;
+            zkstack__chain__genesis__help,help)
+                cmd="zkstack__chain__genesis__help__help"
+                ;;
+            zkstack__chain__genesis__help,init-database)
+                cmd="zkstack__chain__genesis__help__init__database"
+                ;;
+            zkstack__chain__genesis__help,server)
+                cmd="zkstack__chain__genesis__help__server"
+                ;;
+            zkstack__chain__help,accept-chain-ownership)
+                cmd="zkstack__chain__help__accept__chain__ownership"
+                ;;
+            zkstack__chain__help,build-transactions)
+                cmd="zkstack__chain__help__build__transactions"
+                ;;
+            zkstack__chain__help,convert-to-gateway)
+                cmd="zkstack__chain__help__convert__to__gateway"
+                ;;
+            zkstack__chain__help,create)
+                cmd="zkstack__chain__help__create"
+                ;;
+            zkstack__chain__help,deploy-consensus-registry)
+                cmd="zkstack__chain__help__deploy__consensus__registry"
+                ;;
+            zkstack__chain__help,deploy-l2-contracts)
+                cmd="zkstack__chain__help__deploy__l2__contracts"
+                ;;
+            zkstack__chain__help,deploy-multicall3)
+                cmd="zkstack__chain__help__deploy__multicall3"
+                ;;
+            zkstack__chain__help,deploy-paymaster)
+                cmd="zkstack__chain__help__deploy__paymaster"
+                ;;
+            zkstack__chain__help,deploy-upgrader)
+                cmd="zkstack__chain__help__deploy__upgrader"
+                ;;
+            zkstack__chain__help,genesis)
+                cmd="zkstack__chain__help__genesis"
+                ;;
+            zkstack__chain__help,help)
+                cmd="zkstack__chain__help__help"
+                ;;
+            zkstack__chain__help,init)
+                cmd="zkstack__chain__help__init"
+                ;;
+            zkstack__chain__help,initialize-bridges)
+                cmd="zkstack__chain__help__initialize__bridges"
+                ;;
+            zkstack__chain__help,migrate-from-gateway)
+                cmd="zkstack__chain__help__migrate__from__gateway"
+                ;;
+            zkstack__chain__help,migrate-to-gateway)
+                cmd="zkstack__chain__help__migrate__to__gateway"
+                ;;
+            zkstack__chain__help,register-chain)
+                cmd="zkstack__chain__help__register__chain"
+                ;;
+            zkstack__chain__help,update-token-multiplier-setter)
+                cmd="zkstack__chain__help__update__token__multiplier__setter"
+                ;;
+            zkstack__chain__help__genesis,init-database)
+                cmd="zkstack__chain__help__genesis__init__database"
+                ;;
+            zkstack__chain__help__genesis,server)
+                cmd="zkstack__chain__help__genesis__server"
+                ;;
+            zkstack__chain__help__init,configs)
+                cmd="zkstack__chain__help__init__configs"
+                ;;
+            zkstack__chain__init,configs)
+                cmd="zkstack__chain__init__configs"
+                ;;
+            zkstack__chain__init,help)
+                cmd="zkstack__chain__init__help"
+                ;;
+            zkstack__chain__init__help,configs)
+                cmd="zkstack__chain__init__help__configs"
+                ;;
+            zkstack__chain__init__help,help)
+                cmd="zkstack__chain__init__help__help"
+                ;;
+            zkstack__consensus,get-attester-committee)
+                cmd="zkstack__consensus__get__attester__committee"
+                ;;
+            zkstack__consensus,help)
+                cmd="zkstack__consensus__help"
+                ;;
+            zkstack__consensus,set-attester-committee)
+                cmd="zkstack__consensus__set__attester__committee"
+                ;;
+            zkstack__consensus__help,get-attester-committee)
+                cmd="zkstack__consensus__help__get__attester__committee"
+                ;;
+            zkstack__consensus__help,help)
+                cmd="zkstack__consensus__help__help"
+                ;;
+            zkstack__consensus__help,set-attester-committee)
+                cmd="zkstack__consensus__help__set__attester__committee"
+                ;;
+            zkstack__contract__verifier,help)
+                cmd="zkstack__contract__verifier__help"
+                ;;
+            zkstack__contract__verifier,init)
+                cmd="zkstack__contract__verifier__init"
+                ;;
+            zkstack__contract__verifier,run)
+                cmd="zkstack__contract__verifier__run"
+                ;;
+            zkstack__contract__verifier__help,help)
+                cmd="zkstack__contract__verifier__help__help"
+                ;;
+            zkstack__contract__verifier__help,init)
+                cmd="zkstack__contract__verifier__help__init"
+                ;;
+            zkstack__contract__verifier__help,run)
+                cmd="zkstack__contract__verifier__help__run"
+                ;;
+            zkstack__dev,clean)
+                cmd="zkstack__dev__clean"
+                ;;
+            zkstack__dev,config-writer)
+                cmd="zkstack__dev__config__writer"
+                ;;
+            zkstack__dev,contracts)
+                cmd="zkstack__dev__contracts"
+                ;;
+            zkstack__dev,database)
+                cmd="zkstack__dev__database"
+                ;;
+            zkstack__dev,fmt)
+                cmd="zkstack__dev__fmt"
+                ;;
+            zkstack__dev,generate-genesis)
+                cmd="zkstack__dev__generate__genesis"
+                ;;
+            zkstack__dev,help)
+                cmd="zkstack__dev__help"
+                ;;
+            zkstack__dev,lint)
+                cmd="zkstack__dev__lint"
+                ;;
+            zkstack__dev,prover)
+                cmd="zkstack__dev__prover"
+                ;;
+            zkstack__dev,send-transactions)
+                cmd="zkstack__dev__send__transactions"
+                ;;
+            zkstack__dev,snapshot)
+                cmd="zkstack__dev__snapshot"
+                ;;
+            zkstack__dev,status)
+                cmd="zkstack__dev__status"
+                ;;
+            zkstack__dev,test)
+                cmd="zkstack__dev__test"
+                ;;
+            zkstack__dev__clean,all)
+                cmd="zkstack__dev__clean__all"
+                ;;
+            zkstack__dev__clean,containers)
+                cmd="zkstack__dev__clean__containers"
+                ;;
+            zkstack__dev__clean,contracts-cache)
+                cmd="zkstack__dev__clean__contracts__cache"
+                ;;
+            zkstack__dev__clean,help)
+                cmd="zkstack__dev__clean__help"
+                ;;
+            zkstack__dev__clean__help,all)
+                cmd="zkstack__dev__clean__help__all"
+                ;;
+            zkstack__dev__clean__help,containers)
+                cmd="zkstack__dev__clean__help__containers"
+                ;;
+            zkstack__dev__clean__help,contracts-cache)
+                cmd="zkstack__dev__clean__help__contracts__cache"
+                ;;
+            zkstack__dev__clean__help,help)
+                cmd="zkstack__dev__clean__help__help"
+                ;;
+            zkstack__dev__database,check-sqlx-data)
+                cmd="zkstack__dev__database__check__sqlx__data"
+                ;;
+            zkstack__dev__database,drop)
+                cmd="zkstack__dev__database__drop"
+                ;;
+            zkstack__dev__database,help)
+                cmd="zkstack__dev__database__help"
+                ;;
+            zkstack__dev__database,migrate)
+                cmd="zkstack__dev__database__migrate"
+                ;;
+            zkstack__dev__database,new-migration)
+                cmd="zkstack__dev__database__new__migration"
+                ;;
+            zkstack__dev__database,prepare)
+                cmd="zkstack__dev__database__prepare"
+                ;;
+            zkstack__dev__database,reset)
+                cmd="zkstack__dev__database__reset"
+                ;;
+            zkstack__dev__database,setup)
+                cmd="zkstack__dev__database__setup"
+                ;;
+            zkstack__dev__database__help,check-sqlx-data)
+                cmd="zkstack__dev__database__help__check__sqlx__data"
+                ;;
+            zkstack__dev__database__help,drop)
+                cmd="zkstack__dev__database__help__drop"
+                ;;
+            zkstack__dev__database__help,help)
+                cmd="zkstack__dev__database__help__help"
+                ;;
+            zkstack__dev__database__help,migrate)
+                cmd="zkstack__dev__database__help__migrate"
+                ;;
+            zkstack__dev__database__help,new-migration)
+                cmd="zkstack__dev__database__help__new__migration"
+                ;;
+            zkstack__dev__database__help,prepare)
+                cmd="zkstack__dev__database__help__prepare"
+                ;;
+            zkstack__dev__database__help,reset)
+                cmd="zkstack__dev__database__help__reset"
+                ;;
+            zkstack__dev__database__help,setup)
+                cmd="zkstack__dev__database__help__setup"
+                ;;
+            zkstack__dev__fmt,contract)
+                cmd="zkstack__dev__fmt__contract"
+                ;;
+            zkstack__dev__fmt,help)
+                cmd="zkstack__dev__fmt__help"
+                ;;
+            zkstack__dev__fmt,prettier)
+                cmd="zkstack__dev__fmt__prettier"
+                ;;
+            zkstack__dev__fmt,rustfmt)
+                cmd="zkstack__dev__fmt__rustfmt"
+                ;;
+            zkstack__dev__fmt__help,contract)
+                cmd="zkstack__dev__fmt__help__contract"
+                ;;
+            zkstack__dev__fmt__help,help)
+                cmd="zkstack__dev__fmt__help__help"
+                ;;
+            zkstack__dev__fmt__help,prettier)
+                cmd="zkstack__dev__fmt__help__prettier"
+                ;;
+            zkstack__dev__fmt__help,rustfmt)
+                cmd="zkstack__dev__fmt__help__rustfmt"
+                ;;
+            zkstack__dev__help,clean)
+                cmd="zkstack__dev__help__clean"
+                ;;
+            zkstack__dev__help,config-writer)
+                cmd="zkstack__dev__help__config__writer"
+                ;;
+            zkstack__dev__help,contracts)
+                cmd="zkstack__dev__help__contracts"
+                ;;
+            zkstack__dev__help,database)
+                cmd="zkstack__dev__help__database"
+                ;;
+            zkstack__dev__help,fmt)
+                cmd="zkstack__dev__help__fmt"
+                ;;
+            zkstack__dev__help,generate-genesis)
+                cmd="zkstack__dev__help__generate__genesis"
+                ;;
+            zkstack__dev__help,help)
+                cmd="zkstack__dev__help__help"
+                ;;
+            zkstack__dev__help,lint)
+                cmd="zkstack__dev__help__lint"
+                ;;
+            zkstack__dev__help,prover)
+                cmd="zkstack__dev__help__prover"
+                ;;
+            zkstack__dev__help,send-transactions)
+                cmd="zkstack__dev__help__send__transactions"
+                ;;
+            zkstack__dev__help,snapshot)
+                cmd="zkstack__dev__help__snapshot"
+                ;;
+            zkstack__dev__help,status)
+                cmd="zkstack__dev__help__status"
+                ;;
+            zkstack__dev__help,test)
+                cmd="zkstack__dev__help__test"
+                ;;
+            zkstack__dev__help__clean,all)
+                cmd="zkstack__dev__help__clean__all"
+                ;;
+            zkstack__dev__help__clean,containers)
+                cmd="zkstack__dev__help__clean__containers"
+                ;;
+            zkstack__dev__help__clean,contracts-cache)
+                cmd="zkstack__dev__help__clean__contracts__cache"
+                ;;
+            zkstack__dev__help__database,check-sqlx-data)
+                cmd="zkstack__dev__help__database__check__sqlx__data"
+                ;;
+            zkstack__dev__help__database,drop)
+                cmd="zkstack__dev__help__database__drop"
+                ;;
+            zkstack__dev__help__database,migrate)
+                cmd="zkstack__dev__help__database__migrate"
+                ;;
+            zkstack__dev__help__database,new-migration)
+                cmd="zkstack__dev__help__database__new__migration"
+                ;;
+            zkstack__dev__help__database,prepare)
+                cmd="zkstack__dev__help__database__prepare"
+                ;;
+            zkstack__dev__help__database,reset)
+                cmd="zkstack__dev__help__database__reset"
+                ;;
+            zkstack__dev__help__database,setup)
+                cmd="zkstack__dev__help__database__setup"
+                ;;
+            zkstack__dev__help__fmt,contract)
+                cmd="zkstack__dev__help__fmt__contract"
+                ;;
+            zkstack__dev__help__fmt,prettier)
+                cmd="zkstack__dev__help__fmt__prettier"
+                ;;
+            zkstack__dev__help__fmt,rustfmt)
+                cmd="zkstack__dev__help__fmt__rustfmt"
+                ;;
+            zkstack__dev__help__prover,info)
+                cmd="zkstack__dev__help__prover__info"
+                ;;
+            zkstack__dev__help__prover,insert-batch)
+                cmd="zkstack__dev__help__prover__insert__batch"
+                ;;
+            zkstack__dev__help__prover,insert-version)
+                cmd="zkstack__dev__help__prover__insert__version"
+                ;;
+            zkstack__dev__help__snapshot,create)
+                cmd="zkstack__dev__help__snapshot__create"
+                ;;
+            zkstack__dev__help__status,ports)
+                cmd="zkstack__dev__help__status__ports"
+                ;;
+            zkstack__dev__help__test,build)
+                cmd="zkstack__dev__help__test__build"
+                ;;
+            zkstack__dev__help__test,fees)
+                cmd="zkstack__dev__help__test__fees"
+                ;;
+            zkstack__dev__help__test,integration)
+                cmd="zkstack__dev__help__test__integration"
+                ;;
+            zkstack__dev__help__test,l1-contracts)
+                cmd="zkstack__dev__help__test__l1__contracts"
+                ;;
+            zkstack__dev__help__test,loadtest)
+                cmd="zkstack__dev__help__test__loadtest"
+                ;;
+            zkstack__dev__help__test,prover)
+                cmd="zkstack__dev__help__test__prover"
+                ;;
+            zkstack__dev__help__test,recovery)
+                cmd="zkstack__dev__help__test__recovery"
+                ;;
+            zkstack__dev__help__test,revert)
+                cmd="zkstack__dev__help__test__revert"
+                ;;
+            zkstack__dev__help__test,rust)
+                cmd="zkstack__dev__help__test__rust"
+                ;;
+            zkstack__dev__help__test,upgrade)
+                cmd="zkstack__dev__help__test__upgrade"
+                ;;
+            zkstack__dev__help__test,wallet)
+                cmd="zkstack__dev__help__test__wallet"
+                ;;
+            zkstack__dev__prover,help)
+                cmd="zkstack__dev__prover__help"
+                ;;
+            zkstack__dev__prover,info)
+                cmd="zkstack__dev__prover__info"
+
;; + zkstack__dev__prover,insert-batch) + cmd="zkstack__dev__prover__insert__batch" + ;; + zkstack__dev__prover,insert-version) + cmd="zkstack__dev__prover__insert__version" + ;; + zkstack__dev__prover__help,help) + cmd="zkstack__dev__prover__help__help" + ;; + zkstack__dev__prover__help,info) + cmd="zkstack__dev__prover__help__info" + ;; + zkstack__dev__prover__help,insert-batch) + cmd="zkstack__dev__prover__help__insert__batch" + ;; + zkstack__dev__prover__help,insert-version) + cmd="zkstack__dev__prover__help__insert__version" + ;; + zkstack__dev__snapshot,create) + cmd="zkstack__dev__snapshot__create" + ;; + zkstack__dev__snapshot,help) + cmd="zkstack__dev__snapshot__help" + ;; + zkstack__dev__snapshot__help,create) + cmd="zkstack__dev__snapshot__help__create" + ;; + zkstack__dev__snapshot__help,help) + cmd="zkstack__dev__snapshot__help__help" + ;; + zkstack__dev__status,help) + cmd="zkstack__dev__status__help" + ;; + zkstack__dev__status,ports) + cmd="zkstack__dev__status__ports" + ;; + zkstack__dev__status__help,help) + cmd="zkstack__dev__status__help__help" + ;; + zkstack__dev__status__help,ports) + cmd="zkstack__dev__status__help__ports" + ;; + zkstack__dev__test,build) + cmd="zkstack__dev__test__build" + ;; + zkstack__dev__test,fees) + cmd="zkstack__dev__test__fees" + ;; + zkstack__dev__test,help) + cmd="zkstack__dev__test__help" + ;; + zkstack__dev__test,integration) + cmd="zkstack__dev__test__integration" + ;; + zkstack__dev__test,l1-contracts) + cmd="zkstack__dev__test__l1__contracts" + ;; + zkstack__dev__test,loadtest) + cmd="zkstack__dev__test__loadtest" + ;; + zkstack__dev__test,prover) + cmd="zkstack__dev__test__prover" + ;; + zkstack__dev__test,recovery) + cmd="zkstack__dev__test__recovery" + ;; + zkstack__dev__test,revert) + cmd="zkstack__dev__test__revert" + ;; + zkstack__dev__test,rust) + cmd="zkstack__dev__test__rust" + ;; + zkstack__dev__test,upgrade) + cmd="zkstack__dev__test__upgrade" + ;; + zkstack__dev__test,wallet) + cmd="zkstack__dev__test__wallet" + ;; + zkstack__dev__test__help,build) + cmd="zkstack__dev__test__help__build" + ;; + zkstack__dev__test__help,fees) + cmd="zkstack__dev__test__help__fees" + ;; + zkstack__dev__test__help,help) + cmd="zkstack__dev__test__help__help" + ;; + zkstack__dev__test__help,integration) + cmd="zkstack__dev__test__help__integration" + ;; + zkstack__dev__test__help,l1-contracts) + cmd="zkstack__dev__test__help__l1__contracts" + ;; + zkstack__dev__test__help,loadtest) + cmd="zkstack__dev__test__help__loadtest" + ;; + zkstack__dev__test__help,prover) + cmd="zkstack__dev__test__help__prover" + ;; + zkstack__dev__test__help,recovery) + cmd="zkstack__dev__test__help__recovery" + ;; + zkstack__dev__test__help,revert) + cmd="zkstack__dev__test__help__revert" + ;; + zkstack__dev__test__help,rust) + cmd="zkstack__dev__test__help__rust" + ;; + zkstack__dev__test__help,upgrade) + cmd="zkstack__dev__test__help__upgrade" + ;; + zkstack__dev__test__help,wallet) + cmd="zkstack__dev__test__help__wallet" + ;; + zkstack__ecosystem,build-transactions) + cmd="zkstack__ecosystem__build__transactions" + ;; + zkstack__ecosystem,change-default-chain) + cmd="zkstack__ecosystem__change__default__chain" + ;; + zkstack__ecosystem,create) + cmd="zkstack__ecosystem__create" + ;; + zkstack__ecosystem,help) + cmd="zkstack__ecosystem__help" + ;; + zkstack__ecosystem,init) + cmd="zkstack__ecosystem__init" + ;; + zkstack__ecosystem,setup-observability) + cmd="zkstack__ecosystem__setup__observability" + ;; + zkstack__ecosystem__help,build-transactions) + 
cmd="zkstack__ecosystem__help__build__transactions" + ;; + zkstack__ecosystem__help,change-default-chain) + cmd="zkstack__ecosystem__help__change__default__chain" + ;; + zkstack__ecosystem__help,create) + cmd="zkstack__ecosystem__help__create" + ;; + zkstack__ecosystem__help,help) + cmd="zkstack__ecosystem__help__help" + ;; + zkstack__ecosystem__help,init) + cmd="zkstack__ecosystem__help__init" + ;; + zkstack__ecosystem__help,setup-observability) + cmd="zkstack__ecosystem__help__setup__observability" + ;; + zkstack__explorer,help) + cmd="zkstack__explorer__help" + ;; + zkstack__explorer,init) + cmd="zkstack__explorer__init" + ;; + zkstack__explorer,run) + cmd="zkstack__explorer__run" + ;; + zkstack__explorer,run-backend) + cmd="zkstack__explorer__run__backend" + ;; + zkstack__explorer__help,help) + cmd="zkstack__explorer__help__help" + ;; + zkstack__explorer__help,init) + cmd="zkstack__explorer__help__init" + ;; + zkstack__explorer__help,run) + cmd="zkstack__explorer__help__run" + ;; + zkstack__explorer__help,run-backend) + cmd="zkstack__explorer__help__run__backend" + ;; + zkstack__external__node,configs) + cmd="zkstack__external__node__configs" + ;; + zkstack__external__node,help) + cmd="zkstack__external__node__help" + ;; + zkstack__external__node,init) + cmd="zkstack__external__node__init" + ;; + zkstack__external__node,run) + cmd="zkstack__external__node__run" + ;; + zkstack__external__node__help,configs) + cmd="zkstack__external__node__help__configs" + ;; + zkstack__external__node__help,help) + cmd="zkstack__external__node__help__help" + ;; + zkstack__external__node__help,init) + cmd="zkstack__external__node__help__init" + ;; + zkstack__external__node__help,run) + cmd="zkstack__external__node__help__run" + ;; + zkstack__help,autocomplete) + cmd="zkstack__help__autocomplete" + ;; + zkstack__help,chain) + cmd="zkstack__help__chain" + ;; + zkstack__help,consensus) + cmd="zkstack__help__consensus" + ;; + zkstack__help,containers) + cmd="zkstack__help__containers" + ;; + zkstack__help,contract-verifier) + cmd="zkstack__help__contract__verifier" + ;; + zkstack__help,dev) + cmd="zkstack__help__dev" + ;; + zkstack__help,ecosystem) + cmd="zkstack__help__ecosystem" + ;; + zkstack__help,explorer) + cmd="zkstack__help__explorer" + ;; + zkstack__help,external-node) + cmd="zkstack__help__external__node" + ;; + zkstack__help,help) + cmd="zkstack__help__help" + ;; + zkstack__help,markdown) + cmd="zkstack__help__markdown" + ;; + zkstack__help,portal) + cmd="zkstack__help__portal" + ;; + zkstack__help,prover) + cmd="zkstack__help__prover" + ;; + zkstack__help,server) + cmd="zkstack__help__server" + ;; + zkstack__help,update) + cmd="zkstack__help__update" + ;; + zkstack__help__chain,accept-chain-ownership) + cmd="zkstack__help__chain__accept__chain__ownership" + ;; + zkstack__help__chain,build-transactions) + cmd="zkstack__help__chain__build__transactions" + ;; + zkstack__help__chain,convert-to-gateway) + cmd="zkstack__help__chain__convert__to__gateway" + ;; + zkstack__help__chain,create) + cmd="zkstack__help__chain__create" + ;; + zkstack__help__chain,deploy-consensus-registry) + cmd="zkstack__help__chain__deploy__consensus__registry" + ;; + zkstack__help__chain,deploy-l2-contracts) + cmd="zkstack__help__chain__deploy__l2__contracts" + ;; + zkstack__help__chain,deploy-multicall3) + cmd="zkstack__help__chain__deploy__multicall3" + ;; + zkstack__help__chain,deploy-paymaster) + cmd="zkstack__help__chain__deploy__paymaster" + ;; + zkstack__help__chain,deploy-upgrader) + 
cmd="zkstack__help__chain__deploy__upgrader" + ;; + zkstack__help__chain,genesis) + cmd="zkstack__help__chain__genesis" + ;; + zkstack__help__chain,init) + cmd="zkstack__help__chain__init" + ;; + zkstack__help__chain,initialize-bridges) + cmd="zkstack__help__chain__initialize__bridges" + ;; + zkstack__help__chain,migrate-from-gateway) + cmd="zkstack__help__chain__migrate__from__gateway" + ;; + zkstack__help__chain,migrate-to-gateway) + cmd="zkstack__help__chain__migrate__to__gateway" + ;; + zkstack__help__chain,register-chain) + cmd="zkstack__help__chain__register__chain" + ;; + zkstack__help__chain,update-token-multiplier-setter) + cmd="zkstack__help__chain__update__token__multiplier__setter" + ;; + zkstack__help__chain__genesis,init-database) + cmd="zkstack__help__chain__genesis__init__database" + ;; + zkstack__help__chain__genesis,server) + cmd="zkstack__help__chain__genesis__server" + ;; + zkstack__help__chain__init,configs) + cmd="zkstack__help__chain__init__configs" + ;; + zkstack__help__consensus,get-attester-committee) + cmd="zkstack__help__consensus__get__attester__committee" + ;; + zkstack__help__consensus,set-attester-committee) + cmd="zkstack__help__consensus__set__attester__committee" + ;; + zkstack__help__contract__verifier,init) + cmd="zkstack__help__contract__verifier__init" + ;; + zkstack__help__contract__verifier,run) + cmd="zkstack__help__contract__verifier__run" + ;; + zkstack__help__dev,clean) + cmd="zkstack__help__dev__clean" + ;; + zkstack__help__dev,config-writer) + cmd="zkstack__help__dev__config__writer" + ;; + zkstack__help__dev,contracts) + cmd="zkstack__help__dev__contracts" + ;; + zkstack__help__dev,database) + cmd="zkstack__help__dev__database" + ;; + zkstack__help__dev,fmt) + cmd="zkstack__help__dev__fmt" + ;; + zkstack__help__dev,generate-genesis) + cmd="zkstack__help__dev__generate__genesis" + ;; + zkstack__help__dev,lint) + cmd="zkstack__help__dev__lint" + ;; + zkstack__help__dev,prover) + cmd="zkstack__help__dev__prover" + ;; + zkstack__help__dev,send-transactions) + cmd="zkstack__help__dev__send__transactions" + ;; + zkstack__help__dev,snapshot) + cmd="zkstack__help__dev__snapshot" + ;; + zkstack__help__dev,status) + cmd="zkstack__help__dev__status" + ;; + zkstack__help__dev,test) + cmd="zkstack__help__dev__test" + ;; + zkstack__help__dev__clean,all) + cmd="zkstack__help__dev__clean__all" + ;; + zkstack__help__dev__clean,containers) + cmd="zkstack__help__dev__clean__containers" + ;; + zkstack__help__dev__clean,contracts-cache) + cmd="zkstack__help__dev__clean__contracts__cache" + ;; + zkstack__help__dev__database,check-sqlx-data) + cmd="zkstack__help__dev__database__check__sqlx__data" + ;; + zkstack__help__dev__database,drop) + cmd="zkstack__help__dev__database__drop" + ;; + zkstack__help__dev__database,migrate) + cmd="zkstack__help__dev__database__migrate" + ;; + zkstack__help__dev__database,new-migration) + cmd="zkstack__help__dev__database__new__migration" + ;; + zkstack__help__dev__database,prepare) + cmd="zkstack__help__dev__database__prepare" + ;; + zkstack__help__dev__database,reset) + cmd="zkstack__help__dev__database__reset" + ;; + zkstack__help__dev__database,setup) + cmd="zkstack__help__dev__database__setup" + ;; + zkstack__help__dev__fmt,contract) + cmd="zkstack__help__dev__fmt__contract" + ;; + zkstack__help__dev__fmt,prettier) + cmd="zkstack__help__dev__fmt__prettier" + ;; + zkstack__help__dev__fmt,rustfmt) + cmd="zkstack__help__dev__fmt__rustfmt" + ;; + zkstack__help__dev__prover,info) + cmd="zkstack__help__dev__prover__info" + ;; + 
zkstack__help__dev__prover,insert-batch) + cmd="zkstack__help__dev__prover__insert__batch" + ;; + zkstack__help__dev__prover,insert-version) + cmd="zkstack__help__dev__prover__insert__version" + ;; + zkstack__help__dev__snapshot,create) + cmd="zkstack__help__dev__snapshot__create" + ;; + zkstack__help__dev__status,ports) + cmd="zkstack__help__dev__status__ports" + ;; + zkstack__help__dev__test,build) + cmd="zkstack__help__dev__test__build" + ;; + zkstack__help__dev__test,fees) + cmd="zkstack__help__dev__test__fees" + ;; + zkstack__help__dev__test,integration) + cmd="zkstack__help__dev__test__integration" + ;; + zkstack__help__dev__test,l1-contracts) + cmd="zkstack__help__dev__test__l1__contracts" + ;; + zkstack__help__dev__test,loadtest) + cmd="zkstack__help__dev__test__loadtest" + ;; + zkstack__help__dev__test,prover) + cmd="zkstack__help__dev__test__prover" + ;; + zkstack__help__dev__test,recovery) + cmd="zkstack__help__dev__test__recovery" + ;; + zkstack__help__dev__test,revert) + cmd="zkstack__help__dev__test__revert" + ;; + zkstack__help__dev__test,rust) + cmd="zkstack__help__dev__test__rust" + ;; + zkstack__help__dev__test,upgrade) + cmd="zkstack__help__dev__test__upgrade" + ;; + zkstack__help__dev__test,wallet) + cmd="zkstack__help__dev__test__wallet" + ;; + zkstack__help__ecosystem,build-transactions) + cmd="zkstack__help__ecosystem__build__transactions" + ;; + zkstack__help__ecosystem,change-default-chain) + cmd="zkstack__help__ecosystem__change__default__chain" + ;; + zkstack__help__ecosystem,create) + cmd="zkstack__help__ecosystem__create" + ;; + zkstack__help__ecosystem,init) + cmd="zkstack__help__ecosystem__init" + ;; + zkstack__help__ecosystem,setup-observability) + cmd="zkstack__help__ecosystem__setup__observability" + ;; + zkstack__help__explorer,init) + cmd="zkstack__help__explorer__init" + ;; + zkstack__help__explorer,run) + cmd="zkstack__help__explorer__run" + ;; + zkstack__help__explorer,run-backend) + cmd="zkstack__help__explorer__run__backend" + ;; + zkstack__help__external__node,configs) + cmd="zkstack__help__external__node__configs" + ;; + zkstack__help__external__node,init) + cmd="zkstack__help__external__node__init" + ;; + zkstack__help__external__node,run) + cmd="zkstack__help__external__node__run" + ;; + zkstack__help__prover,compressor-keys) + cmd="zkstack__help__prover__compressor__keys" + ;; + zkstack__help__prover,init) + cmd="zkstack__help__prover__init" + ;; + zkstack__help__prover,init-bellman-cuda) + cmd="zkstack__help__prover__init__bellman__cuda" + ;; + zkstack__help__prover,run) + cmd="zkstack__help__prover__run" + ;; + zkstack__help__prover,setup-keys) + cmd="zkstack__help__prover__setup__keys" + ;; + zkstack__prover,compressor-keys) + cmd="zkstack__prover__compressor__keys" + ;; + zkstack__prover,help) + cmd="zkstack__prover__help" + ;; + zkstack__prover,init) + cmd="zkstack__prover__init" + ;; + zkstack__prover,init-bellman-cuda) + cmd="zkstack__prover__init__bellman__cuda" + ;; + zkstack__prover,run) + cmd="zkstack__prover__run" + ;; + zkstack__prover,setup-keys) + cmd="zkstack__prover__setup__keys" + ;; + zkstack__prover__help,compressor-keys) + cmd="zkstack__prover__help__compressor__keys" + ;; + zkstack__prover__help,help) + cmd="zkstack__prover__help__help" + ;; + zkstack__prover__help,init) + cmd="zkstack__prover__help__init" + ;; + zkstack__prover__help,init-bellman-cuda) + cmd="zkstack__prover__help__init__bellman__cuda" + ;; + zkstack__prover__help,run) + cmd="zkstack__prover__help__run" + ;; + zkstack__prover__help,setup-keys) + 
cmd="zkstack__prover__help__setup__keys" + ;; + *) + ;; + esac + done + + case "${cmd}" in + zkstack) + opts="-v -h -V --verbose --chain --ignore-prerequisites --help --version autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 1 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__autocomplete) + opts="-o -v -h --generate --out --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --generate) + COMPREPLY=($(compgen -W "bash elvish fish powershell zsh" -- "${cur}")) + return 0 + ;; + --out) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -o) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain) + opts="-v -h --verbose --chain --ignore-prerequisites --help create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter convert-to-gateway migrate-to-gateway migrate-from-gateway help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__accept__chain__ownership) + opts="-a -v -h --verify --verifier --verifier-url --verifier-api-key --resume --zksync --additional-args --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__build__transactions) + opts="-o -a -v -h --out --verify --verifier --verifier-url --verifier-api-key --resume --zksync --additional-args --l1-rpc-url --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --out) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -o) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- 
"${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --l1-rpc-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__convert__to__gateway) + opts="-a -v -h --verify --verifier --verifier-url --verifier-api-key --resume --zksync --additional-args --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__create) + opts="-v -h --chain-name --chain-id --prover-mode --wallet-creation --wallet-path --l1-batch-commit-data-generator-mode --base-token-address --base-token-price-nominator --base-token-price-denominator --set-as-default --legacy-bridge --skip-submodules-checkout --skip-contract-compilation-override --evm-emulator --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain-name) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain-id) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --prover-mode) + COMPREPLY=($(compgen -W "no-proofs gpu" -- "${cur}")) + return 0 + ;; + --wallet-creation) + COMPREPLY=($(compgen -W "localhost random empty in-file" -- "${cur}")) + return 0 + ;; + --wallet-path) + local oldifs + if [ -n "${IFS+x}" ]; then + oldifs="$IFS" + fi + IFS=$'\n' + COMPREPLY=($(compgen -f "${cur}")) + if [ -n "${oldifs+x}" ]; then + IFS="$oldifs" + fi + if [[ "${BASH_VERSINFO[0]}" -ge 4 ]]; then + compopt -o filenames + fi + return 0 + ;; + --l1-batch-commit-data-generator-mode) + COMPREPLY=($(compgen -W "rollup validium" -- "${cur}")) + return 0 + ;; + --base-token-address) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --base-token-price-nominator) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --base-token-price-denominator) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --set-as-default) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --evm-emulator) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__deploy__consensus__registry) + opts="-a -v -h --verify --verifier --verifier-url --verifier-api-key --resume --zksync --additional-args --verbose --chain --ignore-prerequisites 
--help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__deploy__l2__contracts) + opts="-a -v -h --verify --verifier --verifier-url --verifier-api-key --resume --zksync --additional-args --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__deploy__multicall3) + opts="-a -v -h --verify --verifier --verifier-url --verifier-api-key --resume --zksync --additional-args --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__deploy__paymaster) + opts="-a -v -h --verify --verifier --verifier-url --verifier-api-key --resume --zksync --additional-args --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + 
COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__deploy__upgrader) + opts="-a -v -h --verify --verifier --verifier-url --verifier-api-key --resume --zksync --additional-args --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__genesis) + opts="-d -d -v -h --server-db-url --server-db-name --dev --dont-drop --verbose --chain --ignore-prerequisites --help init-database server help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --server-db-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --server-db-name) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__genesis__help) + opts="init-database server help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__genesis__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__genesis__help__init__database) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__genesis__help__server) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__genesis__init__database) + opts="-d -d -v -h --server-db-url --server-db-name --dev --dont-drop --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --server-db-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --server-db-name) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + 
zkstack__chain__genesis__server) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help) + opts="create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter convert-to-gateway migrate-to-gateway migrate-from-gateway help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__accept__chain__ownership) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__build__transactions) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__convert__to__gateway) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__create) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__deploy__consensus__registry) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__deploy__l2__contracts) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__deploy__multicall3) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__deploy__paymaster) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__deploy__upgrader) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() 
+ ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__genesis) + opts="init-database server" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__genesis__init__database) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__genesis__server) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__init) + opts="configs" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__init__configs) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__initialize__bridges) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__migrate__from__gateway) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__migrate__to__gateway) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__register__chain) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__help__update__token__multiplier__setter) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__init) + opts="-a -d -v -h --verify --verifier --verifier-url --verifier-api-key --resume --zksync --additional-args --server-db-url --server-db-name --dont-drop --deploy-paymaster --l1-rpc-url 
--no-port-reallocation --dev --skip-submodules-checkout --verbose --chain --ignore-prerequisites --help configs help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --server-db-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --server-db-name) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --deploy-paymaster) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --l1-rpc-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__init__configs) + opts="-d -d -v -h --server-db-url --server-db-name --dev --dont-drop --l1-rpc-url --no-port-reallocation --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --server-db-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --server-db-name) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --l1-rpc-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__init__help) + opts="configs help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__init__help__configs) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__init__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__initialize__bridges) + opts="-a -v -h --verify --verifier --verifier-url --verifier-api-key --resume --zksync --additional-args --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + 
COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__migrate__from__gateway) + opts="-a -v -h --verify --verifier --verifier-url --verifier-api-key --resume --zksync --additional-args --gateway-chain-name --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --gateway-chain-name) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__migrate__to__gateway) + opts="-a -v -h --verify --verifier --verifier-url --verifier-api-key --resume --zksync --additional-args --gateway-chain-name --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --gateway-chain-name) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__register__chain) + opts="-a -v -h --verify --verifier --verifier-url --verifier-api-key --resume --zksync --additional-args --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__chain__update__token__multiplier__setter) + opts="-a -v -h --verify --verifier --verifier-url --verifier-api-key --resume --zksync --additional-args --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || 
${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__consensus) + opts="-v -h --verbose --chain --ignore-prerequisites --help set-attester-committee get-attester-committee help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__consensus__get__attester__committee) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__consensus__help) + opts="set-attester-committee get-attester-committee help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__consensus__help__get__attester__committee) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__consensus__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__consensus__help__set__attester__committee) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__consensus__set__attester__committee) + opts="-v -h --from-genesis --from-file --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --from-file) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__containers) + opts="-o -v -h --observability --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- 
"${cur}") ) + return 0 + fi + case "${prev}" in + --observability) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -o) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__contract__verifier) + opts="-v -h --verbose --chain --ignore-prerequisites --help run init help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__contract__verifier__help) + opts="run init help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__contract__verifier__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__contract__verifier__help__init) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__contract__verifier__help__run) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__contract__verifier__init) + opts="-v -h --zksolc-version --zkvyper-version --solc-version --era-vm-solc-version --vyper-version --only --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --zksolc-version) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --zkvyper-version) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --solc-version) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --era-vm-solc-version) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --vyper-version) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__contract__verifier__run) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev) + opts="-v -h --verbose --chain --ignore-prerequisites --help database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + 
return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__clean) + opts="-v -h --verbose --chain --ignore-prerequisites --help all containers contracts-cache help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__clean__all) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__clean__containers) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__clean__contracts__cache) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__clean__help) + opts="all containers contracts-cache help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__clean__help__all) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__clean__help__containers) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__clean__help__contracts__cache) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__clean__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__config__writer) + opts="-p -v -h --path --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case 
"${prev}" in + --path) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -p) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__contracts) + opts="-v -h --l1-contracts --l1-da-contracts --l2-contracts --system-contracts --test-contracts --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --l1-contracts) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --l1-da-contracts) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --l2-contracts) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --system-contracts) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --test-contracts) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database) + opts="-v -h --verbose --chain --ignore-prerequisites --help check-sqlx-data drop migrate new-migration prepare reset setup help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__check__sqlx__data) + opts="-p -c -v -h --prover --prover-url --core --core-url --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --prover) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -p) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --prover-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --core) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -c) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --core-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__drop) + opts="-p -c -v -h --prover --prover-url --core --core-url --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --prover) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -p) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --prover-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --core) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -c) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --core-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__help) + opts="check-sqlx-data drop migrate 
new-migration prepare reset setup help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__help__check__sqlx__data) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__help__drop) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__help__migrate) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__help__new__migration) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__help__prepare) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__help__reset) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__help__setup) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__migrate) + opts="-p -c -v -h --prover --prover-url --core --core-url --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --prover) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -p) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --prover-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --core) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -c) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --core-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + 
zkstack__dev__database__new__migration) + opts="-v -h --database --name --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --database) + COMPREPLY=($(compgen -W "prover core" -- "${cur}")) + return 0 + ;; + --name) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__prepare) + opts="-p -c -v -h --prover --prover-url --core --core-url --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --prover) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -p) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --prover-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --core) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -c) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --core-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__reset) + opts="-p -c -v -h --prover --prover-url --core --core-url --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --prover) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -p) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --prover-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --core) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -c) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --core-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__database__setup) + opts="-p -c -v -h --prover --prover-url --core --core-url --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --prover) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -p) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --prover-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --core) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -c) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --core-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__fmt) + opts="-c -v -h --check --verbose --chain --ignore-prerequisites --help rustfmt contract prettier help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + 
COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__fmt__contract) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__fmt__help) + opts="rustfmt contract prettier help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__fmt__help__contract) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__fmt__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__fmt__help__prettier) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__fmt__help__rustfmt) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__fmt__prettier) + opts="-t -v -h --targets --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --targets) + COMPREPLY=($(compgen -W "md sol js ts rs contracts autocompletion" -- "${cur}")) + return 0 + ;; + -t) + COMPREPLY=($(compgen -W "md sol js ts rs contracts autocompletion" -- "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__fmt__rustfmt) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__generate__genesis) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help) + opts="database test clean snapshot lint fmt prover contracts config-writer 
send-transactions status generate-genesis help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__clean) + opts="all containers contracts-cache" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__clean__all) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__clean__containers) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__clean__contracts__cache) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__config__writer) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__contracts) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__database) + opts="check-sqlx-data drop migrate new-migration prepare reset setup" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__database__check__sqlx__data) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__database__drop) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__database__migrate) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__database__new__migration) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" 
-- "${cur}") ) + return 0 + ;; + zkstack__dev__help__database__prepare) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__database__reset) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__database__setup) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__fmt) + opts="rustfmt contract prettier" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__fmt__contract) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__fmt__prettier) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__fmt__rustfmt) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__generate__genesis) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__lint) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__prover) + opts="info insert-batch insert-version" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__prover__info) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + 
zkstack__dev__help__prover__insert__batch) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__prover__insert__version) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__send__transactions) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__snapshot) + opts="create" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__snapshot__create) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__status) + opts="ports" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__status__ports) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__test) + opts="integration fees revert recovery upgrade build rust l1-contracts prover wallet loadtest" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__test__build) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__test__fees) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__test__integration) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__test__l1__contracts) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- 
"${cur}") ) + return 0 + ;; + zkstack__dev__help__test__loadtest) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__test__prover) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__test__recovery) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__test__revert) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__test__rust) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__test__upgrade) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__help__test__wallet) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__lint) + opts="-c -t -v -h --check --targets --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --targets) + COMPREPLY=($(compgen -W "md sol js ts rs contracts autocompletion" -- "${cur}")) + return 0 + ;; + -t) + COMPREPLY=($(compgen -W "md sol js ts rs contracts autocompletion" -- "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__prover) + opts="-v -h --verbose --chain --ignore-prerequisites --help info insert-batch insert-version help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__prover__help) + opts="info insert-batch insert-version help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__prover__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen 
-W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__prover__help__info) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__prover__help__insert__batch) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__prover__help__insert__version) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__prover__info) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__prover__insert__batch) + opts="-v -h --number --default --version --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --number) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --version) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__prover__insert__version) + opts="-v -h --default --version --snark-wrapper --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --version) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --snark-wrapper) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__send__transactions) + opts="-v -h --file --private-key --l1-rpc-url --confirmations --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --file) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --private-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --l1-rpc-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --confirmations) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__snapshot) + opts="-v -h --verbose --chain --ignore-prerequisites --help create help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + 
case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__snapshot__create) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__snapshot__help) + opts="create help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__snapshot__help__create) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__snapshot__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__status) + opts="-u -v -h --url --verbose --chain --ignore-prerequisites --help ports help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -u) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__status__help) + opts="ports help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__status__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__status__help__ports) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__status__ports) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test) + opts="-v -h --verbose --chain --ignore-prerequisites --help integration fees revert recovery upgrade build rust l1-contracts prover wallet loadtest help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + 
return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__build) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__fees) + opts="-n -v -h --no-deps --no-kill --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__help) + opts="integration fees revert recovery upgrade build rust l1-contracts prover wallet loadtest help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__help__build) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__help__fees) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__help__integration) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__help__l1__contracts) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__help__loadtest) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__help__prover) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__help__recovery) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- 
"${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__help__revert) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__help__rust) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__help__upgrade) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__help__wallet) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__integration) + opts="-e -n -t -v -h --external-node --no-deps --test-pattern --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --test-pattern) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -t) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__l1__contracts) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__loadtest) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__prover) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__recovery) + opts="-s -n -v -h --snapshot --no-deps --no-kill --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + 
zkstack__dev__test__revert) + opts="-e -n -v -h --enable-consensus --external-node --no-deps --no-kill --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__rust) + opts="-v -h --options --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --options) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__upgrade) + opts="-n -v -h --no-deps --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__dev__test__wallet) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__ecosystem) + opts="-v -h --verbose --chain --ignore-prerequisites --help create build-transactions init change-default-chain setup-observability help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__ecosystem__build__transactions) + opts="-o -a -v -h --sender --l1-rpc-url --out --verify --verifier --verifier-url --verifier-api-key --resume --zksync --additional-args --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --sender) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --l1-rpc-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --out) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -o) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__ecosystem__change__default__chain) + opts="-v -h --verbose --chain 
--ignore-prerequisites --help [NAME]" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__ecosystem__create) + opts="-v -h --ecosystem-name --l1-network --link-to-code --chain-name --chain-id --prover-mode --wallet-creation --wallet-path --l1-batch-commit-data-generator-mode --base-token-address --base-token-price-nominator --base-token-price-denominator --set-as-default --legacy-bridge --skip-submodules-checkout --skip-contract-compilation-override --evm-emulator --start-containers --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --ecosystem-name) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --l1-network) + COMPREPLY=($(compgen -W "localhost sepolia holesky mainnet" -- "${cur}")) + return 0 + ;; + --link-to-code) + COMPREPLY=() + if [[ "${BASH_VERSINFO[0]}" -ge 4 ]]; then + compopt -o plusdirs + fi + return 0 + ;; + --chain-name) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain-id) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --prover-mode) + COMPREPLY=($(compgen -W "no-proofs gpu" -- "${cur}")) + return 0 + ;; + --wallet-creation) + COMPREPLY=($(compgen -W "localhost random empty in-file" -- "${cur}")) + return 0 + ;; + --wallet-path) + local oldifs + if [ -n "${IFS+x}" ]; then + oldifs="$IFS" + fi + IFS=$'\n' + COMPREPLY=($(compgen -f "${cur}")) + if [ -n "${oldifs+x}" ]; then + IFS="$oldifs" + fi + if [[ "${BASH_VERSINFO[0]}" -ge 4 ]]; then + compopt -o filenames + fi + return 0 + ;; + --l1-batch-commit-data-generator-mode) + COMPREPLY=($(compgen -W "rollup validium" -- "${cur}")) + return 0 + ;; + --base-token-address) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --base-token-price-nominator) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --base-token-price-denominator) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --set-as-default) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --evm-emulator) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --start-containers) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__ecosystem__help) + opts="create build-transactions init change-default-chain setup-observability help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__ecosystem__help__build__transactions) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__ecosystem__help__change__default__chain) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + 
COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__ecosystem__help__create) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__ecosystem__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__ecosystem__help__init) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__ecosystem__help__setup__observability) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__ecosystem__init) + opts="-a -d -o -v -h --deploy-erc20 --deploy-ecosystem --ecosystem-contracts-path --l1-rpc-url --verify --verifier --verifier-url --verifier-api-key --resume --zksync --additional-args --deploy-paymaster --server-db-url --server-db-name --dont-drop --ecosystem-only --dev --observability --no-port-reallocation --skip-submodules-checkout --skip-contract-compilation-override --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --deploy-erc20) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --deploy-ecosystem) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --ecosystem-contracts-path) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --l1-rpc-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verify) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --verifier) + COMPREPLY=($(compgen -W "etherscan sourcify blockscout oklink" -- "${cur}")) + return 0 + ;; + --verifier-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --verifier-api-key) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --deploy-paymaster) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --server-db-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --server-db-name) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --observability) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -o) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__ecosystem__setup__observability) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen 
-W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__explorer) + opts="-v -h --verbose --chain --ignore-prerequisites --help init run-backend run help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__explorer__help) + opts="init run-backend run help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__explorer__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__explorer__help__init) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__explorer__help__run) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__explorer__help__run__backend) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__explorer__init) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__explorer__run) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__explorer__run__backend) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__external__node) + opts="-v -h --verbose --chain --ignore-prerequisites --help configs init run help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + 
zkstack__external__node__configs) + opts="-u -v -h --db-url --db-name --l1-rpc-url --use-default --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --db-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --db-name) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --l1-rpc-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__external__node__help) + opts="configs init run help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__external__node__help__configs) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__external__node__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__external__node__help__init) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__external__node__help__run) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__external__node__init) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__external__node__run) + opts="-a -v -h --reinit --components --enable-consensus --additional-args --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --components) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --enable-consensus) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help) + opts="autocomplete ecosystem chain dev prover server external-node containers contract-verifier portal explorer consensus update markdown help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + 
return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__autocomplete) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain) + opts="create build-transactions init genesis register-chain deploy-l2-contracts accept-chain-ownership initialize-bridges deploy-consensus-registry deploy-multicall3 deploy-upgrader deploy-paymaster update-token-multiplier-setter convert-to-gateway migrate-to-gateway migrate-from-gateway" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__accept__chain__ownership) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__build__transactions) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__convert__to__gateway) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__create) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__deploy__consensus__registry) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__deploy__l2__contracts) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__deploy__multicall3) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__deploy__paymaster) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__deploy__upgrader) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + 
COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__genesis) + opts="init-database server" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__genesis__init__database) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__genesis__server) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__init) + opts="configs" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__init__configs) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__initialize__bridges) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__migrate__from__gateway) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__migrate__to__gateway) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__register__chain) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__chain__update__token__multiplier__setter) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__consensus) + opts="set-attester-committee get-attester-committee" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__consensus__get__attester__committee) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- 
"${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__consensus__set__attester__committee) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__containers) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__contract__verifier) + opts="run init" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__contract__verifier__init) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__contract__verifier__run) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev) + opts="database test clean snapshot lint fmt prover contracts config-writer send-transactions status generate-genesis" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__clean) + opts="all containers contracts-cache" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__clean__all) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__clean__containers) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__clean__contracts__cache) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__config__writer) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__contracts) + opts="" + if [[ ${cur} == -* || 
${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__database) + opts="check-sqlx-data drop migrate new-migration prepare reset setup" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__database__check__sqlx__data) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__database__drop) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__database__migrate) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__database__new__migration) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__database__prepare) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__database__reset) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__database__setup) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__fmt) + opts="rustfmt contract prettier" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__fmt__contract) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__fmt__prettier) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__fmt__rustfmt) + opts="" + 
if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__generate__genesis) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__lint) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__prover) + opts="info insert-batch insert-version" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__prover__info) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__prover__insert__batch) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__prover__insert__version) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__send__transactions) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__snapshot) + opts="create" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__snapshot__create) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__status) + opts="ports" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__status__ports) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__test) + opts="integration fees revert recovery upgrade build rust 
l1-contracts prover wallet loadtest" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__test__build) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__test__fees) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__test__integration) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__test__l1__contracts) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__test__loadtest) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__test__prover) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__test__recovery) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__test__revert) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__test__rust) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__test__upgrade) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__dev__test__wallet) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 5 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__ecosystem) + opts="create build-transactions init change-default-chain 
setup-observability" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__ecosystem__build__transactions) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__ecosystem__change__default__chain) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__ecosystem__create) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__ecosystem__init) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__ecosystem__setup__observability) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__explorer) + opts="init run-backend run" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__explorer__init) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__explorer__run) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__explorer__run__backend) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__external__node) + opts="configs init run" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__external__node__configs) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__external__node__init) + opts="" + if 
[[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__external__node__run) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__markdown) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__portal) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__prover) + opts="init setup-keys run init-bellman-cuda compressor-keys" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__prover__compressor__keys) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__prover__init) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__prover__init__bellman__cuda) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__prover__run) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__prover__setup__keys) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__server) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__help__update) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) 
+ return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__markdown) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__portal) + opts="-v -h --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__prover) + opts="-v -h --verbose --chain --ignore-prerequisites --help init setup-keys run init-bellman-cuda compressor-keys help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__prover__compressor__keys) + opts="-v -h --path --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --path) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__prover__help) + opts="init setup-keys run init-bellman-cuda compressor-keys help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__prover__help__compressor__keys) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__prover__help__help) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__prover__help__init) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__prover__help__init__bellman__cuda) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__prover__help__run) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + 
COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__prover__help__setup__keys) + opts="" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 4 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__prover__init) + opts="-u -d -v -h --dev --proof-store-dir --bucket-base-url --credentials-file --bucket-name --location --project-id --shall-save-to-public-bucket --public-store-dir --public-bucket-base-url --public-credentials-file --public-bucket-name --public-location --public-project-id --clone --bellman-cuda-dir --bellman-cuda --setup-compressor-key --path --region --mode --setup-keys --setup-database --prover-db-url --prover-db-name --use-default --dont-drop --cloud-type --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --proof-store-dir) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --bucket-base-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --credentials-file) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --bucket-name) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --location) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --project-id) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --shall-save-to-public-bucket) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --public-store-dir) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --public-bucket-base-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --public-credentials-file) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --public-bucket-name) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --public-location) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --public-project-id) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --bellman-cuda-dir) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --bellman-cuda) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --setup-compressor-key) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --path) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --region) + COMPREPLY=($(compgen -W "us europe asia" -- "${cur}")) + return 0 + ;; + --mode) + COMPREPLY=($(compgen -W "download generate" -- "${cur}")) + return 0 + ;; + --setup-keys) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --setup-database) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --prover-db-url) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --prover-db-name) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --use-default) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -u) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --dont-drop) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + -d) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --cloud-type) + COMPREPLY=($(compgen -W "gcp local" -- "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__prover__init__bellman__cuda) + opts="-v -h --clone --bellman-cuda-dir --verbose --chain --ignore-prerequisites --help" + if [[ 
${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --bellman-cuda-dir) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__prover__run) + opts="-v -h --component --round --threads --max-allocation --witness-vector-generator-count --docker --tag --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --component) + COMPREPLY=($(compgen -W "gateway witness-generator witness-vector-generator prover circuit-prover compressor prover-job-monitor" -- "${cur}")) + return 0 + ;; + --round) + COMPREPLY=($(compgen -W "all-rounds basic-circuits leaf-aggregation node-aggregation recursion-tip scheduler" -- "${cur}")) + return 0 + ;; + --threads) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --max-allocation) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --witness-vector-generator-count) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --docker) + COMPREPLY=($(compgen -W "true false" -- "${cur}")) + return 0 + ;; + --tag) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__prover__setup__keys) + opts="-v -h --region --mode --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 3 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --region) + COMPREPLY=($(compgen -W "us europe asia" -- "${cur}")) + return 0 + ;; + --mode) + COMPREPLY=($(compgen -W "download generate" -- "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__server) + opts="-a -v -h --components --genesis --additional-args --build --uring --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --components) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --additional-args) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + -a) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + zkstack__update) + opts="-c -v -h --only-config --verbose --chain --ignore-prerequisites --help" + if [[ ${cur} == -* || ${COMP_CWORD} -eq 2 ]] ; then + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + fi + case "${prev}" in + --chain) + COMPREPLY=($(compgen -f "${cur}")) + return 0 + ;; + *) + COMPREPLY=() + ;; + esac + COMPREPLY=( $(compgen -W "${opts}" -- "${cur}") ) + return 0 + ;; + esac +} + +if [[ "${BASH_VERSINFO[0]}" -eq 4 && "${BASH_VERSINFO[1]}" -ge 4 || "${BASH_VERSINFO[0]}" -gt 4 ]]; then + complete -F _zkstack -o nosort -o bashdefault -o default zkstack +else + complete -F _zkstack -o bashdefault -o default zkstack +fi
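The `_zkstack` completion function above is generated output rather than hand-written shell: it is emitted by the new `zkstack autocomplete` command (see `zkstack_cli/crates/zkstack/src/commands/autocomplete.rs` later in this diff), which delegates to clap_complete. A minimal, self-contained sketch of the same mechanism, assuming the `clap` (with the derive feature) and `clap_complete` crates and a toy CLI standing in for the real `ZkStack` command tree:

use clap::{CommandFactory, Parser};
use clap_complete::{generate, Shell};

/// Toy stand-in for the real command tree, so the generator has
/// something to emit completions for.
#[derive(Parser)]
#[command(name = "zkstack")]
struct Cli {
    #[arg(long)]
    chain: Option<String>,
}

fn main() {
    let mut cmd = Cli::command();
    let bin_name = cmd.get_name().to_string();
    // Writes a bash script defining `_zkstack` plus the trailing
    // `complete -F _zkstack ... zkstack` registration, as above.
    generate(Shell::Bash, &mut cmd, bin_name, &mut std::io::stdout());
}

The version gate at the end of the generated script exists because `complete -o nosort` requires bash 4.4 or newer; older shells fall back to the sorted default.

diff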
--git a/zk_toolbox/crates/zk_inception/src/accept_ownership.rs b/zkstack_cli/crates/zkstack/src/accept_ownership.rs similarity index 94% rename from zk_toolbox/crates/zk_inception/src/accept_ownership.rs rename to zkstack_cli/crates/zkstack/src/accept_ownership.rs index a41246e1de0..e1655921345 100644 --- a/zk_toolbox/crates/zk_inception/src/accept_ownership.rs +++ b/zkstack_cli/crates/zkstack/src/accept_ownership.rs @@ -1,13 +1,10 @@ use common::{ forge::{Forge, ForgeScript, ForgeScriptArgs}, spinner::Spinner, + wallets::Wallet, }; use config::{forge_interface::script_params::ACCEPT_GOVERNANCE_SCRIPT_PARAMS, EcosystemConfig}; -use ethers::{ - abi::parse_abi, - contract::BaseContract, - types::{Address, H256}, -}; +use ethers::{abi::parse_abi, contract::BaseContract, types::Address}; use lazy_static::lazy_static; use xshell::Shell; @@ -31,7 +28,7 @@ pub async fn accept_admin( shell: &Shell, ecosystem_config: &EcosystemConfig, admin: Address, - governor: Option<H256>, + governor: &Wallet, target_address: Address, forge_args: &ForgeScriptArgs, l1_rpc_url: String, @@ -62,7 +59,7 @@ pub async fn accept_owner( shell: &Shell, ecosystem_config: &EcosystemConfig, governor_contract: Address, - governor: Option<H256>, + governor: &Wallet, target_address: Address, forge_args: &ForgeScriptArgs, l1_rpc_url: String, @@ -92,7 +89,7 @@ pub async fn set_da_validator_pair( shell: &Shell, ecosystem_config: &EcosystemConfig, chain_admin_addr: Address, - governor: Option<H256>, + governor: &Wallet, diamond_proxy_address: Address, l1_da_validator_address: Address, l2_da_validator_address: Address, @@ -129,10 +126,10 @@ async fn accept_ownership( shell: &Shell, - governor: Option<H256>, + governor: &Wallet, mut forge: ForgeScript, ) -> anyhow::Result<()> { - forge = fill_forge_private_key(forge, governor)?; + forge = fill_forge_private_key(forge, Some(governor))?; check_the_balance(&forge).await?; let spinner = Spinner::new(MSG_ACCEPTING_GOVERNANCE_SPINNER); forge.run(shell)?; diff --git a/zkstack_cli/crates/zkstack/src/commands/args/autocomplete.rs b/zkstack_cli/crates/zkstack/src/commands/args/autocomplete.rs new file mode 100644 index 00000000000..8e44d644f39 --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/args/autocomplete.rs @@ -0,0 +1,13 @@ +use std::path::PathBuf; + +use clap::Parser; + +#[derive(Debug, Parser)] +pub struct AutocompleteArgs { + /// The shell to generate the autocomplete script for + #[arg(long = "generate", value_enum)] + pub generator: clap_complete::Shell, + /// The out directory to write the autocomplete script to + #[arg(short, long, default_value = "./")] + pub out: PathBuf, +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/args/containers.rs b/zkstack_cli/crates/zkstack/src/commands/args/containers.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/args/containers.rs rename to zkstack_cli/crates/zkstack/src/commands/args/containers.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/args/mod.rs b/zkstack_cli/crates/zkstack/src/commands/args/mod.rs similarity index 71% rename from zk_toolbox/crates/zk_inception/src/commands/args/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/args/mod.rs index d18b05c910e..5fa83aadf51 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/args/mod.rs +++ b/zkstack_cli/crates/zkstack/src/commands/args/mod.rs @@ -1,7 +1,9 @@ +pub use autocomplete::*; pub use containers::*; pub use run_server::*; pub use update::*; +mod autocomplete; mod containers; mod run_server; mod
update; diff --git a/zk_toolbox/crates/zk_inception/src/commands/args/run_server.rs b/zkstack_cli/crates/zkstack/src/commands/args/run_server.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/args/run_server.rs rename to zkstack_cli/crates/zkstack/src/commands/args/run_server.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/args/update.rs b/zkstack_cli/crates/zkstack/src/commands/args/update.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/args/update.rs rename to zkstack_cli/crates/zkstack/src/commands/args/update.rs diff --git a/zkstack_cli/crates/zkstack/src/commands/autocomplete.rs b/zkstack_cli/crates/zkstack/src/commands/autocomplete.rs new file mode 100644 index 00000000000..0f2105cd5ef --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/autocomplete.rs @@ -0,0 +1,52 @@ +use std::{ + fs::File, + io::{BufWriter, Write}, +}; + +use anyhow::Context; +use clap::CommandFactory; +use clap_complete::{generate, Generator}; +use common::logger; + +use super::args::AutocompleteArgs; +use crate::{ + messages::{msg_generate_autocomplete_file, MSG_OUTRO_AUTOCOMPLETE_GENERATION}, + ZkStack, +}; + +pub fn run(args: AutocompleteArgs) -> anyhow::Result<()> { + let filename = autocomplete_file_name(&args.generator); + let path = args.out.join(filename); + + logger::info(msg_generate_autocomplete_file( + path.to_str() + .context("the output file path is an invalid UTF8 string")?, + )); + + let file = File::create(path).context("Failed to create file")?; + let mut writer = BufWriter::new(file); + + generate_completions(args.generator, &mut writer)?; + + logger::outro(MSG_OUTRO_AUTOCOMPLETE_GENERATION); + + Ok(()) +} + +pub fn generate_completions<G: Generator>(gen: G, buf: &mut dyn Write) -> anyhow::Result<()> { + let mut cmd = ZkStack::command(); + let cmd_name = cmd.get_name().to_string(); + + generate(gen, &mut cmd, cmd_name, buf); + + Ok(()) +} + +pub fn autocomplete_file_name(shell: &clap_complete::Shell) -> &'static str { + match shell { + clap_complete::Shell::Bash => "zkstack.sh", + clap_complete::Shell::Fish => "zkstack.fish", + clap_complete::Shell::Zsh => "_zkstack.zsh", + _ => todo!(), + } +} diff --git a/zkstack_cli/crates/zkstack/src/commands/chain/accept_chain_ownership.rs b/zkstack_cli/crates/zkstack/src/commands/chain/accept_chain_ownership.rs new file mode 100644 index 00000000000..cf3e2981b3c --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/chain/accept_chain_ownership.rs @@ -0,0 +1,42 @@ +use anyhow::Context; +use common::{forge::ForgeScriptArgs, logger, spinner::Spinner}; +use config::EcosystemConfig; +use xshell::Shell; + +use crate::{ + accept_ownership::accept_admin, + messages::{ + MSG_ACCEPTING_ADMIN_SPINNER, MSG_CHAIN_NOT_INITIALIZED, MSG_CHAIN_OWNERSHIP_TRANSFERRED, + MSG_L1_SECRETS_MUST_BE_PRESENTED, + }, +}; + +pub async fn run(args: ForgeScriptArgs, shell: &Shell) -> anyhow::Result<()> { + let ecosystem_config = EcosystemConfig::from_file(shell)?; + let chain_config = ecosystem_config + .load_current_chain() + .context(MSG_CHAIN_NOT_INITIALIZED)?; + let contracts = chain_config.get_contracts_config()?; + let secrets = chain_config.get_secrets_config()?; + let l1_rpc_url = secrets + .l1 + .context(MSG_L1_SECRETS_MUST_BE_PRESENTED)?
.l1_rpc_url + .expose_str() + .to_string(); + + let spinner = Spinner::new(MSG_ACCEPTING_ADMIN_SPINNER); + accept_admin( + shell, + &ecosystem_config, + contracts.l1.chain_admin_addr, + &chain_config.get_wallets_config()?.governor, + contracts.l1.diamond_proxy_addr, + &args, + l1_rpc_url.clone(), + ) + .await?; + spinner.finish(); + logger::success(MSG_CHAIN_OWNERSHIP_TRANSFERRED); + Ok(()) +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/args/build_transactions.rs b/zkstack_cli/crates/zkstack/src/commands/chain/args/build_transactions.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/chain/args/build_transactions.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/args/build_transactions.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/args/create.rs b/zkstack_cli/crates/zkstack/src/commands/chain/args/create.rs similarity index 87% rename from zk_toolbox/crates/zk_inception/src/commands/chain/args/create.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/args/create.rs index 255fe05de59..b62984ce9e6 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/args/create.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/args/create.rs @@ -1,14 +1,22 @@ -use std::{path::PathBuf, str::FromStr}; +use std::{ + path::{Path, PathBuf}, + str::FromStr, +}; use anyhow::{bail, Context}; -use clap::{Parser, ValueEnum}; +use clap::{Parser, ValueEnum, ValueHint}; use common::{Prompt, PromptConfirm, PromptSelect}; -use config::forge_interface::deploy_ecosystem::output::Erc20Token; +use config::{ + forge_interface::deploy_ecosystem::output::Erc20Token, traits::ReadConfigWithBasePath, + EcosystemConfig, +}; use serde::{Deserialize, Serialize}; use slugify_rs::slugify; use strum::{Display, EnumIter, IntoEnumIterator}; use types::{BaseToken, L1BatchCommitmentMode, L1Network, ProverMode, WalletCreation}; +use xshell::Shell; use zksync_basic_types::H160; +use zksync_config::GenesisConfig; use crate::{ defaults::L2_CHAIN_ID, @@ -18,6 +26,7 @@ use crate::{ MSG_BASE_TOKEN_PRICE_DENOMINATOR_PROMPT, MSG_BASE_TOKEN_PRICE_NOMINATOR_HELP, MSG_BASE_TOKEN_PRICE_NOMINATOR_PROMPT, MSG_BASE_TOKEN_SELECTION_PROMPT, MSG_CHAIN_ID_HELP, MSG_CHAIN_ID_PROMPT, MSG_CHAIN_ID_VALIDATOR_ERR, MSG_CHAIN_NAME_PROMPT, + MSG_EVM_EMULATOR_HASH_MISSING_ERR, MSG_EVM_EMULATOR_HELP, MSG_EVM_EMULATOR_PROMPT, MSG_L1_BATCH_COMMIT_DATA_GENERATOR_MODE_PROMPT, MSG_L1_COMMIT_DATA_GENERATOR_MODE_HELP, MSG_NUMBER_VALIDATOR_GREATHER_THAN_ZERO_ERR, MSG_NUMBER_VALIDATOR_NOT_ZERO_ERR, MSG_PROVER_MODE_HELP, MSG_PROVER_VERSION_PROMPT, MSG_SET_AS_DEFAULT_HELP, @@ -53,7 +62,7 @@ pub struct ChainCreateArgs { prover_mode: Option<ProverMode>, #[clap(long, help = MSG_WALLET_CREATION_HELP, value_enum)] wallet_creation: Option<WalletCreation>, - #[clap(long, help = MSG_WALLET_PATH_HELP)] + #[clap(long, help = MSG_WALLET_PATH_HELP, value_hint = ValueHint::FilePath)] wallet_path: Option<PathBuf>, #[clap(long, help = MSG_L1_COMMIT_DATA_GENERATOR_MODE_HELP)] l1_batch_commit_data_generator_mode: Option<L1BatchCommitmentMode>, @@ -79,14 +88,18 @@ pub struct ChainCreateArgs { default_missing_value = "true" )] pub skip_contract_compilation_override: bool, + #[arg(long, help = MSG_EVM_EMULATOR_HELP, default_missing_value = "true", num_args = 0..=1)] + evm_emulator: Option<bool>, } impl ChainCreateArgs { pub fn fill_values_with_prompt( self, + shell: &Shell, number_of_chains: u32, l1_network: &L1Network, possible_erc20: Vec<Erc20Token>, + link_to_code: &Path, ) -> anyhow::Result<ChainCreateArgsFinal> { let mut chain_name = self .chain_name @@ -139,7 +152,7 @@ impl ChainCreateArgs {
.ask() }); - let wallet_path: Option<PathBuf> = if self.wallet_creation == Some(WalletCreation::InFile) { + let wallet_path: Option<PathBuf> = if wallet_creation == WalletCreation::InFile { Some(self.wallet_path.unwrap_or_else(|| { Prompt::new(MSG_WALLET_PATH_PROMPT) .validate_with(|val: &String| { @@ -223,6 +236,25 @@ impl ChainCreateArgs { } }; + let default_genesis_config = GenesisConfig::read_with_base_path( + shell, + EcosystemConfig::default_configs_path(link_to_code), + ) + .context("failed reading genesis config")?; + let has_evm_emulation_support = default_genesis_config.evm_emulator_hash.is_some(); + let evm_emulator = self.evm_emulator.unwrap_or_else(|| { + if !has_evm_emulation_support { + false + } else { + PromptConfirm::new(MSG_EVM_EMULATOR_PROMPT) + .default(false) + .ask() + } + }); + if !has_evm_emulation_support && evm_emulator { + bail!(MSG_EVM_EMULATOR_HASH_MISSING_ERR); + } + let set_as_default = self.set_as_default.unwrap_or_else(|| { PromptConfirm::new(MSG_SET_AS_DEFAULT_PROMPT) .default(true) @@ -241,6 +273,7 @@ impl ChainCreateArgs { legacy_bridge: self.legacy_bridge, skip_submodules_checkout: self.skip_submodules_checkout, skip_contract_compilation_override: self.skip_contract_compilation_override, + evm_emulator, }) } } @@ -258,6 +291,7 @@ pub struct ChainCreateArgsFinal { pub legacy_bridge: bool, pub skip_submodules_checkout: bool, pub skip_contract_compilation_override: bool, + pub evm_emulator: bool, } #[derive(Debug, Clone, EnumIter, Display, PartialEq, Eq)] diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/args/genesis.rs b/zkstack_cli/crates/zkstack/src/commands/chain/args/genesis.rs similarity index 55% rename from zk_toolbox/crates/zk_inception/src/commands/chain/args/genesis.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/args/genesis.rs index 483b78e9b26..f990cbfd77d 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/args/genesis.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/args/genesis.rs @@ -1,3 +1,4 @@ +use anyhow::Context; use clap::Parser; use common::{db::DatabaseConfig, Prompt}; use config::ChainConfig; @@ -6,11 +7,10 @@ use slugify_rs::slugify; use url::Url; use crate::{ - defaults::{generate_db_names, DBNames, DATABASE_PROVER_URL, DATABASE_SERVER_URL}, + defaults::{generate_db_names, DBNames, DATABASE_SERVER_URL}, messages::{ - msg_prover_db_name_prompt, msg_prover_db_url_prompt, msg_server_db_name_prompt, - msg_server_db_url_prompt, MSG_PROVER_DB_NAME_HELP, MSG_PROVER_DB_URL_HELP, - MSG_SERVER_DB_NAME_HELP, MSG_SERVER_DB_URL_HELP, MSG_USE_DEFAULT_DATABASES_HELP, + msg_server_db_name_prompt, msg_server_db_url_prompt, MSG_SERVER_DB_NAME_HELP, + MSG_SERVER_DB_URL_HELP, MSG_USE_DEFAULT_DATABASES_HELP, }, }; @@ -20,27 +20,19 @@ pub struct GenesisArgs { pub server_db_url: Option<Url>, #[clap(long, help = MSG_SERVER_DB_NAME_HELP)] pub server_db_name: Option<String>, - #[clap(long, help = MSG_PROVER_DB_URL_HELP)] - pub prover_db_url: Option<Url>, - #[clap(long, help = MSG_PROVER_DB_NAME_HELP)] - pub prover_db_name: Option<String>, #[clap(long, short, help = MSG_USE_DEFAULT_DATABASES_HELP)] - pub use_default: bool, + pub dev: bool, #[clap(long, short, action)] pub dont_drop: bool, } impl GenesisArgs { pub fn fill_values_with_prompt(self, config: &ChainConfig) -> GenesisArgsFinal { - let DBNames { - server_name, - prover_name, - } = generate_db_names(config); + let DBNames { server_name, ..
} = generate_db_names(config); let chain_name = config.name.clone(); - if self.use_default { + if self.dev { GenesisArgsFinal { server_db: DatabaseConfig::new(DATABASE_SERVER_URL.clone(), server_name), - prover_db: DatabaseConfig::new(DATABASE_PROVER_URL.clone(), prover_name), dont_drop: self.dont_drop, } } else { @@ -57,31 +49,44 @@ impl GenesisArgs { }), separator = "_" ); - let prover_db_url = self.prover_db_url.unwrap_or_else(|| { - Prompt::new(&msg_prover_db_url_prompt(&chain_name)) - .default(DATABASE_PROVER_URL.as_str()) - .ask() - }); - let prover_db_name = slugify!( - &self.prover_db_name.unwrap_or_else(|| { - Prompt::new(&msg_prover_db_name_prompt(&chain_name)) - .default(&prover_name) - .ask() - }), - separator = "_" - ); GenesisArgsFinal { server_db: DatabaseConfig::new(server_db_url, server_db_name), - prover_db: DatabaseConfig::new(prover_db_url, prover_db_name), dont_drop: self.dont_drop, } } } + + pub fn fill_values_with_secrets( + mut self, + chain_config: &ChainConfig, + ) -> anyhow::Result<GenesisArgsFinal> { + let secrets = chain_config.get_secrets_config()?; + let database = secrets + .database + .context("Database secrets must be present")?; + + let (server_db_url, server_db_name) = if let Some(db_full_url) = database.server_url { + let db_config = DatabaseConfig::from_url(db_full_url.expose_url()) + .context("Invalid server database URL")?; + (Some(db_config.url), Some(db_config.name)) + } else { + (None, None) + }; + + self.server_db_url = self.server_db_url.or(server_db_url); + self.server_db_name = self.server_db_name.or(server_db_name); + + Ok(self.fill_values_with_prompt(chain_config)) + } + + pub fn reset_db_names(&mut self) { + self.server_db_name = None; + self.server_db_url = None; + } } #[derive(Debug, Clone, Serialize, Deserialize)] pub struct GenesisArgsFinal { pub server_db: DatabaseConfig, - pub prover_db: DatabaseConfig, pub dont_drop: bool, } diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/args/init.rs b/zkstack_cli/crates/zkstack/src/commands/chain/args/init/configs.rs similarity index 50% rename from zk_toolbox/crates/zk_inception/src/commands/chain/args/init.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/args/init/configs.rs index fbdd71a7724..b34809643cf 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/args/init.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/args/init/configs.rs @@ -1,51 +1,42 @@ use clap::Parser; -use common::{forge::ForgeScriptArgs, Prompt}; +use common::Prompt; use config::ChainConfig; use serde::{Deserialize, Serialize}; use types::L1Network; use url::Url; -use super::genesis::GenesisArgsFinal; use crate::{ - commands::chain::args::genesis::GenesisArgs, + commands::chain::args::{ + genesis::{GenesisArgs, GenesisArgsFinal}, + init::InitArgsFinal, + }, defaults::LOCAL_RPC_URL, messages::{ - MSG_DEPLOY_PAYMASTER_PROMPT, MSG_GENESIS_ARGS_HELP, MSG_L1_RPC_URL_HELP, - MSG_L1_RPC_URL_INVALID_ERR, MSG_L1_RPC_URL_PROMPT, MSG_NO_PORT_REALLOCATION_HELP, + MSG_GENESIS_ARGS_HELP, MSG_L1_RPC_URL_HELP, MSG_L1_RPC_URL_INVALID_ERR, + MSG_L1_RPC_URL_PROMPT, MSG_NO_PORT_REALLOCATION_HELP, }, }; #[derive(Debug, Clone, Serialize, Deserialize, Parser)] -pub struct InitArgs { - /// All ethereum environment related arguments - #[clap(flatten)] - #[serde(flatten)] - pub forge_args: ForgeScriptArgs, +pub struct InitConfigsArgs { #[clap(flatten, next_help_heading = MSG_GENESIS_ARGS_HELP)] #[serde(flatten)] pub genesis_args: GenesisArgs, - #[clap(long, default_missing_value = "true", num_args = 0..=1)] - pub
deploy_paymaster: Option<bool>, #[clap(long, help = MSG_L1_RPC_URL_HELP)] pub l1_rpc_url: Option<String>, - #[clap(long, help = MSG_NO_PORT_REALLOCATION_HELP, default_value = "false", default_missing_value = "true", num_args = 0..=1)] + #[clap(long, help = MSG_NO_PORT_REALLOCATION_HELP)] pub no_port_reallocation: bool, - #[clap( - long, - help = "Skip submodules checkout", - default_missing_value = "true" - )] - pub skip_submodules_checkout: bool, } -impl InitArgs { - pub fn fill_values_with_prompt(self, config: &ChainConfig) -> InitArgsFinal { - let deploy_paymaster = self.deploy_paymaster.unwrap_or_else(|| { - common::PromptConfirm::new(MSG_DEPLOY_PAYMASTER_PROMPT) - .default(true) - .ask() - }); +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct InitConfigsArgsFinal { + pub genesis_args: GenesisArgsFinal, + pub l1_rpc_url: String, + pub no_port_reallocation: bool, +} +impl InitConfigsArgs { + pub fn fill_values_with_prompt(self, config: &ChainConfig) -> InitConfigsArgsFinal { let l1_rpc_url = self.l1_rpc_url.unwrap_or_else(|| { let mut prompt = Prompt::new(MSG_L1_RPC_URL_PROMPT); if config.l1_network == L1Network::Localhost { @@ -60,23 +51,20 @@ impl InitArgs { .ask() }); - InitArgsFinal { - forge_args: self.forge_args, + InitConfigsArgsFinal { genesis_args: self.genesis_args.fill_values_with_prompt(config), - deploy_paymaster, l1_rpc_url, no_port_reallocation: self.no_port_reallocation, - skip_submodules_checkout: self.skip_submodules_checkout, } } } -#[derive(Debug, Serialize, Deserialize, Clone)] -pub struct InitArgsFinal { - pub forge_args: ForgeScriptArgs, - pub genesis_args: GenesisArgsFinal, - pub deploy_paymaster: bool, - pub l1_rpc_url: String, - pub no_port_reallocation: bool, - pub skip_submodules_checkout: bool, +impl InitConfigsArgsFinal { + pub fn from_chain_init_args(init_args: &InitArgsFinal) -> InitConfigsArgsFinal { + InitConfigsArgsFinal { + genesis_args: init_args.genesis_args.clone(), + l1_rpc_url: init_args.l1_rpc_url.clone(), + no_port_reallocation: init_args.no_port_reallocation, + } + } } diff --git a/zkstack_cli/crates/zkstack/src/commands/chain/args/init/mod.rs b/zkstack_cli/crates/zkstack/src/commands/chain/args/init/mod.rs new file mode 100644 index 00000000000..b2697db6377 --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/chain/args/init/mod.rs @@ -0,0 +1,110 @@ +use clap::Parser; +use common::{forge::ForgeScriptArgs, Prompt}; +use config::ChainConfig; +use serde::{Deserialize, Serialize}; +use types::L1Network; +use url::Url; + +use crate::{ + commands::chain::args::genesis::{GenesisArgs, GenesisArgsFinal}, + defaults::LOCAL_RPC_URL, + messages::{ + MSG_DEPLOY_PAYMASTER_PROMPT, MSG_DEV_ARG_HELP, MSG_L1_RPC_URL_HELP, + MSG_L1_RPC_URL_INVALID_ERR, MSG_L1_RPC_URL_PROMPT, MSG_NO_PORT_REALLOCATION_HELP, + MSG_SERVER_DB_NAME_HELP, MSG_SERVER_DB_URL_HELP, + }, +}; + +pub mod configs;

+#[derive(Debug, Clone, Serialize, Deserialize, Parser)] +pub struct InitArgs { + /// All ethereum environment related arguments + #[clap(flatten)] + #[serde(flatten)] + pub forge_args: ForgeScriptArgs, + #[clap(long, help = MSG_SERVER_DB_URL_HELP)] + pub server_db_url: Option<Url>, + #[clap(long, help = MSG_SERVER_DB_NAME_HELP)] + pub server_db_name: Option<String>, + #[clap(long, short, action)] + pub dont_drop: bool, + #[clap(long, default_missing_value = "true", num_args = 0..=1)] + pub deploy_paymaster: Option<bool>, + #[clap(long, help = MSG_L1_RPC_URL_HELP)] + pub l1_rpc_url: Option<String>, + #[clap(long, help = MSG_NO_PORT_REALLOCATION_HELP)] + pub no_port_reallocation: bool, + #[clap(long,
help = MSG_DEV_ARG_HELP)] + pub dev: bool, + #[clap( + long, + help = "Skip submodules checkout", + default_missing_value = "true" + )] + pub skip_submodules_checkout: bool, +} + +impl InitArgs { + pub fn get_genesis_args(&self) -> GenesisArgs { + GenesisArgs { + server_db_url: self.server_db_url.clone(), + server_db_name: self.server_db_name.clone(), + dev: self.dev, + dont_drop: self.dont_drop, + } + } + + pub fn fill_values_with_prompt(self, config: &ChainConfig) -> InitArgsFinal { + let genesis = self.get_genesis_args(); + + let deploy_paymaster = if self.dev { + true + } else { + self.deploy_paymaster.unwrap_or_else(|| { + common::PromptConfirm::new(MSG_DEPLOY_PAYMASTER_PROMPT) + .default(true) + .ask() + }) + }; + + let l1_rpc_url = if self.dev { + LOCAL_RPC_URL.to_string() + } else { + self.l1_rpc_url.unwrap_or_else(|| { + let mut prompt = Prompt::new(MSG_L1_RPC_URL_PROMPT); + if config.l1_network == L1Network::Localhost { + prompt = prompt.default(LOCAL_RPC_URL); + } + prompt + .validate_with(|val: &String| -> Result<(), String> { + Url::parse(val) + .map(|_| ()) + .map_err(|_| MSG_L1_RPC_URL_INVALID_ERR.to_string()) + }) + .ask() + }) + }; + + InitArgsFinal { + forge_args: self.forge_args, + genesis_args: genesis.fill_values_with_prompt(config), + deploy_paymaster, + l1_rpc_url, + no_port_reallocation: self.no_port_reallocation, + dev: self.dev, + skip_submodules_checkout: self.skip_submodules_checkout, + } + } +} + +#[derive(Debug, Serialize, Deserialize, Clone)] +pub struct InitArgsFinal { + pub forge_args: ForgeScriptArgs, + pub genesis_args: GenesisArgsFinal, + pub deploy_paymaster: bool, + pub l1_rpc_url: String, + pub no_port_reallocation: bool, + pub dev: bool, + pub skip_submodules_checkout: bool, +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/args/mod.rs b/zkstack_cli/crates/zkstack/src/commands/chain/args/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/chain/args/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/args/mod.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/build_transactions.rs b/zkstack_cli/crates/zkstack/src/commands/chain/build_transactions.rs similarity index 96% rename from zk_toolbox/crates/zk_inception/src/commands/chain/build_transactions.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/build_transactions.rs index 98b2e226cc1..d3953c65659 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/build_transactions.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/build_transactions.rs @@ -6,9 +6,10 @@ use config::{ use ethers::utils::hex::ToHex; use xshell::Shell; -use super::common::register_chain; use crate::{ - commands::chain::args::build_transactions::BuildTransactionsArgs, + commands::chain::{ + args::build_transactions::BuildTransactionsArgs, register_chain::register_chain, + }, messages::{ MSG_BUILDING_CHAIN_REGISTRATION_TXNS_SPINNER, MSG_CHAIN_NOT_FOUND_ERR, MSG_CHAIN_TRANSACTIONS_BUILT, MSG_CHAIN_TXN_MISSING_CONTRACT_CONFIG, @@ -41,7 +42,7 @@ pub(crate) async fn run(args: BuildTransactionsArgs, shell: &Shell) -> anyhow::R logger::note(MSG_SELECTED_CONFIG, logger::object_to_string(&chain_config)); let mut genesis_config = chain_config.get_genesis_config()?; - update_from_chain_config(&mut genesis_config, &chain_config); + update_from_chain_config(&mut genesis_config, &chain_config)?; // Copy ecosystem contracts let mut contracts_config = config diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/common.rs 
b/zkstack_cli/crates/zkstack/src/commands/chain/common.rs similarity index 58% rename from zk_toolbox/crates/zk_inception/src/commands/chain/common.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/common.rs index 3d8b887a3a9..e0aa0b4e047 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/common.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/common.rs @@ -1,66 +1,12 @@ -use common::{ - forge::{Forge, ForgeScriptArgs}, - spinner::Spinner, -}; -use config::{ - forge_interface::{ - register_chain::{input::RegisterChainL1Config, output::RegisterChainOutput}, - script_params::REGISTER_CHAIN_SCRIPT_PARAMS, - }, - traits::{ReadConfig, SaveConfig}, - ChainConfig, ContractsConfig, EcosystemConfig, -}; +use common::spinner::Spinner; +use config::{ChainConfig, EcosystemConfig}; use types::{BaseToken, L1Network, WalletCreation}; -use xshell::Shell; use crate::{ consts::AMOUNT_FOR_DISTRIBUTION_TO_WALLETS, messages::{MSG_DISTRIBUTING_ETH_SPINNER, MSG_MINT_BASE_TOKEN_SPINNER}, - utils::forge::{check_the_balance, fill_forge_private_key}, }; -#[allow(clippy::too_many_arguments)] -pub async fn register_chain( - shell: &Shell, - forge_args: ForgeScriptArgs, - config: &EcosystemConfig, - chain_config: &ChainConfig, - contracts: &mut ContractsConfig, - l1_rpc_url: String, - sender: Option, - broadcast: bool, -) -> anyhow::Result<()> { - let deploy_config_path = REGISTER_CHAIN_SCRIPT_PARAMS.input(&config.link_to_code); - - let deploy_config = RegisterChainL1Config::new(chain_config, contracts)?; - deploy_config.save(shell, deploy_config_path)?; - - let mut forge = Forge::new(&config.path_to_l1_foundry()) - .script(®ISTER_CHAIN_SCRIPT_PARAMS.script(), forge_args.clone()) - .with_ffi() - .with_rpc_url(l1_rpc_url); - - if broadcast { - forge = forge.with_broadcast(); - } - - if let Some(address) = sender { - forge = forge.with_sender(address); - } else { - forge = fill_forge_private_key(forge, config.get_wallets()?.governor_private_key())?; - check_the_balance(&forge).await?; - } - - forge.run(shell)?; - - let register_chain_output = RegisterChainOutput::read( - shell, - REGISTER_CHAIN_SCRIPT_PARAMS.output(&chain_config.link_to_code), - )?; - contracts.set_chain_contracts(®ister_chain_output); - Ok(()) -} - // Distribute eth to the chain wallets for localhost environment pub async fn distribute_eth( ecosystem_config: &EcosystemConfig, diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/convert_to_gateway.rs b/zkstack_cli/crates/zkstack/src/commands/chain/convert_to_gateway.rs similarity index 95% rename from zk_toolbox/crates/zk_inception/src/commands/chain/convert_to_gateway.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/convert_to_gateway.rs index 5e3021ba0f5..fd5b7f5414f 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/convert_to_gateway.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/convert_to_gateway.rs @@ -2,6 +2,7 @@ use anyhow::Context; use common::{ config::global_config, forge::{Forge, ForgeScriptArgs}, + wallets::Wallet, }; use config::{ forge_interface::{ @@ -92,7 +93,7 @@ pub async fn run(args: ForgeScriptArgs, shell: &Shell) -> anyhow::Result<()> { .unwrap(), &ecosystem_config, &chain_config, - chain_config.get_wallets_config()?.governor_private_key(), + &chain_config.get_wallets_config()?.governor, l1_url, ) .await?; @@ -132,7 +133,7 @@ async fn deploy_gateway_ctm( .with_broadcast(); // Governor private key should not be needed for this script - forge = fill_forge_private_key(forge, 
config.get_wallets()?.deployer_private_key())?; + forge = fill_forge_private_key(forge, config.get_wallets()?.deployer.as_ref())?; check_the_balance(&forge).await?; forge.run(shell)?; @@ -162,7 +163,7 @@ async fn gateway_governance_whitelisting( .unwrap(), config, chain_config, - config.get_wallets()?.governor_private_key(), + &config.get_wallets()?.governor, l1_rpc_url.clone(), ) .await? @@ -184,7 +185,7 @@ async fn gateway_governance_whitelisting( .unwrap(), config, chain_config, - config.get_wallets()?.governor_private_key(), + &config.get_wallets()?.governor, l1_rpc_url.clone(), ) .await? @@ -204,7 +205,7 @@ async fn gateway_governance_whitelisting( .unwrap(), config, chain_config, - config.get_wallets()?.governor_private_key(), + &config.get_wallets()?.governor, l1_rpc_url.clone(), ) .await? @@ -227,7 +228,7 @@ async fn gateway_governance_whitelisting( .unwrap(), config, chain_config, - config.get_wallets()?.governor_private_key(), + &config.get_wallets()?.governor, l1_rpc_url.clone(), ) .await? @@ -248,7 +249,7 @@ async fn call_script( data: &Bytes, config: &EcosystemConfig, chain_config: &ChainConfig, - private_key: Option, + governor: &Wallet, l1_rpc_url: String, ) -> anyhow::Result { let mut forge = Forge::new(&config.path_to_l1_foundry()) @@ -259,7 +260,7 @@ async fn call_script( .with_calldata(data); // Governor private key is required for this script - forge = fill_forge_private_key(forge, private_key)?; + forge = fill_forge_private_key(forge, Some(governor))?; check_the_balance(&forge).await?; forge.run(shell)?; diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/create.rs b/zkstack_cli/crates/zkstack/src/commands/chain/create.rs similarity index 97% rename from zk_toolbox/crates/zk_inception/src/commands/chain/create.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/create.rs index 48a320ec27e..bdf5711e321 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/create.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/create.rs @@ -30,9 +30,11 @@ fn create( let tokens = ecosystem_config.get_erc20_tokens(); let args = args .fill_values_with_prompt( + shell, ecosystem_config.list_of_chains().len() as u32, &ecosystem_config.l1_network, tokens, + &ecosystem_config.link_to_code, ) .context(MSG_ARGS_VALIDATOR_ERR)?; @@ -89,6 +91,7 @@ pub(crate) fn create_chain_inner( wallet_creation: args.wallet_creation, shell: OnceCell::from(shell.clone()), legacy_bridge, + evm_emulator: args.evm_emulator, }; create_wallets( diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_l2_contracts.rs b/zkstack_cli/crates/zkstack/src/commands/chain/deploy_l2_contracts.rs similarity index 89% rename from zk_toolbox/crates/zk_inception/src/commands/chain/deploy_l2_contracts.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/deploy_l2_contracts.rs index 1a4b1cb3608..578069546f9 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_l2_contracts.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/deploy_l2_contracts.rs @@ -2,6 +2,7 @@ use std::path::Path; use anyhow::Context; use common::{ + // contracts::build_l2_contracts, forge::{Forge, ForgeScriptArgs}, hardhat::build_l2_contracts, spinner::Spinner, @@ -35,6 +36,7 @@ pub enum Deploy2ContractsOption { Upgrader, InitiailizeBridges, ConsensusRegistry, + Multicall3, } pub async fn run( @@ -82,6 +84,16 @@ pub async fn run( ) .await?; } + Deploy2ContractsOption::Multicall3 => { + deploy_multicall3( + shell, + &chain_config, + &ecosystem_config, + &mut contracts, + args, + ) + .await?; + } 
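// [Editor's sketch, not part of this diff] The recurring change in the hunks
// above and below replaces raw `Option<H256>` private keys with wallet
// references: `fill_forge_private_key` now receives an optional `&Wallet`, so
// call sites pass `Some(&wallets.governor)` instead of
// `wallets.governor_private_key()`. `Wallet` and `ForgeScript` here are
// simplified stand-ins for the internal crate types, not the real definitions.
struct Wallet {
    private_key: Option<[u8; 32]>,
}
struct ForgeScript {
    private_key: Option<[u8; 32]>,
}

fn fill_forge_private_key(
    mut forge: ForgeScript,
    wallet: Option<&Wallet>,
) -> Result<ForgeScript, &'static str> {
    // When a wallet is supplied, its key must be present; otherwise the
    // forge script is left unchanged (e.g. when an explicit sender is used).
    if let Some(wallet) = wallet {
        let key = wallet.private_key.ok_or("wallet private key not set")?;
        forge.private_key = Some(key);
    }
    Ok(forge)
}
// A call site then reads:
// let forge = fill_forge_private_key(forge, Some(&wallets.governor))?;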
Deploy2ContractsOption::InitiailizeBridges => { initialize_bridges( shell, @@ -184,6 +196,24 @@ pub async fn deploy_consensus_registry( .await } +pub async fn deploy_multicall3( + shell: &Shell, + chain_config: &ChainConfig, + ecosystem_config: &EcosystemConfig, + contracts_config: &mut ContractsConfig, + forge_args: ForgeScriptArgs, +) -> anyhow::Result<()> { + build_and_deploy( + shell, + chain_config, + ecosystem_config, + forge_args, + Some("runDeployMulticall3"), + |shell, out| contracts_config.set_multicall3(&Multicall3Output::read(shell, out)?), + ) + .await +} + pub async fn deploy_l2_contracts( shell: &Shell, chain_config: &ChainConfig, @@ -254,10 +284,7 @@ async fn call_forge( forge = forge.with_signature(signature); } - forge = fill_forge_private_key( - forge, - ecosystem_config.get_wallets()?.governor_private_key(), - )?; + forge = fill_forge_private_key(forge, Some(&ecosystem_config.get_wallets()?.governor))?; check_the_balance(&forge).await?; forge.run(shell)?; diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_paymaster.rs b/zkstack_cli/crates/zkstack/src/commands/chain/deploy_paymaster.rs similarity index 94% rename from zk_toolbox/crates/zk_inception/src/commands/chain/deploy_paymaster.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/deploy_paymaster.rs index 0da56f0c962..4a93fcc089f 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_paymaster.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/deploy_paymaster.rs @@ -56,10 +56,7 @@ pub async fn deploy_paymaster( if let Some(address) = sender { forge = forge.with_sender(address); } else { - forge = fill_forge_private_key( - forge, - chain_config.get_wallets_config()?.governor_private_key(), - )?; + forge = fill_forge_private_key(forge, Some(&chain_config.get_wallets_config()?.governor))?; } if broadcast { diff --git a/zkstack_cli/crates/zkstack/src/commands/chain/genesis/database.rs b/zkstack_cli/crates/zkstack/src/commands/chain/genesis/database.rs new file mode 100644 index 00000000000..edf480946be --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/chain/genesis/database.rs @@ -0,0 +1,118 @@ +use std::path::PathBuf; + +use anyhow::Context; +use common::{ + config::global_config, + db::{drop_db_if_exists, init_db, migrate_db, DatabaseConfig}, + logger, +}; +use config::{ + override_config, set_file_artifacts, set_rocks_db_config, set_server_database, + traits::SaveConfigWithBasePath, ChainConfig, EcosystemConfig, FileArtifacts, +}; +use types::ProverMode; +use xshell::Shell; +use zksync_basic_types::commitment::L1BatchCommitmentMode; + +use crate::{ + commands::chain::args::genesis::{GenesisArgs, GenesisArgsFinal}, + consts::{ + PATH_TO_ONLY_REAL_PROOFS_OVERRIDE_CONFIG, PATH_TO_VALIDIUM_OVERRIDE_CONFIG, + SERVER_MIGRATIONS, + }, + messages::{ + MSG_CHAIN_NOT_INITIALIZED, MSG_FAILED_TO_DROP_SERVER_DATABASE_ERR, + MSG_GENESIS_DATABASES_INITIALIZED, MSG_INITIALIZING_SERVER_DATABASE, + MSG_RECREATE_ROCKS_DB_ERRROR, + }, + utils::rocks_db::{recreate_rocksdb_dirs, RocksDBDirOption}, +}; + +pub async fn run(args: GenesisArgs, shell: &Shell) -> anyhow::Result<()> { + let ecosystem_config = EcosystemConfig::from_file(shell)?; + let chain_config = ecosystem_config + .load_current_chain() + .context(MSG_CHAIN_NOT_INITIALIZED)?; + + let mut secrets = chain_config.get_secrets_config()?; + let args = args.fill_values_with_secrets(&chain_config)?; + set_server_database(&mut secrets, &args.server_db)?; + secrets.save_with_base_path(shell, &chain_config.configs)?; + + 
initialize_server_database( + shell, + &args.server_db, + chain_config.link_to_code.clone(), + args.dont_drop, + ) + .await?; + logger::outro(MSG_GENESIS_DATABASES_INITIALIZED); + + Ok(()) +} + +pub async fn initialize_server_database( + shell: &Shell, + server_db_config: &DatabaseConfig, + link_to_code: PathBuf, + dont_drop: bool, +) -> anyhow::Result<()> { + let path_to_server_migration = link_to_code.join(SERVER_MIGRATIONS); + + if global_config().verbose { + logger::debug(MSG_INITIALIZING_SERVER_DATABASE) + } + if !dont_drop { + drop_db_if_exists(server_db_config) + .await + .context(MSG_FAILED_TO_DROP_SERVER_DATABASE_ERR)?; + init_db(server_db_config).await?; + } + migrate_db( + shell, + path_to_server_migration, + &server_db_config.full_url(), + ) + .await?; + + Ok(()) +} + +pub fn update_configs( + args: GenesisArgsFinal, + shell: &Shell, + config: &ChainConfig, +) -> anyhow::Result<()> { + shell.create_dir(&config.rocks_db_path)?; + + // Update secrets configs + let mut secrets = config.get_secrets_config()?; + set_server_database(&mut secrets, &args.server_db)?; + secrets.save_with_base_path(shell, &config.configs)?; + + // Update general config + let mut general = config.get_general_config()?; + let rocks_db = recreate_rocksdb_dirs(shell, &config.rocks_db_path, RocksDBDirOption::Main) + .context(MSG_RECREATE_ROCKS_DB_ERRROR)?; + let file_artifacts = FileArtifacts::new(config.artifacts.clone()); + set_rocks_db_config(&mut general, rocks_db)?; + set_file_artifacts(&mut general, file_artifacts); + general.save_with_base_path(shell, &config.configs)?; + + let link_to_code = config.link_to_code.clone(); + if config.prover_version != ProverMode::NoProofs { + override_config( + shell, + link_to_code.join(PATH_TO_ONLY_REAL_PROOFS_OVERRIDE_CONFIG), + config, + )?; + } + if config.l1_batch_commit_data_generator_mode == L1BatchCommitmentMode::Validium { + override_config( + shell, + link_to_code.join(PATH_TO_VALIDIUM_OVERRIDE_CONFIG), + config, + )?; + } + Ok(()) +} diff --git a/zkstack_cli/crates/zkstack/src/commands/chain/genesis/mod.rs b/zkstack_cli/crates/zkstack/src/commands/chain/genesis/mod.rs new file mode 100644 index 00000000000..c1cc03174ae --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/chain/genesis/mod.rs @@ -0,0 +1,92 @@ +use anyhow::Context; +use clap::{command, Parser, Subcommand}; +use common::{logger, spinner::Spinner}; +use config::{ChainConfig, EcosystemConfig}; +use xshell::Shell; + +use crate::{ + commands::chain::{ + args::genesis::{GenesisArgs, GenesisArgsFinal}, + genesis::{self, database::initialize_server_database, server::run_server_genesis}, + }, + messages::{ + MSG_CHAIN_NOT_INITIALIZED, MSG_GENESIS_COMPLETED, MSG_INITIALIZING_DATABASES_SPINNER, + MSG_SELECTED_CONFIG, MSG_STARTING_GENESIS, MSG_STARTING_GENESIS_SPINNER, + }, +}; + +// Genesis subcommands +pub mod database; +pub mod server; + +#[derive(Subcommand, Debug, Clone)] +pub enum GenesisSubcommands { + /// Initialize databases + #[command(alias = "database")] + InitDatabase(Box), + /// Runs server genesis + Server, +} + +#[derive(Parser, Debug)] +#[command()] +pub struct GenesisCommand { + #[command(subcommand)] + command: Option, + #[clap(flatten)] + args: GenesisArgs, +} + +pub(crate) async fn run(args: GenesisCommand, shell: &Shell) -> anyhow::Result<()> { + match args.command { + Some(GenesisSubcommands::InitDatabase(args)) => database::run(*args, shell).await, + Some(GenesisSubcommands::Server) => server::run(shell).await, + None => run_genesis(args.args, shell).await, + } +} + +pub 
async fn run_genesis(args: GenesisArgs, shell: &Shell) -> anyhow::Result<()> { + let ecosystem_config = EcosystemConfig::from_file(shell)?; + let chain_config = ecosystem_config + .load_current_chain() + .context(MSG_CHAIN_NOT_INITIALIZED)?; + let args = args.fill_values_with_prompt(&chain_config); + + genesis(args, shell, &chain_config).await?; + logger::outro(MSG_GENESIS_COMPLETED); + + Ok(()) +} + +pub async fn genesis( + args: GenesisArgsFinal, + shell: &Shell, + config: &ChainConfig, +) -> anyhow::Result<()> { + genesis::database::update_configs(args.clone(), shell, config)?; + + logger::note( + MSG_SELECTED_CONFIG, + logger::object_to_string(serde_json::json!({ + "chain_config": config, + "server_db_config": args.server_db, + })), + ); + logger::info(MSG_STARTING_GENESIS); + + let spinner = Spinner::new(MSG_INITIALIZING_DATABASES_SPINNER); + initialize_server_database( + shell, + &args.server_db, + config.link_to_code.clone(), + args.dont_drop, + ) + .await?; + spinner.finish(); + + let spinner = Spinner::new(MSG_STARTING_GENESIS_SPINNER); + run_server_genesis(config, shell)?; + spinner.finish(); + + Ok(()) +} diff --git a/zkstack_cli/crates/zkstack/src/commands/chain/genesis/server.rs b/zkstack_cli/crates/zkstack/src/commands/chain/genesis/server.rs new file mode 100644 index 00000000000..090792e8007 --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/chain/genesis/server.rs @@ -0,0 +1,47 @@ +use anyhow::Context; +use common::{ + logger, + server::{Server, ServerMode}, + spinner::Spinner, +}; +use config::{ + traits::FileConfigWithDefaultName, ChainConfig, ContractsConfig, EcosystemConfig, + GeneralConfig, GenesisConfig, SecretsConfig, WalletsConfig, +}; +use xshell::Shell; + +use crate::messages::{ + MSG_CHAIN_NOT_INITIALIZED, MSG_FAILED_TO_RUN_SERVER_ERR, MSG_GENESIS_COMPLETED, + MSG_STARTING_GENESIS_SPINNER, +}; + +pub async fn run(shell: &Shell) -> anyhow::Result<()> { + let ecosystem_config = EcosystemConfig::from_file(shell)?; + let chain_config = ecosystem_config + .load_current_chain() + .context(MSG_CHAIN_NOT_INITIALIZED)?; + + let spinner = Spinner::new(MSG_STARTING_GENESIS_SPINNER); + run_server_genesis(&chain_config, shell)?; + spinner.finish(); + logger::outro(MSG_GENESIS_COMPLETED); + + Ok(()) +} + +pub fn run_server_genesis(chain_config: &ChainConfig, shell: &Shell) -> anyhow::Result<()> { + let server = Server::new(None, chain_config.link_to_code.clone(), false); + server + .run( + shell, + ServerMode::Genesis, + GenesisConfig::get_path_with_base_path(&chain_config.configs), + WalletsConfig::get_path_with_base_path(&chain_config.configs), + GeneralConfig::get_path_with_base_path(&chain_config.configs), + SecretsConfig::get_path_with_base_path(&chain_config.configs), + ContractsConfig::get_path_with_base_path(&chain_config.configs), + None, + vec![], + ) + .context(MSG_FAILED_TO_RUN_SERVER_ERR) +} diff --git a/zkstack_cli/crates/zkstack/src/commands/chain/init/configs.rs b/zkstack_cli/crates/zkstack/src/commands/chain/init/configs.rs new file mode 100644 index 00000000000..31c5c681e7d --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/chain/init/configs.rs @@ -0,0 +1,108 @@ +use anyhow::Context; +use common::logger; +use config::{ + copy_configs, set_l1_rpc_url, traits::SaveConfigWithBasePath, update_from_chain_config, + ChainConfig, ContractsConfig, EcosystemConfig, +}; +use ethers::types::Address; +use xshell::Shell; + +use crate::{ + commands::{ + chain::{ + args::init::configs::{InitConfigsArgs, InitConfigsArgsFinal}, + genesis, + }, + 
portal::update_portal_config, + }, + messages::{ + MSG_CHAIN_CONFIGS_INITIALIZED, MSG_CHAIN_NOT_FOUND_ERR, MSG_CONSENSUS_CONFIG_MISSING_ERR, + MSG_PORTAL_FAILED_TO_CREATE_CONFIG_ERR, + }, + utils::{ + consensus::{generate_consensus_keys, get_consensus_secrets, get_genesis_specs}, + ports::EcosystemPortsScanner, + }, +}; + +pub async fn run(args: InitConfigsArgs, shell: &Shell) -> anyhow::Result<()> { + let ecosystem_config = EcosystemConfig::from_file(shell)?; + let chain_config = ecosystem_config + .load_current_chain() + .context(MSG_CHAIN_NOT_FOUND_ERR)?; + let args = args.fill_values_with_prompt(&chain_config); + + init_configs(&args, shell, &ecosystem_config, &chain_config).await?; + logger::outro(MSG_CHAIN_CONFIGS_INITIALIZED); + + Ok(()) +} + +pub async fn init_configs( + init_args: &InitConfigsArgsFinal, + shell: &Shell, + ecosystem_config: &EcosystemConfig, + chain_config: &ChainConfig, +) -> anyhow::Result { + // Port scanner should run before copying configs to avoid marking initial ports as assigned + let mut ecosystem_ports = EcosystemPortsScanner::scan(shell)?; + copy_configs(shell, &ecosystem_config.link_to_code, &chain_config.configs)?; + + if !init_args.no_port_reallocation { + ecosystem_ports.allocate_ports_in_yaml( + shell, + &chain_config.path_to_general_config(), + chain_config.id, + )?; + } + + let mut general_config = chain_config.get_general_config()?; + + if general_config.proof_data_handler_config.is_some() && general_config.prover_gateway.is_some() + { + let proof_data_handler_config = general_config.proof_data_handler_config.clone().unwrap(); + let mut prover_gateway = general_config.prover_gateway.clone().unwrap(); + + prover_gateway.api_url = + format!("http://127.0.0.1:{}", proof_data_handler_config.http_port); + + general_config.prover_gateway = Some(prover_gateway); + } + + let mut consensus_config = general_config + .consensus_config + .context(MSG_CONSENSUS_CONFIG_MISSING_ERR)?; + + let consensus_keys = generate_consensus_keys(); + consensus_config.genesis_spec = Some(get_genesis_specs(chain_config, &consensus_keys)); + + general_config.consensus_config = Some(consensus_config); + general_config.save_with_base_path(shell, &chain_config.configs)?; + + // Initialize genesis config + let mut genesis_config = chain_config.get_genesis_config()?; + update_from_chain_config(&mut genesis_config, chain_config)?; + genesis_config.save_with_base_path(shell, &chain_config.configs)?; + + // Initialize contracts config + let mut contracts_config = ecosystem_config.get_contracts_config()?; + contracts_config.l1.diamond_proxy_addr = Address::zero(); + contracts_config.l1.governance_addr = Address::zero(); + contracts_config.l1.chain_admin_addr = Address::zero(); + contracts_config.l1.base_token_addr = chain_config.base_token.address; + contracts_config.save_with_base_path(shell, &chain_config.configs)?; + + // Initialize secrets config + let mut secrets = chain_config.get_secrets_config()?; + set_l1_rpc_url(&mut secrets, init_args.l1_rpc_url.clone())?; + secrets.consensus = Some(get_consensus_secrets(&consensus_keys)); + secrets.save_with_base_path(shell, &chain_config.configs)?; + + genesis::database::update_configs(init_args.genesis_args.clone(), shell, chain_config)?; + + update_portal_config(shell, chain_config) + .await + .context(MSG_PORTAL_FAILED_TO_CREATE_CONFIG_ERR)?; + + Ok(contracts_config) +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/init.rs b/zkstack_cli/crates/zkstack/src/commands/chain/init/mod.rs similarity index 58% rename from 
zk_toolbox/crates/zk_inception/src/commands/chain/init.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/init/mod.rs index 53e49955f5e..8157a131815 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/init.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/init/mod.rs @@ -1,45 +1,64 @@ use anyhow::Context; +use clap::{command, Parser, Subcommand}; use common::{git, logger, spinner::Spinner}; -use config::{ - copy_configs, set_l1_rpc_url, traits::SaveConfigWithBasePath, update_from_chain_config, - ChainConfig, EcosystemConfig, DEFAULT_CONSENSUS_PORT, -}; +use config::{traits::SaveConfigWithBasePath, ChainConfig, EcosystemConfig}; use types::{BaseToken, L1BatchCommitmentMode}; use xshell::Shell; -use super::common::{distribute_eth, mint_base_token, register_chain}; use crate::{ accept_ownership::{accept_admin, set_da_validator_pair}, - commands::{ - chain::{ - args::init::{InitArgs, InitArgsFinal}, - deploy_l2_contracts, deploy_paymaster, - genesis::genesis, - set_token_multiplier_setter::set_token_multiplier_setter, - setup_legacy_bridge::setup_legacy_bridge, + commands::chain::{ + args::init::{ + configs::{InitConfigsArgs, InitConfigsArgsFinal}, + InitArgs, InitArgsFinal, }, - portal::update_portal_config, + common::{distribute_eth, mint_base_token}, + deploy_l2_contracts, deploy_paymaster, + genesis::genesis, + init::configs::init_configs, + register_chain::register_chain, + set_token_multiplier_setter::set_token_multiplier_setter, + setup_legacy_bridge::setup_legacy_bridge, }, - defaults::PORT_RANGE_END, messages::{ msg_initializing_chain, MSG_ACCEPTING_ADMIN_SPINNER, MSG_CHAIN_INITIALIZED, MSG_CHAIN_NOT_FOUND_ERR, MSG_DA_PAIR_REGISTRATION_SPINNER, MSG_DEPLOYING_PAYMASTER, - MSG_GENESIS_DATABASE_ERR, MSG_PORTAL_FAILED_TO_CREATE_CONFIG_ERR, - MSG_REGISTERING_CHAIN_SPINNER, MSG_SELECTED_CONFIG, + MSG_GENESIS_DATABASE_ERR, MSG_REGISTERING_CHAIN_SPINNER, MSG_SELECTED_CONFIG, MSG_UPDATING_TOKEN_MULTIPLIER_SETTER_SPINNER, MSG_WALLET_TOKEN_MULTIPLIER_SETTER_NOT_FOUND, }, - utils::{ - consensus::{generate_consensus_keys, get_consensus_config, get_consensus_secrets}, - ports::EcosystemPortsScanner, - }, }; -pub(crate) async fn run(args: InitArgs, shell: &Shell) -> anyhow::Result<()> { +// Init subcommands +pub mod configs; + +#[derive(Subcommand, Debug, Clone)] +pub enum ChainInitSubcommands { + /// Initialize chain configs + Configs(InitConfigsArgs), +} + +#[derive(Parser, Debug)] +#[command()] +pub struct ChainInitCommand { + #[command(subcommand)] + command: Option, + #[clap(flatten)] + args: InitArgs, +} + +pub(crate) async fn run(args: ChainInitCommand, shell: &Shell) -> anyhow::Result<()> { + match args.command { + Some(ChainInitSubcommands::Configs(args)) => configs::run(args, shell).await, + None => run_init(args.args, shell).await, + } +} + +async fn run_init(args: InitArgs, shell: &Shell) -> anyhow::Result<()> { let config = EcosystemConfig::from_file(shell)?; let chain_config = config .load_current_chain() .context(MSG_CHAIN_NOT_FOUND_ERR)?; - let mut args = args.fill_values_with_prompt(&chain_config); + let args = args.fill_values_with_prompt(&chain_config); logger::note(MSG_SELECTED_CONFIG, logger::object_to_string(&chain_config)); logger::info(msg_initializing_chain("")); @@ -47,63 +66,28 @@ pub(crate) async fn run(args: InitArgs, shell: &Shell) -> anyhow::Result<()> { git::submodule_update(shell, config.link_to_code.clone())?; } - init(&mut args, shell, &config, &chain_config).await?; + init(&args, shell, &config, &chain_config).await?; 
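// [Editor's sketch, not part of this diff] `ChainInitCommand` and
// `GenesisCommand` above both use the same clap pattern: an optional
// subcommand plus flattened default arguments, so `zkstack chain init` runs
// the full flow while `zkstack chain init configs` runs only the subcommand.
// A standalone reduction of that pattern, assuming clap with the `derive`
// feature; all names here are illustrative:
use clap::{Args, Parser, Subcommand};

#[derive(Parser, Debug)]
struct InitCommand {
    #[command(subcommand)]
    command: Option<InitSubcommands>,
    #[clap(flatten)]
    args: InitDefaults,
}

#[derive(Subcommand, Debug, Clone)]
enum InitSubcommands {
    /// Initialize configs only
    Configs,
}

#[derive(Args, Debug)]
struct InitDefaults {
    #[clap(long)]
    dev: bool,
}

fn main() {
    // With no subcommand, the flattened defaults drive the full init flow;
    // `configs` short-circuits to the dedicated subcommand instead.
    let cmd = InitCommand::parse();
    match cmd.command {
        Some(InitSubcommands::Configs) => println!("init configs only"),
        None => println!("full init, dev = {}", cmd.args.dev),
    }
}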
logger::success(MSG_CHAIN_INITIALIZED); Ok(()) } pub async fn init( - init_args: &mut InitArgsFinal, + init_args: &InitArgsFinal, shell: &Shell, ecosystem_config: &EcosystemConfig, chain_config: &ChainConfig, ) -> anyhow::Result<()> { - let mut ecosystem_ports = EcosystemPortsScanner::scan(shell)?; - copy_configs(shell, &ecosystem_config.link_to_code, &chain_config.configs)?; - - if !init_args.no_port_reallocation { - ecosystem_ports.allocate_ports_in_yaml( - shell, - &chain_config.path_to_general_config(), - chain_config.id, - )?; - } - let mut general_config = chain_config.get_general_config()?; - - // TODO: This is a temporary solution. We should allocate consensus port using `EcosystemPorts::allocate_ports_in_yaml` - let offset = ((chain_config.id - 1) * 100) as u16; - let consensus_port_range = DEFAULT_CONSENSUS_PORT + offset..PORT_RANGE_END; - let consensus_port = - ecosystem_ports.allocate_port(consensus_port_range, "Consensus".to_string())?; - - let consensus_keys = generate_consensus_keys(); - let consensus_config = get_consensus_config( - chain_config, - consensus_port, - Some(consensus_keys.clone()), - None, - )?; - general_config.consensus_config = Some(consensus_config); - general_config.save_with_base_path(shell, &chain_config.configs)?; - - let mut genesis_config = chain_config.get_genesis_config()?; - update_from_chain_config(&mut genesis_config, chain_config); - genesis_config.save_with_base_path(shell, &chain_config.configs)?; - - // Copy ecosystem contracts - let mut contracts_config = ecosystem_config.get_contracts_config()?; - contracts_config.l1.base_token_addr = chain_config.base_token.address; - contracts_config.save_with_base_path(shell, &chain_config.configs)?; + // Initialize configs + let init_configs_args = InitConfigsArgsFinal::from_chain_init_args(init_args); + let mut contracts_config = + init_configs(&init_configs_args, shell, ecosystem_config, chain_config).await?; + // Fund some wallet addresses with ETH or base token (only for Localhost) distribute_eth(ecosystem_config, chain_config, init_args.l1_rpc_url.clone()).await?; mint_base_token(ecosystem_config, chain_config, init_args.l1_rpc_url.clone()).await?; - let mut secrets = chain_config.get_secrets_config()?; - set_l1_rpc_url(&mut secrets, init_args.l1_rpc_url.clone())?; - secrets.consensus = Some(get_consensus_secrets(&consensus_keys)); - secrets.save_with_base_path(shell, &chain_config.configs)?; - + // Register chain on BridgeHub (run by L1 Governor) let spinner = Spinner::new(MSG_REGISTERING_CHAIN_SPINNER); register_chain( shell, @@ -118,12 +102,14 @@ pub async fn init( .await?; contracts_config.save_with_base_path(shell, &chain_config.configs)?; spinner.finish(); + + // Accept ownership for DiamondProxy (run by L2 Governor) let spinner = Spinner::new(MSG_ACCEPTING_ADMIN_SPINNER); accept_admin( shell, ecosystem_config, contracts_config.l1.chain_admin_addr, - chain_config.get_wallets_config()?.governor_private_key(), + &chain_config.get_wallets_config()?.governor, contracts_config.l1.diamond_proxy_addr, &init_args.forge_args.clone(), init_args.l1_rpc_url.clone(), @@ -131,13 +117,14 @@ pub async fn init( .await?; spinner.finish(); + // Set token multiplier setter address (run by L2 Governor) if chain_config.base_token != BaseToken::eth() { let spinner = Spinner::new(MSG_UPDATING_TOKEN_MULTIPLIER_SETTER_SPINNER); let chain_contracts = chain_config.get_contracts_config()?; set_token_multiplier_setter( shell, ecosystem_config, - chain_config.get_wallets_config()?.governor_private_key(), + 
&chain_config.get_wallets_config()?.governor, chain_contracts.l1.access_control_restriction_addr, chain_contracts.l1.diamond_proxy_addr, chain_config @@ -153,6 +140,7 @@ pub async fn init( spinner.finish(); } + // Deploy L2 contracts: L2SharedBridge, L2DefaultUpgrader, ... (run by L1 Governor) deploy_l2_contracts::deploy_l2_contracts( shell, chain_config, @@ -177,7 +165,7 @@ pub async fn init( shell, ecosystem_config, contracts_config.l1.chain_admin_addr, - chain_config.get_wallets_config()?.governor_private_key(), + &chain_config.get_wallets_config()?.governor, contracts_config.l1.diamond_proxy_addr, l1_da_validator_addr, contracts_config.l2.da_validator_addr, @@ -187,6 +175,7 @@ pub async fn init( .await?; spinner.finish(); + // Setup legacy bridge - shouldn't be used for new chains (run by L1 Governor) if let Some(true) = chain_config.legacy_bridge { setup_legacy_bridge( shell, @@ -198,6 +187,7 @@ pub async fn init( .await?; } + // Deploy Paymaster contract (run by L2 Governor) if init_args.deploy_paymaster { let spinner = Spinner::new(MSG_DEPLOYING_PAYMASTER); deploy_paymaster::deploy_paymaster( @@ -217,9 +207,5 @@ pub async fn init( .await .context(MSG_GENESIS_DATABASE_ERR)?; - update_portal_config(shell, chain_config) - .await - .context(MSG_PORTAL_FAILED_TO_CREATE_CONFIG_ERR)?; - Ok(()) } diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/migrate_from_gateway.rs b/zkstack_cli/crates/zkstack/src/commands/chain/migrate_from_gateway.rs similarity index 96% rename from zk_toolbox/crates/zk_inception/src/commands/chain/migrate_from_gateway.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/migrate_from_gateway.rs index 19b0042037d..dca212778fa 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/migrate_from_gateway.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/migrate_from_gateway.rs @@ -3,6 +3,7 @@ use clap::Parser; use common::{ config::global_config, forge::{Forge, ForgeScriptArgs}, + wallets::Wallet, withdraw::ZKSProvider, }; use config::{ @@ -24,8 +25,9 @@ use lazy_static::lazy_static; use serde::{Deserialize, Serialize}; use types::L1BatchCommitmentMode; use xshell::Shell; -use zksync_basic_types::{settlement::SettlementMode, H256, U256, U64}; -use zksync_config::configs::eth_sender::PubdataSendingMode; +use zksync_basic_types::{ + pubdata_da::PubdataSendingMode, settlement::SettlementMode, H256, U256, U64, +}; use zksync_types::L2ChainId; use zksync_web3_decl::client::{Client, L2}; @@ -117,7 +119,7 @@ pub async fn run(args: MigrateFromGatewayArgs, shell: &Shell) -> anyhow::Result< ) .unwrap(), &ecosystem_config, - chain_config.get_wallets_config()?.governor_private_key(), + &chain_config.get_wallets_config()?.governor, l1_url.clone(), ) .await?; @@ -176,7 +178,7 @@ pub async fn run(args: MigrateFromGatewayArgs, shell: &Shell) -> anyhow::Result< ) .unwrap(), &ecosystem_config, - chain_config.get_wallets_config()?.governor_private_key(), + &chain_config.get_wallets_config()?.governor, l1_url.clone(), ) .await?; @@ -269,7 +271,7 @@ async fn call_script( forge_args: ForgeScriptArgs, data: &Bytes, config: &EcosystemConfig, - private_key: Option, + governor: &Wallet, rpc_url: String, ) -> anyhow::Result { let mut forge = Forge::new(&config.path_to_l1_foundry()) @@ -280,7 +282,7 @@ async fn call_script( .with_calldata(data); // Governor private key is required for this script - forge = fill_forge_private_key(forge, private_key)?; + forge = fill_forge_private_key(forge, Some(governor))?; check_the_balance(&forge).await?; forge.run(shell)?; diff 
--git a/zk_toolbox/crates/zk_inception/src/commands/chain/migrate_to_gateway.rs b/zkstack_cli/crates/zkstack/src/commands/chain/migrate_to_gateway.rs similarity index 94% rename from zk_toolbox/crates/zk_inception/src/commands/chain/migrate_to_gateway.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/migrate_to_gateway.rs index 7cfb041862a..fd2a78e35da 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/migrate_to_gateway.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/migrate_to_gateway.rs @@ -3,6 +3,7 @@ use clap::Parser; use common::{ config::global_config, forge::{Forge, ForgeScriptArgs}, + wallets::Wallet, }; use config::{ forge_interface::{ @@ -23,8 +24,10 @@ use lazy_static::lazy_static; use serde::{Deserialize, Serialize}; use types::L1BatchCommitmentMode; use xshell::Shell; -use zksync_basic_types::{settlement::SettlementMode, Address, H256, U256, U64}; -use zksync_config::configs::{eth_sender::PubdataSendingMode, gateway::GatewayChainConfig}; +use zksync_basic_types::{ + pubdata_da::PubdataSendingMode, settlement::SettlementMode, Address, H256, U256, U64, +}; +use zksync_config::configs::gateway::GatewayChainConfig; use zksync_system_constants::L2_BRIDGEHUB_ADDRESS; use crate::{ @@ -124,9 +127,7 @@ pub async fn run(args: MigrateToGatewayArgs, shell: &Shell) -> anyhow::Result<() ) .unwrap(), &ecosystem_config, - gateway_chain_config - .get_wallets_config()? - .governor_private_key(), + &gateway_chain_config.get_wallets_config()?.governor, l1_url.clone(), ) .await?; @@ -147,7 +148,7 @@ pub async fn run(args: MigrateToGatewayArgs, shell: &Shell) -> anyhow::Result<() ) .unwrap(), &ecosystem_config, - chain_config.get_wallets_config()?.governor_private_key(), + &chain_config.get_wallets_config()?.governor, l1_url.clone(), ) .await?; @@ -218,7 +219,7 @@ pub async fn run(args: MigrateToGatewayArgs, shell: &Shell) -> anyhow::Result<() ) .unwrap(), &ecosystem_config, - chain_config.get_wallets_config()?.governor_private_key(), + &chain_config.get_wallets_config()?.governor, l1_url.clone(), ) .await?; @@ -246,7 +247,7 @@ pub async fn run(args: MigrateToGatewayArgs, shell: &Shell) -> anyhow::Result<() ) .unwrap(), &ecosystem_config, - chain_config.get_wallets_config()?.governor_private_key(), + &chain_config.get_wallets_config()?.governor, l1_url.clone(), ) .await?; @@ -268,7 +269,7 @@ pub async fn run(args: MigrateToGatewayArgs, shell: &Shell) -> anyhow::Result<() ) .unwrap(), &ecosystem_config, - chain_config.get_wallets_config()?.governor_private_key(), + &chain_config.get_wallets_config()?.governor, l1_url.clone(), ) .await?; @@ -293,7 +294,7 @@ pub async fn run(args: MigrateToGatewayArgs, shell: &Shell) -> anyhow::Result<() ) .unwrap(), &ecosystem_config, - chain_config.get_wallets_config()?.governor_private_key(), + &chain_config.get_wallets_config()?.governor, l1_url.clone(), ) .await?; @@ -312,7 +313,7 @@ pub async fn run(args: MigrateToGatewayArgs, shell: &Shell) -> anyhow::Result<() ) .unwrap(), &ecosystem_config, - chain_config.get_wallets_config()?.governor_private_key(), + &chain_config.get_wallets_config()?.governor, l1_url.clone(), ) .await?; @@ -423,7 +424,7 @@ async fn call_script( forge_args: ForgeScriptArgs, data: &Bytes, config: &EcosystemConfig, - private_key: Option, + governor: &Wallet, l1_rpc_url: String, ) -> anyhow::Result { let mut forge = Forge::new(&config.path_to_l1_foundry()) @@ -434,7 +435,7 @@ async fn call_script( .with_calldata(data); // Governor private key is required for this script - forge = fill_forge_private_key(forge, 
private_key)?; + forge = fill_forge_private_key(forge, Some(governor))?; check_the_balance(&forge).await?; forge.run(shell)?; diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/mod.rs b/zkstack_cli/crates/zkstack/src/commands/chain/mod.rs similarity index 65% rename from zk_toolbox/crates/zk_inception/src/commands/chain/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/mod.rs index 877580d19a8..4846ac5e891 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/mod.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/mod.rs @@ -1,17 +1,18 @@ use ::common::forge::ForgeScriptArgs; use args::build_transactions::BuildTransactionsArgs; pub(crate) use args::create::ChainCreateArgsFinal; -use clap::Subcommand; +use clap::{command, Subcommand}; pub(crate) use create::create_chain_inner; use migrate_from_gateway::MigrateFromGatewayArgs; use migrate_to_gateway::MigrateToGatewayArgs; use xshell::Shell; use crate::commands::chain::{ - args::{create::ChainCreateArgs, genesis::GenesisArgs, init::InitArgs}, - deploy_l2_contracts::Deploy2ContractsOption, + args::create::ChainCreateArgs, deploy_l2_contracts::Deploy2ContractsOption, + genesis::GenesisCommand, init::ChainInitCommand, }; +mod accept_chain_ownership; pub(crate) mod args; mod build_transactions; mod common; @@ -20,9 +21,10 @@ mod create; pub mod deploy_l2_contracts; pub mod deploy_paymaster; pub mod genesis; -pub(crate) mod init; +pub mod init; mod migrate_from_gateway; mod migrate_to_gateway; +pub mod register_chain; mod set_token_multiplier_setter; mod setup_legacy_bridge; @@ -33,20 +35,35 @@ pub enum ChainCommands { /// Create unsigned transactions for chain deployment BuildTransactions(BuildTransactionsArgs), /// Initialize chain, deploying necessary contracts and performing on-chain operations - Init(InitArgs), + Init(Box<ChainInitCommand>), /// Run server genesis - Genesis(GenesisArgs), - /// Initialize bridges on l2 - #[command(alias = "bridge")] - InitializeBridges(ForgeScriptArgs), - /// Deploy all l2 contracts + Genesis(GenesisCommand), + /// Register a new chain on L1 (executed by L1 governor). + /// This command deploys and configures Governance, ChainAdmin, and DiamondProxy contracts, + /// registers the chain with BridgeHub and sets the pending admin for DiamondProxy. + /// Note: after completion, the L2 governor can accept ownership by running `accept-chain-ownership`. + #[command(alias = "register")] + RegisterChain(ForgeScriptArgs), + /// Deploy all L2 contracts (executed by L1 governor). #[command(alias = "l2")] DeployL2Contracts(ForgeScriptArgs), + /// Accept ownership of the L2 chain (executed by L2 governor). + /// This command should be run after `register-chain` to accept ownership of the newly created + /// DiamondProxy contract. 
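// [Editor's sketch, not part of this diff] A toy model of the two-step
// handoff that splitting `register-chain` and `accept-chain-ownership`
// reflects: the L1 governor registers the chain and records a pending admin
// on DiamondProxy, and the L2 governor later accepts it. The struct below is
// an illustrative stand-in, not the on-chain contract in this repository.
#[derive(Default)]
struct DiamondProxyAdmin {
    admin: Option<String>,
    pending_admin: Option<String>,
}

impl DiamondProxyAdmin {
    // Step 1, performed by `register-chain`: propose the new admin.
    fn set_pending_admin(&mut self, who: &str) {
        self.pending_admin = Some(who.to_string());
    }
    // Step 2, performed by `accept-chain-ownership`: only the pending
    // admin may complete the transfer.
    fn accept_admin(&mut self, caller: &str) -> Result<(), String> {
        if self.pending_admin.as_deref() != Some(caller) {
            return Err(format!("{caller} is not the pending admin"));
        }
        self.admin = self.pending_admin.take();
        Ok(())
    }
}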
+ #[command(alias = "accept-ownership")] + AcceptChainOwnership(ForgeScriptArgs), + /// Initialize bridges on L2 + #[command(alias = "bridge")] + InitializeBridges(ForgeScriptArgs), /// Deploy L2 consensus registry #[command(alias = "consensus")] DeployConsensusRegistry(ForgeScriptArgs), + /// Deploy L2 multicall3 + #[command(alias = "multicall3")] + DeployMulticall3(ForgeScriptArgs), /// Deploy Default Upgrader - Upgrader(ForgeScriptArgs), + #[command(alias = "upgrader")] + DeployUpgrader(ForgeScriptArgs), /// Deploy paymaster smart contract #[command(alias = "paymaster")] DeployPaymaster(ForgeScriptArgs), @@ -63,16 +80,21 @@ pub enum ChainCommands { pub(crate) async fn run(shell: &Shell, args: ChainCommands) -> anyhow::Result<()> { match args { ChainCommands::Create(args) => create::run(args, shell), - ChainCommands::Init(args) => init::run(args, shell).await, + ChainCommands::Init(args) => init::run(*args, shell).await, ChainCommands::BuildTransactions(args) => build_transactions::run(args, shell).await, ChainCommands::Genesis(args) => genesis::run(args, shell).await, + ChainCommands::RegisterChain(args) => register_chain::run(args, shell).await, ChainCommands::DeployL2Contracts(args) => { deploy_l2_contracts::run(args, shell, Deploy2ContractsOption::All).await } + ChainCommands::AcceptChainOwnership(args) => accept_chain_ownership::run(args, shell).await, ChainCommands::DeployConsensusRegistry(args) => { deploy_l2_contracts::run(args, shell, Deploy2ContractsOption::ConsensusRegistry).await } - ChainCommands::Upgrader(args) => { + ChainCommands::DeployMulticall3(args) => { + deploy_l2_contracts::run(args, shell, Deploy2ContractsOption::Multicall3).await + } + ChainCommands::DeployUpgrader(args) => { deploy_l2_contracts::run(args, shell, Deploy2ContractsOption::Upgrader).await } ChainCommands::InitializeBridges(args) => { diff --git a/zkstack_cli/crates/zkstack/src/commands/chain/register_chain.rs b/zkstack_cli/crates/zkstack/src/commands/chain/register_chain.rs new file mode 100644 index 00000000000..db69ae47952 --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/chain/register_chain.rs @@ -0,0 +1,96 @@ +use anyhow::Context; +use common::{ + forge::{Forge, ForgeScriptArgs}, + logger, + spinner::Spinner, +}; +use config::{ + forge_interface::{ + register_chain::{input::RegisterChainL1Config, output::RegisterChainOutput}, + script_params::REGISTER_CHAIN_SCRIPT_PARAMS, + }, + traits::{ReadConfig, SaveConfig, SaveConfigWithBasePath}, + ChainConfig, ContractsConfig, EcosystemConfig, +}; +use xshell::Shell; + +use crate::{ + messages::{ + MSG_CHAIN_NOT_INITIALIZED, MSG_CHAIN_REGISTERED, MSG_L1_SECRETS_MUST_BE_PRESENTED, + MSG_REGISTERING_CHAIN_SPINNER, + }, + utils::forge::{check_the_balance, fill_forge_private_key}, +}; + +pub async fn run(args: ForgeScriptArgs, shell: &Shell) -> anyhow::Result<()> { + let ecosystem_config = EcosystemConfig::from_file(shell)?; + let chain_config = ecosystem_config + .load_current_chain() + .context(MSG_CHAIN_NOT_INITIALIZED)?; + let mut contracts = chain_config.get_contracts_config()?; + let secrets = chain_config.get_secrets_config()?; + let l1_rpc_url = secrets + .l1 + .context(MSG_L1_SECRETS_MUST_BE_PRESENTED)? 
+ .l1_rpc_url + .expose_str() + .to_string(); + let spinner = Spinner::new(MSG_REGISTERING_CHAIN_SPINNER); + register_chain( + shell, + args, + &ecosystem_config, + &chain_config, + &mut contracts, + l1_rpc_url, + None, + true, + ) + .await?; + contracts.save_with_base_path(shell, chain_config.configs)?; + spinner.finish(); + logger::success(MSG_CHAIN_REGISTERED); + Ok(()) +} + +#[allow(clippy::too_many_arguments)] +pub async fn register_chain( + shell: &Shell, + forge_args: ForgeScriptArgs, + config: &EcosystemConfig, + chain_config: &ChainConfig, + contracts: &mut ContractsConfig, + l1_rpc_url: String, + sender: Option, + broadcast: bool, +) -> anyhow::Result<()> { + let deploy_config_path = REGISTER_CHAIN_SCRIPT_PARAMS.input(&config.link_to_code); + + let deploy_config = RegisterChainL1Config::new(chain_config, contracts)?; + deploy_config.save(shell, deploy_config_path)?; + + let mut forge = Forge::new(&config.path_to_l1_foundry()) + .script(®ISTER_CHAIN_SCRIPT_PARAMS.script(), forge_args.clone()) + .with_ffi() + .with_rpc_url(l1_rpc_url); + + if broadcast { + forge = forge.with_broadcast(); + } + + if let Some(address) = sender { + forge = forge.with_sender(address); + } else { + forge = fill_forge_private_key(forge, Some(&config.get_wallets()?.governor))?; + check_the_balance(&forge).await?; + } + + forge.run(shell)?; + + let register_chain_output = RegisterChainOutput::read( + shell, + REGISTER_CHAIN_SCRIPT_PARAMS.output(&chain_config.link_to_code), + )?; + contracts.set_chain_contracts(®ister_chain_output); + Ok(()) +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/set_token_multiplier_setter.rs b/zkstack_cli/crates/zkstack/src/commands/chain/set_token_multiplier_setter.rs similarity index 94% rename from zk_toolbox/crates/zk_inception/src/commands/chain/set_token_multiplier_setter.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/set_token_multiplier_setter.rs index 913e26f2843..d9d8994af87 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/set_token_multiplier_setter.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/set_token_multiplier_setter.rs @@ -3,12 +3,13 @@ use common::{ forge::{Forge, ForgeScript, ForgeScriptArgs}, logger, spinner::Spinner, + wallets::Wallet, }; use config::{forge_interface::script_params::ACCEPT_GOVERNANCE_SCRIPT_PARAMS, EcosystemConfig}; use ethers::{abi::parse_abi, contract::BaseContract, utils::hex}; use lazy_static::lazy_static; use xshell::Shell; -use zksync_basic_types::{Address, H256}; +use zksync_basic_types::Address; use crate::{ messages::{ @@ -52,7 +53,7 @@ pub async fn run(args: ForgeScriptArgs, shell: &Shell) -> anyhow::Result<()> { set_token_multiplier_setter( shell, &ecosystem_config, - chain_config.get_wallets_config()?.governor_private_key(), + &chain_config.get_wallets_config()?.governor, contracts_config.l1.access_control_restriction_addr, contracts_config.l1.diamond_proxy_addr, token_multiplier_setter_address, @@ -74,7 +75,7 @@ pub async fn run(args: ForgeScriptArgs, shell: &Shell) -> anyhow::Result<()> { pub async fn set_token_multiplier_setter( shell: &Shell, ecosystem_config: &EcosystemConfig, - governor: Option, + governor: &Wallet, access_control_restriction_address: Address, diamond_proxy_address: Address, new_setter_address: Address, @@ -112,10 +113,10 @@ pub async fn set_token_multiplier_setter( async fn update_token_multiplier_setter( shell: &Shell, - governor: Option, + governor: &Wallet, mut forge: ForgeScript, ) -> anyhow::Result<()> { - forge = fill_forge_private_key(forge, 
governor)?; + forge = fill_forge_private_key(forge, Some(governor))?; check_the_balance(&forge).await?; forge.run(shell)?; Ok(()) diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/setup_legacy_bridge.rs b/zkstack_cli/crates/zkstack/src/commands/chain/setup_legacy_bridge.rs similarity index 94% rename from zk_toolbox/crates/zk_inception/src/commands/chain/setup_legacy_bridge.rs rename to zkstack_cli/crates/zkstack/src/commands/chain/setup_legacy_bridge.rs index 925014fe4e6..f61c640ffb6 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/setup_legacy_bridge.rs +++ b/zkstack_cli/crates/zkstack/src/commands/chain/setup_legacy_bridge.rs @@ -59,10 +59,7 @@ pub async fn setup_legacy_bridge( ) .with_broadcast(); - forge = fill_forge_private_key( - forge, - ecosystem_config.get_wallets()?.governor_private_key(), - )?; + forge = fill_forge_private_key(forge, Some(&ecosystem_config.get_wallets()?.governor))?; let spinner = Spinner::new(MSG_DEPLOYING_PAYMASTER); check_the_balance(&forge).await?; diff --git a/zkstack_cli/crates/zkstack/src/commands/consensus/conv.rs b/zkstack_cli/crates/zkstack/src/commands/consensus/conv.rs new file mode 100644 index 00000000000..c9d878c8fd3 --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/consensus/conv.rs @@ -0,0 +1,47 @@ +use anyhow::Context as _; +use zksync_config::configs::consensus as config; +use zksync_consensus_crypto::TextFmt as _; +use zksync_consensus_roles::attester; +use zksync_protobuf::{ProtoFmt, ProtoRepr}; + +use super::proto; +use crate::utils::consensus::parse_attester_committee; + +#[derive(Debug, Clone, PartialEq)] +pub(super) struct SetAttesterCommitteeFile { + pub attesters: attester::Committee, +} + +impl ProtoFmt for SetAttesterCommitteeFile { + type Proto = proto::SetAttesterCommitteeFile; + + fn read(r: &Self::Proto) -> anyhow::Result { + // zksync_config was not allowed to depend on consensus crates, + // therefore to parse the config we need to go through the intermediate + // representation of consensus types defined in zksync_config. + let attesters: Vec<_> = r + .attesters + .iter() + .map(|x| x.read()) + .collect::>() + .context("attesters")?; + Ok(Self { + attesters: parse_attester_committee(&attesters)?, + }) + } + + fn build(&self) -> Self::Proto { + Self::Proto { + attesters: self + .attesters + .iter() + .map(|a| { + ProtoRepr::build(&config::WeightedAttester { + key: config::AttesterPublicKey(a.key.encode()), + weight: a.weight, + }) + }) + .collect(), + } + } +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/consensus.rs b/zkstack_cli/crates/zkstack/src/commands/consensus/mod.rs similarity index 79% rename from zk_toolbox/crates/zk_inception/src/commands/consensus.rs rename to zkstack_cli/crates/zkstack/src/commands/consensus/mod.rs index 7cf96ebe5ad..1855a5943dc 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/consensus.rs +++ b/zkstack_cli/crates/zkstack/src/commands/consensus/mod.rs @@ -1,10 +1,11 @@ -use std::{borrow::Borrow, collections::HashMap, sync::Arc}; +use std::{borrow::Borrow, collections::HashMap, path::PathBuf, sync::Arc}; /// Consensus registry contract operations. /// Includes code duplicated from `zksync_node_consensus::registry::abi`. 
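// [Editor's sketch, not part of this diff] The `SetAttesterCommitteeCommand`
// introduced below relies on a clap argument group with `required = true,
// multiple = false`, which makes `--from-genesis` and `--from-file` mutually
// exclusive with exactly one required. A minimal standalone reduction,
// assuming clap with the `derive` feature:
use std::path::PathBuf;

use clap::{Args, Parser};

#[derive(Parser, Debug)]
struct Cli {
    #[clap(flatten)]
    source: CommitteeSource,
}

#[derive(Args, Debug)]
#[group(required = true, multiple = false)]
struct CommitteeSource {
    /// Take the committee from general.yaml.
    #[clap(long)]
    from_genesis: bool,
    /// Take the committee from a YAML file.
    #[clap(long)]
    from_file: Option<PathBuf>,
}

fn main() {
    // `prog --from-genesis` and `prog --from-file x.yaml` both parse;
    // passing neither flag or both is rejected by clap at parse time.
    let cli = Cli::parse();
    println!("{:?}", cli.source);
}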
use anyhow::Context as _; -use common::logger; +use common::{logger, wallets::Wallet}; use config::EcosystemConfig; +use conv::*; use ethers::{ abi::Detokenize, contract::{FunctionCall, Multicall}, @@ -19,6 +20,11 @@ use zksync_consensus_roles::{attester, validator}; use crate::{messages, utils::consensus::parse_attester_committee}; +mod conv; +mod proto; +#[cfg(test)] +mod tests; + #[allow(warnings)] mod abi { include!(concat!(env!("OUT_DIR"), "/consensus_registry_abi.rs")); @@ -65,11 +71,25 @@ fn encode_validator_pop(pop: &validator::ProofOfPossession) -> abi::Bls12381Sign } } +#[derive(clap::Args, Debug)] +#[group(required = true, multiple = false)] +pub struct SetAttesterCommitteeCommand { + /// Sets the attester committee in the consensus registry contract to + /// `consensus.genesis_spec.attesters` in general.yaml. + #[clap(long)] + from_genesis: bool, + /// Sets the attester committee in the consensus registry contract to + /// the committee in the yaml file. + /// File format is defined in `commands/consensus/proto/mod.proto`. + #[clap(long)] + from_file: Option<PathBuf>, +} + #[derive(clap::Subcommand, Debug)] pub enum Command { /// Sets the attester committee in the consensus registry contract to + /// `consensus.genesis_spec.attesters` in general.yaml. - SetAttesterCommittee, + SetAttesterCommittee(SetAttesterCommitteeCommand), /// Fetches the attester committee from the consensus registry contract. GetAttesterCommittee, } @@ -154,26 +174,26 @@ impl Setup { )?) } - fn governor(&self) -> anyhow::Result> { - let governor = self + fn governor(&self) -> anyhow::Result<Wallet> { + Ok(self .chain .get_wallets_config() .context("get_wallets_config()")? - .governor - .private_key - .context(messages::MSG_GOVERNOR_PRIVATE_KEY_NOT_SET)?; - let governor = LocalWallet::from_bytes(governor.as_bytes()) - .context("LocalWallet::from_bytes()")? - .with_chain_id(self.genesis.l2_chain_id.as_u64()); + .governor) + } + + fn signer(&self, wallet: LocalWallet) -> anyhow::Result> { + let wallet = wallet.with_chain_id(self.genesis.l2_chain_id.as_u64()); + let provider = self.provider().context("provider()")?; - let signer = SignerMiddleware::new(provider, governor.clone()); + let signer = SignerMiddleware::new(provider, wallet.clone()); // Allows us to send next transaction without waiting for the previous to complete. - let signer = NonceManagerMiddleware::new(signer, governor.address()); + let signer = NonceManagerMiddleware::new(signer, wallet.address()); Ok(Arc::new(signer)) } fn new(shell: &Shell) -> anyhow::Result<Self> { - let ecosystem_config = EcosystemConfig::from_file(shell)?; + let ecosystem_config = + EcosystemConfig::from_file(shell).context("EcosystemConfig::from_file()")?; let chain = ecosystem_config .load_current_chain() .context(messages::MSG_CHAIN_NOT_INITIALIZED)?; @@ -227,9 +247,21 @@ impl Setup { attester::Committee::new(attesters.into_iter()).context("attester::Committee::new()") } - async fn set_attester_committee(&self) -> anyhow::Result<attester::Committee> { + fn read_attester_committee( + &self, + opts: &SetAttesterCommitteeCommand, + ) -> anyhow::Result<attester::Committee> { // Fetch the desired state. 
- let want = (|| { + if let Some(path) = &opts.from_file { + let yaml = std::fs::read_to_string(path).context("read_to_string()")?; + let file: SetAttesterCommitteeFile = zksync_protobuf::serde::Deserialize { + deny_unknown_fields: true, + } + .proto_fmt_from_yaml(&yaml) + .context("proto_fmt_from_yaml()")?; + return Ok(file.attesters); + } + let attesters = (|| { Some( &self .general @@ -241,15 +273,32 @@ impl Setup { ) })() .context(messages::MSG_CONSENSUS_GENESIS_SPEC_ATTESTERS_MISSING_IN_GENERAL_YAML)?; - let want = parse_attester_committee(want).context("parse_attester_committee()")?; + parse_attester_committee(attesters).context("parse_attester_committee()") + } + async fn set_attester_committee(&self, want: &attester::Committee) -> anyhow::Result<()> { let provider = self.provider().context("provider()")?; let block_id = self.last_block(&provider).await.context("last_block()")?; let governor = self.governor().context("governor()")?; + let signer = self.signer( + governor + .private_key + .clone() + .context(messages::MSG_GOVERNOR_PRIVATE_KEY_NOT_SET)?, + )?; let consensus_registry = self - .consensus_registry(governor.clone()) + .consensus_registry(signer.clone()) .context("consensus_registry()")?; - let mut multicall = self.multicall(governor.clone()).context("multicall()")?; + let mut multicall = self.multicall(signer).context("multicall()")?; + + let owner = consensus_registry.owner().call().await.context("owner()")?; + if owner != governor.address { + anyhow::bail!( + "governor ({:#x}) is different than the consensus registry owner ({:#x})", + governor.address, + owner + ); + } // Fetch contract state. let n: usize = consensus_registry @@ -337,7 +386,7 @@ impl Setup { ) .await?; txs.wait(&provider).await.context("wait()")?; - Ok(want) + Ok(()) } } @@ -345,8 +394,11 @@ impl Command { pub(crate) async fn run(self, shell: &Shell) -> anyhow::Result<()> { let setup = Setup::new(shell).context("Setup::new()")?; match self { - Self::SetAttesterCommittee => { - let want = setup.set_attester_committee().await?; + Self::SetAttesterCommittee(opts) => { + let want = setup + .read_attester_committee(&opts) + .context("read_attester_committee()")?; + setup.set_attester_committee(&want).await?; let got = setup.get_attester_committee().await?; anyhow::ensure!( got == want, diff --git a/zkstack_cli/crates/zkstack/src/commands/consensus/proto/mod.proto b/zkstack_cli/crates/zkstack/src/commands/consensus/proto/mod.proto new file mode 100644 index 00000000000..d8a7323f714 --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/consensus/proto/mod.proto @@ -0,0 +1,9 @@ +syntax = "proto3"; + +package zksync.toolbox.consensus; + +import "zksync/core/consensus.proto"; + +message SetAttesterCommitteeFile { + repeated core.consensus.WeightedAttester attesters = 1; +} diff --git a/zkstack_cli/crates/zkstack/src/commands/consensus/proto/mod.rs b/zkstack_cli/crates/zkstack/src/commands/consensus/proto/mod.rs new file mode 100644 index 00000000000..61a0a047f0a --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/consensus/proto/mod.rs @@ -0,0 +1,6 @@ +#![allow(warnings)] + +include!(concat!( + env!("OUT_DIR"), + "/src/commands/consensus/proto/gen.rs" +)); diff --git a/zkstack_cli/crates/zkstack/src/commands/consensus/tests.rs b/zkstack_cli/crates/zkstack/src/commands/consensus/tests.rs new file mode 100644 index 00000000000..c2f393ad229 --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/consensus/tests.rs @@ -0,0 +1,19 @@ +use rand::{distributions::Distribution, Rng}; +use 
zksync_consensus_utils::EncodeDist; +use zksync_protobuf::testonly::{test_encode_all_formats, FmtConv}; + +use super::SetAttesterCommitteeFile; + +impl Distribution for EncodeDist { + fn sample(&self, rng: &mut R) -> SetAttesterCommitteeFile { + SetAttesterCommitteeFile { + attesters: rng.gen(), + } + } +} + +#[test] +fn test_encoding() { + let rng = &mut rand::thread_rng(); + test_encode_all_formats::>(rng); +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/containers.rs b/zkstack_cli/crates/zkstack/src/commands/containers.rs similarity index 90% rename from zk_toolbox/crates/zk_inception/src/commands/containers.rs rename to zkstack_cli/crates/zkstack/src/commands/containers.rs index 9c11cc2e3ef..8367289bd67 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/containers.rs +++ b/zkstack_cli/crates/zkstack/src/commands/containers.rs @@ -36,10 +36,6 @@ pub fn run(shell: &Shell, args: ContainersArgs) -> anyhow::Result<()> { } pub fn initialize_docker(shell: &Shell, ecosystem: &EcosystemConfig) -> anyhow::Result<()> { - if !shell.path_exists("volumes") { - create_docker_folders(shell)?; - }; - if !shell.path_exists(DOCKER_COMPOSE_FILE) { copy_dockerfile(shell, ecosystem.link_to_code.clone())?; }; @@ -75,14 +71,6 @@ pub fn start_containers(shell: &Shell, observability: bool) -> anyhow::Result<() Ok(()) } -fn create_docker_folders(shell: &Shell) -> anyhow::Result<()> { - shell.create_dir("volumes")?; - shell.create_dir("volumes/postgres")?; - shell.create_dir("volumes/reth")?; - shell.create_dir("volumes/reth/data")?; - Ok(()) -} - fn copy_dockerfile(shell: &Shell, link_to_code: PathBuf) -> anyhow::Result<()> { let docker_compose_file = link_to_code.join(DOCKER_COMPOSE_FILE); diff --git a/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/args/init.rs b/zkstack_cli/crates/zkstack/src/commands/contract_verifier/args/init.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/contract_verifier/args/init.rs rename to zkstack_cli/crates/zkstack/src/commands/contract_verifier/args/init.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/args/mod.rs b/zkstack_cli/crates/zkstack/src/commands/contract_verifier/args/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/contract_verifier/args/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/contract_verifier/args/mod.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/args/releases.rs b/zkstack_cli/crates/zkstack/src/commands/contract_verifier/args/releases.rs similarity index 81% rename from zk_toolbox/crates/zk_inception/src/commands/contract_verifier/args/releases.rs rename to zkstack_cli/crates/zkstack/src/commands/contract_verifier/args/releases.rs index 6f7eae4c168..ab169220f29 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/args/releases.rs +++ b/zkstack_cli/crates/zkstack/src/commands/contract_verifier/args/releases.rs @@ -1,8 +1,8 @@ use std::str::FromStr; -use common::{cmd::Cmd, spinner::Spinner}; +use common::spinner::Spinner; use serde::Deserialize; -use xshell::{cmd, Shell}; +use xshell::Shell; use crate::messages::{MSG_INVALID_ARCH_ERR, MSG_NO_RELEASES_FOUND_ERR}; @@ -76,16 +76,19 @@ fn get_compatible_archs(asset_name: &str) -> anyhow::Result> { fn get_releases(shell: &Shell, repo: &str, arch: Arch) -> anyhow::Result> { if repo == "ethereum/solc-bin" { - return get_solc_releases(shell, arch); + return get_solc_releases(arch); } - let response: std::process::Output = 
Cmd::new(cmd!( - shell, - "curl https://api.github.com/repos/{repo}/releases" - )) - .run_with_output()?; + let client = reqwest::blocking::Client::new(); + let mut request = client + .get(format!("https://api.github.com/repos/{repo}/releases")) + .header("User-Agent", "zkstack"); - let response = String::from_utf8(response.stdout)?; + if let Ok(token) = shell.var("GITHUB_TOKEN") { + request = request.header("Authorization", format!("Bearer {}", token)); + } + + let response = request.send()?.text()?; let releases: Vec = serde_json::from_str(&response)?; let mut versions = vec![]; @@ -109,7 +112,7 @@ fn get_releases(shell: &Shell, repo: &str, arch: Arch) -> anyhow::Result anyhow::Result> { +fn get_solc_releases(arch: Arch) -> anyhow::Result> { let (arch_str, compatible_archs) = match arch { Arch::LinuxAmd => ("linux-amd64", vec![Arch::LinuxAmd, Arch::LinuxArm]), Arch::LinuxArm => ("linux-amd64", vec![Arch::LinuxAmd, Arch::LinuxArm]), @@ -117,13 +120,15 @@ fn get_solc_releases(shell: &Shell, arch: Arch) -> anyhow::Result> Arch::MacosArm => ("macosx-amd64", vec![Arch::MacosAmd, Arch::MacosArm]), }; - let response: std::process::Output = Cmd::new(cmd!( - shell, - "curl https://raw.githubusercontent.com/ethereum/solc-bin/gh-pages/{arch_str}/list.json" - )) - .run_with_output()?; + let client = reqwest::blocking::Client::new(); + let response = client + .get(format!( + "https://raw.githubusercontent.com/ethereum/solc-bin/gh-pages/{arch_str}/list.json" + )) + .header("User-Agent", "zkstack") + .send()? + .text()?; - let response = String::from_utf8(response.stdout)?; let solc_list: SolcList = serde_json::from_str(&response)?; let mut versions = vec![]; diff --git a/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/init.rs b/zkstack_cli/crates/zkstack/src/commands/contract_verifier/init.rs similarity index 96% rename from zk_toolbox/crates/zk_inception/src/commands/contract_verifier/init.rs rename to zkstack_cli/crates/zkstack/src/commands/contract_verifier/init.rs index f376a0d36ec..b173ad9bbb7 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/init.rs +++ b/zkstack_cli/crates/zkstack/src/commands/contract_verifier/init.rs @@ -89,7 +89,8 @@ fn download_binary( let spinner = Spinner::new(&msg_downloading_binary_spinner(name, version)); Cmd::new(cmd!(shell, "mkdir -p {path}")).run()?; - Cmd::new(cmd!(shell, "wget {url} -O {binary_path}")).run()?; + let response = reqwest::blocking::get(url)?.bytes()?; + shell.write_file(binary_path.clone(), &response)?; Cmd::new(cmd!(shell, "chmod +x {binary_path}")).run()?; spinner.finish(); diff --git a/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/mod.rs b/zkstack_cli/crates/zkstack/src/commands/contract_verifier/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/contract_verifier/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/contract_verifier/mod.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/run.rs b/zkstack_cli/crates/zkstack/src/commands/contract_verifier/run.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/contract_verifier/run.rs rename to zkstack_cli/crates/zkstack/src/commands/contract_verifier/run.rs diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/clean/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/clean/mod.rs similarity index 92% rename from zk_toolbox/crates/zk_supervisor/src/commands/clean/mod.rs rename to 
zkstack_cli/crates/zkstack/src/commands/dev/commands/clean/mod.rs
index 06ee1347ea4..06dff541f94 100644
--- a/zk_toolbox/crates/zk_supervisor/src/commands/clean/mod.rs
+++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/clean/mod.rs
@@ -4,9 +4,8 @@ use common::{docker, logger};
 use config::{EcosystemConfig, DOCKER_COMPOSE_FILE};
 use xshell::Shell;
 
-use crate::messages::{
-    MSG_CONTRACTS_CLEANING, MSG_CONTRACTS_CLEANING_FINISHED, MSG_DOCKER_COMPOSE_CLEANED,
-    MSG_DOCKER_COMPOSE_DOWN, MSG_DOCKER_COMPOSE_REMOVE_VOLUMES,
+use crate::commands::dev::messages::{
+    MSG_CONTRACTS_CLEANING, MSG_CONTRACTS_CLEANING_FINISHED, MSG_DOCKER_COMPOSE_DOWN,
 };
 
 #[derive(Subcommand, Debug)]
@@ -35,9 +34,6 @@ pub fn run(shell: &Shell, args: CleanCommands) -> anyhow::Result<()> {
 pub fn containers(shell: &Shell) -> anyhow::Result<()> {
     logger::info(MSG_DOCKER_COMPOSE_DOWN);
     docker::down(shell, DOCKER_COMPOSE_FILE)?;
-    logger::info(MSG_DOCKER_COMPOSE_REMOVE_VOLUMES);
-    shell.remove_path("volumes")?;
-    logger::info(MSG_DOCKER_COMPOSE_CLEANED);
     Ok(())
 }
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/config_writer.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/config_writer.rs
similarity index 96%
rename from zk_toolbox/crates/zk_supervisor/src/commands/config_writer.rs
rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/config_writer.rs
index 04e019936e1..70238ed15f3 100644
--- a/zk_toolbox/crates/zk_supervisor/src/commands/config_writer.rs
+++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/config_writer.rs
@@ -4,7 +4,7 @@ use common::{logger, Prompt};
 use config::{override_config, EcosystemConfig};
 use xshell::Shell;
 
-use crate::messages::{
+use crate::commands::dev::messages::{
     msg_overriding_config, MSG_CHAIN_NOT_FOUND_ERR, MSG_OVERRIDE_CONFIG_PATH_HELP,
     MSG_OVERRIDE_SUCCESS, MSG_OVERRRIDE_CONFIG_PATH_PROMPT,
 };
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/contracts.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/contracts.rs
similarity index 66%
rename from zk_toolbox/crates/zk_supervisor/src/commands/contracts.rs
rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/contracts.rs
index b0f129f7dde..ff638a033dd 100644
--- a/zk_toolbox/crates/zk_supervisor/src/commands/contracts.rs
+++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/contracts.rs
@@ -1,17 +1,23 @@
 use std::path::PathBuf;
 
 use clap::Parser;
-use common::{cmd::Cmd, logger, spinner::Spinner};
+use common::{
+    contracts::{
+        build_l1_contracts, build_l1_da_contracts, build_l2_contracts, build_system_contracts,
+        build_test_contracts,
+    },
+    logger,
+    spinner::Spinner,
+};
 use config::EcosystemConfig;
-use xshell::{cmd, Shell};
+use xshell::Shell;
 
-use crate::messages::{
+use crate::commands::dev::messages::{
     MSG_BUILDING_CONTRACTS, MSG_BUILDING_CONTRACTS_SUCCESS, MSG_BUILDING_L1_CONTRACTS_SPINNER,
     MSG_BUILDING_L1_DA_CONTRACTS_SPINNER, MSG_BUILDING_L2_CONTRACTS_SPINNER,
     MSG_BUILDING_SYSTEM_CONTRACTS_SPINNER, MSG_BUILDING_TEST_CONTRACTS_SPINNER,
     MSG_BUILD_L1_CONTRACTS_HELP, MSG_BUILD_L1_DA_CONTRACTS_HELP, MSG_BUILD_L2_CONTRACTS_HELP,
-    MSG_BUILD_SYSTEM_CONTRACTS_HELP, MSG_BUILD_TEST_CONTRACTS_HELP, MSG_CONTRACTS_DEPS_SPINNER,
-    MSG_NOTHING_TO_BUILD_MSG,
+    MSG_BUILD_SYSTEM_CONTRACTS_HELP, MSG_BUILD_TEST_CONTRACTS_HELP, MSG_NOTHING_TO_BUILD_MSG,
 };
 
 #[derive(Debug, Parser)]
@@ -76,72 +82,46 @@ pub enum ContractType {
     TestContracts,
 }
 
-#[derive(Debug)]
 struct ContractBuilder {
-    dir: PathBuf,
-    cmd: String,
+    cmd: Box<dyn FnOnce(Shell, PathBuf) -> anyhow::Result<()>>,
     msg: String,
+    link_to_code:
PathBuf, } impl ContractBuilder { fn new(ecosystem: &EcosystemConfig, contract_type: ContractType) -> Self { match contract_type { ContractType::L1 => Self { - dir: ecosystem.path_to_l1_foundry(), - cmd: "forge build".to_string(), + cmd: Box::new(build_l1_contracts), msg: MSG_BUILDING_L1_CONTRACTS_SPINNER.to_string(), + link_to_code: ecosystem.link_to_code.clone(), }, ContractType::L1DA => Self { - dir: ecosystem.link_to_code.join("contracts/da-contracts"), - cmd: "forge build".to_string(), + cmd: Box::new(build_l1_da_contracts), msg: MSG_BUILDING_L1_DA_CONTRACTS_SPINNER.to_string(), + link_to_code: ecosystem.link_to_code.clone(), }, ContractType::L2 => Self { - dir: ecosystem.link_to_code.clone(), - cmd: "yarn l2-contracts build".to_string(), + cmd: Box::new(build_l2_contracts), msg: MSG_BUILDING_L2_CONTRACTS_SPINNER.to_string(), + link_to_code: ecosystem.link_to_code.clone(), }, ContractType::SystemContracts => Self { - dir: ecosystem.link_to_code.join("contracts"), - cmd: "yarn sc build".to_string(), + cmd: Box::new(build_system_contracts), msg: MSG_BUILDING_SYSTEM_CONTRACTS_SPINNER.to_string(), + link_to_code: ecosystem.link_to_code.clone(), }, ContractType::TestContracts => Self { - dir: ecosystem.link_to_code.join("etc/contracts-test-data"), - cmd: "yarn build".to_string(), + cmd: Box::new(build_test_contracts), msg: MSG_BUILDING_TEST_CONTRACTS_SPINNER.to_string(), + link_to_code: ecosystem.link_to_code.clone(), }, } } - fn build(&self, shell: &Shell) -> anyhow::Result<()> { + fn build(self, shell: Shell) -> anyhow::Result<()> { let spinner = Spinner::new(&self.msg); - let _dir_guard = shell.push_dir(&self.dir); - - // FIXME: extreme hack, we also need to build 1l contracts without foundry for now - if self.msg == MSG_BUILDING_L1_CONTRACTS_SPINNER { - let cstr = "yarn build".to_string(); - let mut args = cstr.split_whitespace().collect::>(); - let command = args.remove(0); // It's safe to unwrap here because we know that the vec is not empty - let mut cmd = cmd!(shell, "{command}"); - - for arg in args { - cmd = cmd.arg(arg); - } - - Cmd::new(cmd).run()?; - } - - let mut args = self.cmd.split_whitespace().collect::>(); - let command = args.remove(0); // It's safe to unwrap here because we know that the vec is not empty - let mut cmd = cmd!(shell, "{command}"); - - for arg in args { - cmd = cmd.arg(arg); - } - - Cmd::new(cmd).run()?; - + (self.cmd)(shell, self.link_to_code.clone())?; spinner.finish(); Ok(()) } @@ -157,17 +137,11 @@ pub fn run(shell: &Shell, args: ContractsArgs) -> anyhow::Result<()> { logger::info(MSG_BUILDING_CONTRACTS); let ecosystem = EcosystemConfig::from_file(shell)?; - let link_to_code = ecosystem.link_to_code.clone(); - - let spinner = Spinner::new(MSG_CONTRACTS_DEPS_SPINNER); - let _dir_guard = shell.push_dir(&link_to_code); - Cmd::new(cmd!(shell, "yarn install")).run()?; - spinner.finish(); contracts .iter() .map(|contract| ContractBuilder::new(&ecosystem, *contract)) - .try_for_each(|builder| builder.build(shell))?; + .try_for_each(|builder| builder.build(shell.clone()))?; logger::outro(MSG_BUILDING_CONTRACTS_SUCCESS); diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/args/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/args/mod.rs similarity index 98% rename from zk_toolbox/crates/zk_supervisor/src/commands/database/args/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/database/args/mod.rs index cf9dfc2834a..f05e3ee1c0e 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/database/args/mod.rs 
+++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/args/mod.rs @@ -1,6 +1,6 @@ use clap::Parser; -use crate::{ +use crate::commands::dev::{ dals::SelectedDals, messages::{ MSG_DATABASE_COMMON_CORE_HELP, MSG_DATABASE_COMMON_CORE_URL_HELP, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/args/new_migration.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/args/new_migration.rs similarity index 97% rename from zk_toolbox/crates/zk_supervisor/src/commands/database/args/new_migration.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/database/args/new_migration.rs index 64b7a507abe..b91b048be78 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/database/args/new_migration.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/args/new_migration.rs @@ -2,7 +2,7 @@ use clap::{Parser, ValueEnum}; use common::{Prompt, PromptSelect}; use strum::{Display, EnumIter, IntoEnumIterator}; -use crate::messages::{ +use crate::commands::dev::messages::{ MSG_DATABASE_NEW_MIGRATION_DATABASE_HELP, MSG_DATABASE_NEW_MIGRATION_DB_PROMPT, MSG_DATABASE_NEW_MIGRATION_NAME_HELP, MSG_DATABASE_NEW_MIGRATION_NAME_PROMPT, }; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/check_sqlx_data.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/check_sqlx_data.rs similarity index 98% rename from zk_toolbox/crates/zk_supervisor/src/commands/database/check_sqlx_data.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/database/check_sqlx_data.rs index 0c401595690..990fca78641 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/database/check_sqlx_data.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/check_sqlx_data.rs @@ -5,7 +5,7 @@ use config::EcosystemConfig; use xshell::{cmd, Shell}; use super::args::DatabaseCommonArgs; -use crate::{ +use crate::commands::dev::{ dals::{get_dals, Dal}, messages::{ msg_database_info, msg_database_loading, msg_database_success, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/drop.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/drop.rs similarity index 97% rename from zk_toolbox/crates/zk_supervisor/src/commands/database/drop.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/database/drop.rs index 94bf325a2c6..a5578d41f77 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/database/drop.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/drop.rs @@ -6,7 +6,7 @@ use common::{ use xshell::Shell; use super::args::DatabaseCommonArgs; -use crate::{ +use crate::commands::dev::{ dals::{get_dals, Dal}, messages::{ msg_database_info, msg_database_loading, msg_database_success, MSG_DATABASE_DROP_GERUND, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/migrate.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/migrate.rs similarity index 98% rename from zk_toolbox/crates/zk_supervisor/src/commands/database/migrate.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/database/migrate.rs index 1d648965c24..fd22f769742 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/database/migrate.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/migrate.rs @@ -5,7 +5,7 @@ use config::EcosystemConfig; use xshell::{cmd, Shell}; use super::args::DatabaseCommonArgs; -use crate::{ +use crate::commands::dev::{ dals::{get_dals, Dal}, messages::{ msg_database_info, msg_database_loading, msg_database_success, 
MSG_DATABASE_MIGRATE_GERUND, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/mod.rs similarity index 97% rename from zk_toolbox/crates/zk_supervisor/src/commands/database/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/database/mod.rs index 415b81879f1..ed039fc6501 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/database/mod.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/mod.rs @@ -2,7 +2,7 @@ use clap::Subcommand; use xshell::Shell; use self::args::{new_migration::DatabaseNewMigrationArgs, DatabaseCommonArgs}; -use crate::messages::{ +use crate::commands::dev::messages::{ MSG_DATABASE_CHECK_SQLX_DATA_ABOUT, MSG_DATABASE_DROP_ABOUT, MSG_DATABASE_MIGRATE_ABOUT, MSG_DATABASE_NEW_MIGRATION_ABOUT, MSG_DATABASE_PREPARE_ABOUT, MSG_DATABASE_RESET_ABOUT, MSG_DATABASE_SETUP_ABOUT, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/new_migration.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/new_migration.rs similarity index 97% rename from zk_toolbox/crates/zk_supervisor/src/commands/database/new_migration.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/database/new_migration.rs index e21b7cde47b..2d9fa103053 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/database/new_migration.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/new_migration.rs @@ -5,7 +5,7 @@ use config::EcosystemConfig; use xshell::{cmd, Shell}; use super::args::new_migration::{DatabaseNewMigrationArgs, SelectedDatabase}; -use crate::{ +use crate::commands::dev::{ dals::{get_core_dal, get_prover_dal, Dal}, messages::{msg_database_new_migration_loading, MSG_DATABASE_NEW_MIGRATION_SUCCESS}, }; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/prepare.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/prepare.rs similarity index 98% rename from zk_toolbox/crates/zk_supervisor/src/commands/database/prepare.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/database/prepare.rs index 82ec12f9412..288a68452fd 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/database/prepare.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/prepare.rs @@ -5,7 +5,7 @@ use config::EcosystemConfig; use xshell::{cmd, Shell}; use super::args::DatabaseCommonArgs; -use crate::{ +use crate::commands::dev::{ dals::{get_dals, Dal}, messages::{ msg_database_info, msg_database_loading, msg_database_success, MSG_DATABASE_PREPARE_GERUND, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/reset.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/reset.rs similarity index 97% rename from zk_toolbox/crates/zk_supervisor/src/commands/database/reset.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/database/reset.rs index f0262cecb95..55d5ab1cbfc 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/database/reset.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/reset.rs @@ -5,7 +5,7 @@ use config::EcosystemConfig; use xshell::Shell; use super::{args::DatabaseCommonArgs, drop::drop_database, setup::setup_database}; -use crate::{ +use crate::commands::dev::{ dals::{get_dals, Dal}, messages::{ msg_database_info, msg_database_loading, msg_database_success, MSG_DATABASE_RESET_GERUND, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/database/setup.rs 
b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/setup.rs similarity index 98% rename from zk_toolbox/crates/zk_supervisor/src/commands/database/setup.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/database/setup.rs index 15b3ac5c1c7..74ade66ba48 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/database/setup.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/database/setup.rs @@ -5,7 +5,7 @@ use config::EcosystemConfig; use xshell::{cmd, Shell}; use super::args::DatabaseCommonArgs; -use crate::{ +use crate::commands::dev::{ dals::{get_dals, Dal}, messages::{ msg_database_info, msg_database_loading, msg_database_success, MSG_DATABASE_SETUP_GERUND, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/fmt.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/fmt.rs similarity index 92% rename from zk_toolbox/crates/zk_supervisor/src/commands/fmt.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/fmt.rs index 3aefc15aba7..ebaf27845e0 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/fmt.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/fmt.rs @@ -6,7 +6,7 @@ use config::EcosystemConfig; use xshell::{cmd, Shell}; use super::sql_fmt::format_sql; -use crate::{ +use crate::commands::dev::{ commands::lint_utils::{get_unignored_files, Target}, messages::{ msg_running_fmt_for_extension_spinner, msg_running_fmt_for_extensions_spinner, @@ -42,7 +42,7 @@ async fn prettier_contracts(shell: Shell, check: bool) -> anyhow::Result<()> { } async fn rustfmt(shell: Shell, check: bool, link_to_code: PathBuf) -> anyhow::Result<()> { - for dir in [".", "prover", "zk_toolbox"] { + for dir in [".", "prover", "zkstack_cli"] { let spinner = Spinner::new(&msg_running_rustfmt_for_dir_spinner(dir)); let _dir = shell.push_dir(link_to_code.join(dir)); let mut cmd = cmd!(shell, "cargo fmt -- --config imports_granularity=Crate --config group_imports=StdExternalCrate"); @@ -101,14 +101,9 @@ pub async fn run(shell: Shell, args: FmtArgs) -> anyhow::Result<()> { ))); tasks.push(tokio::spawn(prettier_contracts(shell.clone(), args.check))); - futures::future::join_all(tasks) - .await - .iter() - .for_each(|res| { - if let Err(err) = res { - logger::error(err) - } - }); + for result in futures::future::join_all(tasks).await { + result??; + } } Some(Formatter::Prettier { mut targets }) => { if targets.is_empty() { diff --git a/zkstack_cli/crates/zkstack/src/commands/dev/commands/genesis.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/genesis.rs new file mode 100644 index 00000000000..683ffe19916 --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/genesis.rs @@ -0,0 +1,26 @@ +use anyhow::Context; +use common::{cmd::Cmd, spinner::Spinner}; +use config::EcosystemConfig; +use xshell::{cmd, Shell}; + +use crate::{ + commands::dev::{ + commands::database::reset::reset_database, dals::get_core_dal, + messages::MSG_GENESIS_FILE_GENERATION_STARTED, + }, + messages::MSG_CHAIN_NOT_FOUND_ERR, +}; + +pub(crate) async fn run(shell: &Shell) -> anyhow::Result<()> { + let ecosystem = EcosystemConfig::from_file(shell)?; + let chain = ecosystem + .load_chain(Some(ecosystem.current_chain().to_string())) + .context(MSG_CHAIN_NOT_FOUND_ERR)?; + let spinner = Spinner::new(MSG_GENESIS_FILE_GENERATION_STARTED); + let secrets_path = chain.path_to_secrets_config(); + let dal = get_core_dal(shell, None)?; + reset_database(shell, ecosystem.link_to_code, dal).await?; + Cmd::new(cmd!(shell,"cargo run --package genesis_generator --bin 
genesis_generator -- --config-path={secrets_path}")).run()?; + spinner.finish(); + Ok(()) +} diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/lint.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/lint.rs similarity index 62% rename from zk_toolbox/crates/zk_supervisor/src/commands/lint.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/lint.rs index 45a7a46ebbe..6c3c3fa3d75 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/lint.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/lint.rs @@ -1,13 +1,23 @@ +use std::{ + fs::File, + io::{Read, Write}, + path::Path, +}; + +use anyhow::{bail, Context}; use clap::Parser; use common::{cmd::Cmd, logger, spinner::Spinner}; use config::EcosystemConfig; use xshell::{cmd, Shell}; -use crate::{ - commands::lint_utils::{get_unignored_files, Target}, - messages::{ - msg_running_linter_for_extension_spinner, msg_running_linters_for_files, - MSG_LINT_CONFIG_PATH_ERR, MSG_RUNNING_CONTRACTS_LINTER_SPINNER, +use crate::commands::{ + autocomplete::{autocomplete_file_name, generate_completions}, + dev::{ + commands::lint_utils::{get_unignored_files, Target}, + messages::{ + msg_running_linter_for_extension_spinner, msg_running_linters_for_files, + MSG_LINT_CONFIG_PATH_ERR, MSG_RUNNING_CONTRACTS_LINTER_SPINNER, + }, }, }; @@ -30,6 +40,7 @@ pub fn run(shell: &Shell, args: LintArgs) -> anyhow::Result<()> { Target::Js, Target::Ts, Target::Contracts, + Target::Autocompletion, ] } else { args.targets.clone() @@ -43,10 +54,13 @@ pub fn run(shell: &Shell, args: LintArgs) -> anyhow::Result<()> { match target { Target::Rs => lint_rs(shell, &ecosystem, args.check)?, Target::Contracts => lint_contracts(shell, &ecosystem, args.check)?, + Target::Autocompletion => lint_autocompletion_files(shell, args.check)?, ext => lint(shell, &ecosystem, &ext, args.check)?, } } + logger::outro("Linting complete."); + Ok(()) } @@ -55,8 +69,8 @@ fn lint_rs(shell: &Shell, ecosystem: &EcosystemConfig, check: bool) -> anyhow::R let link_to_code = &ecosystem.link_to_code; let lint_to_prover = &ecosystem.link_to_code.join("prover"); - let link_to_toolbox = &ecosystem.link_to_code.join("zk_toolbox"); - let paths = vec![link_to_code, lint_to_prover, link_to_toolbox]; + let link_to_zkstack = &ecosystem.link_to_code.join("zkstack_cli"); + let paths = vec![link_to_code, lint_to_prover, link_to_zkstack]; spinner.freeze(); for path in paths { @@ -81,6 +95,7 @@ fn get_linter(target: &Target) -> Vec { Target::Js => vec!["eslint".to_string()], Target::Ts => vec!["eslint".to_string(), "--ext".to_string(), "ts".to_string()], Target::Contracts => vec![], + Target::Autocompletion => vec![], } } @@ -133,3 +148,45 @@ fn lint_contracts(shell: &Shell, ecosystem: &EcosystemConfig, check: bool) -> an Ok(()) } + +fn lint_autocompletion_files(_shell: &Shell, check: bool) -> anyhow::Result<()> { + let completion_folder = Path::new("./zkstack_cli/crates/zkstack/completion/"); + if !completion_folder.exists() { + logger::info("WARNING: Please run this command from the project's root folder"); + return Ok(()); + } + + // Array of supported shells + let shells = [ + clap_complete::Shell::Bash, + clap_complete::Shell::Fish, + clap_complete::Shell::Zsh, + ]; + + for shell in shells { + let mut writer = Vec::new(); + + generate_completions(shell, &mut writer) + .context("Failed to generate autocompletion file")?; + + let new = String::from_utf8(writer)?; + + let path = completion_folder.join(autocomplete_file_name(&shell)); + let mut autocomplete_file = 
File::open(path.clone()) + .context(format!("failed to open {}", autocomplete_file_name(&shell)))?; + + let mut old = String::new(); + autocomplete_file.read_to_string(&mut old)?; + + if new != old { + if !check { + let mut autocomplete_file = File::create(path).context("Failed to create file")?; + autocomplete_file.write_all(new.as_bytes())?; + } else { + bail!("Autocompletion files need to be regenerated. Run `zkstack dev lint -t autocompletion` to fix this issue.") + } + } + } + + Ok(()) +} diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/lint_utils.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/lint_utils.rs similarity index 98% rename from zk_toolbox/crates/zk_supervisor/src/commands/lint_utils.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/lint_utils.rs index 9095e445384..11a32504710 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/lint_utils.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/lint_utils.rs @@ -14,6 +14,7 @@ pub enum Target { Ts, Rs, Contracts, + Autocompletion, } #[derive(Deserialize, Serialize, Debug)] diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/mod.rs similarity index 87% rename from zk_toolbox/crates/zk_supervisor/src/commands/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/mod.rs index 38ec586e745..a292168dc6e 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/mod.rs @@ -3,10 +3,12 @@ pub mod config_writer; pub mod contracts; pub mod database; pub mod fmt; +pub mod genesis; pub mod lint; pub(crate) mod lint_utils; pub mod prover; pub mod send_transactions; pub mod snapshot; pub(crate) mod sql_fmt; +pub mod status; pub mod test; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/prover/args/insert_batch.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/args/insert_batch.rs similarity index 100% rename from zk_toolbox/crates/zk_supervisor/src/commands/prover/args/insert_batch.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/args/insert_batch.rs diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/prover/args/insert_version.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/args/insert_version.rs similarity index 100% rename from zk_toolbox/crates/zk_supervisor/src/commands/prover/args/insert_version.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/args/insert_version.rs diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/prover/args/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/args/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_supervisor/src/commands/prover/args/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/args/mod.rs diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/prover/info.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/info.rs similarity index 97% rename from zk_toolbox/crates/zk_supervisor/src/commands/prover/info.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/info.rs index 441edb2c4b2..84873e931b3 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/prover/info.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/info.rs @@ -8,7 +8,7 @@ use common::logger; use config::{ChainConfig, EcosystemConfig}; use xshell::{cmd, Shell}; -use crate::messages::MSG_CHAIN_NOT_FOUND_ERR; +use 
crate::commands::dev::messages::MSG_CHAIN_NOT_FOUND_ERR; pub async fn run(shell: &Shell) -> anyhow::Result<()> { let ecosystem_config = EcosystemConfig::from_file(shell)?; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/prover/insert_batch.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/insert_batch.rs similarity index 97% rename from zk_toolbox/crates/zk_supervisor/src/commands/prover/insert_batch.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/insert_batch.rs index 8c2cdd4d88d..0e0c0ba33af 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/prover/insert_batch.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/insert_batch.rs @@ -2,7 +2,7 @@ use common::{check_prerequisites, cmd::Cmd, logger, PROVER_CLI_PREREQUISITE}; use config::{get_link_to_prover, EcosystemConfig}; use xshell::{cmd, Shell}; -use crate::{ +use crate::commands::dev::{ commands::prover::{ args::insert_batch::{InsertBatchArgs, InsertBatchArgsFinal}, info, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/prover/insert_version.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/insert_version.rs similarity index 98% rename from zk_toolbox/crates/zk_supervisor/src/commands/prover/insert_version.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/insert_version.rs index 3dd9b7e0a1b..f7bd175f577 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/prover/insert_version.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/insert_version.rs @@ -2,7 +2,7 @@ use common::{check_prerequisites, cmd::Cmd, logger, PROVER_CLI_PREREQUISITE}; use config::{get_link_to_prover, EcosystemConfig}; use xshell::{cmd, Shell}; -use crate::{ +use crate::commands::dev::{ commands::prover::{ args::insert_version::{InsertVersionArgs, InsertVersionArgsFinal}, info, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/prover/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_supervisor/src/commands/prover/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/prover/mod.rs diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/send_transactions/args/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/send_transactions/args/mod.rs similarity index 98% rename from zk_toolbox/crates/zk_supervisor/src/commands/send_transactions/args/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/send_transactions/args/mod.rs index e3d4f220ff2..03d9ec9b736 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/send_transactions/args/mod.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/send_transactions/args/mod.rs @@ -4,7 +4,7 @@ use clap::Parser; use common::Prompt; use url::Url; -use crate::{ +use crate::commands::dev::{ defaults::LOCAL_RPC_URL, messages::{ MSG_INVALID_L1_RPC_URL_ERR, MSG_PROMPT_L1_RPC_URL, MSG_PROMPT_SECRET_KEY, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/send_transactions/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/send_transactions/mod.rs similarity index 99% rename from zk_toolbox/crates/zk_supervisor/src/commands/send_transactions/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/send_transactions/mod.rs index 79d8efc600e..2f54579ade9 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/send_transactions/mod.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/send_transactions/mod.rs @@ -17,7 +17,7 
@@ use tokio::time::sleep; use xshell::Shell; use zksync_basic_types::{H160, U256}; -use crate::{ +use crate::commands::dev::{ consts::DEFAULT_UNSIGNED_TRANSACTIONS_DIR, messages::{ msg_send_txns_outro, MSG_FAILED_TO_SEND_TXN_ERR, MSG_UNABLE_TO_OPEN_FILE_ERR, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/snapshot.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/snapshot.rs similarity index 91% rename from zk_toolbox/crates/zk_supervisor/src/commands/snapshot.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/snapshot.rs index 608c5623334..8e4c7183cb5 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/snapshot.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/snapshot.rs @@ -4,7 +4,7 @@ use common::{cmd::Cmd, logger}; use config::EcosystemConfig; use xshell::{cmd, Shell}; -use crate::messages::{MSG_CHAIN_NOT_FOUND_ERR, MSG_RUNNING_SNAPSHOT_CREATOR}; +use crate::commands::dev::messages::{MSG_CHAIN_NOT_FOUND_ERR, MSG_RUNNING_SNAPSHOT_CREATOR}; #[derive(Subcommand, Debug)] pub enum SnapshotCommands { diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/sql_fmt.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/sql_fmt.rs similarity index 97% rename from zk_toolbox/crates/zk_supervisor/src/commands/sql_fmt.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/sql_fmt.rs index ede2500e6ab..0f7ce061ce1 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/sql_fmt.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/sql_fmt.rs @@ -6,7 +6,7 @@ use sqruff_lib::{api::simple::get_simple_config, core::linter::core::Linter}; use xshell::Shell; use super::lint_utils::{get_unignored_files, IgnoredData, Target}; -use crate::messages::{msg_file_is_not_formatted, MSG_RUNNING_SQL_FMT_SPINNER}; +use crate::commands::dev::messages::{msg_file_is_not_formatted, MSG_RUNNING_SQL_FMT_SPINNER}; fn format_query(query: &str) -> anyhow::Result { let exclude_rules = vec!["LT12".to_string()]; // avoid adding newline before `$` character @@ -138,7 +138,7 @@ pub async fn format_sql(shell: Shell, check: bool) -> anyhow::Result<()> { let spinner = Spinner::new(MSG_RUNNING_SQL_FMT_SPINNER); let ignored_data = Some(IgnoredData { files: vec![], - dirs: vec!["zk_toolbox".to_string()], + dirs: vec!["zkstack_cli".to_string()], }); let rust_files = get_unignored_files(&shell, &Target::Rs, ignored_data)?; for file in rust_files { diff --git a/zkstack_cli/crates/zkstack/src/commands/dev/commands/status/args.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/status/args.rs new file mode 100644 index 00000000000..5ac52bf854a --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/status/args.rs @@ -0,0 +1,45 @@ +use anyhow::Context; +use clap::Parser; +use config::EcosystemConfig; +use xshell::Shell; + +use crate::{ + commands::dev::messages::{ + MSG_API_CONFIG_NOT_FOUND_ERR, MSG_STATUS_PORTS_HELP, MSG_STATUS_URL_HELP, + }, + messages::MSG_CHAIN_NOT_FOUND_ERR, +}; + +#[derive(Debug, Parser)] +pub enum StatusSubcommands { + #[clap(about = MSG_STATUS_PORTS_HELP)] + Ports, +} + +#[derive(Debug, Parser)] +pub struct StatusArgs { + #[clap(long, short = 'u', help = MSG_STATUS_URL_HELP)] + pub url: Option, + #[clap(subcommand)] + pub subcommand: Option, +} + +impl StatusArgs { + pub fn get_url(&self, shell: &Shell) -> anyhow::Result { + if let Some(url) = &self.url { + Ok(url.clone()) + } else { + let ecosystem = EcosystemConfig::from_file(shell)?; + let chain = ecosystem + .load_current_chain() + 
.context(MSG_CHAIN_NOT_FOUND_ERR)?; + let general_config = chain.get_general_config()?; + let health_check_port = general_config + .api_config + .context(MSG_API_CONFIG_NOT_FOUND_ERR)? + .healthcheck + .port; + Ok(format!("http://localhost:{}/health", health_check_port)) + } + } +} diff --git a/zkstack_cli/crates/zkstack/src/commands/dev/commands/status/draw.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/status/draw.rs new file mode 100644 index 00000000000..d38d5b6d29f --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/status/draw.rs @@ -0,0 +1,88 @@ +use crate::{commands::dev::commands::status::utils::is_port_open, utils::ports::PortInfo}; + +const DEFAULT_LINE_WIDTH: usize = 32; + +pub struct BoxProperties { + longest_line: usize, + border: String, + boxed_msg: Vec, +} + +impl BoxProperties { + fn new(msg: &str) -> Self { + let longest_line = msg + .lines() + .map(|line| line.len()) + .max() + .unwrap_or(0) + .max(DEFAULT_LINE_WIDTH); + let width = longest_line + 2; + let border = "─".repeat(width); + let boxed_msg = msg + .lines() + .map(|line| format!("│ {:longest_line$} │", line)) + .collect(); + Self { + longest_line, + border, + boxed_msg, + } + } +} + +fn single_bordered_box(msg: &str) -> String { + let properties = BoxProperties::new(msg); + format!( + "┌{}┐\n{}\n└{}┘\n", + properties.border, + properties.boxed_msg.join("\n"), + properties.border + ) +} + +pub fn bordered_boxes(msg1: &str, msg2: Option<&String>) -> String { + if msg2.is_none() { + return single_bordered_box(msg1); + } + + let properties1 = BoxProperties::new(msg1); + let properties2 = BoxProperties::new(msg2.unwrap()); + + let max_lines = properties1.boxed_msg.len().max(properties2.boxed_msg.len()); + let header = format!("┌{}┐ ┌{}┐\n", properties1.border, properties2.border); + let footer = format!("└{}┘ └{}┘\n", properties1.border, properties2.border); + + let empty_line1 = format!( + "│ {:longest_line$} │", + "", + longest_line = properties1.longest_line + ); + let empty_line2 = format!( + "│ {:longest_line$} │", + "", + longest_line = properties2.longest_line + ); + + let boxed_info: Vec = (0..max_lines) + .map(|i| { + let line1 = properties1.boxed_msg.get(i).unwrap_or(&empty_line1); + let line2 = properties2.boxed_msg.get(i).unwrap_or(&empty_line2); + format!("{} {}", line1, line2) + }) + .collect(); + + format!("{}{}\n{}", header, boxed_info.join("\n"), footer) +} + +pub fn format_port_info(port_info: &PortInfo) -> String { + let in_use_tag = if is_port_open(port_info.port) { + " [OPEN]" + } else { + "" + }; + + format!( + " - {}{} > {}\n", + port_info.port, in_use_tag, port_info.description + ) +} diff --git a/zkstack_cli/crates/zkstack/src/commands/dev/commands/status/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/status/mod.rs new file mode 100644 index 00000000000..8687fcb0476 --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/status/mod.rs @@ -0,0 +1,135 @@ +use std::collections::HashMap; + +use anyhow::Context; +use args::{StatusArgs, StatusSubcommands}; +use common::logger; +use draw::{bordered_boxes, format_port_info}; +use serde::Deserialize; +use serde_json::Value; +use utils::deslugify; +use xshell::Shell; + +use crate::{ + commands::dev::messages::{ + msg_failed_parse_response, msg_not_ready_components, msg_system_status, + MSG_ALL_COMPONENTS_READY, MSG_COMPONENTS, MSG_SOME_COMPONENTS_NOT_READY, + }, + utils::ports::EcosystemPortsScanner, +}; + +pub mod args; +mod draw; +mod utils; + +const STATUS_READY: &str = "ready"; + 
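// Editorial aside (an illustrative, hedged sketch, not a line of this diff): the structs
// defined just below deserialize the node's /health JSON. Assuming a payload of roughly
// this shape (the component name "http_api" and the "port" detail here are hypothetical),
// parsing with serde_json would look like:
//
//     let payload = r#"{
//         "status": "ready",
//         "components": {
//             "http_api": { "status": "ready", "details": { "port": 3071 } }
//         }
//     }"#;
//     let parsed: StatusResponse = serde_json::from_str(payload)?; // uses the derives below
//     assert_eq!(parsed.status, STATUS_READY);
//     assert!(parsed.components.contains_key("http_api"));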
+#[derive(Deserialize, Debug)]
+struct StatusResponse {
+    status: String,
+    components: HashMap<String, Component>,
+}
+
+#[derive(Deserialize, Debug)]
+struct Component {
+    status: String,
+    details: Option<Value>,
+}
+
+fn print_status(health_check_url: String) -> anyhow::Result<()> {
+    let client = reqwest::blocking::Client::new();
+    let response = client.get(&health_check_url).send()?.text()?;
+
+    let status_response: StatusResponse =
+        serde_json::from_str(&response).context(msg_failed_parse_response(&response))?;
+
+    if status_response.status.to_lowercase() == STATUS_READY {
+        logger::success(msg_system_status(&status_response.status));
+    } else {
+        logger::warn(msg_system_status(&status_response.status));
+    }
+
+    let mut components_info = String::from(MSG_COMPONENTS);
+    let mut components = Vec::new();
+    let mut not_ready_components = Vec::new();
+
+    for (component_name, component) in status_response.components {
+        let readable_name = deslugify(&component_name);
+        let mut component_info = format!("{}:\n - Status: {}", readable_name, component.status);
+
+        if let Some(details) = &component.details {
+            for (key, value) in details.as_object().unwrap() {
+                component_info.push_str(&format!("\n - {}: {}", deslugify(key), value));
+            }
+        }
+
+        if component.status.to_lowercase() != STATUS_READY {
+            not_ready_components.push(readable_name);
+        }
+
+        components.push(component_info);
+    }
+
+    components.sort_by(|a, b| {
+        a.lines()
+            .count()
+            .cmp(&b.lines().count())
+            .then_with(|| a.cmp(b))
+    });
+
+    for chunk in components.chunks(2) {
+        components_info.push_str(&bordered_boxes(&chunk[0], chunk.get(1)));
+    }
+
+    logger::info(components_info);
+
+    if not_ready_components.is_empty() {
+        logger::outro(MSG_ALL_COMPONENTS_READY);
+    } else {
+        logger::warn(MSG_SOME_COMPONENTS_NOT_READY);
+        logger::outro(msg_not_ready_components(&not_ready_components.join(", ")));
+    }
+
+    Ok(())
+}
+
+fn print_ports(shell: &Shell) -> anyhow::Result<()> {
+    let ports = EcosystemPortsScanner::scan(shell)?;
+    let grouped_ports = ports.group_by_file_path();
+
+    let mut all_port_lines: Vec<String> = Vec::new();
+
+    for (file_path, port_infos) in grouped_ports {
+        let mut port_info_lines = String::new();
+
+        for port_info in port_infos {
+            port_info_lines.push_str(&format_port_info(&port_info));
+        }
+
+        all_port_lines.push(format!("{}:\n{}", file_path, port_info_lines));
+    }
+
+    all_port_lines.sort_by(|a, b| {
+        b.lines()
+            .count()
+            .cmp(&a.lines().count())
+            .then_with(|| a.cmp(b))
+    });
+
+    let mut components_info = String::from("Ports:\n");
+    for chunk in all_port_lines.chunks(2) {
+        components_info.push_str(&bordered_boxes(&chunk[0], chunk.get(1)));
+    }
+
+    logger::info(components_info);
+    Ok(())
+}
+
+pub async fn run(shell: &Shell, args: StatusArgs) -> anyhow::Result<()> {
+    if let Some(StatusSubcommands::Ports) = args.subcommand {
+        return print_ports(shell);
+    }
+
+    let health_check_url = args.get_url(shell)?;
+
+    print_status(health_check_url)
+}
diff --git a/zkstack_cli/crates/zkstack/src/commands/dev/commands/status/utils.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/status/utils.rs
new file mode 100644
index 00000000000..399a0fb0fec
--- /dev/null
+++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/status/utils.rs
@@ -0,0 +1,26 @@
+use std::net::TcpListener;
+
+pub fn is_port_open(port: u16) -> bool {
+    TcpListener::bind(("0.0.0.0", port)).is_err() || TcpListener::bind(("127.0.0.1", port)).is_err()
+}
+
+pub fn deslugify(name: &str) -> String {
+    name.split('_')
+        .map(|word| {
+            let mut chars = word.chars();
+            match
chars.next() { + Some(first) => { + let capitalized = first.to_uppercase().collect::() + chars.as_str(); + match capitalized.as_str() { + "Http" => "HTTP".to_string(), + "Api" => "API".to_string(), + "Ws" => "WS".to_string(), + _ => capitalized, + } + } + None => String::new(), + } + }) + .collect::>() + .join(" ") +} diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/fees.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/fees.rs similarity index 65% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/args/fees.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/fees.rs index 1337566e536..9e76850ff2e 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/fees.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/fees.rs @@ -1,12 +1,12 @@ use clap::Parser; use serde::{Deserialize, Serialize}; -use crate::messages::{MSG_NO_DEPS_HELP, MSG_NO_KILL_HELP}; +use crate::commands::dev::messages::{MSG_NO_DEPS_HELP, MSG_NO_KILL_HELP}; #[derive(Debug, Serialize, Deserialize, Parser)] pub struct FeesArgs { #[clap(short, long, help = MSG_NO_DEPS_HELP)] pub no_deps: bool, - #[clap(short, long, help = MSG_NO_KILL_HELP)] + #[clap(long, help = MSG_NO_KILL_HELP)] pub no_kill: bool, } diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/integration.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/integration.rs similarity index 63% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/args/integration.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/integration.rs index 435dddfc360..625df0fc151 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/integration.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/integration.rs @@ -1,7 +1,9 @@ use clap::Parser; use serde::{Deserialize, Serialize}; -use crate::messages::{MSG_NO_DEPS_HELP, MSG_TESTS_EXTERNAL_NODE_HELP, MSG_TEST_PATTERN_HELP}; +use crate::commands::dev::messages::{ + MSG_NO_DEPS_HELP, MSG_TESTS_EXTERNAL_NODE_HELP, MSG_TEST_PATTERN_HELP, +}; #[derive(Debug, Serialize, Deserialize, Parser)] pub struct IntegrationArgs { @@ -9,6 +11,6 @@ pub struct IntegrationArgs { pub external_node: bool, #[clap(short, long, help = MSG_NO_DEPS_HELP)] pub no_deps: bool, - #[clap(short, long, help = MSG_TEST_PATTERN_HELP)] + #[clap(short, long, help = MSG_TEST_PATTERN_HELP, allow_hyphen_values(true))] pub test_pattern: Option, } diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/args/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/mod.rs diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/recovery.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/recovery.rs similarity index 66% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/args/recovery.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/recovery.rs index 81cc58fbd9b..b6ce278a1ca 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/recovery.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/recovery.rs @@ -1,7 +1,9 @@ use clap::Parser; use serde::{Deserialize, Serialize}; -use crate::messages::{MSG_NO_DEPS_HELP, MSG_NO_KILL_HELP, MSG_TESTS_RECOVERY_SNAPSHOT_HELP}; +use crate::commands::dev::messages::{ 
+ MSG_NO_DEPS_HELP, MSG_NO_KILL_HELP, MSG_TESTS_RECOVERY_SNAPSHOT_HELP, +}; #[derive(Debug, Serialize, Deserialize, Parser)] pub struct RecoveryArgs { @@ -9,6 +11,6 @@ pub struct RecoveryArgs { pub snapshot: bool, #[clap(short, long, help = MSG_NO_DEPS_HELP)] pub no_deps: bool, - #[clap(short, long, help = MSG_NO_KILL_HELP)] + #[clap(long, help = MSG_NO_KILL_HELP)] pub no_kill: bool, } diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/revert.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/revert.rs similarity index 85% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/args/revert.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/revert.rs index 0154a4c0afd..9f86eec7f3d 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/revert.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/revert.rs @@ -1,6 +1,6 @@ use clap::Parser; -use crate::messages::{ +use crate::commands::dev::messages::{ MSG_NO_DEPS_HELP, MSG_NO_KILL_HELP, MSG_REVERT_TEST_ENABLE_CONSENSUS_HELP, MSG_TESTS_EXTERNAL_NODE_HELP, }; @@ -13,6 +13,6 @@ pub struct RevertArgs { pub external_node: bool, #[clap(short, long, help = MSG_NO_DEPS_HELP)] pub no_deps: bool, - #[clap(short, long, help = MSG_NO_KILL_HELP)] + #[clap(long, help = MSG_NO_KILL_HELP)] pub no_kill: bool, } diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/rust.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/rust.rs similarity index 70% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/args/rust.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/rust.rs index 2d94adc3f6a..6ca277f6a2f 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/rust.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/rust.rs @@ -1,6 +1,6 @@ use clap::Parser; -use crate::messages::MSG_TEST_RUST_OPTIONS_HELP; +use crate::commands::dev::messages::MSG_TEST_RUST_OPTIONS_HELP; #[derive(Debug, Parser)] pub struct RustArgs { diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/upgrade.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/upgrade.rs similarity index 72% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/args/upgrade.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/upgrade.rs index dd96957e9d3..7b631b91e9a 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/upgrade.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/args/upgrade.rs @@ -1,6 +1,6 @@ use clap::Parser; -use crate::messages::MSG_NO_DEPS_HELP; +use crate::commands::dev::messages::MSG_NO_DEPS_HELP; #[derive(Debug, Parser)] pub struct UpgradeArgs { diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/build.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/build.rs similarity index 100% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/build.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/build.rs index f48967f5973..dea6a46bbef 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/build.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/build.rs @@ -6,8 +6,8 @@ use super::utils::{build_contracts, install_and_build_dependencies}; pub fn run(shell: &Shell) -> anyhow::Result<()> { let ecosystem_config = EcosystemConfig::from_file(shell)?; - build_contracts(shell, &ecosystem_config)?; install_and_build_dependencies(shell, 
&ecosystem_config)?; + build_contracts(shell, &ecosystem_config)?; Ok(()) } diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/db.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/db.rs similarity index 83% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/db.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/db.rs index a08b0404605..19f6307019b 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/db.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/db.rs @@ -3,7 +3,7 @@ use std::path::Path; use common::{cmd::Cmd, db::wait_for_db, logger}; use xshell::{cmd, Shell}; -use crate::{commands::database, dals::Dal, messages::MSG_RESETTING_TEST_DATABASES}; +use crate::commands::dev::{commands::database, dals::Dal, messages::MSG_RESETTING_TEST_DATABASES}; pub async fn reset_test_databases( shell: &Shell, @@ -26,7 +26,7 @@ pub async fn reset_test_databases( for dal in dals { let mut url = dal.url.clone(); url.set_path(""); - wait_for_db(&url, 3).await?; + wait_for_db(&url, 20).await?; database::reset::reset_database(shell, link_to_code, dal.clone()).await?; } diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/fees.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/fees.rs similarity index 98% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/fees.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/fees.rs index e0b881a14db..e58a70e6b7c 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/fees.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/fees.rs @@ -9,7 +9,7 @@ use super::{ args::fees::FeesArgs, utils::{build_contracts, install_and_build_dependencies, TS_INTEGRATION_PATH}, }; -use crate::{ +use crate::commands::dev::{ commands::test::utils::{TestWallets, TEST_WALLETS_PATH}, messages::{ MSG_CHAIN_NOT_FOUND_ERR, MSG_DESERIALIZE_TEST_WALLETS_ERR, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/integration.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/integration.rs similarity index 98% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/integration.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/integration.rs index 8f76e56fe83..bee0f0788ee 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/integration.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/integration.rs @@ -12,7 +12,7 @@ use super::{ TS_INTEGRATION_PATH, }, }; -use crate::messages::{ +use crate::commands::dev::messages::{ msg_integration_tests_run, MSG_CHAIN_NOT_FOUND_ERR, MSG_DESERIALIZE_TEST_WALLETS_ERR, MSG_INTEGRATION_TESTS_RUN_SUCCESS, }; @@ -28,8 +28,8 @@ pub async fn run(shell: &Shell, args: IntegrationArgs) -> anyhow::Result<()> { logger::info(msg_integration_tests_run(args.external_node)); if !args.no_deps { - build_contracts(shell, &ecosystem_config)?; install_and_build_dependencies(shell, &ecosystem_config)?; + build_contracts(shell, &ecosystem_config)?; } let wallets_path: PathBuf = ecosystem_config.link_to_code.join(TEST_WALLETS_PATH); diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/l1_contracts.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/l1_contracts.rs similarity index 86% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/l1_contracts.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/l1_contracts.rs index 0a1e1ec5203..7d163daed67 100644 --- 
a/zk_toolbox/crates/zk_supervisor/src/commands/test/l1_contracts.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/l1_contracts.rs @@ -2,7 +2,7 @@ use common::{cmd::Cmd, logger}; use config::EcosystemConfig; use xshell::{cmd, Shell}; -use crate::messages::MSG_L1_CONTRACTS_TEST_SUCCESS; +use crate::commands::dev::messages::MSG_L1_CONTRACTS_TEST_SUCCESS; pub fn run(shell: &Shell) -> anyhow::Result<()> { let ecosystem = EcosystemConfig::from_file(shell)?; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/loadtest.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/loadtest.rs similarity index 95% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/loadtest.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/loadtest.rs index ee307438ec9..72a8f97ff97 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/loadtest.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/loadtest.rs @@ -3,7 +3,7 @@ use common::{cmd::Cmd, config::global_config, logger}; use config::EcosystemConfig; use xshell::{cmd, Shell}; -use crate::messages::MSG_CHAIN_NOT_FOUND_ERR; +use crate::commands::dev::messages::MSG_CHAIN_NOT_FOUND_ERR; pub fn run(shell: &Shell) -> anyhow::Result<()> { let ecosystem_config = EcosystemConfig::from_file(shell)?; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/mod.rs similarity index 92% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/mod.rs index ae6b4518e6d..095e27652aa 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/mod.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/mod.rs @@ -5,7 +5,7 @@ use args::{ use clap::Subcommand; use xshell::Shell; -use crate::messages::{ +use crate::commands::dev::messages::{ MSG_BUILD_ABOUT, MSG_INTEGRATION_TESTS_ABOUT, MSG_L1_CONTRACTS_ABOUT, MSG_LOADTEST_ABOUT, MSG_PROVER_TEST_ABOUT, MSG_RECOVERY_TEST_ABOUT, MSG_REVERT_TEST_ABOUT, MSG_RUST_TEST_ABOUT, MSG_TEST_WALLETS_INFO, MSG_UPGRADE_TEST_ABOUT, @@ -30,7 +30,7 @@ mod wallet; pub enum TestCommands { #[clap(about = MSG_INTEGRATION_TESTS_ABOUT, alias = "i")] Integration(IntegrationArgs), - #[clap(about = "Run fees test", alias = "i")] + #[clap(about = "Run fees test", alias = "f")] Fees(FeesArgs), #[clap(about = MSG_REVERT_TEST_ABOUT, alias = "r")] Revert(RevertArgs), @@ -40,7 +40,7 @@ pub enum TestCommands { Upgrade(UpgradeArgs), #[clap(about = MSG_BUILD_ABOUT)] Build, - #[clap(about = MSG_RUST_TEST_ABOUT, alias = "unit")] + #[clap(about = MSG_RUST_TEST_ABOUT, alias = "unit", allow_hyphen_values(true))] Rust(RustArgs), #[clap(about = MSG_L1_CONTRACTS_ABOUT, alias = "l1")] L1Contracts, diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/prover.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/prover.rs similarity index 97% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/prover.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/prover.rs index f48b359a935..200baf57215 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/prover.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/prover.rs @@ -5,7 +5,7 @@ use config::EcosystemConfig; use url::Url; use xshell::{cmd, Shell}; -use crate::{ +use crate::commands::dev::{ commands::test::db::reset_test_databases, dals::{Dal, PROVER_DAL_PATH}, defaults::TEST_DATABASE_PROVER_URL, diff --git 
a/zk_toolbox/crates/zk_supervisor/src/commands/test/recovery.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/recovery.rs similarity index 98% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/recovery.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/recovery.rs index 6a3e337d41e..ae889969fd2 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/recovery.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/recovery.rs @@ -9,7 +9,7 @@ use super::{ args::recovery::RecoveryArgs, utils::{install_and_build_dependencies, TestWallets, TEST_WALLETS_PATH}, }; -use crate::messages::{ +use crate::commands::dev::messages::{ MSG_CHAIN_NOT_FOUND_ERR, MSG_DESERIALIZE_TEST_WALLETS_ERR, MSG_RECOVERY_TEST_RUN_INFO, MSG_RECOVERY_TEST_RUN_SUCCESS, }; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/revert.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/revert.rs similarity index 98% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/revert.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/revert.rs index 8b00e9d7f4d..dc95c88db20 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/revert.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/revert.rs @@ -9,7 +9,7 @@ use super::{ args::revert::RevertArgs, utils::{install_and_build_dependencies, TestWallets, TEST_WALLETS_PATH}, }; -use crate::messages::{ +use crate::commands::dev::messages::{ msg_revert_tests_run, MSG_CHAIN_NOT_FOUND_ERR, MSG_DESERIALIZE_TEST_WALLETS_ERR, MSG_REVERT_TEST_RUN_INFO, MSG_REVERT_TEST_RUN_SUCCESS, }; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/rust.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/rust.rs similarity index 94% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/rust.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/rust.rs index 7011e0f0f87..8c0c707f6a2 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/rust.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/rust.rs @@ -7,7 +7,7 @@ use url::Url; use xshell::{cmd, Shell}; use super::args::rust::RustArgs; -use crate::{ +use crate::commands::dev::{ commands::test::db::reset_test_databases, dals::{Dal, CORE_DAL_PATH, PROVER_DAL_PATH}, defaults::{TEST_DATABASE_PROVER_URL, TEST_DATABASE_SERVER_URL}, @@ -75,8 +75,8 @@ pub async fn run(shell: &Shell, args: RustArgs) -> anyhow::Result<()> { .env("TEST_PROVER_DATABASE_URL", test_prover_url); cmd.run()?; - // Run unit tests for zk_toolbox - let _dir_guard = shell.push_dir(link_to_code.join("zk_toolbox")); + // Run unit tests for ZK Stack CLI + let _dir_guard = shell.push_dir(link_to_code.join("zkstack_cli")); Cmd::new(cmd!(shell, "cargo nextest run --release")) .with_force_run() .run()?; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/upgrade.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/upgrade.rs similarity index 91% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/upgrade.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/upgrade.rs index 9bd04b81ef3..707e0086ed1 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/upgrade.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/upgrade.rs @@ -3,7 +3,7 @@ use config::EcosystemConfig; use xshell::{cmd, Shell}; use super::{args::upgrade::UpgradeArgs, utils::install_and_build_dependencies}; -use crate::messages::{MSG_UPGRADE_TEST_RUN_INFO, 
MSG_UPGRADE_TEST_RUN_SUCCESS}; +use crate::commands::dev::messages::{MSG_UPGRADE_TEST_RUN_INFO, MSG_UPGRADE_TEST_RUN_SUCCESS}; const UPGRADE_TESTS_PATH: &str = "core/tests/upgrade-test"; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/utils.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/utils.rs similarity index 93% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/utils.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/utils.rs index 8656ff44d31..bcd524bd2cb 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/utils.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/utils.rs @@ -10,7 +10,7 @@ use ethers::{ use serde::Deserialize; use xshell::{cmd, Shell}; -use crate::messages::{ +use crate::commands::dev::messages::{ MSG_INTEGRATION_TESTS_BUILDING_CONTRACTS, MSG_INTEGRATION_TESTS_BUILDING_DEPENDENCIES, }; @@ -43,10 +43,11 @@ impl TestWallets { } pub fn get_test_pk(&self, chain_config: &ChainConfig) -> anyhow::Result<String> { - self.get_test_wallet(chain_config)? - .private_key - .ok_or(anyhow::Error::msg("Private key not found")) - .map(|pk| pk.encode_hex::<String>()) + Ok(self + .get_test_wallet(chain_config)? + .private_key_h256() + .context("Private key not found")? + .encode_hex()) } pub async fn init_test_wallet( diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/wallet.rs b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/wallet.rs similarity index 96% rename from zk_toolbox/crates/zk_supervisor/src/commands/test/wallet.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/commands/test/wallet.rs index 62f32b50d55..6953014bf92 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/wallet.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/commands/test/wallet.rs @@ -6,7 +6,7 @@ use config::EcosystemConfig; use xshell::Shell; use super::utils::{TestWallets, TEST_WALLETS_PATH}; -use crate::messages::{ +use crate::commands::dev::messages::{ MSG_DESERIALIZE_TEST_WALLETS_ERR, MSG_TEST_WALLETS_INFO, MSG_WALLETS_TEST_SUCCESS, }; diff --git a/zk_toolbox/crates/zk_supervisor/src/consts.rs b/zkstack_cli/crates/zkstack/src/commands/dev/consts.rs similarity index 100% rename from zk_toolbox/crates/zk_supervisor/src/consts.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/consts.rs diff --git a/zk_toolbox/crates/zk_supervisor/src/dals.rs b/zkstack_cli/crates/zkstack/src/commands/dev/dals.rs similarity index 95% rename from zk_toolbox/crates/zk_supervisor/src/dals.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/dals.rs index 962a848fe00..9626edfed73 100644 --- a/zk_toolbox/crates/zk_supervisor/src/dals.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/dals.rs @@ -1,9 +1,9 @@ -use anyhow::{anyhow, Context}; +use anyhow::Context as _; use config::{EcosystemConfig, SecretsConfig}; use url::Url; use xshell::Shell; -use crate::{ +use super::{ commands::database::args::DalUrls, messages::{MSG_CHAIN_NOT_FOUND_ERR, MSG_DATABASE_MUST_BE_PRESENTED}, }; @@ -91,7 +91,7 @@ fn get_secrets(shell: &Shell) -> anyhow::Result<SecretsConfig> { let ecosystem_config = EcosystemConfig::from_file(shell)?; let chain_config = ecosystem_config .load_current_chain() - .ok_or(anyhow!(MSG_CHAIN_NOT_FOUND_ERR))?; + .context(MSG_CHAIN_NOT_FOUND_ERR)?; let secrets = chain_config.get_secrets_config()?; Ok(secrets) diff --git a/zk_toolbox/crates/zk_supervisor/src/defaults.rs b/zkstack_cli/crates/zkstack/src/commands/dev/defaults.rs similarity index 100% rename from zk_toolbox/crates/zk_supervisor/src/defaults.rs rename
to zkstack_cli/crates/zkstack/src/commands/dev/defaults.rs diff --git a/zk_toolbox/crates/zk_supervisor/src/messages.rs b/zkstack_cli/crates/zkstack/src/commands/dev/messages.rs similarity index 90% rename from zk_toolbox/crates/zk_supervisor/src/messages.rs rename to zkstack_cli/crates/zkstack/src/commands/dev/messages.rs index 3a49fa1ae9b..4dad1b2b6e2 100644 --- a/zk_toolbox/crates/zk_supervisor/src/messages.rs +++ b/zkstack_cli/crates/zkstack/src/commands/dev/messages.rs @@ -1,13 +1,11 @@ -use crate::commands::lint_utils::Target; +use super::commands::lint_utils::Target; // Ecosystem related messages pub(super) const MSG_CHAIN_NOT_FOUND_ERR: &str = "Chain not found"; -pub(super) fn msg_global_chain_does_not_exist(chain: &str, available_chains: &str) -> String { - format!("Chain with name {chain} doesnt exist, please choose one of: {available_chains}") -} - // Subcommands help +pub(super) const MSG_GENERATE_GENESIS_ABOUT: &str = + "Generate new genesis file based on current contracts"; pub(super) const MSG_PROVER_VERSION_ABOUT: &str = "Protocol version used by provers"; pub(super) const MSG_SUBCOMMAND_DATABASE_ABOUT: &str = "Database related commands"; pub(super) const MSG_SUBCOMMAND_TESTS_ABOUT: &str = "Run tests"; @@ -110,7 +108,6 @@ pub(super) const MSG_RESETTING_TEST_DATABASES: &str = "Resetting test databases" // Contract building related messages pub(super) const MSG_NOTHING_TO_BUILD_MSG: &str = "Nothing to build!"; pub(super) const MSG_BUILDING_CONTRACTS: &str = "Building contracts"; -pub(super) const MSG_CONTRACTS_DEPS_SPINNER: &str = "Installing dependencies.."; pub(super) const MSG_BUILDING_L2_CONTRACTS_SPINNER: &str = "Building L2 contracts.."; pub(super) const MSG_BUILDING_L1_CONTRACTS_SPINNER: &str = "Building L1 contracts.."; pub(super) const MSG_BUILDING_L1_DA_CONTRACTS_SPINNER: &str = "Building L1 DA contracts.."; @@ -162,9 +159,7 @@ pub(super) const MSG_UPGRADE_TEST_RUN_INFO: &str = "Running upgrade test"; pub(super) const MSG_UPGRADE_TEST_RUN_SUCCESS: &str = "Upgrade test ran successfully"; // Cleaning related messages -pub(super) const MSG_DOCKER_COMPOSE_DOWN: &str = "docker compose down"; -pub(super) const MSG_DOCKER_COMPOSE_REMOVE_VOLUMES: &str = "docker compose remove volumes"; -pub(super) const MSG_DOCKER_COMPOSE_CLEANED: &str = "docker compose network cleaned"; +pub(super) const MSG_DOCKER_COMPOSE_DOWN: &str = "docker compose down -v"; pub(super) const MSG_CONTRACTS_CLEANING: &str = "Removing contracts building and deployment artifacts"; pub(super) const MSG_CONTRACTS_CLEANING_FINISHED: &str = @@ -236,3 +231,29 @@ pub(super) const MSG_UNABLE_TO_WRITE_FILE_ERR: &str = "Unable to write data to f pub(super) const MSG_UNABLE_TO_READ_PARSE_JSON_ERR: &str = "Unable to parse JSON"; pub(super) const MSG_FAILED_TO_SEND_TXN_ERR: &str = "Failed to send transaction"; pub(super) const MSG_INVALID_L1_RPC_URL_ERR: &str = "Invalid L1 RPC URL"; + +// Status related messages +pub(super) const MSG_STATUS_ABOUT: &str = "Get status of the server"; +pub(super) const MSG_API_CONFIG_NOT_FOUND_ERR: &str = "API config not found"; +pub(super) const MSG_STATUS_URL_HELP: &str = "URL of the health check endpoint"; +pub(super) const MSG_STATUS_PORTS_HELP: &str = "Show used ports"; +pub(super) const MSG_COMPONENTS: &str = "Components:\n"; +pub(super) const MSG_ALL_COMPONENTS_READY: &str = + "Overall System Status: All components operational and ready."; +pub(super) const MSG_SOME_COMPONENTS_NOT_READY: &str = + "Overall System Status: Some components are not ready."; + +pub(super) fn 
msg_system_status(status: &str) -> String { + format!("System Status: {}\n", status) +} + +pub(super) fn msg_failed_parse_response(response: &str) -> String { + format!("Failed to parse response: {}", response) +} + +pub(super) fn msg_not_ready_components(components: &str) -> String { + format!("Not Ready Components: {}", components) +} + +// Genesis +pub(super) const MSG_GENESIS_FILE_GENERATION_STARTED: &str = "Regenerate genesis file"; diff --git a/zkstack_cli/crates/zkstack/src/commands/dev/mod.rs b/zkstack_cli/crates/zkstack/src/commands/dev/mod.rs new file mode 100644 index 00000000000..409c3a764eb --- /dev/null +++ b/zkstack_cli/crates/zkstack/src/commands/dev/mod.rs @@ -0,0 +1,70 @@ +use clap::Subcommand; +use commands::status::args::StatusArgs; +use messages::MSG_STATUS_ABOUT; +use xshell::Shell; + +use self::commands::{ + clean::CleanCommands, config_writer::ConfigWriterArgs, contracts::ContractsArgs, + database::DatabaseCommands, fmt::FmtArgs, lint::LintArgs, prover::ProverCommands, + send_transactions::args::SendTransactionsArgs, snapshot::SnapshotCommands, test::TestCommands, +}; +use crate::commands::dev::messages::{ + MSG_CONFIG_WRITER_ABOUT, MSG_CONTRACTS_ABOUT, MSG_GENERATE_GENESIS_ABOUT, + MSG_PROVER_VERSION_ABOUT, MSG_SEND_TXNS_ABOUT, MSG_SUBCOMMAND_CLEAN, + MSG_SUBCOMMAND_DATABASE_ABOUT, MSG_SUBCOMMAND_FMT_ABOUT, MSG_SUBCOMMAND_LINT_ABOUT, + MSG_SUBCOMMAND_SNAPSHOTS_CREATOR_ABOUT, MSG_SUBCOMMAND_TESTS_ABOUT, +}; + +mod commands; +mod consts; +mod dals; +mod defaults; +mod messages; + +#[derive(Subcommand, Debug)] +pub enum DevCommands { + #[command(subcommand, about = MSG_SUBCOMMAND_DATABASE_ABOUT, alias = "db")] + Database(DatabaseCommands), + #[command(subcommand, about = MSG_SUBCOMMAND_TESTS_ABOUT, alias = "t")] + Test(TestCommands), + #[command(subcommand, about = MSG_SUBCOMMAND_CLEAN)] + Clean(CleanCommands), + #[command(subcommand, about = MSG_SUBCOMMAND_SNAPSHOTS_CREATOR_ABOUT)] + Snapshot(SnapshotCommands), + #[command(about = MSG_SUBCOMMAND_LINT_ABOUT, alias = "l")] + Lint(LintArgs), + #[command(about = MSG_SUBCOMMAND_FMT_ABOUT)] + Fmt(FmtArgs), + #[command(subcommand, about = MSG_PROVER_VERSION_ABOUT)] + Prover(ProverCommands), + #[command(about = MSG_CONTRACTS_ABOUT)] + Contracts(ContractsArgs), + #[command(about = MSG_CONFIG_WRITER_ABOUT, alias = "o")] + ConfigWriter(ConfigWriterArgs), + #[command(about = MSG_SEND_TXNS_ABOUT)] + SendTransactions(SendTransactionsArgs), + #[command(about = MSG_STATUS_ABOUT)] + Status(StatusArgs), + #[command(about = MSG_GENERATE_GENESIS_ABOUT, alias = "genesis")] + GenerateGenesis, +} + +pub async fn run(shell: &Shell, args: DevCommands) -> anyhow::Result<()> { + match args { + DevCommands::Database(command) => commands::database::run(shell, command).await?, + DevCommands::Test(command) => commands::test::run(shell, command).await?, + DevCommands::Clean(command) => commands::clean::run(shell, command)?, + DevCommands::Snapshot(command) => commands::snapshot::run(shell, command).await?, + DevCommands::Lint(args) => commands::lint::run(shell, args)?, + DevCommands::Fmt(args) => commands::fmt::run(shell.clone(), args).await?, + DevCommands::Prover(command) => commands::prover::run(shell, command).await?, + DevCommands::Contracts(args) => commands::contracts::run(shell, args)?, + DevCommands::ConfigWriter(args) => commands::config_writer::run(shell, args)?, + DevCommands::SendTransactions(args) => { + commands::send_transactions::run(shell, args).await? 
+ } + DevCommands::Status(args) => commands::status::run(shell, args).await?, + DevCommands::GenerateGenesis => commands::genesis::run(shell).await?, + } + Ok(()) +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/build_transactions.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/args/build_transactions.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/build_transactions.rs rename to zkstack_cli/crates/zkstack/src/commands/ecosystem/args/build_transactions.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/change_default.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/args/change_default.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/change_default.rs rename to zkstack_cli/crates/zkstack/src/commands/ecosystem/args/change_default.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/create.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/args/create.rs similarity index 94% rename from zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/create.rs rename to zkstack_cli/crates/zkstack/src/commands/ecosystem/args/create.rs index 2e5c50f4538..6b6c1236d36 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/create.rs +++ b/zkstack_cli/crates/zkstack/src/commands/ecosystem/args/create.rs @@ -1,7 +1,7 @@ use std::path::{Path, PathBuf}; use anyhow::bail; -use clap::Parser; +use clap::{Parser, ValueHint}; use common::{cmd::Cmd, logger, Prompt, PromptConfirm, PromptSelect}; use serde::{Deserialize, Serialize}; use slugify_rs::slugify; @@ -26,7 +26,7 @@ pub struct EcosystemCreateArgs { pub ecosystem_name: Option<String>, #[clap(long, help = MSG_L1_NETWORK_HELP, value_enum)] pub l1_network: Option<L1Network>, - #[clap(long, help = MSG_LINK_TO_CODE_HELP)] + #[clap(long, help = MSG_LINK_TO_CODE_HELP, value_hint = ValueHint::DirPath)] pub link_to_code: Option<String>, #[clap(flatten)] #[serde(flatten)] @@ -71,7 +71,13 @@ impl EcosystemCreateArgs { // Make the only chain as a default one self.chain.set_as_default = Some(true); - let chain = self.chain.fill_values_with_prompt(0, &l1_network, vec![])?; + let chain = self.chain.fill_values_with_prompt( + shell, + 0, + &l1_network, + vec![], + Path::new(&link_to_code), + )?; let start_containers = self.start_containers.unwrap_or_else(|| { PromptConfirm::new(MSG_START_CONTAINERS_PROMPT) diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/init.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/args/init.rs similarity index 79% rename from zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/init.rs rename to zkstack_cli/crates/zkstack/src/commands/ecosystem/args/init.rs index 6eb3780755f..9bf332b3bee 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/init.rs +++ b/zkstack_cli/crates/zkstack/src/commands/ecosystem/args/init.rs @@ -10,10 +10,10 @@ use crate::{ commands::chain::args::genesis::GenesisArgs, defaults::LOCAL_RPC_URL, messages::{ - MSG_DEPLOY_ECOSYSTEM_PROMPT, MSG_DEPLOY_ERC20_PROMPT, MSG_DEPLOY_PAYMASTER_PROMPT, - MSG_DEV_ARG_HELP, MSG_GENESIS_ARGS_HELP, MSG_L1_RPC_URL_HELP, MSG_L1_RPC_URL_INVALID_ERR, - MSG_L1_RPC_URL_PROMPT, MSG_NO_PORT_REALLOCATION_HELP, MSG_OBSERVABILITY_HELP, - MSG_OBSERVABILITY_PROMPT, + MSG_DEPLOY_ECOSYSTEM_PROMPT, MSG_DEPLOY_ERC20_PROMPT, MSG_DEV_ARG_HELP, + MSG_L1_RPC_URL_HELP, MSG_L1_RPC_URL_INVALID_ERR, MSG_L1_RPC_URL_PROMPT, + MSG_NO_PORT_REALLOCATION_HELP, MSG_OBSERVABILITY_HELP,
MSG_OBSERVABILITY_PROMPT, + MSG_SERVER_DB_NAME_HELP, MSG_SERVER_DB_URL_HELP, }, }; @@ -74,9 +74,6 @@ pub struct EcosystemArgsFinal { #[derive(Debug, Clone, Serialize, Deserialize, Parser)] pub struct EcosystemInitArgs { - /// Deploy Paymaster contract - #[clap(long, default_missing_value = "true", num_args = 0..=1)] - pub deploy_paymaster: Option<bool>, /// Deploy ERC20 contracts #[clap(long, default_missing_value = "true", num_args = 0..=1)] pub deploy_erc20: Option<bool>, @@ -86,14 +83,23 @@ pub struct EcosystemInitArgs { #[clap(flatten)] #[serde(flatten)] pub forge_args: ForgeScriptArgs, - #[clap(flatten, next_help_heading = MSG_GENESIS_ARGS_HELP)] - #[serde(flatten)] - pub genesis_args: GenesisArgs, + /// Deploy Paymaster contract + #[clap(long, default_missing_value = "true", num_args = 0..=1)] + pub deploy_paymaster: Option<bool>, + #[clap(long, help = MSG_SERVER_DB_URL_HELP)] + pub server_db_url: Option<Url>, + #[clap(long, help = MSG_SERVER_DB_NAME_HELP)] + pub server_db_name: Option<String>, + #[clap(long, short, action)] + pub dont_drop: bool, + /// Initialize ecosystem only and skip chain initialization (chain can be initialized later with `chain init` subcommand) + #[clap(long, default_value_t = false)] + pub ecosystem_only: bool, #[clap(long, help = MSG_DEV_ARG_HELP)] pub dev: bool, #[clap(long, short = 'o', help = MSG_OBSERVABILITY_HELP, default_missing_value = "true", num_args = 0..=1)] pub observability: Option<bool>, - #[clap(long, help = MSG_NO_PORT_REALLOCATION_HELP, default_value = "false", default_missing_value = "true", num_args = 0..=1)] + #[clap(long, help = MSG_NO_PORT_REALLOCATION_HELP)] pub no_port_reallocation: bool, #[clap( long, @@ -110,21 +116,24 @@ pub struct EcosystemInitArgs { } impl EcosystemInitArgs { + pub fn get_genesis_args(&self) -> GenesisArgs { + GenesisArgs { + server_db_url: self.server_db_url.clone(), + server_db_name: self.server_db_name.clone(), + dev: self.dev, + dont_drop: self.dont_drop, + } + } + pub fn fill_values_with_prompt(self, l1_network: L1Network) -> EcosystemInitArgsFinal { - let (deploy_paymaster, deploy_erc20) = if self.dev { - (true, true) + let deploy_erc20 = if self.dev { + true } else { - let deploy_paymaster = self.deploy_paymaster.unwrap_or_else(|| { - PromptConfirm::new(MSG_DEPLOY_PAYMASTER_PROMPT) - .default(true) - .ask() - }); - let deploy_erc20 = self.deploy_erc20.unwrap_or_else(|| { + self.deploy_erc20.unwrap_or_else(|| { PromptConfirm::new(MSG_DEPLOY_ERC20_PROMPT) .default(true) .ask() - }); - (deploy_paymaster, deploy_erc20) + }) }; let ecosystem = self.ecosystem.fill_values_with_prompt(l1_network, self.dev); let observability = if self.dev { @@ -138,12 +147,12 @@ impl EcosystemInitArgs { }; EcosystemInitArgsFinal { - deploy_paymaster, deploy_erc20, ecosystem, forge_args: self.forge_args.clone(), dev: self.dev, observability, + ecosystem_only: self.ecosystem_only, no_port_reallocation: self.no_port_reallocation, skip_submodules_checkout: self.skip_submodules_checkout, skip_contract_compilation_override: self.skip_contract_compilation_override, @@ -153,12 +162,12 @@ impl EcosystemInitArgs { #[derive(Debug, Serialize, Deserialize)] pub struct EcosystemInitArgsFinal { - pub deploy_paymaster: bool, pub deploy_erc20: bool, pub ecosystem: EcosystemArgsFinal, pub forge_args: ForgeScriptArgs, pub dev: bool, pub observability: bool, + pub ecosystem_only: bool, pub no_port_reallocation: bool, pub skip_submodules_checkout: bool, pub skip_contract_compilation_override: bool, diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/mod.rs
b/zkstack_cli/crates/zkstack/src/commands/ecosystem/args/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/ecosystem/args/mod.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/build_transactions.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/build_transactions.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/ecosystem/build_transactions.rs rename to zkstack_cli/crates/zkstack/src/commands/ecosystem/build_transactions.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/change_default.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/change_default.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/ecosystem/change_default.rs rename to zkstack_cli/crates/zkstack/src/commands/ecosystem/change_default.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/common.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/common.rs similarity index 94% rename from zk_toolbox/crates/zk_inception/src/commands/ecosystem/common.rs rename to zkstack_cli/crates/zkstack/src/commands/ecosystem/common.rs index dbd487bff3c..0dcc8e03378 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/common.rs +++ b/zkstack_cli/crates/zkstack/src/commands/ecosystem/common.rs @@ -26,9 +26,9 @@ pub async fn deploy_l1( broadcast: bool, ) -> anyhow::Result<ContractsConfig> { let deploy_config_path = DEPLOY_ECOSYSTEM_SCRIPT_PARAMS.input(&config.link_to_code); let default_genesis_config = GenesisConfig::read_with_base_path(shell, config.get_default_configs_path()) - .context("Context")?; + .context("failed reading genesis config")?; let wallets_config = config.get_wallets()?; // For deploying ecosystem we only need genesis batch params @@ -54,7 +56,7 @@ pub async fn deploy_l1( if let Some(address) = sender { forge = forge.with_sender(address); } else { - forge = fill_forge_private_key(forge, wallets_config.deployer_private_key())?; + forge = fill_forge_private_key(forge, wallets_config.deployer.as_ref())?; } if broadcast { diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/create.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/create.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/ecosystem/create.rs rename to zkstack_cli/crates/zkstack/src/commands/ecosystem/create.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/create_configs.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/create_configs.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/ecosystem/create_configs.rs rename to zkstack_cli/crates/zkstack/src/commands/ecosystem/create_configs.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/init.rs similarity index 79% rename from zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs rename to zkstack_cli/crates/zkstack/src/commands/ecosystem/init.rs index fc4dc3ccf57..b823344f9b3 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs +++ b/zkstack_cli/crates/zkstack/src/commands/ecosystem/init.rs @@ -3,6 +3,7 @@ use std::{path::PathBuf, str::FromStr}; use anyhow::Context; use common::{ config::global_config, + contracts::build_system_contracts, forge::{Forge, ForgeScriptArgs}, git, hardhat::{build_l1_contracts,
build_l2_contracts}, @@ -28,7 +29,7 @@ use super::{ args::init::{EcosystemArgsFinal, EcosystemInitArgs, EcosystemInitArgsFinal}, common::deploy_l1, setup_observability, - utils::{build_da_contracts, build_system_contracts, install_yarn_dependencies}, + utils::{build_da_contracts, install_yarn_dependencies}, }; use crate::{ accept_ownership::{accept_admin, accept_owner}, @@ -39,9 +40,8 @@ use crate::{ }, }, messages::{ - msg_ecosystem_initialized, msg_ecosystem_no_found_preexisting_contract, - msg_initializing_chain, MSG_CHAIN_NOT_INITIALIZED, - MSG_DEPLOYING_ECOSYSTEM_CONTRACTS_SPINNER, MSG_DEPLOYING_ERC20, + msg_chain_load_err, msg_ecosystem_initialized, msg_ecosystem_no_found_preexisting_contract, + msg_initializing_chain, MSG_DEPLOYING_ECOSYSTEM_CONTRACTS_SPINNER, MSG_DEPLOYING_ERC20, MSG_DEPLOYING_ERC20_SPINNER, MSG_ECOSYSTEM_CONTRACTS_PATH_INVALID_ERR, MSG_ECOSYSTEM_CONTRACTS_PATH_PROMPT, MSG_INITIALIZING_ECOSYSTEM, MSG_INTALLING_DEPS_SPINNER, @@ -62,11 +62,9 @@ pub async fn run(args: EcosystemInitArgs, shell: &Shell) -> anyhow::Result<()> { Err(_) => create_initial_deployments_config(shell, &ecosystem_config.config)?, }; - let mut genesis_args = args.genesis_args.clone(); - if args.dev { - genesis_args.use_default = true; - } - let mut final_ecosystem_args = args.fill_values_with_prompt(ecosystem_config.l1_network); + let mut final_ecosystem_args = args + .clone() + .fill_values_with_prompt(ecosystem_config.l1_network); logger::info(MSG_INITIALIZING_ECOSYSTEM); @@ -74,7 +72,7 @@ pub async fn run(args: EcosystemInitArgs, shell: &Shell) -> anyhow::Result<()> { setup_observability::run(shell)?; } - let contracts_config = init( + let contracts_config = init_ecosystem( &mut final_ecosystem_args, shell, &ecosystem_config, @@ -99,43 +97,17 @@ pub async fn run(args: EcosystemInitArgs, shell: &Shell) -> anyhow::Result<()> { .await?; } - // If the name of chain passed then we deploy exactly this chain otherwise deploy all chains - let list_of_chains = if let Some(name) = global_config().chain_name.clone() { - vec![name] - } else { - ecosystem_config.list_of_chains() - }; - - for chain_name in &list_of_chains { - logger::info(msg_initializing_chain(chain_name)); - let chain_config = ecosystem_config - .load_chain(Some(chain_name.clone())) - .context(MSG_CHAIN_NOT_INITIALIZED)?; - - let mut chain_init_args = chain::args::init::InitArgsFinal { - forge_args: final_ecosystem_args.forge_args.clone(), - genesis_args: genesis_args.clone().fill_values_with_prompt(&chain_config), - deploy_paymaster: final_ecosystem_args.deploy_paymaster, - l1_rpc_url: final_ecosystem_args.ecosystem.l1_rpc_url.clone(), - no_port_reallocation: final_ecosystem_args.no_port_reallocation, - skip_submodules_checkout: final_ecosystem_args.skip_submodules_checkout, - }; - - chain::init::init( - &mut chain_init_args, - shell, - &ecosystem_config, - &chain_config, - ) - .await?; + // Initialize chain(s) + let mut chains: Vec<String> = vec![]; + if !final_ecosystem_args.ecosystem_only { + chains = init_chains(&args, &final_ecosystem_args, shell, &ecosystem_config).await?; } - - logger::outro(msg_ecosystem_initialized(&list_of_chains.join(","))); + logger::outro(msg_ecosystem_initialized(&chains.join(","))); Ok(()) } -async fn init( +async fn init_ecosystem( init_args: &mut EcosystemInitArgsFinal, shell: &Shell, ecosystem_config: &EcosystemConfig, @@ -146,7 +118,7 @@ async fn init( if !init_args.skip_contract_compilation_override { build_da_contracts(shell, &ecosystem_config.link_to_code)?; build_l1_contracts(shell,
&ecosystem_config.link_to_code)?; - build_system_contracts(shell, &ecosystem_config.link_to_code)?; + build_system_contracts(shell.clone(), ecosystem_config.link_to_code.clone())?; build_l2_contracts(shell, &ecosystem_config.link_to_code)?; } spinner.finish(); @@ -190,10 +162,7 @@ async fn deploy_erc20( .with_rpc_url(l1_rpc_url) .with_broadcast(); - forge = fill_forge_private_key( - forge, - ecosystem_config.get_wallets()?.deployer_private_key(), - )?; + forge = fill_forge_private_key(forge, ecosystem_config.get_wallets()?.deployer.as_ref())?; let spinner = Spinner::new(MSG_DEPLOYING_ERC20_SPINNER); check_the_balance(&forge).await?; @@ -301,7 +270,7 @@ async fn deploy_ecosystem_inner( shell, config, contracts_config.l1.governance_addr, - config.get_wallets()?.governor_private_key(), + &config.get_wallets()?.governor, contracts_config.ecosystem_contracts.bridgehub_proxy_addr, &forge_args, l1_rpc_url.clone(), @@ -312,7 +281,7 @@ shell, config, contracts_config.l1.chain_admin_addr, - config.get_wallets()?.governor_private_key(), + &config.get_wallets()?.governor, contracts_config.ecosystem_contracts.bridgehub_proxy_addr, &forge_args, l1_rpc_url.clone(), @@ -323,7 +292,7 @@ shell, config, contracts_config.l1.governance_addr, - config.get_wallets()?.governor_private_key(), + &config.get_wallets()?.governor, contracts_config.bridges.shared.l1_address, &forge_args, l1_rpc_url.clone(), @@ -337,7 +306,7 @@ shell, config, contracts_config.l1.governance_addr, - config.get_wallets()?.governor_private_key(), + &config.get_wallets()?.governor, contracts_config .ecosystem_contracts .state_transition_proxy_addr, @@ -350,7 +319,7 @@ shell, config, contracts_config.l1.chain_admin_addr, - config.get_wallets()?.governor_private_key(), + &config.get_wallets()?.governor, contracts_config .ecosystem_contracts .state_transition_proxy_addr, @@ -363,7 +332,7 @@ shell, config, contracts_config.l1.governance_addr, - config.get_wallets()?.governor_private_key(), + &config.get_wallets()?.governor, contracts_config .ecosystem_contracts .stm_deployment_tracker_proxy_addr, @@ -374,3 +343,57 @@ Ok(contracts_config) } + +async fn init_chains( + init_args: &EcosystemInitArgs, + final_init_args: &EcosystemInitArgsFinal, + shell: &Shell, + ecosystem_config: &EcosystemConfig, +) -> anyhow::Result<Vec<String>> { + // If a chain name was passed then we deploy exactly that chain; otherwise deploy all chains + let list_of_chains = if let Some(name) = global_config().chain_name.clone() { + vec![name] + } else { + ecosystem_config.list_of_chains() + }; + // Set default values for dev mode + let mut deploy_paymaster = init_args.deploy_paymaster; + let mut genesis_args = init_args.get_genesis_args().clone(); + if final_init_args.dev { + deploy_paymaster = Some(true); + genesis_args.dev = true; + } + // Can't initialize multiple chains with the same DB + if list_of_chains.len() > 1 { + genesis_args.reset_db_names(); + } + // Initialize chains + for chain_name in &list_of_chains { + logger::info(msg_initializing_chain(chain_name)); + let chain_config = ecosystem_config + .load_chain(Some(chain_name.clone())) + .context(msg_chain_load_err(chain_name))?; + + let chain_init_args = chain::args::init::InitArgs { + forge_args: final_init_args.forge_args.clone(), + server_db_url: genesis_args.server_db_url.clone(), + server_db_name:
genesis_args.server_db_name.clone(), + dont_drop: genesis_args.dont_drop, + deploy_paymaster, + l1_rpc_url: Some(final_init_args.ecosystem.l1_rpc_url.clone()), + no_port_reallocation: final_init_args.no_port_reallocation, + dev: final_init_args.dev, + skip_submodules_checkout: final_init_args.skip_submodules_checkout, + }; + let final_chain_init_args = chain_init_args.fill_values_with_prompt(&chain_config); + + chain::init::init( + &final_chain_init_args, + shell, + ecosystem_config, + &chain_config, + ) + .await?; + } + Ok(list_of_chains) +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/mod.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/ecosystem/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/ecosystem/mod.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/setup_observability.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/setup_observability.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/ecosystem/setup_observability.rs rename to zkstack_cli/crates/zkstack/src/commands/ecosystem/setup_observability.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/utils.rs b/zkstack_cli/crates/zkstack/src/commands/ecosystem/utils.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/ecosystem/utils.rs rename to zkstack_cli/crates/zkstack/src/commands/ecosystem/utils.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/explorer/backend.rs b/zkstack_cli/crates/zkstack/src/commands/explorer/backend.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/explorer/backend.rs rename to zkstack_cli/crates/zkstack/src/commands/explorer/backend.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/explorer/init.rs b/zkstack_cli/crates/zkstack/src/commands/explorer/init.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/explorer/init.rs rename to zkstack_cli/crates/zkstack/src/commands/explorer/init.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/explorer/mod.rs b/zkstack_cli/crates/zkstack/src/commands/explorer/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/explorer/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/explorer/mod.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/explorer/run.rs b/zkstack_cli/crates/zkstack/src/commands/explorer/run.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/explorer/run.rs rename to zkstack_cli/crates/zkstack/src/commands/explorer/run.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/external_node/args/mod.rs b/zkstack_cli/crates/zkstack/src/commands/external_node/args/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/external_node/args/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/external_node/args/mod.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/external_node/args/prepare_configs.rs b/zkstack_cli/crates/zkstack/src/commands/external_node/args/prepare_configs.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/external_node/args/prepare_configs.rs rename to zkstack_cli/crates/zkstack/src/commands/external_node/args/prepare_configs.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/external_node/args/run.rs b/zkstack_cli/crates/zkstack/src/commands/external_node/args/run.rs 
similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/external_node/args/run.rs rename to zkstack_cli/crates/zkstack/src/commands/external_node/args/run.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/external_node/init.rs b/zkstack_cli/crates/zkstack/src/commands/external_node/init.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/external_node/init.rs rename to zkstack_cli/crates/zkstack/src/commands/external_node/init.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/external_node/mod.rs b/zkstack_cli/crates/zkstack/src/commands/external_node/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/external_node/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/external_node/mod.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/external_node/prepare_configs.rs b/zkstack_cli/crates/zkstack/src/commands/external_node/prepare_configs.rs similarity index 82% rename from zk_toolbox/crates/zk_inception/src/commands/external_node/prepare_configs.rs rename to zkstack_cli/crates/zkstack/src/commands/external_node/prepare_configs.rs index e513a3669e0..8f5f8352458 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/external_node/prepare_configs.rs +++ b/zkstack_cli/crates/zkstack/src/commands/external_node/prepare_configs.rs @@ -6,12 +6,12 @@ use config::{ external_node::ENConfig, set_rocks_db_config, traits::{FileConfigWithDefaultName, SaveConfigWithBasePath}, - ChainConfig, EcosystemConfig, GeneralConfig, SecretsConfig, DEFAULT_CONSENSUS_PORT, + ChainConfig, EcosystemConfig, GeneralConfig, SecretsConfig, }; use xshell::Shell; use zksync_basic_types::url::SensitiveUrl; use zksync_config::configs::{ - consensus::{ConsensusSecrets, NodeSecretKey, Secret}, + consensus::{ConsensusConfig, ConsensusSecrets, NodeSecretKey, Secret}, DatabaseSecrets, L1Secrets, }; use zksync_consensus_crypto::TextFmt; @@ -19,14 +19,13 @@ use zksync_consensus_roles as roles; use crate::{ commands::external_node::args::prepare_configs::{PrepareConfigArgs, PrepareConfigFinal}, - defaults::PORT_RANGE_END, messages::{ msg_preparing_en_config_is_done, MSG_CHAIN_NOT_INITIALIZED, MSG_CONSENSUS_CONFIG_MISSING_ERR, MSG_CONSENSUS_SECRETS_MISSING_ERR, MSG_CONSENSUS_SECRETS_NODE_KEY_MISSING_ERR, MSG_PREPARING_EN_CONFIGS, }, utils::{ - consensus::{get_consensus_config, node_public_key}, + consensus::node_public_key, ports::EcosystemPortsScanner, rocks_db::{recreate_rocksdb_dirs, RocksDBDirOption}, }, @@ -76,21 +75,15 @@ fn prepare_configs( )?, main_node_rate_limit_rps: None, gateway_url: None, + bridge_addresses_refresh_interval_sec: None, }; let mut general_en = general.clone(); + general_en.consensus_config = None; let main_node_consensus_config = general .consensus_config .context(MSG_CONSENSUS_CONFIG_MISSING_ERR)?; - - // TODO: This is a temporary solution. We should allocate consensus port using `EcosystemPorts::allocate_ports_in_yaml` - ports.add_port_info( - main_node_consensus_config.server_addr.port(), - "Main node consensus".to_string(), - ); - let offset = ((config.id - 1) * 100) as u16; - let consensus_port_range = DEFAULT_CONSENSUS_PORT + offset..PORT_RANGE_END; - let consensus_port = ports.allocate_port(consensus_port_range, "Consensus".to_string())?; + let mut en_consensus_config = main_node_consensus_config.clone(); let mut gossip_static_outbound = BTreeMap::new(); let main_node_public_key = node_public_key( @@ -100,13 +93,8 @@ fn prepare_configs( .context(MSG_CONSENSUS_SECRETS_MISSING_ERR)?, )? 
.context(MSG_CONSENSUS_SECRETS_NODE_KEY_MISSING_ERR)?; - gossip_static_outbound.insert(main_node_public_key, main_node_consensus_config.public_addr); - - let en_consensus_config = - get_consensus_config(config, consensus_port, None, Some(gossip_static_outbound))?; - general_en.consensus_config = Some(en_consensus_config.clone()); - en_consensus_config.save_with_base_path(shell, en_configs_path)?; + en_consensus_config.gossip_static_outbound = gossip_static_outbound; // Set secrets config let node_key = roles::node::SecretKey::generate().encode(); @@ -128,16 +116,25 @@ fn prepare_configs( }), data_availability: None, }; - secrets.save_with_base_path(shell, en_configs_path)?; + let dirs = recreate_rocksdb_dirs(shell, &config.rocks_db_path, RocksDBDirOption::ExternalNode)?; set_rocks_db_config(&mut general_en, dirs)?; + general_en.save_with_base_path(shell, en_configs_path)?; en_config.save_with_base_path(shell, en_configs_path)?; + en_consensus_config.save_with_base_path(shell, en_configs_path)?; + secrets.save_with_base_path(shell, en_configs_path)?; + let offset = 0; // This is zero because general_en ports already have a chain offset ports.allocate_ports_in_yaml( shell, &GeneralConfig::get_path_with_base_path(en_configs_path), - 0, // This is zero because general_en ports already have a chain offset + offset, + )?; + ports.allocate_ports_in_yaml( + shell, + &ConsensusConfig::get_path_with_base_path(en_configs_path), + offset, )?; Ok(()) diff --git a/zk_toolbox/crates/zk_inception/src/commands/external_node/run.rs b/zkstack_cli/crates/zkstack/src/commands/external_node/run.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/external_node/run.rs rename to zkstack_cli/crates/zkstack/src/commands/external_node/run.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/mod.rs b/zkstack_cli/crates/zkstack/src/commands/mod.rs similarity index 86% rename from zk_toolbox/crates/zk_inception/src/commands/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/mod.rs index 78a46797602..b5319cbc6bf 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/mod.rs +++ b/zkstack_cli/crates/zkstack/src/commands/mod.rs @@ -1,8 +1,10 @@ pub mod args; +pub mod autocomplete; pub mod chain; pub mod consensus; pub mod containers; pub mod contract_verifier; +pub mod dev; pub mod ecosystem; pub mod explorer; pub mod external_node; diff --git a/zk_toolbox/crates/zk_inception/src/commands/portal.rs b/zkstack_cli/crates/zkstack/src/commands/portal.rs similarity index 98% rename from zk_toolbox/crates/zk_inception/src/commands/portal.rs rename to zkstack_cli/crates/zkstack/src/commands/portal.rs index 5bf21121177..f9e7fe35860 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/portal.rs +++ b/zkstack_cli/crates/zkstack/src/commands/portal.rs @@ -107,7 +107,7 @@ async fn validate_portal_config( continue; } // Append missing chain, chain might not be initialized, so ignoring errors - if let Some(chain_config) = ecosystem_config.load_chain(Some(chain_name.clone())) { + if let Ok(chain_config) = ecosystem_config.load_chain(Some(chain_name.clone())) { if let Ok(portal_chain_config) = build_portal_chain_config(&chain_config).await { portal_config.add_chain_config(&portal_chain_config); } diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/args/compressor_keys.rs b/zkstack_cli/crates/zkstack/src/commands/prover/args/compressor_keys.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/prover/args/compressor_keys.rs rename to 
zkstack_cli/crates/zkstack/src/commands/prover/args/compressor_keys.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/args/init.rs b/zkstack_cli/crates/zkstack/src/commands/prover/args/init.rs similarity index 92% rename from zk_toolbox/crates/zk_inception/src/commands/prover/args/init.rs rename to zkstack_cli/crates/zkstack/src/commands/prover/args/init.rs index 94fea1389d2..fab79899302 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/prover/args/init.rs +++ b/zkstack_cli/crates/zkstack/src/commands/prover/args/init.rs @@ -33,6 +33,9 @@ use crate::{ #[derive(Debug, Clone, Parser, Default)] pub struct ProverInitArgs { + #[clap(long)] + pub dev: bool, + // Proof store object #[clap(long)] pub proof_store_dir: Option<String>, @@ -58,7 +61,7 @@ pub struct ProverInitArgs { pub bellman_cuda: Option<bool>, #[clap(long, default_missing_value = "true", num_args = 0..=1)] - pub setup_compressor_keys: Option<bool>, + pub setup_compressor_key: Option<bool>, #[clap(flatten)] pub compressor_keys_args: CompressorKeysArgs, @@ -228,6 +231,10 @@ impl ProverInitArgs { ) -> anyhow::Result { logger::info(MSG_GETTING_PROOF_STORE_CONFIG); + if self.dev { + return Ok(self.handle_file_backed_config(Some(DEFAULT_PROOF_STORE_DIR.to_string()))); + } + if self.proof_store_dir.is_some() { return Ok(self.handle_file_backed_config(self.proof_store_dir.clone())); } @@ -277,6 +284,11 @@ impl ProverInitArgs { shell: &Shell, ) -> anyhow::Result> { logger::info(MSG_GETTING_PUBLIC_STORE_CONFIG); + + if self.dev { + return Ok(None); + } + let shall_save_to_public_bucket = self .shall_save_to_public_bucket .unwrap_or_else(|| PromptConfirm::new(MSG_SAVE_TO_PUBLIC_BUCKET_PROMPT).ask()); @@ -345,7 +357,13 @@ impl ProverInitArgs { &self, default_path: &str, ) -> Option<CompressorKeysArgs> { - let download_key = self.clone().setup_compressor_keys.unwrap_or_else(|| { + if self.dev { + return Some(CompressorKeysArgs { + path: Some(default_path.to_string()), + }); + } + + let download_key = self.clone().setup_compressor_key.unwrap_or_else(|| { PromptConfirm::new(MSG_DOWNLOAD_SETUP_COMPRESSOR_KEY_PROMPT) .default(false) .ask() @@ -363,6 +381,9 @@ impl ProverInitArgs { } fn fill_setup_keys_values_with_prompt(&self) -> Option<SetupKeysArgs> { + if self.dev { + return None; + } let args = self.setup_keys_args.clone(); if self.setup_keys.unwrap_or_else(|| { @@ -475,6 +496,10 @@ impl ProverInitArgs { } fn fill_bellman_cuda_values_with_prompt(&self) -> Option<InitBellmanCudaArgs> { + if self.dev { + return None; + } + let args = self.bellman_cuda_config.clone(); if self.bellman_cuda.unwrap_or_else(|| { PromptConfirm::new(MSG_INITIALIZE_BELLMAN_CUDA_PROMPT) @@ -488,6 +513,10 @@ } fn get_cloud_type_with_prompt(&self) -> CloudConnectionMode { + if self.dev { + return CloudConnectionMode::Local; + } + let cloud_type = self.cloud_type.clone().unwrap_or_else(|| { PromptSelect::new( MSG_CLOUD_TYPE_PROMPT, @@ -503,25 +532,32 @@ &self, config: &ChainConfig, ) -> Option<ProverDatabaseConfig> { - let setup_database = self - .setup_database - .unwrap_or_else(|| PromptConfirm::new("Do you want to setup the database?").ask()); + let setup_database = self.dev + || self + .setup_database + .unwrap_or_else(|| PromptConfirm::new("Do you want to setup the database?").ask()); if setup_database { let DBNames { prover_name, ..
} = generate_db_names(config); let chain_name = config.name.clone(); - let dont_drop = self.dont_drop.unwrap_or_else(|| { - !PromptConfirm::new("Do you want to drop the database?") - .default(true) - .ask() - }); + let dont_drop = if !self.dev { + self.dont_drop.unwrap_or_else(|| { + !PromptConfirm::new("Do you want to drop the database?") + .default(true) + .ask() + }) + } else { + false + }; - if self.use_default.unwrap_or_else(|| { - PromptConfirm::new(MSG_USE_DEFAULT_DATABASES_HELP) - .default(true) - .ask() - }) { + if self.dev + || self.use_default.unwrap_or_else(|| { + PromptConfirm::new(MSG_USE_DEFAULT_DATABASES_HELP) + .default(true) + .ask() + }) + { Some(ProverDatabaseConfig { database_config: DatabaseConfig::new(DATABASE_PROVER_URL.clone(), prover_name), dont_drop, diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/args/init_bellman_cuda.rs b/zkstack_cli/crates/zkstack/src/commands/prover/args/init_bellman_cuda.rs similarity index 58% rename from zk_toolbox/crates/zk_inception/src/commands/prover/args/init_bellman_cuda.rs rename to zkstack_cli/crates/zkstack/src/commands/prover/args/init_bellman_cuda.rs index ba204b0be9e..98a5c78be2a 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/prover/args/init_bellman_cuda.rs +++ b/zkstack_cli/crates/zkstack/src/commands/prover/args/init_bellman_cuda.rs @@ -10,7 +10,9 @@ use crate::messages::{ #[derive(Debug, Clone, Parser, Default, Serialize, Deserialize)] pub struct InitBellmanCudaArgs { - #[clap(long)] + #[clap(long, conflicts_with_all(["bellman_cuda_dir"]))] + pub clone: bool, + #[clap(long, conflicts_with_all(["clone"]))] pub bellman_cuda_dir: Option<String>, } @@ -31,19 +33,26 @@ impl std::fmt::Display for BellmanCudaPathSelection { impl InitBellmanCudaArgs { pub fn fill_values_with_prompt(self) -> InitBellmanCudaArgs { - let bellman_cuda_dir = self.bellman_cuda_dir.unwrap_or_else(|| { - match PromptSelect::new( - MSG_BELLMAN_CUDA_ORIGIN_SELECT, - BellmanCudaPathSelection::iter(), - ) - .ask() - { - BellmanCudaPathSelection::Clone => "".to_string(), - BellmanCudaPathSelection::Path => Prompt::new(MSG_BELLMAN_CUDA_DIR_PROMPT).ask(), - } - }); + let bellman_cuda_dir = if self.clone { + "".to_string() + } else { + self.bellman_cuda_dir.unwrap_or_else(|| { + match PromptSelect::new( + MSG_BELLMAN_CUDA_ORIGIN_SELECT, + BellmanCudaPathSelection::iter(), + ) + .ask() + { + BellmanCudaPathSelection::Clone => "".to_string(), + BellmanCudaPathSelection::Path => { + Prompt::new(MSG_BELLMAN_CUDA_DIR_PROMPT).ask() + } + } + }) + }; InitBellmanCudaArgs { + clone: self.clone, bellman_cuda_dir: Some(bellman_cuda_dir), } } diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/args/mod.rs b/zkstack_cli/crates/zkstack/src/commands/prover/args/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/prover/args/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/prover/args/mod.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/args/run.rs b/zkstack_cli/crates/zkstack/src/commands/prover/args/run.rs similarity index 77% rename from zk_toolbox/crates/zk_inception/src/commands/prover/args/run.rs rename to zkstack_cli/crates/zkstack/src/commands/prover/args/run.rs index 59a82152f1f..b79af777673 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/prover/args/run.rs +++ b/zkstack_cli/crates/zkstack/src/commands/prover/args/run.rs @@ -8,7 +8,8 @@ use strum::{EnumIter, IntoEnumIterator}; use crate::{ consts::{ - COMPRESSOR_BINARY_NAME, COMPRESSOR_DOCKER_IMAGE, PROVER_BINARY_NAME,
PROVER_DOCKER_IMAGE, + CIRCUIT_PROVER_BINARY_NAME, CIRCUIT_PROVER_DOCKER_IMAGE, COMPRESSOR_BINARY_NAME, + COMPRESSOR_DOCKER_IMAGE, PROVER_BINARY_NAME, PROVER_DOCKER_IMAGE, PROVER_GATEWAY_BINARY_NAME, PROVER_GATEWAY_DOCKER_IMAGE, PROVER_JOB_MONITOR_BINARY_NAME, PROVER_JOB_MONITOR_DOCKER_IMAGE, WITNESS_GENERATOR_BINARY_NAME, WITNESS_GENERATOR_DOCKER_IMAGE, WITNESS_VECTOR_GENERATOR_BINARY_NAME, @@ -30,8 +31,12 @@ pub struct ProverRunArgs { pub witness_vector_generator_args: WitnessVectorGeneratorArgs, #[clap(flatten)] pub fri_prover_args: FriProverRunArgs, + #[clap(flatten)] + pub circuit_prover_args: CircuitProverArgs, #[clap(long)] pub docker: Option<bool>, + #[clap(long)] + pub tag: Option<String>, } #[derive( @@ -46,6 +51,8 @@ pub enum ProverComponent { WitnessVectorGenerator, #[strum(to_string = "Prover")] Prover, + #[strum(to_string = "CircuitProver")] + CircuitProver, #[strum(to_string = "Compressor")] Compressor, #[strum(to_string = "ProverJobMonitor")] @@ -59,6 +66,7 @@ impl ProverComponent { Self::WitnessGenerator => WITNESS_GENERATOR_DOCKER_IMAGE, Self::WitnessVectorGenerator => WITNESS_VECTOR_GENERATOR_DOCKER_IMAGE, Self::Prover => PROVER_DOCKER_IMAGE, + Self::CircuitProver => CIRCUIT_PROVER_DOCKER_IMAGE, Self::Compressor => COMPRESSOR_DOCKER_IMAGE, Self::ProverJobMonitor => PROVER_JOB_MONITOR_DOCKER_IMAGE, } @@ -70,6 +78,7 @@ impl ProverComponent { Self::WitnessGenerator => WITNESS_GENERATOR_BINARY_NAME, Self::WitnessVectorGenerator => WITNESS_VECTOR_GENERATOR_BINARY_NAME, Self::Prover => PROVER_BINARY_NAME, + Self::CircuitProver => CIRCUIT_PROVER_BINARY_NAME, Self::Compressor => COMPRESSOR_BINARY_NAME, Self::ProverJobMonitor => PROVER_JOB_MONITOR_BINARY_NAME, } @@ -78,10 +87,10 @@ impl ProverComponent { pub fn get_application_args(&self, in_docker: bool) -> anyhow::Result<Vec<String>> { let mut application_args = vec![]; - if self == &Self::Prover || self == &Self::Compressor { + if self == &Self::Prover || self == &Self::Compressor || self == &Self::CircuitProver { if in_docker { application_args.push("--gpus=all".to_string()); - } else { + } else if self != &Self::CircuitProver { application_args.push("--features=gpu".to_string()); } } @@ -160,6 +169,26 @@ impl ProverComponent { )); }; } + Self::CircuitProver => { + if args.circuit_prover_args.max_allocation.is_some() { + additional_args.push(format!( + "--max-allocation={}", + args.circuit_prover_args.max_allocation.unwrap() + )); + }; + if args + .circuit_prover_args + .witness_vector_generator_count + .is_some() + { + additional_args.push(format!( + "--witness-vector-generator-count={}", + args.circuit_prover_args + .witness_vector_generator_count + .unwrap() + )); + }; + } _ => {} }; @@ -211,6 +240,37 @@ impl WitnessVectorGeneratorArgs { } } +#[derive(Debug, Clone, Parser, Default)] +pub struct CircuitProverArgs { + #[clap(long)] + pub witness_vector_generator_count: Option<usize>, + #[clap(long)] + pub max_allocation: Option<usize>, +} + +impl CircuitProverArgs { + pub fn fill_values_with_prompt( + self, + component: ProverComponent, + ) -> anyhow::Result<CircuitProverArgs> { + if component != ProverComponent::CircuitProver { + return Ok(Self::default()); + } + + let witness_vector_generator_count = + self.witness_vector_generator_count.unwrap_or_else(|| { + Prompt::new("Number of WVG jobs to run in parallel") + .default("1") + .ask() + }); + + Ok(CircuitProverArgs { + witness_vector_generator_count: Some(witness_vector_generator_count), + max_allocation: self.max_allocation, + }) + } +} + +#[derive(Debug, Clone, Parser, Default)] pub struct FriProverRunArgs { /// Memory allocation
limit in bytes (for prover component) @@ -232,18 +292,26 @@ impl ProverRunArgs { .witness_vector_generator_args .fill_values_with_prompt(component)?; + let circuit_prover_args = self + .circuit_prover_args + .fill_values_with_prompt(component)?; + let docker = self.docker.unwrap_or_else(|| { Prompt::new("Do you want to run Docker image for the component?") .default("false") .ask() }); + let tag = self.tag.unwrap_or("latest2.0".to_string()); + Ok(ProverRunArgs { component: Some(component), witness_generator_args, witness_vector_generator_args, fri_prover_args: self.fri_prover_args, + circuit_prover_args, docker: Some(docker), + tag: Some(tag), }) } } diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/args/setup_keys.rs b/zkstack_cli/crates/zkstack/src/commands/prover/args/setup_keys.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/prover/args/setup_keys.rs rename to zkstack_cli/crates/zkstack/src/commands/prover/args/setup_keys.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/compressor_keys.rs b/zkstack_cli/crates/zkstack/src/commands/prover/compressor_keys.rs similarity index 80% rename from zk_toolbox/crates/zk_inception/src/commands/prover/compressor_keys.rs rename to zkstack_cli/crates/zkstack/src/commands/prover/compressor_keys.rs index 703ecc18c4c..a3d40c95728 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/prover/compressor_keys.rs +++ b/zkstack_cli/crates/zkstack/src/commands/prover/compressor_keys.rs @@ -1,7 +1,7 @@ use anyhow::Context; -use common::{check_prerequisites, cmd::Cmd, spinner::Spinner, WGET_PREREQUISITE}; +use common::spinner::Spinner; use config::{get_link_to_prover, EcosystemConfig, GeneralConfig}; -use xshell::{cmd, Shell}; +use xshell::Shell; use super::args::compressor_keys::CompressorKeysArgs; use crate::messages::{ @@ -35,7 +35,6 @@ pub(crate) fn download_compressor_key( general_config: &mut GeneralConfig, path: &str, ) -> anyhow::Result<()> { - check_prerequisites(shell, &WGET_PREREQUISITE, false); let spinner = Spinner::new(MSG_DOWNLOADING_SETUP_COMPRESSOR_KEY_SPINNER); let mut compressor_config: zksync_config::configs::FriProofCompressorConfig = general_config .proof_compressor_config @@ -47,14 +46,13 @@ pub(crate) fn download_compressor_key( let url = compressor_config.universal_setup_download_url; let path = std::path::Path::new(path); - let parent = path.parent().expect(MSG_SETUP_KEY_PATH_ERROR); - let file_name = path.file_name().expect(MSG_SETUP_KEY_PATH_ERROR); - Cmd::new(cmd!(shell, "wget {url} -P {parent}")).run()?; + let client = reqwest::blocking::Client::builder() + .timeout(std::time::Duration::from_secs(600)) + .build()?; - if file_name != "setup_2^24.key" { - Cmd::new(cmd!(shell, "mv {parent}/setup_2^24.key {path}")).run()?; - } + let response = client.get(url).send()?.bytes()?; + shell.write_file(path, &response)?; spinner.finish(); Ok(()) diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/gcs.rs b/zkstack_cli/crates/zkstack/src/commands/prover/gcs.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/prover/gcs.rs rename to zkstack_cli/crates/zkstack/src/commands/prover/gcs.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/init.rs b/zkstack_cli/crates/zkstack/src/commands/prover/init.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/prover/init.rs rename to zkstack_cli/crates/zkstack/src/commands/prover/init.rs diff --git 
a/zk_toolbox/crates/zk_inception/src/commands/prover/init_bellman_cuda.rs b/zkstack_cli/crates/zkstack/src/commands/prover/init_bellman_cuda.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/prover/init_bellman_cuda.rs rename to zkstack_cli/crates/zkstack/src/commands/prover/init_bellman_cuda.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/mod.rs b/zkstack_cli/crates/zkstack/src/commands/prover/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/prover/mod.rs rename to zkstack_cli/crates/zkstack/src/commands/prover/mod.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/run.rs b/zkstack_cli/crates/zkstack/src/commands/prover/run.rs similarity index 88% rename from zk_toolbox/crates/zk_inception/src/commands/prover/run.rs rename to zkstack_cli/crates/zkstack/src/commands/prover/run.rs index ed2f5b41a86..85495d12404 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/prover/run.rs +++ b/zkstack_cli/crates/zkstack/src/commands/prover/run.rs @@ -8,7 +8,8 @@ use xshell::{cmd, Shell}; use super::args::run::{ProverComponent, ProverRunArgs}; use crate::messages::{ MSG_BELLMAN_CUDA_DIR_ERR, MSG_CHAIN_NOT_FOUND_ERR, MSG_MISSING_COMPONENT_ERR, - MSG_RUNNING_COMPRESSOR, MSG_RUNNING_COMPRESSOR_ERR, MSG_RUNNING_PROVER, MSG_RUNNING_PROVER_ERR, + MSG_RUNNING_CIRCUIT_PROVER, MSG_RUNNING_CIRCUIT_PROVER_ERR, MSG_RUNNING_COMPRESSOR, + MSG_RUNNING_COMPRESSOR_ERR, MSG_RUNNING_PROVER, MSG_RUNNING_PROVER_ERR, MSG_RUNNING_PROVER_GATEWAY, MSG_RUNNING_PROVER_GATEWAY_ERR, MSG_RUNNING_PROVER_JOB_MONITOR, MSG_RUNNING_PROVER_JOB_MONITOR_ERR, MSG_RUNNING_WITNESS_GENERATOR, MSG_RUNNING_WITNESS_GENERATOR_ERR, MSG_RUNNING_WITNESS_VECTOR_GENERATOR, @@ -32,7 +33,7 @@ pub(crate) async fn run(args: ProverRunArgs, shell: &Shell) -> anyhow::Result<() let application_args = component.get_application_args(in_docker)?; let additional_args = - component.get_additional_args(in_docker, args, &chain, &path_to_ecosystem)?; + component.get_additional_args(in_docker, args.clone(), &chain, &path_to_ecosystem)?; let (message, error) = match component { ProverComponent::WitnessGenerator => ( @@ -49,6 +50,12 @@ pub(crate) async fn run(args: ProverRunArgs, shell: &Shell) -> anyhow::Result<() } (MSG_RUNNING_PROVER, MSG_RUNNING_PROVER_ERR) } + ProverComponent::CircuitProver => { + if !in_docker { + check_prerequisites(shell, &GPU_PREREQUISITES, false); + } + (MSG_RUNNING_CIRCUIT_PROVER, MSG_RUNNING_CIRCUIT_PROVER_ERR) + } ProverComponent::Compressor => { if !in_docker { check_prerequisites(shell, &GPU_PREREQUISITES, false); @@ -76,6 +83,7 @@ pub(crate) async fn run(args: ProverRunArgs, shell: &Shell) -> anyhow::Result<() run_dockerized_component( shell, component.image_name(), + &args.tag.unwrap(), &application_args, &additional_args, message, @@ -103,6 +111,7 @@ pub(crate) async fn run(args: ProverRunArgs, shell: &Shell) -> anyhow::Result<() fn run_dockerized_component( shell: &Shell, image_name: &str, + tag: &str, application_args: &[String], args: &[String], message: &'static str, @@ -117,7 +126,7 @@ fn run_dockerized_component( let mut cmd = Cmd::new(cmd!( shell, - "docker run --net=host -v {path_to_prover}/data/keys:/prover/data/keys -v {path_to_prover}/artifacts:/artifacts -v {path_to_configs}:/configs {application_args...} {image_name} {args...}" + "docker run --net=host -v {path_to_prover}/data/keys:/prover/data/keys -v {path_to_prover}/artifacts:/artifacts -v {path_to_configs}:/configs {application_args...} {image_name}:{tag} {args...}" 
)); cmd = cmd.with_force_run(); diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/setup_keys.rs b/zkstack_cli/crates/zkstack/src/commands/prover/setup_keys.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/prover/setup_keys.rs rename to zkstack_cli/crates/zkstack/src/commands/prover/setup_keys.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/server.rs b/zkstack_cli/crates/zkstack/src/commands/server.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/commands/server.rs rename to zkstack_cli/crates/zkstack/src/commands/server.rs diff --git a/zk_toolbox/crates/zk_inception/src/commands/update.rs b/zkstack_cli/crates/zkstack/src/commands/update.rs similarity index 95% rename from zk_toolbox/crates/zk_inception/src/commands/update.rs rename to zkstack_cli/crates/zkstack/src/commands/update.rs index 5cb7208ffd0..534d490e6ca 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/update.rs +++ b/zkstack_cli/crates/zkstack/src/commands/update.rs @@ -8,11 +8,10 @@ use common::{ yaml::{merge_yaml, ConfigDiff}, }; use config::{ - traits::ReadConfigWithBasePath, ChainConfig, EcosystemConfig, CONTRACTS_FILE, EN_CONFIG_FILE, - ERA_OBSERBAVILITY_DIR, GENERAL_FILE, GENESIS_FILE, SECRETS_FILE, + ChainConfig, EcosystemConfig, CONTRACTS_FILE, EN_CONFIG_FILE, ERA_OBSERBAVILITY_DIR, + GENERAL_FILE, GENESIS_FILE, SECRETS_FILE, }; use xshell::Shell; -use zksync_config::configs::Secrets; use super::args::UpdateArgs; use crate::{ @@ -183,7 +182,7 @@ async fn update_chain( )?; } - let secrets = Secrets::read_with_base_path(shell, secrets)?; + let secrets = chain.get_secrets_config()?; if let Some(db) = secrets.database { if let Some(url) = db.server_url { diff --git a/zk_toolbox/crates/zk_inception/src/consts.rs b/zkstack_cli/crates/zkstack/src/consts.rs similarity index 61% rename from zk_toolbox/crates/zk_inception/src/consts.rs rename to zkstack_cli/crates/zkstack/src/consts.rs index 9f81847e333..b7c4d2a2070 100644 --- a/zk_toolbox/crates/zk_inception/src/consts.rs +++ b/zkstack_cli/crates/zkstack/src/consts.rs @@ -1,5 +1,3 @@ -use std::net::{IpAddr, Ipv4Addr}; - pub const AMOUNT_FOR_DISTRIBUTION_TO_WALLETS: u128 = 1000000000000000000000; pub const MINIMUM_BALANCE_FOR_WALLET: u128 = 5000000000000000000; @@ -12,27 +10,6 @@ pub const DEFAULT_UNSIGNED_TRANSACTIONS_DIR: &str = "transactions"; pub const BELLMAN_CUDA_DIR: &str = "era-bellman-cuda"; pub const L2_BASE_TOKEN_ADDRESS: &str = "0x000000000000000000000000000000000000800A"; -#[allow(non_upper_case_globals)] -const kB: usize = 1024; - -/// Max payload size for consensus in bytes -pub const MAX_PAYLOAD_SIZE: usize = 2_500_000; -/// Max batch size for consensus in bytes -/// Compute a default batch size, so operators are not caught out by the missing setting -/// while we're still working on batch syncing. The batch interval is ~1 minute, -/// so there will be ~60 blocks, and an Ethereum Merkle proof is ~1kB, but under high -/// traffic there can be thousands of huge transactions that quickly fill up blocks -/// and there could be more blocks in a batch then expected. We chose a generous -/// limit so as not to prevent any legitimate batch from being transmitted. 
-pub const MAX_BATCH_SIZE: usize = MAX_PAYLOAD_SIZE * 5000 + kB; -/// Gossip dynamic inbound limit for consensus -pub const GOSSIP_DYNAMIC_INBOUND_LIMIT: usize = 100; - -/// Public address for consensus -pub const CONSENSUS_PUBLIC_ADDRESS_HOST: IpAddr = IpAddr::V4(Ipv4Addr::UNSPECIFIED); -/// Server address for consensus -pub const CONSENSUS_SERVER_ADDRESS_HOST: IpAddr = IpAddr::V4(Ipv4Addr::LOCALHOST); - /// Path to the JS runtime config for the block-explorer-app docker container to be mounted to pub const EXPLORER_APP_DOCKER_CONFIG_PATH: &str = "/usr/src/app/packages/app/dist/config.js"; pub const EXPLORER_APP_DOCKER_IMAGE: &str = "matterlabs/block-explorer-app"; @@ -40,18 +17,19 @@ pub const EXPLORER_APP_DOCKER_IMAGE: &str = "matterlabs/block-explorer-app"; pub const PORTAL_DOCKER_CONFIG_PATH: &str = "/usr/src/app/dist/config.js"; pub const PORTAL_DOCKER_IMAGE: &str = "matterlabs/dapp-portal"; -pub const PROVER_GATEWAY_DOCKER_IMAGE: &str = "matterlabs/prover-fri-gateway:latest2.0"; -pub const WITNESS_GENERATOR_DOCKER_IMAGE: &str = "matterlabs/witness-generator:latest2.0"; -pub const WITNESS_VECTOR_GENERATOR_DOCKER_IMAGE: &str = - "matterlabs/witness-vector-generator:latest2.0"; -pub const PROVER_DOCKER_IMAGE: &str = "matterlabs/prover-gpu-fri:latest2.0"; -pub const COMPRESSOR_DOCKER_IMAGE: &str = "matterlabs/proof-fri-gpu-compressor:latest2.0"; -pub const PROVER_JOB_MONITOR_DOCKER_IMAGE: &str = "matterlabs/prover-job-monitor:latest2.0"; +pub const PROVER_GATEWAY_DOCKER_IMAGE: &str = "matterlabs/prover-fri-gateway"; +pub const WITNESS_GENERATOR_DOCKER_IMAGE: &str = "matterlabs/witness-generator"; +pub const WITNESS_VECTOR_GENERATOR_DOCKER_IMAGE: &str = "matterlabs/witness-vector-generator"; +pub const PROVER_DOCKER_IMAGE: &str = "matterlabs/prover-gpu-fri"; +pub const CIRCUIT_PROVER_DOCKER_IMAGE: &str = "matterlabs/circuit-prover-gpu"; +pub const COMPRESSOR_DOCKER_IMAGE: &str = "matterlabs/proof-fri-gpu-compressor"; +pub const PROVER_JOB_MONITOR_DOCKER_IMAGE: &str = "matterlabs/prover-job-monitor"; pub const PROVER_GATEWAY_BINARY_NAME: &str = "zksync_prover_fri_gateway"; pub const WITNESS_GENERATOR_BINARY_NAME: &str = "zksync_witness_generator"; pub const WITNESS_VECTOR_GENERATOR_BINARY_NAME: &str = "zksync_witness_vector_generator"; pub const PROVER_BINARY_NAME: &str = "zksync_prover_fri"; +pub const CIRCUIT_PROVER_BINARY_NAME: &str = "zksync_circuit_prover"; pub const COMPRESSOR_BINARY_NAME: &str = "zksync_proof_fri_compressor"; pub const PROVER_JOB_MONITOR_BINARY_NAME: &str = "zksync_prover_job_monitor"; diff --git a/zk_toolbox/crates/zk_inception/src/defaults.rs b/zkstack_cli/crates/zkstack/src/defaults.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/defaults.rs rename to zkstack_cli/crates/zkstack/src/defaults.rs diff --git a/zk_toolbox/crates/zk_inception/src/external_node.rs b/zkstack_cli/crates/zkstack/src/external_node.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/external_node.rs rename to zkstack_cli/crates/zkstack/src/external_node.rs diff --git a/zk_toolbox/crates/zk_inception/src/main.rs b/zkstack_cli/crates/zkstack/src/main.rs similarity index 56% rename from zk_toolbox/crates/zk_inception/src/main.rs rename to zkstack_cli/crates/zkstack/src/main.rs index 0af9922d0c4..3ebe26a4fa2 100644 --- a/zk_toolbox/crates/zk_inception/src/main.rs +++ b/zkstack_cli/crates/zkstack/src/main.rs @@ -1,7 +1,8 @@ use clap::{command, Parser, Subcommand}; use commands::{ - args::{ContainersArgs, UpdateArgs}, + args::{AutocompleteArgs, 
ContainersArgs, UpdateArgs}, contract_verifier::ContractVerifierCommands, + dev::DevCommands, }; use common::{ check_general_prerequisites, @@ -28,30 +29,36 @@ mod utils; #[derive(Parser, Debug)] #[command( + name = "zkstack", version = version_message(env!("CARGO_PKG_VERSION")), about )] -struct Inception { +struct ZkStack { #[command(subcommand)] - command: InceptionSubcommands, + command: ZkStackSubcommands, #[clap(flatten)] - global: InceptionGlobalArgs, + global: ZkStackGlobalArgs, } #[derive(Subcommand, Debug)] -pub enum InceptionSubcommands { +pub enum ZkStackSubcommands { + /// Create shell autocompletion files + Autocomplete(AutocompleteArgs), /// Ecosystem related commands #[command(subcommand, alias = "e")] - Ecosystem(EcosystemCommands), + Ecosystem(Box<EcosystemCommands>), /// Chain related commands #[command(subcommand, alias = "c")] - Chain(ChainCommands), + Chain(Box<ChainCommands>), + /// Supervisor related commands + #[command(subcommand)] + Dev(DevCommands), /// Prover related commands #[command(subcommand, alias = "p")] Prover(ProverCommands), /// Run server Server(RunServerArgs), - /// External Node related commands + /// External Node related commands #[command(subcommand, alias = "en")] ExternalNode(ExternalNodeCommands), /// Run containers for local development @@ -65,18 +72,20 @@ pub enum InceptionSubcommands { /// Run block-explorer #[command(subcommand)] Explorer(ExplorerCommands), - /// Update ZKsync + /// Consensus utilities #[command(subcommand)] Consensus(consensus::Command), + /// Update ZKsync #[command(alias = "u")] Update(UpdateArgs), + /// Print markdown help #[command(hide = true)] Markdown, } #[derive(Parser, Debug)] #[clap(next_help_heading = "Global options")] -struct InceptionGlobalArgs { +struct ZkStackGlobalArgs { /// Verbose mode #[clap(short, long, global = true)] verbose: bool, @@ -94,8 +103,20 @@ async fn main() -> anyhow::Result<()> { // We must parse arguments before printing the intro, because some autogenerated // Clap commands (like `--version` would look odd otherwise). - let inception_args = Inception::parse(); + let zkstack_args = ZkStack::parse(); + + match run_subcommand(zkstack_args).await { + Ok(_) => {} + Err(error) => { + log_error(error); + std::process::exit(1); + } + } + + Ok(()) +} +async fn run_subcommand(zkstack_args: ZkStack) -> anyhow::Result<()> { init_prompt_theme(); logger::new_empty_line(); @@ -103,51 +124,39 @@ async fn main() -> anyhow::Result<()> { let shell = Shell::new().unwrap(); - init_global_config_inner(&shell, &inception_args.global)?; + init_global_config_inner(&shell, &zkstack_args.global)?; if !global_config().ignore_prerequisites { check_general_prerequisites(&shell); } - match run_subcommand(inception_args, &shell).await { - Ok(_) => {} - Err(error) => { - log_error(error); - std::process::exit(1); - } - } - Ok(()) -} - -async fn run_subcommand(inception_args: Inception, shell: &Shell) -> anyhow::Result<()> { - match inception_args.command { - InceptionSubcommands::Ecosystem(args) => commands::ecosystem::run(shell, args).await?, - InceptionSubcommands::Chain(args) => commands::chain::run(shell, args).await?, - InceptionSubcommands::Prover(args) => commands::prover::run(shell, args).await?, - InceptionSubcommands::Server(args) => commands::server::run(shell, args)?, - InceptionSubcommands::Containers(args) => commands::containers::run(shell, args)?, - InceptionSubcommands::ExternalNode(args) => { - commands::external_node::run(shell, args).await?
+ match zkstack_args.command { + ZkStackSubcommands::Autocomplete(args) => commands::autocomplete::run(args)?, + ZkStackSubcommands::Ecosystem(args) => commands::ecosystem::run(&shell, *args).await?, + ZkStackSubcommands::Chain(args) => commands::chain::run(&shell, *args).await?, + ZkStackSubcommands::Dev(args) => commands::dev::run(&shell, args).await?, + ZkStackSubcommands::Prover(args) => commands::prover::run(&shell, args).await?, + ZkStackSubcommands::Server(args) => commands::server::run(&shell, args)?, + ZkStackSubcommands::Containers(args) => commands::containers::run(&shell, args)?, + ZkStackSubcommands::ExternalNode(args) => { + commands::external_node::run(&shell, args).await? } - InceptionSubcommands::ContractVerifier(args) => { - commands::contract_verifier::run(shell, args).await? + ZkStackSubcommands::ContractVerifier(args) => { + commands::contract_verifier::run(&shell, args).await? } - InceptionSubcommands::Explorer(args) => commands::explorer::run(shell, args).await?, - InceptionSubcommands::Consensus(cmd) => cmd.run(shell).await?, - InceptionSubcommands::Portal => commands::portal::run(shell).await?, - InceptionSubcommands::Update(args) => commands::update::run(shell, args).await?, - InceptionSubcommands::Markdown => { - clap_markdown::print_help_markdown::<Inception>(); + ZkStackSubcommands::Explorer(args) => commands::explorer::run(&shell, args).await?, + ZkStackSubcommands::Consensus(cmd) => cmd.run(&shell).await?, + ZkStackSubcommands::Portal => commands::portal::run(&shell).await?, + ZkStackSubcommands::Update(args) => commands::update::run(&shell, args).await?, + ZkStackSubcommands::Markdown => { + clap_markdown::print_help_markdown::<ZkStack>(); } } Ok(()) } -fn init_global_config_inner( - shell: &Shell, - inception_args: &InceptionGlobalArgs, -) -> anyhow::Result<()> { - if let Some(name) = &inception_args.chain { +fn init_global_config_inner(shell: &Shell, zkstack_args: &ZkStackGlobalArgs) -> anyhow::Result<()> { + if let Some(name) = &zkstack_args.chain { if let Ok(config) = EcosystemConfig::from_file(shell) { let chains = config.list_of_chains(); if !chains.contains(name) { @@ -160,9 +169,9 @@ fn init_global_config_inner( } } init_global_config(GlobalConfig { - verbose: inception_args.verbose, - chain_name: inception_args.chain.clone(), - ignore_prerequisites: inception_args.ignore_prerequisites, + verbose: zkstack_args.verbose, + chain_name: zkstack_args.chain.clone(), + ignore_prerequisites: zkstack_args.ignore_prerequisites, }); Ok(()) } diff --git a/zk_toolbox/crates/zk_inception/src/messages.rs b/zkstack_cli/crates/zkstack/src/messages.rs similarity index 94% rename from zk_toolbox/crates/zk_inception/src/messages.rs rename to zkstack_cli/crates/zkstack/src/messages.rs index c539afc5144..a985b4238bd 100644 --- a/zk_toolbox/crates/zk_inception/src/messages.rs +++ b/zkstack_cli/crates/zkstack/src/messages.rs @@ -15,6 +15,15 @@ pub(super) const MSG_SELECTED_CONFIG: &str = "Selected config"; pub(super) const MSG_CHAIN_NOT_INITIALIZED: &str = "Chain not initialized. Please create a chain first"; pub(super) const MSG_ARGS_VALIDATOR_ERR: &str = "Invalid arguments"; +pub(super) const MSG_DEV_ARG_HELP: &str = + "Use defaults for all options and flags.
Suitable for local development"; + +/// Autocomplete message +pub(super) fn msg_generate_autocomplete_file(filename: &str) -> String { + format!("Generating completion file: {filename}") +} +pub(super) const MSG_OUTRO_AUTOCOMPLETE_GENERATION: &str = + "Autocompletion file correctly generated"; /// Ecosystem create related messages pub(super) const MSG_L1_NETWORK_HELP: &str = "L1 Network"; @@ -54,8 +63,6 @@ pub(super) fn msg_path_to_zksync_does_not_exist_err(path: &str) -> String { pub(super) const MSG_L1_RPC_URL_HELP: &str = "L1 RPC URL"; pub(super) const MSG_NO_PORT_REALLOCATION_HELP: &str = "Do not reallocate ports"; pub(super) const MSG_GENESIS_ARGS_HELP: &str = "Genesis options"; -pub(super) const MSG_DEV_ARG_HELP: &str = - "Deploy ecosystem using all defaults. Suitable for local development"; pub(super) const MSG_OBSERVABILITY_HELP: &str = "Enable Grafana"; pub(super) const MSG_OBSERVABILITY_PROMPT: &str = "Do you want to setup observability? (Grafana)"; pub(super) const MSG_DEPLOY_ECOSYSTEM_PROMPT: &str = @@ -72,6 +79,10 @@ pub(super) const MSG_CHAIN_NOT_FOUND_ERR: &str = "Chain not found"; pub(super) const MSG_INITIALIZING_ECOSYSTEM: &str = "Initializing ecosystem"; pub(super) const MSG_DEPLOYING_ERC20: &str = "Deploying ERC20 contracts"; pub(super) const MSG_CHAIN_INITIALIZED: &str = "Chain initialized successfully"; +pub(super) const MSG_CHAIN_CONFIGS_INITIALIZED: &str = "Chain configs were initialized"; +pub(super) const MSG_CHAIN_OWNERSHIP_TRANSFERRED: &str = + "Chain ownership was transferred successfully"; +pub(super) const MSG_CHAIN_REGISTERED: &str = "Chain registration was successful"; pub(super) const MSG_DISTRIBUTING_ETH_SPINNER: &str = "Distributing eth..."; pub(super) const MSG_MINT_BASE_TOKEN_SPINNER: &str = "Minting base token to the governance addresses..."; @@ -101,7 +112,11 @@ pub(super) fn msg_initializing_chain(chain_name: &str) -> String { } pub(super) fn msg_ecosystem_initialized(chains: &str) -> String { - format!("Ecosystem initialized successfully with chains {chains}") + if chains.is_empty() { + "Ecosystem initialized successfully.
You can initialize a chain with `chain init`".to_string() + } else { + format!("Ecosystem initialized successfully with chains {chains}") + } } /// Ecosystem default related messages @@ -142,6 +157,7 @@ pub(super) const MSG_BASE_TOKEN_ADDRESS_HELP: &str = "Base token address"; pub(super) const MSG_BASE_TOKEN_PRICE_NOMINATOR_HELP: &str = "Base token nominator"; pub(super) const MSG_BASE_TOKEN_PRICE_DENOMINATOR_HELP: &str = "Base token denominator"; pub(super) const MSG_SET_AS_DEFAULT_HELP: &str = "Set as default chain"; +pub(super) const MSG_EVM_EMULATOR_HELP: &str = "Enable EVM emulator"; pub(super) const MSG_CHAIN_NAME_PROMPT: &str = "What do you want to name the chain?"; pub(super) const MSG_CHAIN_ID_PROMPT: &str = "What's the chain id?"; pub(super) const MSG_WALLET_CREATION_PROMPT: &str = "Select how do you want to create the wallet"; @@ -156,6 +172,7 @@ pub(super) const MSG_BASE_TOKEN_PRICE_NOMINATOR_PROMPT: &str = pub(super) const MSG_BASE_TOKEN_PRICE_DENOMINATOR_PROMPT: &str = "What is the base token price denominator?"; pub(super) const MSG_SET_AS_DEFAULT_PROMPT: &str = "Set this chain as default?"; +pub(super) const MSG_EVM_EMULATOR_PROMPT: &str = "Enable EVM emulator?"; pub(super) const MSG_WALLET_PATH_INVALID_ERR: &str = "Invalid path"; pub(super) const MSG_NUMBER_VALIDATOR_NOT_ZERO_ERR: &str = "Number is not zero"; pub(super) const MSG_NUMBER_VALIDATOR_GREATHER_THAN_ZERO_ERR: &str = @@ -170,6 +187,9 @@ pub(super) const MSG_WALLET_CREATION_VALIDATOR_ERR: &str = "Localhost wallet is not supported for external networks"; pub(super) const MSG_WALLET_TOKEN_MULTIPLIER_SETTER_NOT_FOUND: &str = "Token Multiplier Setter not found. Specify it in a wallet config"; +pub(super) const MSG_EVM_EMULATOR_HASH_MISSING_ERR: &str = + "Impossible to initialize a chain with EVM emulator: the template genesis config \ + does not contain EVM emulator hash"; /// Chain genesis related messages pub(super) const MSG_L1_SECRETS_MUST_BE_PRESENTED: &str = "L1 secret must be presented"; @@ -188,6 +208,7 @@ pub(super) const MSG_INITIALIZING_SERVER_DATABASE: &str = "Initializing server d pub(super) const MSG_FAILED_TO_DROP_SERVER_DATABASE_ERR: &str = "Failed to drop server database"; pub(super) const MSG_INITIALIZING_PROVER_DATABASE: &str = "Initializing prover database"; pub(super) const MSG_FAILED_TO_DROP_PROVER_DATABASE_ERR: &str = "Failed to drop prover database"; +pub(super) const MSG_GENESIS_DATABASES_INITIALIZED: &str = "Databases initialized successfully"; /// Chain update related messages pub(super) const MSG_WALLETS_CONFIG_MUST_BE_PRESENT: &str = "Wallets configuration must be present"; @@ -288,7 +309,7 @@ pub(super) const MSG_EXPLORER_FAILED_TO_RUN_DOCKER_ERR: &str = pub(super) const MSG_EXPLORER_FAILED_TO_CREATE_CONFIG_ERR: &str = "Failed to create explorer config"; pub(super) const MSG_EXPLORER_FAILED_TO_FIND_ANY_CHAIN_ERR: &str = - "Failed to find any valid chain to run explorer for. Did you run `zk_inception explorer init`?"; + "Failed to find any valid chain to run explorer for.
Did you run `zkstack explorer init`?"; pub(super) const MSG_EXPLORER_INITIALIZED: &str = "Explorer has been initialized successfully"; pub(super) fn msg_explorer_initializing_database_for(chain: &str) -> String { format!("Initializing explorer database for {chain} chain") @@ -303,7 +324,7 @@ pub(super) fn msg_explorer_starting_on(host: &str, port: u16) -> String { format!("Starting explorer on http://{host}:{port}") } pub(super) fn msg_explorer_chain_not_initialized(chain: &str) -> String { - format!("Chain {chain} is not initialized for explorer: run `zk_inception explorer init --chain {chain}` first") + format!("Chain {chain} is not initialized for explorer: run `zkstack explorer init --chain {chain}` first") } /// Forge utils related messages @@ -343,6 +364,7 @@ pub(super) const MSG_RUNNING_PROVER_JOB_MONITOR: &str = "Running prover job moni pub(super) const MSG_RUNNING_WITNESS_GENERATOR: &str = "Running witness generator"; pub(super) const MSG_RUNNING_WITNESS_VECTOR_GENERATOR: &str = "Running witness vector generator"; pub(super) const MSG_RUNNING_PROVER: &str = "Running prover"; +pub(super) const MSG_RUNNING_CIRCUIT_PROVER: &str = "Running circuit prover"; pub(super) const MSG_RUNNING_COMPRESSOR: &str = "Running compressor"; pub(super) const MSG_RUN_COMPONENT_PROMPT: &str = "What component do you want to run?"; pub(super) const MSG_RUNNING_PROVER_GATEWAY_ERR: &str = "Failed to run prover gateway"; @@ -351,6 +373,7 @@ pub(super) const MSG_RUNNING_WITNESS_VECTOR_GENERATOR_ERR: &str = "Failed to run witness vector generator"; pub(super) const MSG_RUNNING_COMPRESSOR_ERR: &str = "Failed to run compressor"; pub(super) const MSG_RUNNING_PROVER_ERR: &str = "Failed to run prover"; +pub(super) const MSG_RUNNING_CIRCUIT_PROVER_ERR: &str = "Failed to run circuit prover"; pub(super) const MSG_PROOF_STORE_CONFIG_PROMPT: &str = "Select where you would like to store the proofs"; pub(super) const MSG_PROOF_STORE_DIR_PROMPT: &str = diff --git a/zk_toolbox/crates/zk_inception/src/utils/consensus.rs b/zkstack_cli/crates/zkstack/src/utils/consensus.rs similarity index 70% rename from zk_toolbox/crates/zk_inception/src/utils/consensus.rs rename to zkstack_cli/crates/zkstack/src/utils/consensus.rs index 2979b4df0c1..946d28a33fb 100644 --- a/zk_toolbox/crates/zk_inception/src/utils/consensus.rs +++ b/zkstack_cli/crates/zkstack/src/utils/consensus.rs @@ -1,24 +1,14 @@ -use std::{ - collections::{BTreeMap, BTreeSet}, - net::SocketAddr, -}; - use anyhow::Context as _; use config::ChainConfig; use secrecy::{ExposeSecret, Secret}; use zksync_config::configs::consensus::{ - AttesterPublicKey, AttesterSecretKey, ConsensusConfig, ConsensusSecrets, GenesisSpec, Host, - NodePublicKey, NodeSecretKey, ProtocolVersion, ValidatorPublicKey, ValidatorSecretKey, - WeightedAttester, WeightedValidator, + AttesterPublicKey, AttesterSecretKey, ConsensusSecrets, GenesisSpec, NodePublicKey, + NodeSecretKey, ProtocolVersion, ValidatorPublicKey, ValidatorSecretKey, WeightedAttester, + WeightedValidator, }; use zksync_consensus_crypto::{Text, TextFmt}; use zksync_consensus_roles::{attester, node, validator}; -use crate::consts::{ - CONSENSUS_PUBLIC_ADDRESS_HOST, CONSENSUS_SERVER_ADDRESS_HOST, GOSSIP_DYNAMIC_INBOUND_LIMIT, - MAX_BATCH_SIZE, MAX_PAYLOAD_SIZE, -}; - pub(crate) fn parse_attester_committee( attesters: &[WeightedAttester], ) -> anyhow::Result<attester::Committee> { @@ -48,32 +38,6 @@ pub struct ConsensusPublicKeys { attester_key: attester::PublicKey, } -pub fn get_consensus_config( - chain_config: &ChainConfig, - consensus_port: u16, -
consensus_keys: Option<ConsensusSecretKeys>, - gossip_static_outbound: Option<BTreeMap<NodePublicKey, Host>>, -) -> anyhow::Result<ConsensusConfig> { - let genesis_spec = - consensus_keys.map(|consensus_keys| get_genesis_specs(chain_config, &consensus_keys)); - - let public_addr = SocketAddr::new(CONSENSUS_PUBLIC_ADDRESS_HOST, consensus_port); - let server_addr = SocketAddr::new(CONSENSUS_SERVER_ADDRESS_HOST, consensus_port); - - Ok(ConsensusConfig { - server_addr, - public_addr: Host(public_addr.encode()), - genesis_spec, - max_payload_size: MAX_PAYLOAD_SIZE, - gossip_dynamic_inbound_limit: GOSSIP_DYNAMIC_INBOUND_LIMIT, - max_batch_size: MAX_BATCH_SIZE, - gossip_static_inbound: BTreeSet::new(), - gossip_static_outbound: gossip_static_outbound.unwrap_or_default(), - rpc: None, - debug_page_addr: None, - }) -} - pub fn generate_consensus_keys() -> ConsensusSecretKeys { ConsensusSecretKeys { validator_key: validator::SecretKey::generate(), diff --git a/zk_toolbox/crates/zk_inception/src/utils/forge.rs b/zkstack_cli/crates/zkstack/src/utils/forge.rs similarity index 74% rename from zk_toolbox/crates/zk_inception/src/utils/forge.rs rename to zkstack_cli/crates/zkstack/src/utils/forge.rs index cabc8ff7566..355cf7b5f93 100644 --- a/zk_toolbox/crates/zk_inception/src/utils/forge.rs +++ b/zkstack_cli/crates/zkstack/src/utils/forge.rs @@ -1,6 +1,6 @@ -use anyhow::anyhow; -use common::forge::ForgeScript; -use ethers::types::{H256, U256}; +use anyhow::Context as _; +use common::{forge::ForgeScript, wallets::Wallet}; +use ethers::types::U256; use crate::{ consts::MINIMUM_BALANCE_FOR_WALLET, @@ -9,10 +9,14 @@ use crate::{ pub fn fill_forge_private_key( mut forge: ForgeScript, - private_key: Option<H256>, + wallet: Option<&Wallet>, ) -> anyhow::Result<ForgeScript> { if !forge.wallet_args_passed() { - forge = forge.with_private_key(private_key.ok_or(anyhow!(MSG_DEPLOYER_PK_NOT_SET_ERR))?); + forge = forge.with_private_key( + wallet + .and_then(|w| w.private_key_h256()) + .context(MSG_DEPLOYER_PK_NOT_SET_ERR)?, + ); } Ok(forge) } diff --git a/zk_toolbox/crates/zk_inception/src/utils/mod.rs b/zkstack_cli/crates/zkstack/src/utils/mod.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/utils/mod.rs rename to zkstack_cli/crates/zkstack/src/utils/mod.rs diff --git a/zk_toolbox/crates/zk_inception/src/utils/ports.rs b/zkstack_cli/crates/zkstack/src/utils/ports.rs similarity index 82% rename from zk_toolbox/crates/zk_inception/src/utils/ports.rs rename to zkstack_cli/crates/zkstack/src/utils/ports.rs index 5102b4fd9c6..6c299b99913 100644 --- a/zk_toolbox/crates/zk_inception/src/utils/ports.rs +++ b/zkstack_cli/crates/zkstack/src/utils/ports.rs @@ -1,4 +1,4 @@ -use std::{collections::HashMap, fmt, ops::Range, path::Path}; +use std::{collections::HashMap, fmt, net::SocketAddr, ops::Range, path::Path}; use anyhow::{bail, Context, Result}; use config::{ @@ -12,7 +12,24 @@ use xshell::Shell; use crate::defaults::{DEFAULT_OBSERVABILITY_PORT, PORT_RANGE_END, PORT_RANGE_START}; pub struct EcosystemPorts { - pub ports: HashMap<u16, Vec<String>>, + pub ports: HashMap<u16, Vec<PortInfo>>, } +#[derive(Clone, Debug, Default, PartialEq, Eq)] +pub struct PortInfo { + pub port: u16, + pub file_path: String, + pub description: String, +} + +impl fmt::Display for PortInfo { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "[{}] {} >{}", + self.file_path, self.description, self.port + ) + } } impl EcosystemPorts { @@ -20,14 +37,19 @@ impl EcosystemPorts { self.ports.contains_key(&port) } - pub fn add_port_info(&mut self, port: u16, info: String) { + pub fn add_port_info(&mut self, port: u16, info:
PortInfo) { + let info = PortInfo { + port, + file_path: info.file_path, + description: info.description, + }; self.ports.entry(port).or_default().push(info); } - pub fn allocate_port(&mut self, range: Range<u16>, info: String) -> anyhow::Result<u16> { + pub fn allocate_port(&mut self, range: Range<u16>, info: PortInfo) -> anyhow::Result<u16> { for port in range { if !self.is_port_assigned(port) { - self.add_port_info(port, info.to_string()); + self.add_port_info(port, info); return Ok(port); } } @@ -48,10 +70,15 @@ impl EcosystemPorts { let mut new_ports = HashMap::new(); for (desc, port) in config.get_default_ports()? { let mut new_port = port + offset; + let port_info = PortInfo { + port: new_port, + description: desc.clone(), + ..Default::default() + }; if self.is_port_assigned(new_port) { - new_port = self.allocate_port(port_range.clone(), desc.clone())?; + new_port = self.allocate_port(port_range.clone(), port_info)?; } else { - self.add_port_info(new_port, desc.to_string()); + self.add_port_info(new_port, port_info); } new_ports.insert(desc, new_port); } @@ -89,7 +116,7 @@ impl EcosystemPorts { if let Some(port) = val.as_u64().and_then(|p| u16::try_from(p).ok()) { let new_port = self.allocate_port( (port + offset as u16)..PORT_RANGE_END, - "".to_string(), + PortInfo::default(), )?; *val = Value::Number(serde_yaml::Number::from(new_port)); updated_ports.insert(port, new_port); @@ -109,6 +136,12 @@ impl EcosystemPorts { } } } + } else if key.as_str().map(|s| s.ends_with("addr")).unwrap_or(false) { + let socket_addr = val.as_str().unwrap().parse::<SocketAddr>()?; + if let Some(new_port) = updated_ports.get(&socket_addr.port()) { + let new_socket_addr = SocketAddr::new(socket_addr.ip(), *new_port); + *val = Value::String(new_socket_addr.to_string()); + } } } // Continue traversing @@ -126,6 +159,19 @@ impl EcosystemPorts { Ok(()) } + + pub fn group_by_file_path(&self) -> HashMap<String, Vec<PortInfo>> { + let mut grouped_ports: HashMap<String, Vec<PortInfo>> = HashMap::new(); + for port_infos in self.ports.values() { + for port_info in port_infos { + grouped_ports + .entry(port_info.file_path.clone()) + .or_default() + .push(port_info.clone()); + } + } + grouped_ports + } } impl fmt::Display for EcosystemPorts { @@ -169,7 +215,7 @@ impl EcosystemPortsScanner { // - Ecosystem directory (docker-compose files) let mut dirs = vec![ecosystem_config.config.clone()]; for chain in ecosystem_config.list_of_chains() { - if let Some(chain_config) = ecosystem_config.load_chain(Some(chain)) { + if let Ok(chain_config) = ecosystem_config.load_chain(Some(chain)) { dirs.push(chain_config.configs.clone()); if let Some(external_node_config_path) = &chain_config.external_node_config_path { dirs.push(external_node_config_path.clone()); @@ -272,8 +318,12 @@ impl EcosystemPortsScanner { ecosystem_ports: &mut EcosystemPorts, ) { if let Some(port) = value.as_u64().and_then(|p| u16::try_from(p).ok()) { - let description = format!("[{}] {}", file_path.display(), path); - ecosystem_ports.add_port_info(port, description); + let info = PortInfo { + port, + file_path: file_path.display().to_string(), + description: path.to_string(), + }; + ecosystem_ports.add_port_info(port, info); } } @@ -312,8 +362,12 @@ impl EcosystemPortsScanner { file_path: &Path, ecosystem_ports: &mut EcosystemPorts, ) { - let description = format!("[{}] {}", file_path.display(), path); - ecosystem_ports.add_port_info(port, description); + let info = PortInfo { + port, + file_path: file_path.display().to_string(), + description: path.to_string(), + }; + ecosystem_ports.add_port_info(port, info); } } @@ -354,7 +408,7 @@
impl ConfigWithChainPorts for ExplorerBackendPorts { mod tests { use std::path::PathBuf; - use crate::utils::ports::{EcosystemPorts, EcosystemPortsScanner}; + use crate::utils::ports::{EcosystemPorts, EcosystemPortsScanner, PortInfo}; #[test] fn test_traverse_yaml() { @@ -408,21 +462,28 @@ mod tests { // Check description: let port_3050_info = ecosystem_ports.ports.get(&3050).unwrap(); assert_eq!(port_3050_info.len(), 1); - assert_eq!( - port_3050_info[0], - "[test_config.yaml] api:web3_json_rpc:http_port" - ); + let expected_port_3050_info = PortInfo { + port: 3050, + file_path: "test_config.yaml".to_string(), + description: "api:web3_json_rpc:http_port".to_string(), + }; + assert_eq!(port_3050_info[0], expected_port_3050_info); let port_3412_info = ecosystem_ports.ports.get(&3412).unwrap(); assert_eq!(port_3412_info.len(), 2); - assert_eq!( - port_3412_info[0], - "[test_config.yaml] api:prometheus:listener_port" - ); - assert_eq!( - port_3412_info[1], - "[test_config.yaml] prometheus:listener_port" - ); + let expected_port_3412_info_0 = PortInfo { + port: 3412, + file_path: "test_config.yaml".to_string(), + description: "api:prometheus:listener_port".to_string(), + }; + let expected_port_3412_info_1 = PortInfo { + port: 3412, + file_path: "test_config.yaml".to_string(), + description: "prometheus:listener_port".to_string(), + }; + + assert_eq!(port_3412_info[0], expected_port_3412_info_0); + assert_eq!(port_3412_info[1], expected_port_3412_info_1); } #[test] @@ -445,7 +506,12 @@ mod tests { assert!(ecosystem_ports.is_port_assigned(3050)); let port_info = ecosystem_ports.ports.get(&3050).unwrap(); - assert_eq!(port_info[0], "[test_config.yaml] web3_json_rpc:http_port"); + let expected_port_info = PortInfo { + port: 3050, + file_path: "test_config.yaml".to_string(), + description: "web3_json_rpc:http_port".to_string(), + }; + assert_eq!(port_info[0], expected_port_info); } #[test] @@ -476,7 +542,12 @@ mod tests { assert!(ecosystem_ports.is_port_assigned(8546)); let port_info = ecosystem_ports.ports.get(&8546).unwrap(); - assert_eq!(port_info[0], "[test_config.yaml] reth:ports"); + let expected_port_info = PortInfo { + port: 8546, + file_path: "test_config.yaml".to_string(), + description: "reth:ports".to_string(), + }; + assert_eq!(port_info[0], expected_port_info); } #[test] diff --git a/zk_toolbox/crates/zk_inception/src/utils/rocks_db.rs b/zkstack_cli/crates/zkstack/src/utils/rocks_db.rs similarity index 100% rename from zk_toolbox/crates/zk_inception/src/utils/rocks_db.rs rename to zkstack_cli/crates/zkstack/src/utils/rocks_db.rs diff --git a/zkstack_cli/rust-toolchain b/zkstack_cli/rust-toolchain new file mode 100644 index 00000000000..03c040b91f1 --- /dev/null +++ b/zkstack_cli/rust-toolchain @@ -0,0 +1 @@ +nightly-2024-08-01 diff --git a/zkstack_cli/zkstackup/README.md b/zkstack_cli/zkstackup/README.md new file mode 100644 index 00000000000..4977c4641e0 --- /dev/null +++ b/zkstack_cli/zkstackup/README.md @@ -0,0 +1,70 @@ +# zkstackup - ZK Stack CLI Installer + +`zkstackup` is a script designed to simplify the installation of +[ZK Stack CLI](https://github.com/matter-labs/zksync-era/tree/main/zkstack_cli). It allows you to install the tool from +a local directory or directly from a GitHub repository.
+ +## Getting Started + +To install `zkstackup`, run the following command: + +```bash +curl -L https://raw.githubusercontent.com/matter-labs/zksync-era/main/zkstack_cli/zkstackup/install | bash +``` + +After installing `zkstackup`, you can use it to install `zkstack_cli` with: + +```bash +zkstackup +``` + +## Usage + +The `zkstackup` script provides various options for installing ZK Stack CLI: + +### Options + +- `-p, --path <path>` + Specify a local path to install ZK Stack CLI from. This option is ignored if `--repo` is provided. + +- `-r, --repo <repo>` + GitHub repository to install from (e.g., "matter-labs/zksync-era"). Defaults to "matter-labs/zksync-era". + +- `-b, --branch <branch>` + Git branch to use when installing from a repository. Ignored if `--commit` or `--version` is provided. + +- `-c, --commit <commit>` + Git commit hash to use when installing from a repository. Ignored if `--branch` or `--version` is provided. + +- `-v, --version <version>` + Git tag to use when installing from a repository. Ignored if `--branch` or `--commit` is provided. + +### Local Installation + +If you provide a local path using the `-p` or `--path` option, `zkstackup` will install ZK Stack CLI from that +directory. Note that repository-specific arguments (`--repo`, `--branch`, `--commit`, `--version`) will be ignored in +this case to preserve git state. + +### Repository Installation + +By default, `zkstackup` installs ZK Stack CLI from the "matter-labs/zksync-era" GitHub repository. You can specify a +different repository, branch, commit, or version using the respective options. If multiple arguments are provided, +`zkstackup` will prioritize them as follows: + +- `--version` +- `--commit` +- `--branch` + +### Examples + +**Install from a GitHub repository with a specific version:** + +```bash +zkstackup --repo matter-labs/zksync-era --version 0.1.1 +``` + +**Install from a local path:** + +```bash +zkstackup --path /path/to/local/zkstack_cli +``` diff --git a/zkstack_cli/zkstackup/install b/zkstack_cli/zkstackup/install new file mode 100755 index 00000000000..849f0699bc3 --- /dev/null +++ b/zkstack_cli/zkstackup/install @@ -0,0 +1,120 @@ +#!/usr/bin/env bash +set -eo pipefail + +BIN_URL="https://raw.githubusercontent.com/matter-labs/zksync-era/main/zkstack_cli/zkstackup/zkstackup" + +BIN_DIR="$HOME/.local/bin" +BIN_PATH="$BIN_DIR/zkstackup" + +main() { + parse_args "$@" + + mkdir -p "$BIN_DIR" + + if [ -n "$ZKSTACKUP_PATH" ]; then + cp -r "$ZKSTACKUP_PATH" "$BIN_DIR" + else + curl -sSfL "$BIN_URL" -o "$BIN_PATH" + fi + + chmod +x "$BIN_PATH" + echo "zkstackup: successfully installed in ${BIN_DIR}." + + add_bin_folder_to_path +} + +add_bin_folder_to_path() { + if [[ ":$PATH:" == *":${BIN_DIR}:"* ]]; then + echo "zkstackup: found ${BIN_DIR} in PATH" + exit 0 + fi + + case $SHELL in + */zsh) + PROFILE="${ZDOTDIR-"$HOME"}/.zshenv" + ;; + */bash) + PROFILE="$HOME/.bashrc" + ;; + */fish) + PROFILE="$HOME/.config/fish/config.fish" + ;; + */ash) + PROFILE="$HOME/.profile" + ;; + *) + echo "zkstackup: could not detect shell, manually add ${BIN_DIR} to your PATH." + exit 1 + ;; + esac + + if [[ ! -f "$PROFILE" ]]; then + echo "zkstackup: Profile file $PROFILE does not exist, creating it." + touch "$PROFILE" + fi + + if [[ "$SHELL" == *"/fish"* ]]; then + echo -e "\n# Added by zkstackup\nfish_add_path -a $BIN_DIR" >>"$PROFILE" + echo "zkstackup: Added $BIN_DIR to PATH in $PROFILE using fish_add_path."
+ else + echo -e "\n# Added by zkstackup\nexport PATH=\"\$PATH:$BIN_DIR\"" >>"$PROFILE" + echo "zkstackup: Added $BIN_DIR to PATH in $PROFILE." + fi + + echo + echo "Added zkstackup to PATH." + echo "Run 'source $PROFILE' or start a new terminal session to use zkstackup." + echo "Then run 'zkstackup' to install ZK Stack CLI." +} + +parse_args() { + while [[ $# -gt 0 ]]; do + case $1 in + --) + shift + break + ;; + -p | --path) + shift + ZKSTACKUP_PATH=$1 + ;; + -l | --local) + ZKSTACKUP_PATH="./" + ;; + -g | --global) + BIN_DIR="/usr/local/bin" + BIN_PATH="$BIN_DIR/zkstackup" + ;; + -h | --help) + usage + exit 0 + ;; + *) + err "Unknown argument: $1" + usage + exit 1 + ;; + esac + shift + done +} + + +usage() { + cat <<EOF + -p, --path <path> Specify a local path to install zkstackup from. + -l, --local Install zkstackup from the current directory. + -g, --global Install zkstackup for all users. + -h, --help Show this help message and exit. + +Examples: + $(basename "$0") --path /path/to/zkstackup +EOF +} + +main "$@" diff --git a/zkstack_cli/zkstackup/zkstackup b/zkstack_cli/zkstackup/zkstackup new file mode 100755 index 00000000000..e91bbc17905 --- /dev/null +++ b/zkstack_cli/zkstackup/zkstackup @@ -0,0 +1,272 @@ +#!/usr/bin/env bash +set -eo pipefail + +LOCAL_DIR="$HOME/.local/" +BIN_DIR="$LOCAL_DIR/bin" + +BINS=() + +main() { + parse_args "$@" + + zkstack_banner + + check_prerequisites + mkdir -p "$BIN_DIR" + + BINS+=(zkstack) + + if [ -n "$ZKSTACKUP_PATH" ]; then + install_local + else + install_from_repo + fi + + zkstack_banner + + add_bin_folder_to_path + + for bin in "${BINS[@]}"; do + success "Installed $bin to $BIN_DIR/$bin" + done +} + +PREREQUISITES=(cargo git) + +check_prerequisites() { + say "Checking prerequisites" + + failed_prerequisites=() + for prerequisite in "${PREREQUISITES[@]}"; do + if ! check_prerequisite "$prerequisite"; then + failed_prerequisites+=("$prerequisite") + fi + done + if [ ${#failed_prerequisites[@]} -gt 0 ]; then + err "The following prerequisites are missing: ${failed_prerequisites[*]}" + exit 1 + fi +} + +check_prerequisite() { + command -v "$1" &>/dev/null +} + +parse_args() { + while [[ $# -gt 0 ]]; do + case $1 in + --) + shift + break + ;; + -p | --path) + shift + ZKSTACKUP_PATH=$1 + ;; + -l | --local) + ZKSTACKUP_PATH="./" + ;; + -g | --global) + LOCAL_DIR="/usr/local" + BIN_DIR="$LOCAL_DIR/bin" + ;; + -r | --repo) + shift + ZKSTACKUP_REPO=$1 + ;; + -b | --branch) + shift + ZKSTACKUP_BRANCH=$1 + ;; + -c | --commit) + shift + ZKSTACKUP_COMMIT=$1 + ;; + -v | --version) + shift + ZKSTACKUP_VERSION=$1 + ;; + -h | --help) + usage + exit 0 + ;; + *) + err "Unknown argument: $1" + usage + exit 1 + ;; + esac + shift + done +} + +usage() { + cat <<EOF + -p, --path <path> Specify a local path to install ZK Stack CLI from. Ignored if --repo is provided. + -l, --local Install ZK Stack CLI from the current directory. Ignored if --repo is provided. + -g, --global Install ZK Stack CLI for all users. + -r, --repo <repo> GitHub repository to install from (e.g., "matter-labs/zksync-era"). Defaults to "matter-labs/zksync-era". + -b, --branch <branch> Git branch to use when installing from a repository. Ignored if --commit or --version is provided. + -c, --commit <commit> Git commit hash to use when installing from a repository. Ignored if --branch or --version is provided. + -v, --version <version> Git tag to use when installing from a repository. Ignored if --branch or --commit is provided. + -h, --help Show this help message and exit.
+ +Examples: + $(basename "$0") --repo matter-labs/zksync-era --version 0.1.1 +EOF +} + +install_local() { + if [ ! -d "$ZKSTACKUP_PATH/zkstack_cli" ]; then + err "Path $ZKSTACKUP_PATH does not contain zkstack_cli" + exit 1 + fi + + if [ "$ZKSTACKUP_PATH" = "./" ]; then + if git rev-parse --is-inside-work-tree >/dev/null 2>&1; then + git config --local core.hooksPath || + git config --local core.hooksPath ./.githooks + fi + fi + + if [ -n "$ZKSTACKUP_BRANCH" ] || [ -n "$ZKSTACKUP_COMMIT" ] || [ -n "$ZKSTACKUP_VERSION" ] || [ -n "$ZKSTACKUP_REPO" ]; then + warn "Ignoring --repo, --branch, --commit and --version arguments when installing from local path" + fi + + say "Installing ZK Stack CLI from $ZKSTACKUP_PATH" + ensure cd "$ZKSTACKUP_PATH"/zkstack_cli + + for bin in "${BINS[@]}"; do + say "Installing $bin" + ensure cargo install --root $LOCAL_DIR --path ./crates/$bin --force + chmod +x "$BIN_DIR/$bin" + done +} + +install_from_repo() { + if [ -n "$ZKSTACKUP_PATH" ]; then + warn "Ignoring --path argument when installing from repository" + fi + + ZKSTACKUP_REPO=${ZKSTACKUP_REPO:-"matter-labs/zksync-era"} + + say "Installing ZK Stack CLI from $ZKSTACKUP_REPO" + + if [ -n "$ZKSTACKUP_VERSION" ]; then + if [ -n "$ZKSTACKUP_COMMIT" ] || [ -n "$ZKSTACKUP_BRANCH" ]; then + warn "Ignoring --commit and --branch arguments when installing by version" + fi + ensure cargo install --root $LOCAL_DIR --git "https://github.com/$ZKSTACKUP_REPO" --tag "zkstack_cli-v$ZKSTACKUP_VERSION" --locked "${BINS[@]}" --force + elif [ -n "$ZKSTACKUP_COMMIT" ]; then + if [ -n "$ZKSTACKUP_BRANCH" ]; then + warn "Ignoring --branch argument when installing by commit" + fi + ensure cargo install --root $LOCAL_DIR --git "https://github.com/$ZKSTACKUP_REPO" --rev "$ZKSTACKUP_COMMIT" --locked "${BINS[@]}" --force + elif [ -n "$ZKSTACKUP_BRANCH" ]; then + ensure cargo install --root $LOCAL_DIR --git "https://github.com/$ZKSTACKUP_REPO" --branch "$ZKSTACKUP_BRANCH" --locked "${BINS[@]}" --force + else + ensure cargo install --root $LOCAL_DIR --git "https://github.com/$ZKSTACKUP_REPO" --locked "${BINS[@]}" --force + fi +} + +add_bin_folder_to_path() { + if [[ ":$PATH:" == *":${BIN_DIR}:"* ]]; then + echo "found ${BIN_DIR} in PATH" + exit 0 + fi + + case $SHELL in + */zsh) + PROFILE="${ZDOTDIR-"$HOME"}/.zshenv" + ;; + */bash) + PROFILE="$HOME/.bashrc" + ;; + */fish) + PROFILE="$HOME/.config/fish/config.fish" + ;; + */ash) + PROFILE="$HOME/.profile" + ;; + *) + echo "could not detect shell, manually add ${BIN_DIR} to your PATH." + exit 1 + ;; + esac + + if [[ ! -f "$PROFILE" ]]; then + echo "Profile file $PROFILE does not exist, creating it." + touch "$PROFILE" + fi + + if [[ "$SHELL" == *"/fish"* ]]; then + echo -e "\n# Added by zkstackup\nfish_add_path -a $BIN_DIR" >>"$PROFILE" + echo "Added $BIN_DIR to PATH in $PROFILE using fish_add_path." + else + echo -e "\n# Added by zkstackup\nexport PATH=\"\$PATH:$BIN_DIR\"" >>"$PROFILE" + echo "Added $BIN_DIR to PATH in $PROFILE." + fi + + echo + echo "Added zkstack to PATH." + echo "Run 'source $PROFILE' or start a new terminal session to use zkstack." +} + +ensure() { + if ! 
"$@"; then + err "command failed: $*" + exit 1 + fi +} + +say() { + local action="${1%% *}" + local rest="${1#"$action" }" + + echo -e "\033[1;32m$action\033[0m $rest" +} + +success() { + echo -e "\033[1;32m$1\033[0m" +} + +warn() { + echo -e "\033[1;33mWARNING: $1\033[0m" +} + +err() { + echo -e "\033[1;31mERROR: $1\033[0m" >&2 +} + +zkstack_banner() { + printf ' + +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= + + ███████╗██╗ ██╗ ███████╗████████╗ █████╗ ██████╗██╗ ██╗ + ╚══███╔╝██║ ██╔╝ ██╔════╝╚══██╔══╝██╔══██╗██╔════╝██║ ██╔╝ + ███╔╝ █████╔╝ ███████╗ ██║ ███████║██║ █████╔╝ + ███╔╝ ██╔═██╗ ╚════██║ ██║ ██╔══██║██║ ██╔═██╗ + ███████╗██║ ██╗ ███████║ ██║ ██║ ██║╚██████╗██║ ██╗ + ╚══════╝╚═╝ ╚═╝ ╚══════╝ ╚═╝ ╚═╝ ╚═╝ ╚═════╝╚═╝ ╚═╝ + + + A Comprehensive Toolkit for Creating and Managing ZK Stack Chains + +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= + +Repo : https://github.com/matter-labs/zksync-era/ +Docs : https://docs.zksync.io/ +Contribute : https://github.com/matter-labs/zksync-era/pulls + +=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-= + +' +} + +main "$@"