diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml
index fb43133868b..aa7edefda8a 100644
--- a/.github/workflows/ci-core-reusable.yml
+++ b/.github/workflows/ci-core-reusable.yml
@@ -60,7 +60,7 @@ jobs:
- name: Init
run: |
ci_run run_retried rustup show
-
+
- name: Install zkstack
run: |
ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup
@@ -70,103 +70,103 @@ jobs:
run: |
ci_run zkstack dev contracts
- - name: Contracts unit tests
- run: ci_run yarn l1-contracts test
+# - name: Contracts unit tests
+# run: ci_run yarn l1-contracts test
- name: Rust unit tests
run: |
ci_run zkstack dev test rust
# Benchmarks are not tested by `cargo nextest` unless specified explicitly, and even then `criterion` harness is incompatible
# with how `cargo nextest` runs tests. Thus, we run criterion-based benchmark tests manually.
- ci_run cargo test --release -p vm-benchmark --bench oneshot --bench batch
-
- loadtest:
- runs-on: [ matterlabs-ci-runner-high-performance ]
- strategy:
- fail-fast: false
- matrix:
- vm_mode: [ "OLD", "NEW" ]
-
- steps:
- - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4
- with:
- submodules: "recursive"
- fetch-depth: 0
-
- - name: Setup environment
- run: |
- echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV
- echo $(pwd)/bin >> $GITHUB_PATH
- echo IN_DOCKER=1 >> .env
- echo "SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage" >> .env
- echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env
- echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env
- echo "RUSTC_WRAPPER=sccache" >> .env
-
- - name: Loadtest configuration
- run: |
- echo EXPECTED_TX_COUNT=${{ matrix.vm_mode == 'NEW' && 30000 || 16000 }} >> .env
- echo ACCOUNTS_AMOUNT="100" >> .env
- echo MAX_INFLIGHT_TXS="10" >> .env
- echo SYNC_API_REQUESTS_LIMIT="15" >> .env
- echo FAIL_FAST=true >> .env
- echo IN_DOCKER=1 >> .env
-
- - name: Start services
- run: |
- ci_localnet_up
- ci_run sccache --start-server
-
- - name: Init
- run: |
- ci_run git config --global --add safe.directory /usr/src/zksync
- ci_run git config --global --add safe.directory /usr/src/zksync/sdk/binaryen
- ci_run git config --global --add safe.directory /usr/src/zksync/contracts/system-contracts
- ci_run git config --global --add safe.directory /usr/src/zksync/contracts
-
- - name: Install zkstack
- run: |
- ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup || true
- ci_run zkstackup -g --local
-
-
- - name: Create and initialize legacy chain
- run: |
- ci_run zkstack chain create \
- --chain-name legacy \
- --chain-id sequential \
- --prover-mode no-proofs \
- --wallet-creation localhost \
- --l1-batch-commit-data-generator-mode rollup \
- --base-token-address 0x0000000000000000000000000000000000000001 \
- --base-token-price-nominator 1 \
- --base-token-price-denominator 1 \
- --set-as-default false \
- --ignore-prerequisites \
- --legacy-bridge
-
- ci_run zkstack ecosystem init --dev --verbose
- ci_run zkstack dev contracts --test-contracts
-
- # `sleep 60` because we need to wait until server added all the tokens
- - name: Run server
- run: |
- ci_run zkstack dev config-writer --path ${{ matrix.vm_mode == 'NEW' && 'etc/env/file_based/overrides/tests/loadtest-new.yaml' || 'etc/env/file_based/overrides/tests/loadtest-old.yaml' }} --chain legacy
- ci_run zkstack server --uring --chain=legacy --components api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads &>server.log &
- ci_run sleep 60
-
- - name: Perform loadtest
- run: ci_run zkstack dev t loadtest -v --chain=legacy
-
- - name: Show server.log logs
- if: always()
- run: ci_run cat server.log || true
-
- - name: Show sccache logs
- if: always()
- run: |
- ci_run sccache --show-stats || true
- ci_run cat /tmp/sccache_log.txt || true
+# ci_run cargo test --release -p vm-benchmark --bench oneshot --bench batch
+
+# loadtest:
+# runs-on: [ matterlabs-ci-runner-high-performance ]
+# strategy:
+# fail-fast: false
+# matrix:
+# vm_mode: [ "OLD", "NEW" ]
+#
+# steps:
+# - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4
+# with:
+# submodules: "recursive"
+# fetch-depth: 0
+#
+# - name: Setup environment
+# run: |
+# echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV
+# echo $(pwd)/bin >> $GITHUB_PATH
+# echo IN_DOCKER=1 >> .env
+# echo "SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage" >> .env
+# echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env
+# echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env
+# echo "RUSTC_WRAPPER=sccache" >> .env
+#
+# - name: Loadtest configuration
+# run: |
+# echo EXPECTED_TX_COUNT=${{ matrix.vm_mode == 'NEW' && 21000 || 16000 }} >> .env
+# echo ACCOUNTS_AMOUNT="100" >> .env
+# echo MAX_INFLIGHT_TXS="10" >> .env
+# echo SYNC_API_REQUESTS_LIMIT="15" >> .env
+# echo FAIL_FAST=true >> .env
+# echo IN_DOCKER=1 >> .env
+#
+# - name: Start services
+# run: |
+# ci_localnet_up
+# ci_run sccache --start-server
+#
+# - name: Init
+# run: |
+# ci_run git config --global --add safe.directory /usr/src/zksync
+# ci_run git config --global --add safe.directory /usr/src/zksync/sdk/binaryen
+# ci_run git config --global --add safe.directory /usr/src/zksync/contracts/system-contracts
+# ci_run git config --global --add safe.directory /usr/src/zksync/contracts
+#
+# - name: Install zkstack
+# run: |
+# ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup || true
+# ci_run zkstackup -g --local
+#
+#
+# - name: Create and initialize legacy chain
+# run: |
+# ci_run zkstack chain create \
+# --chain-name legacy \
+# --chain-id sequential \
+# --prover-mode no-proofs \
+# --wallet-creation localhost \
+# --l1-batch-commit-data-generator-mode rollup \
+# --base-token-address 0x0000000000000000000000000000000000000001 \
+# --base-token-price-nominator 1 \
+# --base-token-price-denominator 1 \
+# --set-as-default false \
+# --ignore-prerequisites \
+# --legacy-bridge
+#
+# ci_run zkstack ecosystem init --dev --verbose
+# ci_run zkstack dev contracts --test-contracts
+#
+# # `sleep 60` because we need to wait until the server has added all the tokens
+# - name: Run server
+# run: |
+# ci_run zkstack dev config-writer --path ${{ matrix.vm_mode == 'NEW' && 'etc/env/file_based/overrides/tests/loadtest-new.yaml' || 'etc/env/file_based/overrides/tests/loadtest-old.yaml' }} --chain legacy
+# ci_run zkstack server --uring --chain=legacy --components api,tree,eth,state_keeper,housekeeper,commitment_generator,vm_runner_protective_reads &>server.log &
+# ci_run sleep 60
+#
+# - name: Perform loadtest
+# run: ci_run zkstack dev t loadtest -v --chain=legacy
+#
+# - name: Show server.log logs
+# if: always()
+# run: ci_run cat server.log || true
+#
+# - name: Show sccache logs
+# if: always()
+# run: |
+# ci_run sccache --show-stats || true
+# ci_run cat /tmp/sccache_log.txt || true
integration-tests:
runs-on: [ matterlabs-ci-runner-ultra-performance ]
@@ -197,7 +197,7 @@ jobs:
run: |
ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup || true
ci_run zkstackup -g --local
-
+
- name: Create log directories
run: |
SERVER_LOGS_DIR=logs/server
@@ -288,40 +288,40 @@ jobs:
--server-db-name=zksync_server_localhost_custom_token \
--chain custom_token
- - name: Create and register chain with transactions signed "offline"
- run: |
- ci_run zkstack chain create \
- --chain-name offline_chain \
- --chain-id sequential \
- --prover-mode no-proofs \
- --wallet-creation localhost \
- --l1-batch-commit-data-generator-mode rollup \
- --base-token-address 0x0000000000000000000000000000000000000001 \
- --base-token-price-nominator 1 \
- --base-token-price-denominator 1 \
- --set-as-default false \
- --ignore-prerequisites
-
- ci_run zkstack chain build-transactions --chain offline_chain --l1-rpc-url http://127.0.0.1:8545
-
- governor_pk=$(awk '/governor:/ {flag=1} flag && /private_key:/ {print $2; exit}' ./configs/wallets.yaml)
-
- ci_run zkstack dev send-transactions \
- --file ./transactions/chain/offline_chain/register-hyperchain-txns.json \
- --l1-rpc-url http://127.0.0.1:8545 \
- --private-key $governor_pk
-
- bridge_hub=$(awk '/bridgehub_proxy_addr/ {print $2}' ./configs/contracts.yaml)
- chain_id=$(awk '/chain_id:/ {print $2}' ./chains/offline_chain/ZkStack.yaml)
-
- hyperchain_output=$(ci_run cast call $bridge_hub "getHyperchain(uint256)" $chain_id)
-
- if [[ $hyperchain_output == 0x* && ${#hyperchain_output} -eq 66 ]]; then
- echo "Chain successfully registered: $hyperchain_output"
- else
- echo "Failed to register chain: $hyperchain_output"
- exit 1
- fi
+# - name: Create and register chain with transactions signed "offline"
+# run: |
+# ci_run zkstack chain create \
+# --chain-name offline_chain \
+# --chain-id sequential \
+# --prover-mode no-proofs \
+# --wallet-creation localhost \
+# --l1-batch-commit-data-generator-mode rollup \
+# --base-token-address 0x0000000000000000000000000000000000000001 \
+# --base-token-price-nominator 1 \
+# --base-token-price-denominator 1 \
+# --set-as-default false \
+# --ignore-prerequisites
+#
+# ci_run zkstack chain build-transactions --chain offline_chain --l1-rpc-url http://127.0.0.1:8545
+#
+# governor_pk=$(awk '/governor:/ {flag=1} flag && /private_key:/ {print $2; exit}' ./configs/wallets.yaml)
+#
+# ci_run zkstack dev send-transactions \
+# --file ./transactions/chain/offline_chain/register-hyperchain-txns.json \
+# --l1-rpc-url http://127.0.0.1:8545 \
+# --private-key $governor_pk
+#
+# bridge_hub=$(awk '/bridgehub_proxy_addr/ {print $2}' ./configs/contracts.yaml)
+# chain_id=$(awk '/chain_id:/ {print $2}' ./chains/offline_chain/ZkStack.yaml)
+#
+# hyperchain_output=$(ci_run cast call $bridge_hub "getHyperchain(uint256)" $chain_id)
+#
+# if [[ $hyperchain_output == 0x* && ${#hyperchain_output} -eq 66 ]]; then
+# echo "Chain successfully registered: $hyperchain_output"
+# else
+# echo "Failed to register chain: $hyperchain_output"
+# exit 1
+# fi
- name: Create and initialize Consensus chain
run: |
@@ -349,6 +349,49 @@ jobs:
CHAINS="era,validium,custom_token,consensus"
echo "CHAINS=$CHAINS" >> $GITHUB_ENV
+ - name: Initialize gateway chain
+ run: |
+ ci_run zkstack chain create \
+ --chain-name gateway \
+ --chain-id 505 \
+ --prover-mode no-proofs \
+ --wallet-creation localhost \
+ --l1-batch-commit-data-generator-mode rollup \
+ --base-token-address 0x0000000000000000000000000000000000000001 \
+ --base-token-price-nominator 1 \
+ --base-token-price-denominator 1 \
+ --set-as-default false \
+ --ignore-prerequisites
+
+ ci_run zkstack chain init \
+ --deploy-paymaster \
+ --l1-rpc-url=http://localhost:8545 \
+ --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \
+ --server-db-name=zksync_server_localhost_gateway \
+ --chain gateway
+
+ ci_run zkstack chain convert-to-gateway --chain gateway --ignore-prerequisites
+
+ - name: Run gateway
+ run: |
+ ci_run zkstack server --ignore-prerequisites --chain gateway &> ${{ env.SERVER_LOGS_DIR }}/gateway.log &
+ ci_run sleep 5
+
+ - name: Migrate chains to gateway
+ run: |
+ ci_run zkstack chain migrate-to-gateway --chain era --gateway-chain-name gateway
+ ci_run zkstack chain migrate-to-gateway --chain validium --gateway-chain-name gateway
+ ci_run zkstack chain migrate-to-gateway --chain custom_token --gateway-chain-name gateway
+ ci_run zkstack chain migrate-to-gateway --chain consensus --gateway-chain-name gateway
+
+ - name: Migrate back era
+ run: |
+ ci_run zkstack chain migrate-from-gateway --chain era --gateway-chain-name gateway
+
+ - name: Migrate to gateway again
+ run: |
+ ci_run zkstack chain migrate-to-gateway --chain era --gateway-chain-name gateway
+
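Note: the `Run gateway` step above gives the server only a fixed `sleep 5` before the migrations start. A more robust sketch would poll the gateway chain's RPC until it answers; the URL below is an assumption (the actual HTTP port is written into the chain's general.yaml):

    # Hypothetical readiness probe: wait up to ~60s for the gateway RPC to answer.
    for _ in $(seq 1 30); do
      curl -fs -X POST -H 'Content-Type: application/json' \
        -d '{"jsonrpc":"2.0","id":1,"method":"eth_chainId","params":[]}' \
        http://localhost:3050 >/dev/null && break
      sleep 2
    done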
- name: Build test dependencies
run: |
ci_run zkstack dev test build
@@ -381,61 +424,61 @@ jobs:
run: |
ci_run ./bin/run_on_all_chains.sh "zkstack dev test integration --no-deps --ignore-prerequisites" ${{ env.CHAINS }} ${{ env.INTEGRATION_TESTS_LOGS_DIR }}
- - name: Init external nodes
- run: |
- ci_run zkstack external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \
- --db-name=zksync_en_localhost_era_rollup --l1-rpc-url=http://localhost:8545 --chain era
- ci_run zkstack external-node init --ignore-prerequisites --chain era
-
- ci_run zkstack external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \
- --db-name=zksync_en_localhost_era_validium1 --l1-rpc-url=http://localhost:8545 --chain validium
- ci_run zkstack external-node init --ignore-prerequisites --chain validium
-
- ci_run zkstack external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \
- --db-name=zksync_en_localhost_era_custom_token --l1-rpc-url=http://localhost:8545 --chain custom_token
- ci_run zkstack external-node init --ignore-prerequisites --chain custom_token
-
- ci_run zkstack external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \
- --db-name=zksync_en_localhost_era_consensus --l1-rpc-url=http://localhost:8545 --chain consensus
- ci_run zkstack external-node init --ignore-prerequisites --chain consensus
-
- - name: Run recovery tests (from snapshot)
- run: |
- ci_run ./bin/run_on_all_chains.sh "zkstack dev test recovery --snapshot --no-deps --ignore-prerequisites --verbose" ${{ env.CHAINS }} ${{ env.INTEGRATION_TESTS_LOGS_DIR }}
-
- - name: Run recovery tests (from genesis)
- run: |
- ci_run ./bin/run_on_all_chains.sh "zkstack dev test recovery --no-deps --no-kill --ignore-prerequisites --verbose" ${{ env.CHAINS }} ${{ env.INTEGRATION_TESTS_LOGS_DIR }}
-
- - name: Run external node server
- run: |
- ci_run zkstack external-node run --ignore-prerequisites --chain era &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/rollup.log &
- ci_run zkstack external-node run --ignore-prerequisites --chain validium &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/validium.log &
- ci_run zkstack external-node run --ignore-prerequisites --chain custom_token &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/custom_token.log &
- ci_run zkstack external-node run --ignore-prerequisites --chain consensus --enable-consensus &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/consensus.log &
-
- - name: Run integration tests en
- run: |
- ci_run ./bin/run_on_all_chains.sh "zkstack dev test integration --no-deps --ignore-prerequisites --external-node" ${{ env.CHAINS }} ${{ env.INTEGRATION_TESTS_LOGS_DIR }}
-
- - name: Fee projection tests
- run: |
- ci_run killall -INT zksync_server || true
- ci_run ./bin/run_on_all_chains.sh "zkstack dev test fees --no-deps --no-kill" ${{ env.CHAINS }} ${{ env.FEES_LOGS_DIR }}
-
- - name: Run revert tests
- run: |
- ci_run killall -INT zksync_server || true
- ci_run killall -INT zksync_external_node || true
-
- ci_run ./bin/run_on_all_chains.sh "zkstack dev test revert --no-deps --external-node --no-kill --ignore-prerequisites" ${{ env.CHAINS }} ${{ env.INTEGRATION_TESTS_LOGS_DIR }}
-
- # Upgrade tests should run last, because as soon as they
- # finish the bootloader will be different
- # TODO make upgrade tests safe to run multiple times
- - name: Run upgrade test
- run: |
- ci_run zkstack dev test upgrade --no-deps --chain era
+# - name: Init external nodes
+# run: |
+# ci_run zkstack external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \
+# --db-name=zksync_en_localhost_era_rollup --l1-rpc-url=http://localhost:8545 --chain era
+# ci_run zkstack external-node init --ignore-prerequisites --chain era
+#
+# ci_run zkstack external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \
+# --db-name=zksync_en_localhost_era_validium1 --l1-rpc-url=http://localhost:8545 --chain validium
+# ci_run zkstack external-node init --ignore-prerequisites --chain validium
+#
+# ci_run zkstack external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \
+# --db-name=zksync_en_localhost_era_custom_token --l1-rpc-url=http://localhost:8545 --chain custom_token
+# ci_run zkstack external-node init --ignore-prerequisites --chain custom_token
+#
+# ci_run zkstack external-node configs --db-url=postgres://postgres:notsecurepassword@localhost:5432 \
+# --db-name=zksync_en_localhost_era_consensus --l1-rpc-url=http://localhost:8545 --chain consensus
+# ci_run zkstack external-node init --ignore-prerequisites --chain consensus
+#
+# - name: Run recovery tests (from snapshot)
+# run: |
+# ci_run ./bin/run_on_all_chains.sh "zkstack dev test recovery --snapshot --no-deps --ignore-prerequisites --verbose" ${{ env.CHAINS }} ${{ env.INTEGRATION_TESTS_LOGS_DIR }}
+#
+# - name: Run recovery tests (from genesis)
+# run: |
+# ci_run ./bin/run_on_all_chains.sh "zkstack dev test recovery --no-deps --no-kill --ignore-prerequisites --verbose" ${{ env.CHAINS }} ${{ env.INTEGRATION_TESTS_LOGS_DIR }}
+#
+# - name: Run external node server
+# run: |
+# ci_run zkstack external-node run --ignore-prerequisites --chain era &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/rollup.log &
+# ci_run zkstack external-node run --ignore-prerequisites --chain validium &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/validium.log &
+# ci_run zkstack external-node run --ignore-prerequisites --chain custom_token &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/custom_token.log &
+# ci_run zkstack external-node run --ignore-prerequisites --chain consensus --enable-consensus &> ${{ env.EXTERNAL_NODE_LOGS_DIR }}/consensus.log &
+#
+# - name: Run integration tests en
+# run: |
+# ci_run ./bin/run_on_all_chains.sh "zkstack dev test integration --no-deps --ignore-prerequisites --external-node" ${{ env.CHAINS }} ${{ env.INTEGRATION_TESTS_LOGS_DIR }}
+#
+# - name: Fee projection tests
+# run: |
+# ci_run killall -INT zksync_server || true
+# ci_run ./bin/run_on_all_chains.sh "zkstack dev test fees --no-deps --no-kill" ${{ env.CHAINS }} ${{ env.FEES_LOGS_DIR }}
+#
+# - name: Run revert tests
+# run: |
+# ci_run killall -INT zksync_server || true
+# ci_run killall -INT zksync_external_node || true
+#
+# ci_run ./bin/run_on_all_chains.sh "zkstack dev test revert --no-deps --external-node --no-kill --ignore-prerequisites" ${{ env.CHAINS }} ${{ env.INTEGRATION_TESTS_LOGS_DIR }}
+#
+# # Upgrade tests should run last, because as soon as they
+# # finish the bootloader will be different
+# # TODO make upgrade tests safe to run multiple times
+# - name: Run upgrade test
+# run: |
+# ci_run zkstack dev test upgrade --no-deps --chain era
- name: Upload logs
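A note on the disabled offline-registration check above: `getHyperchain(uint256)` returns a single ABI-encoded 32-byte word, so a successful `cast call` prints exactly 66 characters ("0x" plus 64 hex digits). The step only validates that shape; the sketch below additionally rejects the all-zero word (an unregistered chain), assuming Foundry's `cast` and the `$bridge_hub`/`$chain_id` variables extracted as in the step:

    output=$(cast call "$bridge_hub" "getHyperchain(uint256)" "$chain_id")
    zero_word=0x$(printf '0%.0s' {1..64})
    # A well-formed word is "0x" + 64 hex chars = 66 chars; a nonzero word
    # carries the chain's diamond proxy address in its low 20 bytes.
    if [[ $output == 0x* && ${#output} -eq 66 && $output != "$zero_word" ]]; then
      echo "registered at 0x${output:26}"
    else
      echo "not registered: $output"; exit 1
    fi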
diff --git a/.github/workflows/ci-prover-e2e.yml b/.github/workflows/ci-prover-e2e.yml
index b0b9caf888f..e69945eaaf2 100644
--- a/.github/workflows/ci-prover-e2e.yml
+++ b/.github/workflows/ci-prover-e2e.yml
@@ -26,101 +26,101 @@ jobs:
mkdir -p prover_logs
- - name: Start services
- run: |
- run_retried docker-compose -f ${RUNNER_COMPOSE_FILE} pull
- docker-compose -f ${RUNNER_COMPOSE_FILE} --profile runner up -d --wait
- ci_run sccache --start-server
-
- - name: Init
- run: |
- ci_run git config --global --add safe.directory "*"
- ci_run chmod -R +x ./bin
-
- ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup || true
- ci_run zkstackup -g --local
-
- ci_run zkstack chain create \
- --chain-name proving_chain \
- --chain-id sequential \
- --prover-mode gpu \
- --wallet-creation localhost \
- --l1-batch-commit-data-generator-mode rollup \
- --base-token-address 0x0000000000000000000000000000000000000001 \
- --base-token-price-nominator 1 \
- --base-token-price-denominator 1 \
- --set-as-default true \
- --ignore-prerequisites
-
- ci_run zkstack ecosystem init --dev --verbose
- ci_run zkstack prover init --dev --verbose
-
- echo "URL=$(grep "http_url" ./chains/proving_chain/configs/general.yaml | awk '{ print $2 }')" >> $GITHUB_ENV
- - name: Build prover binaries
- run: |
- ci_run cargo build --release --workspace --manifest-path=prover/Cargo.toml
- - name: Prepare prover subsystem
- run: |
- ci_run zkstack prover init-bellman-cuda --clone --verbose
- ci_run zkstack prover setup-keys --mode=download --region=us --verbose
- - name: Run server
- run: |
- ci_run zkstack server --uring --chain=proving_chain --components=api,tree,eth,state_keeper,commitment_generator,proof_data_handler,vm_runner_protective_reads,vm_runner_bwip &>prover_logs/server.log &
- - name: Run Gateway
- run: |
- ci_run zkstack prover run --component=gateway --docker=false &>prover_logs/gateway.log &
- - name: Run Prover Job Monitor
- run: |
- ci_run zkstack prover run --component=prover-job-monitor --docker=false &>prover_logs/prover-job-monitor.log &
- - name: Wait for batch to be passed through gateway
- env:
- DATABASE_URL: postgres://postgres:notsecurepassword@localhost:5432/zksync_prover_localhost_proving_chain
- BATCH_NUMBER: 1
- INTERVAL: 30
- TIMEOUT: 300
- run: |
- PASSED_ENV_VARS="DATABASE_URL,BATCH_NUMBER,INTERVAL,TIMEOUT" \
- ci_run ./bin/prover_checkers/batch_availability_checker
- - name: Run Witness Generator
- run: |
- ci_run zkstack prover run --component=witness-generator --round=all-rounds --docker=false &>prover_logs/witness-generator.log &
- - name: Run Circuit Prover
- run: |
- ci_run zkstack prover run --component=circuit-prover --witness-vector-generator-count=10 --docker=false &>prover_logs/circuit_prover.log &
- - name: Wait for prover jobs to finish
- env:
- DATABASE_URL: postgres://postgres:notsecurepassword@localhost:5432/zksync_prover_localhost_proving_chain
- BATCH_NUMBER: 1
- INTERVAL: 30
- TIMEOUT: 1200
- run: |
- PASSED_ENV_VARS="DATABASE_URL,BATCH_NUMBER,INTERVAL,TIMEOUT" \
- ci_run ./bin/prover_checkers/prover_jobs_status_checker
-
- - name: Kill prover & start compressor
- run: |
- sudo ./bin/prover_checkers/kill_prover
-
- ci_run zkstack prover run --component=compressor --docker=false &>prover_logs/compressor.log &
- - name: Wait for batch to be executed on L1
- env:
- DATABASE_URL: postgres://postgres:notsecurepassword@localhost:5432/zksync_prover_localhost_proving_chain
- BATCH_NUMBER: 1
- INTERVAL: 30
- TIMEOUT: 600
- run: |
- PASSED_ENV_VARS="BATCH_NUMBER,DATABASE_URL,URL,INTERVAL,TIMEOUT" \
- ci_run ./bin/prover_checkers/batch_l1_status_checker
-
- - name: Upload logs
- uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0
- if: always()
- with:
- name: prover_logs
- path: prover_logs
-
- - name: Show sccache logs
- if: always()
- run: |
- ci_run sccache --show-stats || true
- ci_run cat /tmp/sccache_log.txt || true
+# - name: Start services
+# run: |
+# run_retried docker-compose -f ${RUNNER_COMPOSE_FILE} pull
+# docker-compose -f ${RUNNER_COMPOSE_FILE} --profile runner up -d --wait
+# ci_run sccache --start-server
+#
+# - name: Init
+# run: |
+# ci_run git config --global --add safe.directory "*"
+# ci_run chmod -R +x ./bin
+#
+# ci_run ./zkstack_cli/zkstackup/install -g --path ./zkstack_cli/zkstackup/zkstackup || true
+# ci_run zkstackup -g --local
+#
+# ci_run zkstack chain create \
+# --chain-name proving_chain \
+# --chain-id sequential \
+# --prover-mode gpu \
+# --wallet-creation localhost \
+# --l1-batch-commit-data-generator-mode rollup \
+# --base-token-address 0x0000000000000000000000000000000000000001 \
+# --base-token-price-nominator 1 \
+# --base-token-price-denominator 1 \
+# --set-as-default true \
+# --ignore-prerequisites
+#
+# ci_run zkstack ecosystem init --dev --verbose
+# ci_run zkstack prover init --dev --verbose
+#
+# echo "URL=$(grep "http_url" ./chains/proving_chain/configs/general.yaml | awk '{ print $2 }')" >> $GITHUB_ENV
+# - name: Build prover binaries
+# run: |
+# ci_run cargo build --release --workspace --manifest-path=prover/Cargo.toml
+# - name: Prepare prover subsystem
+# run: |
+# ci_run zkstack prover init-bellman-cuda --clone --verbose
+# ci_run zkstack prover setup-keys --mode=download --region=us --verbose
+# - name: Run server
+# run: |
+# ci_run zkstack server --uring --chain=proving_chain --components=api,tree,eth,state_keeper,commitment_generator,proof_data_handler,vm_runner_protective_reads,vm_runner_bwip &>prover_logs/server.log &
+# - name: Run Gateway
+# run: |
+# ci_run zkstack prover run --component=gateway --docker=false &>prover_logs/gateway.log &
+# - name: Run Prover Job Monitor
+# run: |
+# ci_run zkstack prover run --component=prover-job-monitor --docker=false &>prover_logs/prover-job-monitor.log &
+# - name: Wait for batch to be passed through gateway
+# env:
+# DATABASE_URL: postgres://postgres:notsecurepassword@localhost:5432/zksync_prover_localhost_proving_chain
+# BATCH_NUMBER: 1
+# INTERVAL: 30
+# TIMEOUT: 300
+# run: |
+# PASSED_ENV_VARS="DATABASE_URL,BATCH_NUMBER,INTERVAL,TIMEOUT" \
+# ci_run ./bin/prover_checkers/batch_availability_checker
+# - name: Run Witness Generator
+# run: |
+# ci_run zkstack prover run --component=witness-generator --round=all-rounds --docker=false &>prover_logs/witness-generator.log &
+# - name: Run Circuit Prover
+# run: |
+# ci_run zkstack prover run --component=circuit-prover --witness-vector-generator-count=10 --docker=false &>prover_logs/circuit_prover.log &
+# - name: Wait for prover jobs to finish
+# env:
+# DATABASE_URL: postgres://postgres:notsecurepassword@localhost:5432/zksync_prover_localhost_proving_chain
+# BATCH_NUMBER: 1
+# INTERVAL: 30
+# TIMEOUT: 1200
+# run: |
+# PASSED_ENV_VARS="DATABASE_URL,BATCH_NUMBER,INTERVAL,TIMEOUT" \
+# ci_run ./bin/prover_checkers/prover_jobs_status_checker
+#
+# - name: Kill prover & start compressor
+# run: |
+# sudo ./bin/prover_checkers/kill_prover
+#
+# ci_run zkstack prover run --component=compressor --docker=false &>prover_logs/compressor.log &
+# - name: Wait for batch to be executed on L1
+# env:
+# DATABASE_URL: postgres://postgres:notsecurepassword@localhost:5432/zksync_prover_localhost_proving_chain
+# BATCH_NUMBER: 1
+# INTERVAL: 30
+# TIMEOUT: 600
+# run: |
+# PASSED_ENV_VARS="BATCH_NUMBER,DATABASE_URL,URL,INTERVAL,TIMEOUT" \
+# ci_run ./bin/prover_checkers/batch_l1_status_checker
+#
+# - name: Upload logs
+# uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0
+# if: always()
+# with:
+# name: prover_logs
+# path: prover_logs
+#
+# - name: Show sccache logs
+# if: always()
+# run: |
+# ci_run sccache --show-stats || true
+# ci_run cat /tmp/sccache_log.txt || true
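All of the disabled "Wait for ..." steps above share one contract: a checker script from ./bin/prover_checkers polls for batch $BATCH_NUMBER every $INTERVAL seconds and gives up once $TIMEOUT is exceeded. A sketch of that loop, with a hypothetical `is_done` standing in for the real query against $DATABASE_URL or $URL:

    elapsed=0
    until is_done "$BATCH_NUMBER"; do        # hypothetical condition check
      sleep "$INTERVAL"
      elapsed=$((elapsed + INTERVAL))
      if [ "$elapsed" -ge "$TIMEOUT" ]; then
        echo "batch $BATCH_NUMBER not ready after ${TIMEOUT}s" >&2
        exit 1
      fi
    done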
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 2f29fe98f0e..e4bf1596d48 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -7,6 +7,7 @@ on:
branches:
- staging
- trying
+ - sync-layer-stable # added so CI runs on this branch regardless of any merge conflicts
- '!release-please--branches--**'
concurrency:
@@ -90,7 +91,7 @@ jobs:
ci-for-core:
name: CI for Core Components
needs: changed_files
- if: ${{ (needs.changed_files.outputs.core == 'true' || needs.changed_files.outputs.all == 'true') && !contains(github.ref_name, 'release-please--branches') }}
+ if: ${{ (needs.changed_files.outputs.core == 'true' || needs.changed_files.outputs.zk_toolbox == 'true' || needs.changed_files.outputs.all == 'true') && !contains(github.ref_name, 'release-please--branches') }}
uses: ./.github/workflows/ci-core-reusable.yml
ci-for-prover:
@@ -118,17 +119,17 @@ jobs:
name: CI for Common Components (prover or core)
uses: ./.github/workflows/ci-common-reusable.yml
- build-core-images:
- name: Build core images
- needs: changed_files
- if: ${{ (needs.changed_files.outputs.core == 'true' || needs.changed_files.outputs.all == 'true') && !contains(github.ref_name, 'release-please--branches') }}
- uses: ./.github/workflows/new-build-core-template.yml
- with:
- image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }}
- action: "build"
- secrets:
- DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }}
- DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }}
+ # build-core-images:
+ # name: Build core images
+ # needs: changed_files
+ # if: ${{ (needs.changed_files.outputs.core == 'true' || needs.changed_files.outputs.all == 'true') && !contains(github.ref_name, 'release-please--branches') }}
+ # uses: ./.github/workflows/new-build-core-template.yml
+ # with:
+ # image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }}
+ # action: "build"
+ # secrets:
+ # DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }}
+ # DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }}
build-tee-prover-images:
name: Build TEE Prover images
@@ -186,7 +187,8 @@ jobs:
name: Github Status Check
runs-on: ubuntu-latest
if: always() && !cancelled()
- needs: [ ci-for-core-lint, ci-for-common, ci-for-core, ci-for-prover, ci-for-docs, build-core-images, build-contract-verifier, build-prover-images ]
+ # TODO restore build-core-images
+ needs: [ ci-for-core-lint, ci-for-common, ci-for-core, ci-for-prover, ci-for-docs, build-contract-verifier, build-prover-images ]
steps:
- name: Status
run: |
diff --git a/.github/workflows/new-build-contract-verifier-template.yml b/.github/workflows/new-build-contract-verifier-template.yml
index 9b23cda6f02..0d6e2049ad0 100644
--- a/.github/workflows/new-build-contract-verifier-template.yml
+++ b/.github/workflows/new-build-contract-verifier-template.yml
@@ -144,129 +144,129 @@ jobs:
path: |
./contracts
- build-images:
- name: Build and Push Docker Images
- needs: prepare-contracts
- runs-on: ${{ fromJSON('["matterlabs-ci-runner-high-performance", "matterlabs-ci-runner-arm"]')[contains(matrix.platforms, 'arm')] }}
- strategy:
- matrix:
- components:
- - contract-verifier
- - verified-sources-fetcher
- platforms:
- - linux/amd64
-
- steps:
- - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- with:
- submodules: "recursive"
-
- - name: Set up Docker Buildx
- uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1
-
- - name: Setup env
- shell: bash
- run: |
- echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV
- echo CI=1 >> $GITHUB_ENV
- echo $(pwd)/bin >> $GITHUB_PATH
- echo CI=1 >> .env
- echo IN_DOCKER=1 >> .env
-
- - name: Download setup key
- shell: bash
- run: |
- run_retried curl -LO https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2\^26.key
-
- - name: Set env vars
- shell: bash
- run: |
- echo PLATFORM=$(echo ${{ matrix.platforms }} | tr '/' '-') >> $GITHUB_ENV
- echo IMAGE_TAG_SHA=$(git rev-parse --short HEAD) >> $GITHUB_ENV
- # Support for custom tag suffix
- if [ -n "${{ inputs.image_tag_suffix }}" ]; then
- echo IMAGE_TAG_SHA_TS="${{ inputs.image_tag_suffix }}" >> $GITHUB_ENV
- else
- echo IMAGE_TAG_SHA_TS=$(git rev-parse --short HEAD)-$(date +%s) >> $GITHUB_ENV
- fi
-
- - name: Download contracts
- uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
- with:
- name: contacts-verifier
- path: |
- ./contracts
-
- - name: login to Docker registries
- if: ${{ inputs.action == 'push' }}
- shell: bash
- run: |
- docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }}
- gcloud auth configure-docker us-docker.pkg.dev -q
-
- - name: Build and push
- uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0
- with:
- context: .
- push: ${{ inputs.action == 'push' }}
- file: docker/${{ matrix.components }}/Dockerfile
- build-args: |
- SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage
- SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com
- SCCACHE_GCS_RW_MODE=READ_WRITE
- RUSTC_WRAPPER=sccache
- tags: |
- us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:latest
- matterlabs/${{ matrix.components }}:latest
- us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:latest2.0
- matterlabs/${{ matrix.components }}:latest2.0
- us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA }}
- matterlabs/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA }}
- us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA }}
- matterlabs/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA }}
- us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }}
- matterlabs/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }}
- us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }}
- matterlabs/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }}
- us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }}
- matterlabs/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }}-${{ env.PLATFORM }}
- us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }}
- matterlabs/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }}-${{ env.PLATFORM }}
-
- create_manifest:
- name: Create release manifest
- runs-on: matterlabs-ci-runner
- needs: build-images
- if: ${{ inputs.action == 'push' }}
- strategy:
- matrix:
- component:
- - name: contract-verifier
- platform: linux/amd64
- - name: verified-sources-fetcher
- platform: linux/amd64
- env:
- IMAGE_TAG_SUFFIX: ${{ inputs.image_tag_suffix }}
- steps:
- - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4
-
- - name: login to Docker registries
- run: |
- docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }}
- gcloud auth configure-docker us-docker.pkg.dev -q
-
- - name: Create Docker manifest
- run: |
- docker_repositories=("matterlabs/${{ matrix.component.name }}" "us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component.name }}")
- platforms=${{ matrix.component.platform }}
- for repo in "${docker_repositories[@]}"; do
- platform_tags=""
- for platform in ${platforms//,/ }; do
- platform=$(echo $platform | tr '/' '-')
- platform_tags+=" --amend ${repo}:${IMAGE_TAG_SUFFIX}-${platform}"
- done
- for manifest in "${repo}:${IMAGE_TAG_SUFFIX}" "${repo}:2.0-${IMAGE_TAG_SUFFIX}" "${repo}:latest" "${repo}:latest2.0"; do
- docker manifest create ${manifest} ${platform_tags}
- docker manifest push ${manifest}
- done
- done
+# build-images:
+# name: Build and Push Docker Images
+# needs: prepare-contracts
+# runs-on: ${{ fromJSON('["matterlabs-ci-runner-high-performance", "matterlabs-ci-runner-arm"]')[contains(matrix.platforms, 'arm')] }}
+# strategy:
+# matrix:
+# components:
+# - contract-verifier
+# - verified-sources-fetcher
+# platforms:
+# - linux/amd64
+#
+# steps:
+# - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+# with:
+# submodules: "recursive"
+#
+# - name: Set up Docker Buildx
+# uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1
+#
+# - name: Setup env
+# shell: bash
+# run: |
+# echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV
+# echo CI=1 >> $GITHUB_ENV
+# echo $(pwd)/bin >> $GITHUB_PATH
+# echo CI=1 >> .env
+# echo IN_DOCKER=1 >> .env
+#
+# - name: Download setup key
+# shell: bash
+# run: |
+# run_retried curl -LO https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2\^26.key
+#
+# - name: Set env vars
+# shell: bash
+# run: |
+# echo PLATFORM=$(echo ${{ matrix.platforms }} | tr '/' '-') >> $GITHUB_ENV
+# echo IMAGE_TAG_SHA=$(git rev-parse --short HEAD) >> $GITHUB_ENV
+# # Support for custom tag suffix
+# if [ -n "${{ inputs.image_tag_suffix }}" ]; then
+# echo IMAGE_TAG_SHA_TS="${{ inputs.image_tag_suffix }}" >> $GITHUB_ENV
+# else
+# echo IMAGE_TAG_SHA_TS=$(git rev-parse --short HEAD)-$(date +%s) >> $GITHUB_ENV
+# fi
+#
+# - name: Download contracts
+# uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8
+# with:
+# name: contacts-verifier
+# path: |
+# ./contracts
+#
+# - name: login to Docker registries
+# if: ${{ inputs.action == 'push' }}
+# shell: bash
+# run: |
+# docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }}
+# gcloud auth configure-docker us-docker.pkg.dev -q
+#
+# - name: Build and push
+# uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0
+# with:
+# context: .
+# push: ${{ inputs.action == 'push' }}
+# file: docker/${{ matrix.components }}/Dockerfile
+# build-args: |
+# SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage
+# SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com
+# SCCACHE_GCS_RW_MODE=READ_WRITE
+# RUSTC_WRAPPER=sccache
+# tags: |
+# us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:latest
+# matterlabs/${{ matrix.components }}:latest
+# us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:latest2.0
+# matterlabs/${{ matrix.components }}:latest2.0
+# us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA }}
+# matterlabs/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA }}
+# us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA }}
+# matterlabs/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA }}
+# us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }}
+# matterlabs/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }}
+# us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }}
+# matterlabs/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }}
+# us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }}-${{ env.PLATFORM }}
+# matterlabs/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }}-${{ env.PLATFORM }}
+# us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }}-${{ env.PLATFORM }}
+# matterlabs/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }}-${{ env.PLATFORM }}
+#
+# create_manifest:
+# name: Create release manifest
+# runs-on: matterlabs-ci-runner
+# needs: build-images
+# if: ${{ inputs.action == 'push' }}
+# strategy:
+# matrix:
+# component:
+# - name: contract-verifier
+# platform: linux/amd64
+# - name: verified-sources-fetcher
+# platform: linux/amd64
+# env:
+# IMAGE_TAG_SUFFIX: ${{ inputs.image_tag_suffix }}
+# steps:
+# - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4
+#
+# - name: login to Docker registries
+# run: |
+# docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }}
+# gcloud auth configure-docker us-docker.pkg.dev -q
+#
+# - name: Create Docker manifest
+# run: |
+# docker_repositories=("matterlabs/${{ matrix.component.name }}" "us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component.name }}")
+# platforms=${{ matrix.component.platform }}
+# for repo in "${docker_repositories[@]}"; do
+# platform_tags=""
+# for platform in ${platforms//,/ }; do
+# platform=$(echo $platform | tr '/' '-')
+# platform_tags+=" --amend ${repo}:${IMAGE_TAG_SUFFIX}-${platform}"
+# done
+# for manifest in "${repo}:${IMAGE_TAG_SUFFIX}" "${repo}:2.0-${IMAGE_TAG_SUFFIX}" "${repo}:latest" "${repo}:latest2.0"; do
+# docker manifest create ${manifest} ${platform_tags}
+# docker manifest push ${manifest}
+# done
+# done
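For concreteness, one iteration of the disabled manifest loop expands roughly as below (hypothetical suffix abc1234-1700000000, a single linux/amd64 platform, the contract-verifier component); the same pair of commands repeats for the 2.0-, latest and latest2.0 manifests and for the us-docker.pkg.dev repository:

    docker manifest create matterlabs/contract-verifier:abc1234-1700000000 \
      --amend matterlabs/contract-verifier:abc1234-1700000000-linux-amd64
    docker manifest push matterlabs/contract-verifier:abc1234-1700000000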
diff --git a/.github/workflows/protobuf.yaml b/.github/workflows/protobuf.yaml
index 9c2c3418670..d533b183828 100644
--- a/.github/workflows/protobuf.yaml
+++ b/.github/workflows/protobuf.yaml
@@ -36,43 +36,43 @@ jobs:
- uses: mozilla-actions/sccache-action@89e9040de88b577a072e3760aaf59f585da083af # v0.0.5
# before
- - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4
- with:
- ref: ${{ env.BASE }}
- path: before
- fetch-depth: 0 # fetches all branches and tags, which is needed to compute the LCA.
- - name: checkout LCA
- run:
- git checkout $(git merge-base $BASE $HEAD)
- working-directory: ./before
- - name: compile before
- run: cargo check --all-targets
- working-directory: ./before/
- - name: build before.binpb
- run: >
- perl -ne 'print "$1\n" if /PROTOBUF_DESCRIPTOR="(.*)"/'
- `find ./before/target/debug/build/*/output`
- | xargs cat > ./before.binpb
+ # - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4
+ # with:
+ # ref: ${{ env.BASE }}
+ # path: before
+ # fetch-depth: 0 # fetches all branches and tags, which is needed to compute the LCA.
+ # - name: checkout LCA
+ # run:
+ # git checkout $(git merge-base $BASE $HEAD)
+ # working-directory: ./before
+ # - name: compile before
+ # run: cargo check --all-targets
+ # working-directory: ./before/
+ # - name: build before.binpb
+ # run: >
+ # perl -ne 'print "$1\n" if /PROTOBUF_DESCRIPTOR="(.*)"/'
+ # `find ./before/target/debug/build/*/output`
+ # | xargs cat > ./before.binpb
# after
- - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4
- with:
- ref: ${{ env.HEAD }}
- path: after
- - name: compile after
- run: cargo check --all-targets
- working-directory: ./after
- - name: build after.binpb
- run: >
- perl -ne 'print "$1\n" if /PROTOBUF_DESCRIPTOR="(.*)"/'
- `find ./after/target/debug/build/*/output`
- | xargs cat > ./after.binpb
+ # - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4
+ # with:
+ # ref: ${{ env.HEAD }}
+ # path: after
+ # - name: compile after
+ # run: cargo check --all-targets
+ # working-directory: ./after
+ # - name: build after.binpb
+ # run: >
+ # perl -ne 'print "$1\n" if /PROTOBUF_DESCRIPTOR="(.*)"/'
+ # `find ./after/target/debug/build/*/output`
+ # | xargs cat > ./after.binpb
- # compare
- - uses: bufbuild/buf-setup-action@54abbed4fe8d8d45173eca4798b0c39a53a7b658 # v1.39.0
- with:
- github_token: ${{ github.token }}
- - name: buf breaking
- run: >
- buf breaking './after.binpb' --against './before.binpb' --exclude-path 'zksync/config/experimental.proto'
- --config '{"version":"v1","breaking":{"use":["WIRE_JSON","WIRE"]}}' --error-format 'github-actions'
+ # # compare
+ # - uses: bufbuild/buf-setup-action@54abbed4fe8d8d45173eca4798b0c39a53a7b658 # v1.39.0
+ # with:
+ # github_token: ${{ github.token }}
+ # - name: buf breaking
+ # run: >
+ # buf breaking './after.binpb' --against './before.binpb' --exclude-path 'zksync/config/experimental.proto'
+ # --config '{"version":"v1","breaking":{"use":["WIRE_JSON","WIRE"]}}' --error-format 'github-actions'
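The disabled comparison hinges on each protobuf-generating crate's build script printing a PROTOBUF_DESCRIPTOR="<path>" line into its cargo `output` file; the perl one-liner harvests those paths and concatenates the descriptor sets into a single .binpb that `buf breaking` can diff. The extraction in isolation (same idiom as the disabled steps):

    perl -ne 'print "$1\n" if /PROTOBUF_DESCRIPTOR="(.*)"/' \
      $(find ./after/target/debug/build/*/output) \
      | xargs cat > ./after.binpb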
diff --git a/.github/workflows/vm-perf-comparison.yml b/.github/workflows/vm-perf-comparison.yml
index 3520419f133..f3d11c430eb 100644
--- a/.github/workflows/vm-perf-comparison.yml
+++ b/.github/workflows/vm-perf-comparison.yml
@@ -18,83 +18,83 @@ jobs:
fetch-depth: 0
ref: ${{ github.base_ref }}
- - name: fetch PR branch
- run: |
- git remote add pr_repo ${{ github.event.pull_request.head.repo.clone_url }}
- git fetch pr_repo ${{ github.event.pull_request.head.ref }}
-
- - name: fetch merge-base SHA
- id: merge_base
- run: echo "sha=$(git merge-base HEAD FETCH_HEAD)" >> $GITHUB_OUTPUT
-
- - name: checkout divergence point
- run: git checkout ${{ steps.merge_base.outputs.sha }} --recurse-submodules
-
- - name: setup-env
- run: |
- touch .env
- echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV
- echo $(pwd)/bin >> $GITHUB_PATH
- echo $(pwd)/zkstack_cli/zkstackup >> $GITHUB_PATH
- echo "SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage" >> .env
- echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env
- echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env
- echo "RUSTC_WRAPPER=sccache" >> .env
- # Set the minimum reported instruction count difference to reduce noise
- echo "BENCHMARK_DIFF_THRESHOLD_PERCENT=2" >> .env
-
- - name: init
- run: |
- run_retried docker compose pull zk
- docker compose up -d zk
-
- - name: run benchmarks on base branch
- shell: bash
- run: |
- ci_run zkstackup -g --local
- ci_run zkstack dev contracts --system-contracts
- ci_run cargo bench --package vm-benchmark --bench instructions -- --verbose || echo "Instructions benchmark is missing"
- ci_run cargo run --package vm-benchmark --release --bin instruction_counts | tee base-opcodes
-
- - name: checkout PR
- run: |
- git checkout --force FETCH_HEAD --recurse-submodules
-
- - name: run benchmarks on PR
- shell: bash
- id: comparison
- run: |
- ci_run zkstackup -g --local
- ci_run zkstack dev contracts --system-contracts
- ci_run cargo bench --package vm-benchmark --bench instructions -- --verbose
-
- ci_run cargo bench --package vm-benchmark --bench instructions -- --print > instructions.log 2>/dev/null
- # Output all lines from the benchmark result starting from the "## ..." comparison header.
- # Since the output spans multiple lines, we use a heredoc declaration.
- EOF=$(dd if=/dev/urandom bs=15 count=1 status=none | base64)
- echo "speedup<<$EOF" >> $GITHUB_OUTPUT
- sed -n '/^## /,$p' instructions.log >> $GITHUB_OUTPUT
- echo "$EOF" >> $GITHUB_OUTPUT
-
- ci_run cargo run --package vm-benchmark --release --bin instruction_counts -- --diff base-opcodes > opcodes.log
- echo "opcodes<<$EOF" >> $GITHUB_OUTPUT
- sed -n '/^## /,$p' opcodes.log >> $GITHUB_OUTPUT
- echo "$EOF" >> $GITHUB_OUTPUT
-
- - name: Comment on PR
- uses: thollander/actions-comment-pull-request@fabd468d3a1a0b97feee5f6b9e499eab0dd903f6 # v2.5.0
- if: steps.comparison.outputs.speedup != '' || steps.comparison.outputs.opcodes != ''
- with:
- message: |
- ${{ steps.comparison.outputs.speedup }}
- ${{ steps.comparison.outputs.opcodes }}
- comment_tag: vm-performance-changes
- mode: recreate
- create_if_not_exists: true
- - name: Remove PR comment
- uses: thollander/actions-comment-pull-request@fabd468d3a1a0b97feee5f6b9e499eab0dd903f6 # v2.5.0
- if: steps.comparison.outputs.speedup == '' && steps.comparison.outputs.opcodes == ''
- with:
- comment_tag: vm-performance-changes
- message: 'No performance difference detected (anymore)'
- mode: delete
+# - name: fetch PR branch
+# run: |
+# git remote add pr_repo ${{ github.event.pull_request.head.repo.clone_url }}
+# git fetch pr_repo ${{ github.event.pull_request.head.ref }}
+#
+# - name: fetch merge-base SHA
+# id: merge_base
+# run: echo "sha=$(git merge-base HEAD FETCH_HEAD)" >> $GITHUB_OUTPUT
+#
+# - name: checkout divergence point
+# run: git checkout ${{ steps.merge_base.outputs.sha }} --recurse-submodules
+#
+# - name: setup-env
+# run: |
+# touch .env
+# echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV
+# echo $(pwd)/bin >> $GITHUB_PATH
+# echo $(pwd)/zkstack_cli/zkstackup >> $GITHUB_PATH
+# echo "SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage" >> .env
+# echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env
+# echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env
+# echo "RUSTC_WRAPPER=sccache" >> .env
+# # Set the minimum reported instruction count difference to reduce noise
+# echo "BENCHMARK_DIFF_THRESHOLD_PERCENT=2" >> .env
+#
+# - name: init
+# run: |
+# run_retried docker compose pull zk
+# docker compose up -d zk
+#
+# - name: run benchmarks on base branch
+# shell: bash
+# run: |
+# ci_run zkstackup -g --local
+# ci_run zkstack dev contracts --system-contracts
+# ci_run cargo bench --package vm-benchmark --bench instructions -- --verbose || echo "Instructions benchmark is missing"
+# ci_run cargo run --package vm-benchmark --release --bin instruction_counts | tee base-opcodes
+#
+# - name: checkout PR
+# run: |
+# git checkout --force FETCH_HEAD --recurse-submodules
+#
+# - name: run benchmarks on PR
+# shell: bash
+# id: comparison
+# run: |
+# ci_run zkstackup -g --local
+# ci_run zkstack dev contracts --system-contracts
+# ci_run cargo bench --package vm-benchmark --bench instructions -- --verbose
+#
+# ci_run cargo bench --package vm-benchmark --bench instructions -- --print > instructions.log 2>/dev/null
+# # Output all lines from the benchmark result starting from the "## ..." comparison header.
+# # Since the output spans multiple lines, we use a heredoc declaration.
+# EOF=$(dd if=/dev/urandom bs=15 count=1 status=none | base64)
+# echo "speedup<<$EOF" >> $GITHUB_OUTPUT
+# sed -n '/^## /,$p' instructions.log >> $GITHUB_OUTPUT
+# echo "$EOF" >> $GITHUB_OUTPUT
+#
+# ci_run cargo run --package vm-benchmark --release --bin instruction_counts -- --diff base-opcodes > opcodes.log
+# echo "opcodes<<$EOF" >> $GITHUB_OUTPUT
+# sed -n '/^## /,$p' opcodes.log >> $GITHUB_OUTPUT
+# echo "$EOF" >> $GITHUB_OUTPUT
+#
+# - name: Comment on PR
+# uses: thollander/actions-comment-pull-request@fabd468d3a1a0b97feee5f6b9e499eab0dd903f6 # v2.5.0
+# if: steps.comparison.outputs.speedup != '' || steps.comparison.outputs.opcodes != ''
+# with:
+# message: |
+# ${{ steps.comparison.outputs.speedup }}
+# ${{ steps.comparison.outputs.opcodes }}
+# comment_tag: vm-performance-changes
+# mode: recreate
+# create_if_not_exists: true
+# - name: Remove PR comment
+# uses: thollander/actions-comment-pull-request@fabd468d3a1a0b97feee5f6b9e499eab0dd903f6 # v2.5.0
+# if: steps.comparison.outputs.speedup == '' && steps.comparison.outputs.opcodes == ''
+# with:
+# comment_tag: vm-performance-changes
+# message: 'No performance difference detected (anymore)'
+# mode: delete
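The disabled comparison step relies on GitHub's heredoc syntax for multiline step outputs; randomizing the delimiter guarantees the benchmark output can never contain it. The idiom in isolation:

    EOF=$(dd if=/dev/urandom bs=15 count=1 status=none | base64)
    {
      echo "speedup<<$EOF"
      sed -n '/^## /,$p' instructions.log   # everything from the "## ..." header on
      echo "$EOF"
    } >> "$GITHUB_OUTPUT"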
diff --git a/.gitignore b/.gitignore
index adf3b779961..ea01fe127aa 100644
--- a/.gitignore
+++ b/.gitignore
@@ -36,6 +36,9 @@ Cargo.lock
/etc/env/.current
/etc/env/configs/*
!/etc/env/configs/dev.toml
+!/etc/env/configs/dev2.toml
+!/etc/env/configs/l1-hyperchain.template.toml
+!/etc/env/configs/l1-hyperchain-docker.template.toml
!/etc/env/configs/dev_validium.toml
!/etc/env/configs/dev_validium_docker.toml
!/etc/env/configs/ext-node.toml
@@ -69,6 +72,7 @@ Cargo.lock
!/etc/env/*.yaml
!/etc/env/ext-node-validium-docker.toml
/etc/tokens/localhost.json
+/etc/tokens/localhostL2.json
/etc/zksolc-bin/*
/etc/zkvyper-bin/*
/etc/solc-bin/*
diff --git a/Cargo.lock b/Cargo.lock
index 597da3c1b31..30368383981 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -10198,6 +10198,7 @@ dependencies = [
"zksync_dal",
"zksync_eth_client",
"zksync_l1_contract_interface",
+ "zksync_mini_merkle_tree",
"zksync_node_fee_model",
"zksync_node_test_utils",
"zksync_object_store",
@@ -10225,6 +10226,9 @@ dependencies = [
"anyhow",
"async-recursion",
"async-trait",
+ "bincode",
+ "hex",
+ "itertools 0.10.5",
"test-log",
"thiserror",
"tokio",
@@ -10234,9 +10238,12 @@ dependencies = [
"zksync_contracts",
"zksync_dal",
"zksync_eth_client",
+ "zksync_mini_merkle_tree",
"zksync_shared_metrics",
"zksync_system_constants",
"zksync_types",
+ "zksync_utils",
+ "zksync_web3_decl",
]
[[package]]
@@ -10425,6 +10432,7 @@ dependencies = [
"zksync_kzg",
"zksync_prover_interface",
"zksync_solidity_vk_codegen",
+ "zksync_system_constants",
"zksync_types",
]
@@ -10518,6 +10526,7 @@ name = "zksync_mini_merkle_tree"
version = "0.1.0"
dependencies = [
"criterion",
+ "hex",
"once_cell",
"zksync_basic_types",
"zksync_crypto_primitives",
@@ -10593,6 +10602,7 @@ dependencies = [
"zksync_config",
"zksync_consensus_roles",
"zksync_contracts",
+ "zksync_crypto_primitives",
"zksync_dal",
"zksync_health_check",
"zksync_metadata_calculator",
@@ -10619,6 +10629,7 @@ version = "0.1.0"
dependencies = [
"anyhow",
"async-trait",
+ "hex",
"rand 0.8.5",
"secrecy",
"semver",
@@ -10641,6 +10652,7 @@ dependencies = [
"zksync_l1_contract_interface",
"zksync_merkle_tree",
"zksync_metadata_calculator",
+ "zksync_multivm",
"zksync_node_api_server",
"zksync_node_genesis",
"zksync_node_sync",
@@ -10734,6 +10746,7 @@ dependencies = [
"zksync_house_keeper",
"zksync_logs_bloom_backfill",
"zksync_metadata_calculator",
+ "zksync_mini_merkle_tree",
"zksync_node_api_server",
"zksync_node_consensus",
"zksync_node_db_pruner",
@@ -10832,6 +10845,7 @@ dependencies = [
"zksync_dal",
"zksync_eth_client",
"zksync_health_check",
+ "zksync_multivm",
"zksync_node_genesis",
"zksync_node_test_utils",
"zksync_shared_metrics",
@@ -11267,6 +11281,7 @@ dependencies = [
"blake2 0.10.6",
"chrono",
"derive_more 1.0.0",
+ "ethabi",
"hex",
"itertools 0.10.5",
"num",
diff --git a/contracts b/contracts
index 84d5e3716f6..53b0283f82f 160000
--- a/contracts
+++ b/contracts
@@ -1 +1 @@
-Subproject commit 84d5e3716f645909e8144c7d50af9dd6dd9ded62
+Subproject commit 53b0283f82f4262c973eb3faed56ee8f6cda47b9
diff --git a/core/bin/external_node/src/config/mod.rs b/core/bin/external_node/src/config/mod.rs
index 70803a66311..420a6941c81 100644
--- a/core/bin/external_node/src/config/mod.rs
+++ b/core/bin/external_node/src/config/mod.rs
@@ -105,7 +105,7 @@ pub(crate) struct RemoteENConfig {
pub state_transition_proxy_addr: Option<Address>,
pub transparent_proxy_admin_addr: Option<Address>,
/// Should not be accessed directly. Use [`ExternalNodeConfig::diamond_proxy_address`] instead.
- diamond_proxy_addr: Address,
+ pub user_facing_diamond_proxy: Address,
// While on L1 shared bridge and legacy bridge are different contracts with different addresses,
// the `l2_erc20_bridge_addr` and `l2_shared_bridge_addr` are basically the same contract, but with
// a different name, with names adapted only for consistency.
@@ -124,6 +124,8 @@ pub(crate) struct RemoteENConfig {
pub base_token_addr: Address,
pub l1_batch_commit_data_generator_mode: L1BatchCommitmentMode,
pub dummy_verifier: bool,
+
+ pub user_facing_bridgehub: Option<Address>,
}
impl RemoteENConfig {
@@ -142,10 +144,16 @@ impl RemoteENConfig {
.rpc_context("ecosystem_contracts")
.await
.ok();
- let diamond_proxy_addr = client
+ let user_facing_diamond_proxy = client
.get_main_contract()
.rpc_context("get_main_contract")
.await?;
+
+ let user_facing_bridgehub = client
+ .get_bridgehub_contract()
+ .rpc_context("get_bridgehub_contract")
+ .await?;
+
let base_token_addr = match client.get_base_token_l1_address().await {
Err(ClientError::Call(err))
if [
@@ -188,7 +196,8 @@ impl RemoteENConfig {
transparent_proxy_admin_addr: ecosystem_contracts
.as_ref()
.map(|a| a.transparent_proxy_admin_addr),
- diamond_proxy_addr,
+ user_facing_diamond_proxy,
+ user_facing_bridgehub,
l2_testnet_paymaster_addr,
l1_erc20_bridge_proxy_addr: bridges.l1_erc20_default_bridge,
l2_erc20_bridge_addr: l2_erc20_default_bridge,
@@ -215,7 +224,8 @@ impl RemoteENConfig {
bridgehub_proxy_addr: None,
state_transition_proxy_addr: None,
transparent_proxy_admin_addr: None,
- diamond_proxy_addr: Address::repeat_byte(1),
+ user_facing_diamond_proxy: Address::repeat_byte(1),
+ user_facing_bridgehub: None,
l1_erc20_bridge_proxy_addr: Some(Address::repeat_byte(2)),
l2_erc20_bridge_addr: Some(Address::repeat_byte(3)),
l2_weth_bridge_addr: None,
@@ -1336,7 +1346,7 @@ impl ExternalNodeConfig<()> {
let remote = RemoteENConfig::fetch(main_node_client)
.await
.context("Unable to fetch required config values from the main node")?;
- let remote_diamond_proxy_addr = remote.diamond_proxy_addr;
+ let remote_diamond_proxy_addr = remote.user_facing_diamond_proxy;
if let Some(local_diamond_proxy_addr) = self.optional.contracts_diamond_proxy_addr {
anyhow::ensure!(
local_diamond_proxy_addr == remote_diamond_proxy_addr,
@@ -1387,10 +1397,11 @@ impl ExternalNodeConfig {
/// If local configuration contains the address, it will be checked against the one returned by the main node.
/// Otherwise, the remote value will be used. However, using remote value has trust implications for the main
/// node so relying on it solely is not recommended.
- pub fn diamond_proxy_address(&self) -> Address {
+ /// FIXME: this method is currently unused; it should be wired back in just as on the main branch.
+ pub fn _diamond_proxy_address(&self) -> Address {
self.optional
.contracts_diamond_proxy_addr
- .unwrap_or(self.remote.diamond_proxy_addr)
+ .unwrap_or(self.remote.user_facing_diamond_proxy)
}
}
@@ -1399,6 +1410,9 @@ impl From<&ExternalNodeConfig> for InternalApiConfig {
Self {
l1_chain_id: config.required.l1_chain_id,
l2_chain_id: config.required.l2_chain_id,
+ // TODO: the EN does not support custom settlement layers yet; default to the L1 chain ID
+ sl_chain_id: SLChainId(config.required.l1_chain_id.0),
+ settlement_layer_url: None,
max_tx_size: config.optional.max_tx_size_bytes,
estimate_gas_scale_factor: config.optional.estimate_gas_scale_factor,
estimate_gas_acceptable_overestimation: config
@@ -1417,7 +1431,8 @@ impl From<&ExternalNodeConfig> for InternalApiConfig {
bridgehub_proxy_addr: config.remote.bridgehub_proxy_addr,
state_transition_proxy_addr: config.remote.state_transition_proxy_addr,
transparent_proxy_admin_addr: config.remote.transparent_proxy_admin_addr,
- diamond_proxy_addr: config.remote.diamond_proxy_addr,
+ user_facing_diamond_proxy_addr: config.remote.user_facing_diamond_proxy,
+ user_facing_bridgehub_addr: config.remote.user_facing_bridgehub,
l2_testnet_paymaster_addr: config.remote.l2_testnet_paymaster_addr,
req_entities_limit: config.optional.req_entities_limit,
fee_history_limit: config.optional.fee_history_limit,
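The renamed `user_facing_*` fields are fetched from the main node over the `zks_` RPC namespace. A sketch of the equivalent raw calls, assuming a main node on localhost:3050 and that `get_main_contract`/`get_bridgehub_contract` map onto `zks_getMainContract`/`zks_getBridgehubContract`:

    curl -s -X POST -H 'Content-Type: application/json' \
      -d '{"jsonrpc":"2.0","id":1,"method":"zks_getMainContract","params":[]}' \
      http://localhost:3050     # -> user_facing_diamond_proxy
    curl -s -X POST -H 'Content-Type: application/json' \
      -d '{"jsonrpc":"2.0","id":2,"method":"zks_getBridgehubContract","params":[]}' \
      http://localhost:3050     # -> user_facing_bridgehub (may be null)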
diff --git a/core/bin/external_node/src/node_builder.rs b/core/bin/external_node/src/node_builder.rs
index b7f6f803902..3a43d9d492d 100644
--- a/core/bin/external_node/src/node_builder.rs
+++ b/core/bin/external_node/src/node_builder.rs
@@ -181,8 +181,7 @@ impl ExternalNodeBuilder {
let query_eth_client_layer = QueryEthClientLayer::new(
self.config.required.settlement_layer_id(),
self.config.required.eth_client_url.clone(),
- // TODO(EVM-676): add this config for external node
- Default::default(),
+ self.config.optional.gateway_url.clone(),
);
self.node.add_layer(query_eth_client_layer);
Ok(self)
@@ -278,7 +277,7 @@ impl ExternalNodeBuilder {
fn add_l1_batch_commitment_mode_validation_layer(mut self) -> anyhow::Result<Self> {
let layer = L1BatchCommitmentModeValidationLayer::new(
- self.config.diamond_proxy_address(),
+ self.config.remote.user_facing_diamond_proxy,
self.config.optional.l1_batch_commit_data_generator_mode,
);
self.node.add_layer(layer);
@@ -297,7 +296,7 @@ impl ExternalNodeBuilder {
fn add_consistency_checker_layer(mut self) -> anyhow::Result<Self> {
let max_batches_to_recheck = 10; // TODO (BFT-97): Make it a part of a proper EN config
let layer = ConsistencyCheckerLayer::new(
- self.config.diamond_proxy_address(),
+ self.config.remote.user_facing_diamond_proxy,
max_batches_to_recheck,
self.config.optional.l1_batch_commit_data_generator_mode,
);
@@ -324,7 +323,7 @@ impl ExternalNodeBuilder {
}
fn add_tree_data_fetcher_layer(mut self) -> anyhow::Result<Self> {
- let layer = TreeDataFetcherLayer::new(self.config.diamond_proxy_address());
+ let layer = TreeDataFetcherLayer::new(self.config.remote.user_facing_diamond_proxy);
self.node.add_layer(layer);
Ok(self)
}
diff --git a/core/bin/external_node/src/tests/mod.rs b/core/bin/external_node/src/tests/mod.rs
index 59aceea819f..2155de7c020 100644
--- a/core/bin/external_node/src/tests/mod.rs
+++ b/core/bin/external_node/src/tests/mod.rs
@@ -35,7 +35,7 @@ async fn external_node_basics(components_str: &'static str) {
}
let l2_client = utils::mock_l2_client(&env);
- let eth_client = utils::mock_eth_client(env.config.diamond_proxy_address());
+ let eth_client = utils::mock_eth_client(env.config.remote.user_facing_diamond_proxy);
let node_handle = tokio::task::spawn_blocking(move || {
std::thread::spawn(move || {
@@ -104,7 +104,7 @@ async fn node_reacts_to_stop_signal_during_initial_reorg_detection() {
let (env, env_handles) = utils::TestEnvironment::with_genesis_block("core").await;
let l2_client = utils::mock_l2_client_hanging();
- let eth_client = utils::mock_eth_client(env.config.diamond_proxy_address());
+ let eth_client = utils::mock_eth_client(env.config.remote.user_facing_diamond_proxy);
let mut node_handle = tokio::task::spawn_blocking(move || {
std::thread::spawn(move || {
@@ -140,7 +140,7 @@ async fn running_tree_without_core_is_not_allowed() {
let (env, _env_handles) = utils::TestEnvironment::with_genesis_block("tree").await;
let l2_client = utils::mock_l2_client(&env);
- let eth_client = utils::mock_eth_client(env.config.diamond_proxy_address());
+ let eth_client = utils::mock_eth_client(env.config.remote.user_facing_diamond_proxy);
let node_handle = tokio::task::spawn_blocking(move || {
std::thread::spawn(move || {
diff --git a/core/bin/zksync_server/src/main.rs b/core/bin/zksync_server/src/main.rs
index 9e1a1b5948c..72fdc8de5cd 100644
--- a/core/bin/zksync_server/src/main.rs
+++ b/core/bin/zksync_server/src/main.rs
@@ -10,6 +10,7 @@ use zksync_config::{
StateKeeperConfig,
},
fri_prover_group::FriProverGroupConfig,
+ gateway::GatewayChainConfig,
house_keeper::HouseKeeperConfig,
secrets::DataAvailabilitySecrets,
BasicWitnessInputProducerConfig, ContractsConfig, DatabaseSecrets, ExperimentalVmConfig,
@@ -26,7 +27,7 @@ use zksync_core_leftovers::{
temp_config_store::{read_yaml_repr, TempConfigStore},
Component, Components,
};
-use zksync_env_config::FromEnv;
+use zksync_env_config::{FromEnv, FromEnvVariant};
use crate::node_builder::MainNodeBuilder;
@@ -42,6 +43,9 @@ struct Cli {
/// Generate genesis block for the first contract deployment using temporary DB.
#[arg(long)]
genesis: bool,
+ /// FIXME: dangerous option; whether to ship it should be decided within the team.
+ #[arg(long)]
+ clear_l1_txs_history: bool,
/// Comma-separated list of components to launch.
#[arg(
long,
@@ -57,6 +61,9 @@ struct Cli {
/// Path to the yaml with contracts. If set, it will be used instead of env vars.
#[arg(long)]
contracts_config_path: Option<std::path::PathBuf>,
+ /// Path to the yaml with gateway contracts. If set, it will be used instead of env vars.
+ #[arg(long)]
+ gateway_contracts_config_path: Option<std::path::PathBuf>,
/// Path to the wallets config. If set, it will be used instead of env vars.
#[arg(long)]
wallets_path: Option,
@@ -127,6 +134,21 @@ fn main() -> anyhow::Result<()> {
.context("failed decoding contracts YAML config")?,
};
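+ // The gateway chain config is optional: a YAML file passed via
+ // `--gateway-contracts-config-path` takes precedence; otherwise we fall back to
+ // `GATEWAY_`-prefixed env vars, and `None` means no gateway is configured.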
+ let gateway_contracts_config: Option<GatewayChainConfig> = match opt
+ .gateway_contracts_config_path
+ {
+ None => ContractsConfig::from_env_variant("GATEWAY_".to_string())
+ .ok()
+ .map(Into::into),
+ Some(path) => {
+ let result =
+ read_yaml_repr::<zksync_protobuf_config::proto::gateway::GatewayChainConfig>(&path)
+ .context("failed decoding gateway contracts YAML config")?;
+
+ Some(result)
+ }
+ };
+
let genesis = match opt.genesis_path {
None => GenesisConfig::from_env().context("Genesis config")?,
Some(path) => read_yaml_repr::<zksync_protobuf_config::proto::genesis::Genesis>(&path)
@@ -137,7 +159,31 @@ fn main() -> anyhow::Result<()> {
.clone()
.context("observability config")?;
- let node = MainNodeBuilder::new(configs, wallets, genesis, contracts_config, secrets)?;
+ // // FIXME: don't merge this into prod
+ // if opt.clear_l1_txs_history {
+ // println!("Clearing L1 txs history!");
+
+ // let tokio_runtime = tokio::runtime::Builder::new_multi_thread()
+ // .enable_all()
+ // .build()?;
+
+ // tokio_runtime.block_on(async move {
+ // let database_secrets = secrets.database.clone().context("DatabaseSecrets").unwrap();
+ // delete_l1_txs_history(&database_secrets).await.unwrap();
+ // });
+
+ // println!("Complete!");
+ // return Ok(());
+ // }
+
+ let node = MainNodeBuilder::new(
+ configs,
+ wallets,
+ genesis,
+ contracts_config,
+ gateway_contracts_config,
+ secrets,
+ )?;
let observability_guard = {
// Observability initialization should be performed within tokio context.
diff --git a/core/bin/zksync_server/src/node_builder.rs b/core/bin/zksync_server/src/node_builder.rs
index 19edef6e4ee..9a1e46f04ee 100644
--- a/core/bin/zksync_server/src/node_builder.rs
+++ b/core/bin/zksync_server/src/node_builder.rs
@@ -4,8 +4,8 @@
use anyhow::Context;
use zksync_config::{
configs::{
- da_client::DAClientConfig, secrets::DataAvailabilitySecrets, wallets::Wallets,
- GeneralConfig, Secrets,
+ da_client::DAClientConfig, gateway::GatewayChainConfig, secrets::DataAvailabilitySecrets,
+ wallets::Wallets, GeneralConfig, Secrets,
},
ContractsConfig, GenesisConfig,
};
@@ -88,6 +88,7 @@ pub struct MainNodeBuilder {
wallets: Wallets,
genesis_config: GenesisConfig,
contracts_config: ContractsConfig,
+ gateway_contracts_config: Option<GatewayChainConfig>,
secrets: Secrets,
}
@@ -97,6 +98,7 @@ impl MainNodeBuilder {
wallets: Wallets,
genesis_config: GenesisConfig,
contracts_config: ContractsConfig,
+ gateway_contracts_config: Option<GatewayChainConfig>,
secrets: Secrets,
) -> anyhow::Result<Self> {
Ok(Self {
@@ -105,6 +107,7 @@ impl MainNodeBuilder {
wallets,
genesis_config,
contracts_config,
+ gateway_contracts_config,
secrets,
})
}
@@ -147,6 +150,7 @@ impl MainNodeBuilder {
self.node.add_layer(PKSigningEthClientLayer::new(
eth_config,
self.contracts_config.clone(),
+ self.gateway_contracts_config.clone(),
self.genesis_config.settlement_layer_id(),
wallets,
));
@@ -159,11 +163,7 @@ impl MainNodeBuilder {
let query_eth_client_layer = QueryEthClientLayer::new(
genesis.settlement_layer_id(),
eth_config.l1_rpc_url,
- self.configs
- .eth
- .as_ref()
- .and_then(|x| Some(x.gas_adjuster?.settlement_mode))
- .unwrap_or(SettlementMode::SettlesToL1),
+ eth_config.gateway_url,
);
self.node.add_layer(query_eth_client_layer);
Ok(self)
@@ -281,6 +281,13 @@ impl MainNodeBuilder {
self.node.add_layer(EthWatchLayer::new(
try_load_config!(eth_config.watcher),
self.contracts_config.clone(),
+ self.gateway_contracts_config.clone(),
+ self.configs
+ .eth
+ .as_ref()
+ .and_then(|x| Some(x.gas_adjuster?.settlement_mode))
+ .unwrap_or(SettlementMode::SettlesToL1),
+ self.genesis_config.l2_chain_id,
));
Ok(self)
}
@@ -435,10 +442,10 @@ impl MainNodeBuilder {
fn add_eth_tx_aggregator_layer(mut self) -> anyhow::Result<Self> {
let eth_sender_config = try_load_config!(self.configs.eth);
-
self.node.add_layer(EthTxAggregatorLayer::new(
eth_sender_config,
self.contracts_config.clone(),
+ self.gateway_contracts_config.clone(),
self.genesis_config.l2_chain_id,
self.genesis_config.l1_batch_commit_data_generator_mode,
self.configs
diff --git a/core/lib/basic_types/src/lib.rs b/core/lib/basic_types/src/lib.rs
index 7953f362fd4..d1180048efb 100644
--- a/core/lib/basic_types/src/lib.rs
+++ b/core/lib/basic_types/src/lib.rs
@@ -111,7 +111,7 @@ impl TryFrom<U256> for AccountTreeId
/// ChainId in the ZKsync network.
#[derive(Copy, Clone, Debug, Serialize, PartialEq, Eq, PartialOrd, Ord, Hash)]
-pub struct L2ChainId(u64);
+pub struct L2ChainId(pub u64);
impl<'de> Deserialize<'de> for L2ChainId {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
diff --git a/core/lib/basic_types/src/protocol_version.rs b/core/lib/basic_types/src/protocol_version.rs
index ebecfaa1b87..88513360916 100644
--- a/core/lib/basic_types/src/protocol_version.rs
+++ b/core/lib/basic_types/src/protocol_version.rs
@@ -70,15 +70,16 @@ pub enum ProtocolVersionId {
Version25,
Version26,
Version27,
+ Version28,
}
impl ProtocolVersionId {
pub const fn latest() -> Self {
- Self::Version25
+ Self::Version27
}
pub const fn next() -> Self {
- Self::Version26
+ Self::Version28
}
pub fn try_from_packed_semver(packed_semver: U256) -> Result<Self, String> {
@@ -124,6 +125,7 @@ impl ProtocolVersionId {
ProtocolVersionId::Version25 => VmVersion::Vm1_5_0IncreasedBootloaderMemory,
ProtocolVersionId::Version26 => VmVersion::Vm1_5_0IncreasedBootloaderMemory,
ProtocolVersionId::Version27 => VmVersion::VmGateway,
+ ProtocolVersionId::Version28 => VmVersion::VmGateway,
}
}
@@ -285,6 +287,7 @@ impl From<ProtocolVersionId> for VmVersion {
ProtocolVersionId::Version25 => VmVersion::Vm1_5_0IncreasedBootloaderMemory,
ProtocolVersionId::Version26 => VmVersion::Vm1_5_0IncreasedBootloaderMemory,
ProtocolVersionId::Version27 => VmVersion::VmGateway,
+ ProtocolVersionId::Version28 => VmVersion::VmGateway,
}
}
}
diff --git a/core/lib/basic_types/src/web3/mod.rs b/core/lib/basic_types/src/web3/mod.rs
index aa7c4967033..1997db6f0b6 100644
--- a/core/lib/basic_types/src/web3/mod.rs
+++ b/core/lib/basic_types/src/web3/mod.rs
@@ -190,7 +190,7 @@ pub struct Filter {
}
#[derive(Default, Debug, PartialEq, Clone)]
-pub struct ValueOrArray<T>(Vec<T>);
+pub struct ValueOrArray<T>(pub Vec<T>);
impl<T> ValueOrArray<T> {
pub fn flatten(self) -> Vec<T> {
@@ -198,6 +198,12 @@ impl ValueOrArray {
}
}
+impl<T> From<T> for ValueOrArray<T> {
+ fn from(value: T) -> Self {
+ Self(vec![value])
+ }
+}
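+
+// Illustrative usage sketch (not part of this diff): the blanket conversion lets
+// callers build a single-element filter value without an explicit `vec![..]`:
+//
+// let topics: ValueOrArray<H256> = H256::zero().into();
+// assert_eq!(topics.flatten(), vec![H256::zero()]);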
+
impl<T> Serialize for ValueOrArray<T>
where
T: Serialize,
diff --git a/core/lib/config/src/configs/api.rs b/core/lib/config/src/configs/api.rs
index ce0d9612958..21cf44cc073 100644
--- a/core/lib/config/src/configs/api.rs
+++ b/core/lib/config/src/configs/api.rs
@@ -224,6 +224,9 @@ pub struct Web3JsonRpcConfig {
/// (hundreds or thousands RPS).
#[serde(default)]
pub extended_api_tracing: bool,
+
+ #[serde(default)]
+ pub settlement_layer_url: Option<String>,
}
impl Web3JsonRpcConfig {
@@ -264,6 +267,7 @@ impl Web3JsonRpcConfig {
whitelisted_tokens_for_aa: vec![],
api_namespaces: None,
extended_api_tracing: false,
+ settlement_layer_url: None,
}
}
diff --git a/core/lib/config/src/configs/contracts.rs b/core/lib/config/src/configs/contracts.rs
index 0bf7aab3bca..1d49a09d213 100644
--- a/core/lib/config/src/configs/contracts.rs
+++ b/core/lib/config/src/configs/contracts.rs
@@ -45,7 +45,11 @@ pub struct ContractsConfig {
pub ecosystem_contracts: Option<EcosystemContracts>,
// Used by the RPC API and by the node builder in wiring the BaseTokenRatioProvider layer.
pub base_token_addr: Option<Address>,
+ // FIXME: maybe refactor these user-facing address fields.
+ pub user_facing_bridgehub_proxy_addr: Option<Address>,
+ pub user_facing_diamond_proxy_addr: Option<Address>,
pub chain_admin_addr: Option<Address>,
+ pub settlement_layer: Option<u64>,
pub l2_da_validator_addr: Option<Address>,
}
@@ -68,7 +72,10 @@ impl ContractsConfig {
governance_addr: Address::repeat_byte(0x13),
base_token_addr: Some(Address::repeat_byte(0x14)),
ecosystem_contracts: Some(EcosystemContracts::for_tests()),
+ user_facing_bridgehub_proxy_addr: Some(Address::repeat_byte(0x15)),
+ user_facing_diamond_proxy_addr: Some(Address::repeat_byte(0x16)),
chain_admin_addr: Some(Address::repeat_byte(0x18)),
+ settlement_layer: Some(0),
l2_da_validator_addr: Some(Address::repeat_byte(0x1a)),
}
}
diff --git a/core/lib/config/src/configs/eth_sender.rs b/core/lib/config/src/configs/eth_sender.rs
index 7b67f015238..ab12642c7ba 100644
--- a/core/lib/config/src/configs/eth_sender.rs
+++ b/core/lib/config/src/configs/eth_sender.rs
@@ -42,6 +42,8 @@ impl EthConfig {
pubdata_sending_mode: PubdataSendingMode::Calldata,
tx_aggregation_paused: false,
tx_aggregation_only_prove_and_execute: false,
+ ignore_db_nonce: None,
+ priority_tree_start_index: Some(0),
time_in_mempool_in_l1_blocks_cap: 1800,
}),
gas_adjuster: Some(GasAdjusterConfig {
@@ -119,7 +121,10 @@ pub struct SenderConfig {
/// special mode specifically for gateway migration to decrease number of non-executed batches
#[serde(default = "SenderConfig::default_tx_aggregation_only_prove_and_execute")]
pub tx_aggregation_only_prove_and_execute: bool,
-
+ /// Whether to skip the DB nonce check for the sender and rely solely on the nonce reported over RPC.
+ pub ignore_db_nonce: Option<bool>,
+ /// Index of the priority operation to start building the `PriorityMerkleTree` from.
+ pub priority_tree_start_index: Option<usize>,
/// Cap of time in mempool for price calculations
#[serde(default = "SenderConfig::default_time_in_mempool_in_l1_blocks_cap")]
pub time_in_mempool_in_l1_blocks_cap: u32,
@@ -158,6 +163,14 @@ impl SenderConfig {
.map(|pk| pk.parse().unwrap())
}
+ // Don't load the gateway private key if it's not required.
+ #[deprecated]
+ pub fn private_key_gateway(&self) -> Option<K256PrivateKey> {
+ std::env::var("ETH_SENDER_SENDER_OPERATOR_GATEWAY_PRIVATE_KEY")
+ .ok()
+ .map(|pk| pk.parse().unwrap())
+ }
+
const fn default_tx_aggregation_paused() -> bool {
false
}
diff --git a/core/lib/config/src/configs/gateway.rs b/core/lib/config/src/configs/gateway.rs
new file mode 100644
index 00000000000..cc0cdcc1d6a
--- /dev/null
+++ b/core/lib/config/src/configs/gateway.rs
@@ -0,0 +1,73 @@
+use zksync_basic_types::{web3::Bytes, Address};
+
+use super::ContractsConfig;
+
+/// Config that is only stored for the gateway chain.
+#[derive(serde::Serialize, serde::Deserialize, Debug, Clone, PartialEq)]
+pub struct GatewayConfig {
+ pub state_transition_proxy_addr: Address,
+ pub state_transition_implementation_addr: Address,
+ pub verifier_addr: Address,
+ pub validator_timelock_addr: Address,
+ pub admin_facet_addr: Address,
+ pub mailbox_facet_addr: Address,
+ pub executor_facet_addr: Address,
+ pub getters_facet_addr: Address,
+ pub diamond_init_addr: Address,
+ pub genesis_upgrade_addr: Address,
+ pub default_upgrade_addr: Address,
+ pub multicall3_addr: Address,
+ pub relayed_sl_da_validator: Address,
+ pub validium_da_validator: Address,
+ pub diamond_cut_data: Bytes,
+}
+
+#[derive(serde::Serialize, serde::Deserialize, Debug, Clone, PartialEq)]
+pub struct GatewayChainConfig {
+ pub state_transition_proxy_addr: Address,
+ pub validator_timelock_addr: Address,
+ pub multicall3_addr: Address,
+ pub diamond_proxy_addr: Address,
+ pub chain_admin_addr: Option<Address>,
+ pub governance_addr: Address,
+ pub settlement_layer: u64,
+}
+
+impl GatewayChainConfig {
+ pub fn from_gateway_and_chain_data(
+ gateway_config: &GatewayConfig,
+ diamond_proxy_addr: Address,
+ chain_admin_addr: Address,
+ settlement_layer: u64,
+ ) -> Self {
+ // FIXME: there is no "governnace" for a chain, only an admin, we
+ // need to figure out what we mean here
+
+ Self {
+ state_transition_proxy_addr: gateway_config.state_transition_proxy_addr,
+ validator_timelock_addr: gateway_config.validator_timelock_addr,
+ multicall3_addr: gateway_config.multicall3_addr,
+ diamond_proxy_addr,
+ chain_admin_addr: Some(chain_admin_addr),
+ governance_addr: chain_admin_addr,
+ settlement_layer,
+ }
+ }
+}
+
+impl From<ContractsConfig> for GatewayChainConfig {
+ fn from(value: ContractsConfig) -> Self {
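+ // NB: the `unwrap()`s below make this conversion panic for configs that do
+ // not define `ecosystem_contracts` and `settlement_layer`; it is only valid
+ // for gateway-enabled configs.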
+ Self {
+ state_transition_proxy_addr: value
+ .ecosystem_contracts
+ .unwrap()
+ .state_transition_proxy_addr,
+ validator_timelock_addr: value.validator_timelock_addr,
+ multicall3_addr: value.l1_multicall3_addr,
+ diamond_proxy_addr: value.diamond_proxy_addr,
+ chain_admin_addr: value.chain_admin_addr,
+ governance_addr: value.governance_addr,
+ settlement_layer: value.settlement_layer.unwrap(),
+ }
+ }
+}
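+
+// Illustrative usage sketch (not part of this file; the variable names are
+// hypothetical): assembling the per-chain config from the shared gateway
+// config plus chain-specific data discovered on the gateway.
+//
+// let chain_config = GatewayChainConfig::from_gateway_and_chain_data(
+//     &gateway_config,
+//     diamond_proxy_addr, // the chain's diamond proxy on the gateway
+//     chain_admin_addr,   // also reused as `governance_addr` (see FIXME above)
+//     gateway_chain_id,   // chain ID of the settlement layer, as a u64
+// );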
diff --git a/core/lib/config/src/configs/mod.rs b/core/lib/config/src/configs/mod.rs
index b3a7c291343..ac570589d9c 100644
--- a/core/lib/config/src/configs/mod.rs
+++ b/core/lib/config/src/configs/mod.rs
@@ -18,6 +18,7 @@ pub use self::{
fri_prover_gateway::FriProverGatewayConfig,
fri_witness_generator::FriWitnessGeneratorConfig,
fri_witness_vector_generator::FriWitnessVectorGeneratorConfig,
+ gateway::{GatewayChainConfig, GatewayConfig},
general::GeneralConfig,
genesis::GenesisConfig,
object_store::ObjectStoreConfig,
@@ -54,6 +55,7 @@ pub mod fri_prover_gateway;
pub mod fri_prover_group;
pub mod fri_witness_generator;
pub mod fri_witness_vector_generator;
+pub mod gateway;
mod general;
pub mod genesis;
pub mod house_keeper;
diff --git a/core/lib/config/src/configs/secrets.rs b/core/lib/config/src/configs/secrets.rs
index 779bad37065..276f7990c7a 100644
--- a/core/lib/config/src/configs/secrets.rs
+++ b/core/lib/config/src/configs/secrets.rs
@@ -13,6 +13,7 @@ pub struct DatabaseSecrets {
#[derive(Debug, Clone, PartialEq)]
pub struct L1Secrets {
pub l1_rpc_url: SensitiveUrl,
+ pub gateway_url: Option<SensitiveUrl>,
}
#[derive(Debug, Clone, PartialEq)]
diff --git a/core/lib/config/src/configs/wallets.rs b/core/lib/config/src/configs/wallets.rs
index 4cb5358c8f3..90ddd90faed 100644
--- a/core/lib/config/src/configs/wallets.rs
+++ b/core/lib/config/src/configs/wallets.rs
@@ -62,6 +62,7 @@ impl Wallet {
pub struct EthSender {
pub operator: Wallet,
pub blob_operator: Option<Wallet>,
+ pub gateway: Option<Wallet>,
}
#[derive(Debug, Clone, PartialEq)]
@@ -89,6 +90,7 @@ impl Wallets {
blob_operator: Some(
Wallet::from_private_key_bytes(H256::repeat_byte(0x2), None).unwrap(),
),
+ gateway: None,
}),
state_keeper: Some(StateKeeper {
fee_account: AddressWallet::from_address(H160::repeat_byte(0x3)),
diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs
index 21ff9e2351b..4e6930a3384 100644
--- a/core/lib/config/src/testonly.rs
+++ b/core/lib/config/src/testonly.rs
@@ -116,6 +116,7 @@ impl Distribution<configs::api::Web3JsonRpcConfig> for EncodeDist {
api_namespaces: self
.sample_opt(|| self.sample_range(rng).map(|_| self.sample(rng)).collect()),
extended_api_tracing: self.sample(rng),
+ settlement_layer_url: self.sample(rng),
}
}
}
@@ -268,8 +269,11 @@ impl Distribution<configs::ContractsConfig> for EncodeDist {
l2_testnet_paymaster_addr: self.sample_opt(|| rng.gen()),
l1_multicall3_addr: rng.gen(),
ecosystem_contracts: self.sample(rng),
+ user_facing_bridgehub_proxy_addr: rng.gen(),
+ user_facing_diamond_proxy_addr: rng.gen(),
base_token_addr: self.sample_opt(|| rng.gen()),
chain_admin_addr: self.sample_opt(|| rng.gen()),
+ settlement_layer: self.sample_opt(|| rng.gen()),
l2_da_validator_addr: self.sample_opt(|| rng.gen()),
}
}
@@ -419,6 +423,8 @@ impl Distribution<configs::eth_sender::SenderConfig> for EncodeDist {
pubdata_sending_mode: PubdataSendingMode::Calldata,
tx_aggregation_paused: false,
tx_aggregation_only_prove_and_execute: false,
+ ignore_db_nonce: None,
+ priority_tree_start_index: self.sample(rng),
time_in_mempool_in_l1_blocks_cap: self.sample(rng),
}
}
@@ -852,6 +858,7 @@ impl Distribution<configs::secrets::L1Secrets> for EncodeDist {
use configs::secrets::L1Secrets;
L1Secrets {
l1_rpc_url: format!("localhost:{}", rng.gen::()).parse().unwrap(),
+ gateway_url: Some(format!("localhost:{}", rng.gen::()).parse().unwrap()),
}
}
}
@@ -904,6 +911,7 @@ impl Distribution<configs::wallets::EthSender> for EncodeDist {
configs::wallets::EthSender {
operator: self.sample(rng),
blob_operator: self.sample_opt(|| self.sample(rng)),
+ gateway: None,
}
}
}
diff --git a/core/lib/constants/src/contracts.rs b/core/lib/constants/src/contracts.rs
index 4f0f362d914..f9138b2bbf1 100644
--- a/core/lib/constants/src/contracts.rs
+++ b/core/lib/constants/src/contracts.rs
@@ -135,6 +135,7 @@ pub const EVM_GAS_MANAGER_ADDRESS: Address = H160([
0x00, 0x00, 0x80, 0x13,
]);
+/// Note that `Create2Factory` and the contracts at higher addresses are explicitly deployed at non-system-contract addresses.
pub const CREATE2_FACTORY_ADDRESS: Address = H160([
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x01, 0x00, 0x00,
diff --git a/core/lib/constants/src/message_root.rs b/core/lib/constants/src/message_root.rs
index a8f4a034fb9..9bb8764cd66 100644
--- a/core/lib/constants/src/message_root.rs
+++ b/core/lib/constants/src/message_root.rs
@@ -1,5 +1,14 @@
-// Position of `FullTree::_height` in `MessageRoot`'s storage layout.
+/// Position of `chainCount` in `MessageRoot`'s storage layout.
+pub const CHAIN_COUNT_KEY: usize = 0;
+
+/// Position of `chainIndexToId` in `MessageRoot`'s storage layout.
+pub const CHAIN_INDEX_TO_ID_KEY: usize = 2;
+
+/// Position of `FullTree::_height` in `MessageRoot`'s storage layout.
pub const AGG_TREE_HEIGHT_KEY: usize = 3;
-// Position of `FullTree::nodes` in `MessageRoot`'s storage layout.
+/// Position of `FullTree::nodes` in `MessageRoot`'s storage layout.
pub const AGG_TREE_NODES_KEY: usize = 5;
+
+/// Position of `chainTree` in `MessageRoot`'s storage layout.
+pub const CHAIN_TREE_KEY: usize = 7;
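+
+// Illustrative sketch (not part of this file): the slot of a Solidity mapping
+// entry such as `chainIndexToId[index]` is `keccak256(pad32(index) ++ pad32(base_slot))`;
+// `keccak256` is assumed to be available from `zksync_basic_types::web3`.
+//
+// use zksync_basic_types::{web3::keccak256, H256, U256};
+//
+// fn chain_index_to_id_slot(index: u64) -> H256 {
+//     let mut buf = [0_u8; 64];
+//     U256::from(index).to_big_endian(&mut buf[..32]);
+//     U256::from(CHAIN_INDEX_TO_ID_KEY).to_big_endian(&mut buf[32..]);
+//     H256(keccak256(&buf))
+// }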
diff --git a/core/lib/contracts/src/lib.rs b/core/lib/contracts/src/lib.rs
index cb5be504c8a..af9b5fe99f2 100644
--- a/core/lib/contracts/src/lib.rs
+++ b/core/lib/contracts/src/lib.rs
@@ -36,11 +36,11 @@ const FORGE_PATH_PREFIX: &str = "contracts/l1-contracts/out";
const BRIDGEHUB_CONTRACT_FILE: (&str, &str) = ("bridgehub", "IBridgehub.sol/IBridgehub.json");
const STATE_TRANSITION_CONTRACT_FILE: (&str, &str) = (
"state-transition",
- "IStateTransitionManager.sol/IStateTransitionManager.json",
+ "IChainTypeManager.sol/IChainTypeManager.json",
);
const ZKSYNC_HYPERCHAIN_CONTRACT_FILE: (&str, &str) = (
"state-transition/chain-interfaces",
- "IZkSyncHyperchain.sol/IZkSyncHyperchain.json",
+ "IZKChain.sol/IZKChain.json",
);
const DIAMOND_INIT_CONTRACT_FILE: (&str, &str) = (
"state-transition",
@@ -208,6 +208,16 @@ pub fn l1_messenger_contract() -> Contract {
load_sys_contract("L1Messenger")
}
+pub fn l2_message_root() -> Contract {
+ load_contract(
+ "contracts/l1-contracts/artifacts-zk/contracts/bridgehub/MessageRoot.sol/MessageRoot.json",
+ )
+}
+
+pub fn l2_rollup_da_validator_bytecode() -> Vec<u8> {
+ read_bytecode("contracts/l2-contracts/artifacts-zk/contracts/data-availability/RollupL2DAValidator.sol/RollupL2DAValidator.json")
+}
+
/// Reads bytecode from the path RELATIVE to the Cargo workspace location.
pub fn read_bytecode(relative_path: impl AsRef<Path> + std::fmt::Debug) -> Vec<u8> {
read_bytecode_from_path(relative_path).expect("Exists")
@@ -286,7 +296,9 @@ impl SystemContractsRepo {
"artifacts-zk/contracts-preprocessed/{0}{1}.sol/{1}.json",
directory, name
)))
- .expect("One of the outputs should exists")
+ .unwrap_or_else(|| {
+ panic!("One of the outputs should exists for {directory}{name}");
+ })
}
}
ContractLanguage::Yul => {
@@ -314,10 +326,21 @@ pub fn read_bootloader_code(bootloader_type: &str) -> Vec<u8> {
{
return contract;
};
- read_yul_bytecode(
- "contracts/system-contracts/bootloader/build/artifacts",
- bootloader_type,
- )
+
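+ // Prefer the regular build artifacts; if the requested bootloader bytecode is
+ // missing there, fall back to the test artifacts directory.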
+ let artifacts_path =
+ Path::new(&home_path()).join("contracts/system-contracts/bootloader/build/artifacts");
+ let bytecode_path = artifacts_path.join(format!("{bootloader_type}.yul.zbin"));
+ if fs::exists(bytecode_path).unwrap_or_default() {
+ read_yul_bytecode(
+ "contracts/system-contracts/bootloader/build/artifacts",
+ bootloader_type,
+ )
+ } else {
+ read_yul_bytecode(
+ "contracts/system-contracts/bootloader/tests/artifacts",
+ bootloader_type,
+ )
+ }
}
fn read_proved_batch_bootloader_bytecode() -> Vec<u8> {
@@ -518,7 +541,8 @@ impl BaseSystemContracts {
pub fn playground_gateway() -> Self {
let bootloader_bytecode = read_zbin_bytecode(
- "etc/multivm_bootloaders/vm_gateway/playground_batch.yul/playground_batch.yul.zbin",
+ "contracts/system-contracts/bootloader/build/artifacts/playground_batch.yul.zbin",
+ // "etc/multivm_bootloaders/vm_gateway/playground_batch.yul/playground_batch.yul.zbin",
);
BaseSystemContracts::load_with_bootloader(bootloader_bytecode)
}
@@ -595,7 +619,8 @@ impl BaseSystemContracts {
pub fn estimate_gas_gateway() -> Self {
let bootloader_bytecode = read_zbin_bytecode(
- "etc/multivm_bootloaders/vm_gateway/fee_estimate.yul/fee_estimate.yul.zbin",
+ "contracts/system-contracts/bootloader/build/artifacts/fee_estimate.yul.zbin",
+ // "etc/multivm_bootloaders/vm_gateway/fee_estimate.yul/fee_estimate.yul.zbin",
);
BaseSystemContracts::load_with_bootloader(bootloader_bytecode)
}
@@ -736,14 +761,14 @@ pub static PRE_BOOJUM_COMMIT_FUNCTION: Lazy<Function> = Lazy::new(|| {
serde_json::from_str(abi).unwrap()
});
-pub static SET_CHAIN_ID_EVENT: Lazy<Event> = Lazy::new(|| {
+pub static GENESIS_UPGRADE_EVENT: Lazy<Event> = Lazy::new(|| {
let abi = r#"
{
"anonymous": false,
"inputs": [
{
"indexed": true,
- "name": "_stateTransitionChain",
+ "name": "_hyperchain",
"type": "address"
},
{
@@ -821,9 +846,14 @@ pub static SET_CHAIN_ID_EVENT: Lazy = Lazy::new(|| {
"indexed": true,
"name": "_protocolVersion",
"type": "uint256"
+ },
+ {
+ "indexed": false,
+ "name": "_factoryDeps",
+ "type": "bytes[]"
}
],
- "name": "SetChainIdUpgrade",
+ "name": "GenesisUpgrade",
"type": "event"
}"#;
serde_json::from_str(abi).unwrap()
@@ -1006,3 +1036,319 @@ pub static DIAMOND_CUT: Lazy<Function> = Lazy::new(|| {
}"#;
serde_json::from_str(abi).unwrap()
});
+
+pub static POST_SHARED_BRIDGE_COMMIT_FUNCTION: Lazy<Function> = Lazy::new(|| {
+ let abi = r#"
+ {
+ "inputs": [
+ {
+ "internalType": "uint256",
+ "name": "_chainId",
+ "type": "uint256"
+ },
+ {
+ "components": [
+ {
+ "internalType": "uint64",
+ "name": "batchNumber",
+ "type": "uint64"
+ },
+ {
+ "internalType": "bytes32",
+ "name": "batchHash",
+ "type": "bytes32"
+ },
+ {
+ "internalType": "uint64",
+ "name": "indexRepeatedStorageChanges",
+ "type": "uint64"
+ },
+ {
+ "internalType": "uint256",
+ "name": "numberOfLayer1Txs",
+ "type": "uint256"
+ },
+ {
+ "internalType": "bytes32",
+ "name": "priorityOperationsHash",
+ "type": "bytes32"
+ },
+ {
+ "internalType": "bytes32",
+ "name": "l2LogsTreeRoot",
+ "type": "bytes32"
+ },
+ {
+ "internalType": "uint256",
+ "name": "timestamp",
+ "type": "uint256"
+ },
+ {
+ "internalType": "bytes32",
+ "name": "commitment",
+ "type": "bytes32"
+ }
+ ],
+ "internalType": "struct IExecutor.StoredBatchInfo",
+ "name": "_lastCommittedBatchData",
+ "type": "tuple"
+ },
+ {
+ "components": [
+ {
+ "internalType": "uint64",
+ "name": "batchNumber",
+ "type": "uint64"
+ },
+ {
+ "internalType": "uint64",
+ "name": "timestamp",
+ "type": "uint64"
+ },
+ {
+ "internalType": "uint64",
+ "name": "indexRepeatedStorageChanges",
+ "type": "uint64"
+ },
+ {
+ "internalType": "bytes32",
+ "name": "newStateRoot",
+ "type": "bytes32"
+ },
+ {
+ "internalType": "uint256",
+ "name": "numberOfLayer1Txs",
+ "type": "uint256"
+ },
+ {
+ "internalType": "bytes32",
+ "name": "priorityOperationsHash",
+ "type": "bytes32"
+ },
+ {
+ "internalType": "bytes32",
+ "name": "bootloaderHeapInitialContentsHash",
+ "type": "bytes32"
+ },
+ {
+ "internalType": "bytes32",
+ "name": "eventsQueueStateHash",
+ "type": "bytes32"
+ },
+ {
+ "internalType": "bytes",
+ "name": "systemLogs",
+ "type": "bytes"
+ },
+ {
+ "internalType": "bytes",
+ "name": "pubdataCommitments",
+ "type": "bytes"
+ }
+ ],
+ "internalType": "struct IExecutor.CommitBatchInfo[]",
+ "name": "_newBatchesData",
+ "type": "tuple[]"
+ }
+ ],
+ "name": "commitBatchesSharedBridge",
+ "outputs": [],
+ "stateMutability": "nonpayable",
+ "type": "function"
+ }"#;
+ serde_json::from_str(abi).unwrap()
+});
+
+pub static POST_SHARED_BRIDGE_PROVE_FUNCTION: Lazy<Function> = Lazy::new(|| {
+ let abi = r#"
+ {
+ "inputs": [
+ {
+ "internalType": "uint256",
+ "name": "_chainId",
+ "type": "uint256"
+ },
+ {
+ "components": [
+ {
+ "internalType": "uint64",
+ "name": "batchNumber",
+ "type": "uint64"
+ },
+ {
+ "internalType": "bytes32",
+ "name": "batchHash",
+ "type": "bytes32"
+ },
+ {
+ "internalType": "uint64",
+ "name": "indexRepeatedStorageChanges",
+ "type": "uint64"
+ },
+ {
+ "internalType": "uint256",
+ "name": "numberOfLayer1Txs",
+ "type": "uint256"
+ },
+ {
+ "internalType": "bytes32",
+ "name": "priorityOperationsHash",
+ "type": "bytes32"
+ },
+ {
+ "internalType": "bytes32",
+ "name": "l2LogsTreeRoot",
+ "type": "bytes32"
+ },
+ {
+ "internalType": "uint256",
+ "name": "timestamp",
+ "type": "uint256"
+ },
+ {
+ "internalType": "bytes32",
+ "name": "commitment",
+ "type": "bytes32"
+ }
+ ],
+ "internalType": "struct IExecutor.StoredBatchInfo",
+ "name": "_prevBatch",
+ "type": "tuple"
+ },
+ {
+ "components": [
+ {
+ "internalType": "uint64",
+ "name": "batchNumber",
+ "type": "uint64"
+ },
+ {
+ "internalType": "bytes32",
+ "name": "batchHash",
+ "type": "bytes32"
+ },
+ {
+ "internalType": "uint64",
+ "name": "indexRepeatedStorageChanges",
+ "type": "uint64"
+ },
+ {
+ "internalType": "uint256",
+ "name": "numberOfLayer1Txs",
+ "type": "uint256"
+ },
+ {
+ "internalType": "bytes32",
+ "name": "priorityOperationsHash",
+ "type": "bytes32"
+ },
+ {
+ "internalType": "bytes32",
+ "name": "l2LogsTreeRoot",
+ "type": "bytes32"
+ },
+ {
+ "internalType": "uint256",
+ "name": "timestamp",
+ "type": "uint256"
+ },
+ {
+ "internalType": "bytes32",
+ "name": "commitment",
+ "type": "bytes32"
+ }
+ ],
+ "internalType": "struct IExecutor.StoredBatchInfo[]",
+ "name": "_committedBatches",
+ "type": "tuple[]"
+ },
+ {
+ "components": [
+ {
+ "internalType": "uint256[]",
+ "name": "recursiveAggregationInput",
+ "type": "uint256[]"
+ },
+ {
+ "internalType": "uint256[]",
+ "name": "serializedProof",
+ "type": "uint256[]"
+ }
+ ],
+ "internalType": "struct IExecutor.ProofInput",
+ "name": "_proof",
+ "type": "tuple"
+ }
+ ],
+ "name": "proveBatchesSharedBridge",
+ "outputs": [],
+ "stateMutability": "nonpayable",
+ "type": "function"
+ }"#;
+ serde_json::from_str(abi).unwrap()
+});
+
+pub static POST_SHARED_BRIDGE_EXECUTE_FUNCTION: Lazy<Function> = Lazy::new(|| {
+ let abi = r#"
+ {
+ "inputs": [
+ {
+ "internalType": "uint256",
+ "name": "_chainId",
+ "type": "uint256"
+ },
+ {
+ "components": [
+ {
+ "internalType": "uint64",
+ "name": "batchNumber",
+ "type": "uint64"
+ },
+ {
+ "internalType": "bytes32",
+ "name": "batchHash",
+ "type": "bytes32"
+ },
+ {
+ "internalType": "uint64",
+ "name": "indexRepeatedStorageChanges",
+ "type": "uint64"
+ },
+ {
+ "internalType": "uint256",
+ "name": "numberOfLayer1Txs",
+ "type": "uint256"
+ },
+ {
+ "internalType": "bytes32",
+ "name": "priorityOperationsHash",
+ "type": "bytes32"
+ },
+ {
+ "internalType": "bytes32",
+ "name": "l2LogsTreeRoot",
+ "type": "bytes32"
+ },
+ {
+ "internalType": "uint256",
+ "name": "timestamp",
+ "type": "uint256"
+ },
+ {
+ "internalType": "bytes32",
+ "name": "commitment",
+ "type": "bytes32"
+ }
+ ],
+ "internalType": "struct IExecutor.StoredBatchInfo[]",
+ "name": "_batchesData",
+ "type": "tuple[]"
+ }
+ ],
+ "name": "executeBatchesSharedBridge",
+ "outputs": [],
+ "stateMutability": "nonpayable",
+ "type": "function"
+ }"#;
+ serde_json::from_str(abi).unwrap()
+});
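+
+// Illustrative sketch (not part of this diff): these statics deserialize into
+// `ethabi` `Function`s, so calldata is produced with `encode_input`. The token
+// values here are placeholders, not real batch data.
+//
+// use zksync_types::ethabi::Token;
+//
+// let calldata = POST_SHARED_BRIDGE_EXECUTE_FUNCTION
+//     .encode_input(&[Token::Uint(chain_id.into()), Token::Array(stored_batch_tokens)])
+//     .expect("tokens match the static ABI");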
diff --git a/core/lib/dal/.sqlx/query-00054de8d2b60eb0a74759c52797a1d2e9cadecd1cb64987c121aa6f8c6d2771.json b/core/lib/dal/.sqlx/query-00054de8d2b60eb0a74759c52797a1d2e9cadecd1cb64987c121aa6f8c6d2771.json
new file mode 100644
index 00000000000..aed47c1dca2
--- /dev/null
+++ b/core/lib/dal/.sqlx/query-00054de8d2b60eb0a74759c52797a1d2e9cadecd1cb64987c121aa6f8c6d2771.json
@@ -0,0 +1,12 @@
+{
+ "db_name": "PostgreSQL",
+ "query": "\n UPDATE eth_txs\n SET\n nonce = 0\n WHERE\n nonce IS NOT NULL;\n ",
+ "describe": {
+ "columns": [],
+ "parameters": {
+ "Left": []
+ },
+ "nullable": []
+ },
+ "hash": "00054de8d2b60eb0a74759c52797a1d2e9cadecd1cb64987c121aa6f8c6d2771"
+}
diff --git a/core/lib/dal/.sqlx/query-228aa5ec4c4eb56143823b96a8190ded732839b9f5bf16042205a730fac07c3a.json b/core/lib/dal/.sqlx/query-228aa5ec4c4eb56143823b96a8190ded732839b9f5bf16042205a730fac07c3a.json
index b8d6482ea74..32a2212dfdf 100644
--- a/core/lib/dal/.sqlx/query-228aa5ec4c4eb56143823b96a8190ded732839b9f5bf16042205a730fac07c3a.json
+++ b/core/lib/dal/.sqlx/query-228aa5ec4c4eb56143823b96a8190ded732839b9f5bf16042205a730fac07c3a.json
@@ -11,7 +11,8 @@
"kind": {
"Enum": [
"ProtocolUpgrades",
- "PriorityTransactions"
+ "PriorityTransactions",
+ "ChainBatchRoot"
]
}
}
diff --git a/core/lib/dal/.sqlx/query-2d0a4e9281e53b0e410b9be0ebd53b2126b52d568196f333973a345f984ea7c4.json b/core/lib/dal/.sqlx/query-2d0a4e9281e53b0e410b9be0ebd53b2126b52d568196f333973a345f984ea7c4.json
new file mode 100644
index 00000000000..adbd2c0931e
--- /dev/null
+++ b/core/lib/dal/.sqlx/query-2d0a4e9281e53b0e410b9be0ebd53b2126b52d568196f333973a345f984ea7c4.json
@@ -0,0 +1,22 @@
+{
+ "db_name": "PostgreSQL",
+ "query": "\n SELECT\n batch_chain_merkle_path\n FROM\n l1_batches\n WHERE\n number = $1\n ",
+ "describe": {
+ "columns": [
+ {
+ "ordinal": 0,
+ "name": "batch_chain_merkle_path",
+ "type_info": "Bytea"
+ }
+ ],
+ "parameters": {
+ "Left": [
+ "Int8"
+ ]
+ },
+ "nullable": [
+ true
+ ]
+ },
+ "hash": "2d0a4e9281e53b0e410b9be0ebd53b2126b52d568196f333973a345f984ea7c4"
+}
diff --git a/core/lib/dal/.sqlx/query-2e3107b0c5e8466598066ceca9844469e431e35c4419fd710050d51eeefd6b8b.json b/core/lib/dal/.sqlx/query-2e3107b0c5e8466598066ceca9844469e431e35c4419fd710050d51eeefd6b8b.json
new file mode 100644
index 00000000000..69dd87a6c35
--- /dev/null
+++ b/core/lib/dal/.sqlx/query-2e3107b0c5e8466598066ceca9844469e431e35c4419fd710050d51eeefd6b8b.json
@@ -0,0 +1,22 @@
+{
+ "db_name": "PostgreSQL",
+ "query": "\n SELECT\n local_root\n FROM\n l1_batches\n WHERE\n number = $1\n ",
+ "describe": {
+ "columns": [
+ {
+ "ordinal": 0,
+ "name": "local_root",
+ "type_info": "Bytea"
+ }
+ ],
+ "parameters": {
+ "Left": [
+ "Int8"
+ ]
+ },
+ "nullable": [
+ true
+ ]
+ },
+ "hash": "2e3107b0c5e8466598066ceca9844469e431e35c4419fd710050d51eeefd6b8b"
+}
diff --git a/core/lib/dal/.sqlx/query-3ee6c2a87c65eaece7048da53c9f98ded0ad3e59e6de69c2b13d92d8ab1a07dd.json b/core/lib/dal/.sqlx/query-3ee6c2a87c65eaece7048da53c9f98ded0ad3e59e6de69c2b13d92d8ab1a07dd.json
index e2a808d41f8..8bab74d20f5 100644
--- a/core/lib/dal/.sqlx/query-3ee6c2a87c65eaece7048da53c9f98ded0ad3e59e6de69c2b13d92d8ab1a07dd.json
+++ b/core/lib/dal/.sqlx/query-3ee6c2a87c65eaece7048da53c9f98ded0ad3e59e6de69c2b13d92d8ab1a07dd.json
@@ -17,7 +17,8 @@
"kind": {
"Enum": [
"ProtocolUpgrades",
- "PriorityTransactions"
+ "PriorityTransactions",
+ "ChainBatchRoot"
]
}
}
diff --git a/core/lib/dal/.sqlx/query-43dcdb8a54ed62b10ca429c1a3c7bb90e737ffe0a6c930bbffcab24ff26f70b7.json b/core/lib/dal/.sqlx/query-43dcdb8a54ed62b10ca429c1a3c7bb90e737ffe0a6c930bbffcab24ff26f70b7.json
new file mode 100644
index 00000000000..b044915a6f4
--- /dev/null
+++ b/core/lib/dal/.sqlx/query-43dcdb8a54ed62b10ca429c1a3c7bb90e737ffe0a6c930bbffcab24ff26f70b7.json
@@ -0,0 +1,12 @@
+{
+ "db_name": "PostgreSQL",
+ "query": "\n UPDATE transactions\n SET\n l1_block_number = 0\n WHERE\n l1_block_number IS NOT NULL;\n ",
+ "describe": {
+ "columns": [],
+ "parameters": {
+ "Left": []
+ },
+ "nullable": []
+ },
+ "hash": "43dcdb8a54ed62b10ca429c1a3c7bb90e737ffe0a6c930bbffcab24ff26f70b7"
+}
diff --git a/core/lib/dal/.sqlx/query-8b0cc0da34f13544e00ab9b18f54df64b3d50d310800efcc6449cb0e387d6ea5.json b/core/lib/dal/.sqlx/query-8b0cc0da34f13544e00ab9b18f54df64b3d50d310800efcc6449cb0e387d6ea5.json
new file mode 100644
index 00000000000..e8ccd163849
--- /dev/null
+++ b/core/lib/dal/.sqlx/query-8b0cc0da34f13544e00ab9b18f54df64b3d50d310800efcc6449cb0e387d6ea5.json
@@ -0,0 +1,22 @@
+{
+ "db_name": "PostgreSQL",
+ "query": "\n SELECT\n hash\n FROM\n transactions\n WHERE\n priority_op_id >= $1\n AND is_priority = TRUE\n ORDER BY\n priority_op_id\n ",
+ "describe": {
+ "columns": [
+ {
+ "ordinal": 0,
+ "name": "hash",
+ "type_info": "Bytea"
+ }
+ ],
+ "parameters": {
+ "Left": [
+ "Int8"
+ ]
+ },
+ "nullable": [
+ false
+ ]
+ },
+ "hash": "8b0cc0da34f13544e00ab9b18f54df64b3d50d310800efcc6449cb0e387d6ea5"
+}
diff --git a/core/lib/dal/.sqlx/query-abbe96ba26a046a2c000cd9b9b4e54b2a3ba8db825a3131aa36e17f0f0fadc87.json b/core/lib/dal/.sqlx/query-abbe96ba26a046a2c000cd9b9b4e54b2a3ba8db825a3131aa36e17f0f0fadc87.json
new file mode 100644
index 00000000000..ce4d8fa1911
--- /dev/null
+++ b/core/lib/dal/.sqlx/query-abbe96ba26a046a2c000cd9b9b4e54b2a3ba8db825a3131aa36e17f0f0fadc87.json
@@ -0,0 +1,34 @@
+{
+ "db_name": "PostgreSQL",
+ "query": "\n SELECT\n number AS batch_number,\n eth_txs.chain_id AS settlement_layer_id,\n eth_txs_history.tx_hash AS settlement_layer_tx_hash\n FROM\n l1_batches\n JOIN eth_txs ON l1_batches.eth_execute_tx_id = eth_txs.id\n JOIN eth_txs_history\n ON (\n eth_txs.id = eth_txs_history.eth_tx_id\n AND eth_txs_history.confirmed_at IS NOT NULL\n )\n WHERE\n l1_batches.number = $1\n ",
+ "describe": {
+ "columns": [
+ {
+ "ordinal": 0,
+ "name": "batch_number",
+ "type_info": "Int8"
+ },
+ {
+ "ordinal": 1,
+ "name": "settlement_layer_id",
+ "type_info": "Int8"
+ },
+ {
+ "ordinal": 2,
+ "name": "settlement_layer_tx_hash",
+ "type_info": "Text"
+ }
+ ],
+ "parameters": {
+ "Left": [
+ "Int8"
+ ]
+ },
+ "nullable": [
+ false,
+ true,
+ false
+ ]
+ },
+ "hash": "abbe96ba26a046a2c000cd9b9b4e54b2a3ba8db825a3131aa36e17f0f0fadc87"
+}
diff --git a/core/lib/dal/.sqlx/query-c1f9ecf033d609457106189bc4d7928aa933616d2186c13a4e005297b0ad63a7.json b/core/lib/dal/.sqlx/query-c1f9ecf033d609457106189bc4d7928aa933616d2186c13a4e005297b0ad63a7.json
new file mode 100644
index 00000000000..90623e77e98
--- /dev/null
+++ b/core/lib/dal/.sqlx/query-c1f9ecf033d609457106189bc4d7928aa933616d2186c13a4e005297b0ad63a7.json
@@ -0,0 +1,15 @@
+{
+ "db_name": "PostgreSQL",
+ "query": "\n UPDATE\n l1_batches\n SET\n batch_chain_merkle_path = $2\n WHERE\n number = $1\n ",
+ "describe": {
+ "columns": [],
+ "parameters": {
+ "Left": [
+ "Int8",
+ "Bytea"
+ ]
+ },
+ "nullable": []
+ },
+ "hash": "c1f9ecf033d609457106189bc4d7928aa933616d2186c13a4e005297b0ad63a7"
+}
diff --git a/core/lib/dal/.sqlx/query-c2c288d268d6b266acbfc1058bc55a360f8ae12b6378f8168c000d668d6489d0.json b/core/lib/dal/.sqlx/query-c2c288d268d6b266acbfc1058bc55a360f8ae12b6378f8168c000d668d6489d0.json
new file mode 100644
index 00000000000..751d272b0b0
--- /dev/null
+++ b/core/lib/dal/.sqlx/query-c2c288d268d6b266acbfc1058bc55a360f8ae12b6378f8168c000d668d6489d0.json
@@ -0,0 +1,22 @@
+{
+ "db_name": "PostgreSQL",
+ "query": "\n SELECT\n l2_l1_merkle_root\n FROM\n l1_batches\n WHERE\n number = $1\n ",
+ "describe": {
+ "columns": [
+ {
+ "ordinal": 0,
+ "name": "l2_l1_merkle_root",
+ "type_info": "Bytea"
+ }
+ ],
+ "parameters": {
+ "Left": [
+ "Int8"
+ ]
+ },
+ "nullable": [
+ true
+ ]
+ },
+ "hash": "c2c288d268d6b266acbfc1058bc55a360f8ae12b6378f8168c000d668d6489d0"
+}
diff --git a/core/lib/dal/.sqlx/query-c2c40d5aae2e0276de453c78a39ce5a6cca1524adfe99b0cb35662746479dcc1.json b/core/lib/dal/.sqlx/query-c2c40d5aae2e0276de453c78a39ce5a6cca1524adfe99b0cb35662746479dcc1.json
index 61832d25fd2..5e2ea45e0bc 100644
--- a/core/lib/dal/.sqlx/query-c2c40d5aae2e0276de453c78a39ce5a6cca1524adfe99b0cb35662746479dcc1.json
+++ b/core/lib/dal/.sqlx/query-c2c40d5aae2e0276de453c78a39ce5a6cca1524adfe99b0cb35662746479dcc1.json
@@ -11,7 +11,8 @@
"kind": {
"Enum": [
"ProtocolUpgrades",
- "PriorityTransactions"
+ "PriorityTransactions",
+ "ChainBatchRoot"
]
}
}
diff --git a/core/lib/dal/.sqlx/query-ecea1bc19022616ef1c2a69b9da38d8add1bf70064561a9631e46041d8ac7724.json b/core/lib/dal/.sqlx/query-ecea1bc19022616ef1c2a69b9da38d8add1bf70064561a9631e46041d8ac7724.json
new file mode 100644
index 00000000000..08cb51eb7c8
--- /dev/null
+++ b/core/lib/dal/.sqlx/query-ecea1bc19022616ef1c2a69b9da38d8add1bf70064561a9631e46041d8ac7724.json
@@ -0,0 +1,22 @@
+{
+ "db_name": "PostgreSQL",
+ "query": "\n SELECT\n MIN(priority_op_id) AS \"id?\"\n FROM\n transactions\n WHERE\n l1_batch_number = $1\n AND is_priority = TRUE\n ",
+ "describe": {
+ "columns": [
+ {
+ "ordinal": 0,
+ "name": "id?",
+ "type_info": "Int8"
+ }
+ ],
+ "parameters": {
+ "Left": [
+ "Int8"
+ ]
+ },
+ "nullable": [
+ null
+ ]
+ },
+ "hash": "ecea1bc19022616ef1c2a69b9da38d8add1bf70064561a9631e46041d8ac7724"
+}
diff --git a/core/lib/dal/.sqlx/query-f516657dd48332522a5580e26c509fb7e3baa5ae84bd5e010008f8972e1a7f98.json b/core/lib/dal/.sqlx/query-f516657dd48332522a5580e26c509fb7e3baa5ae84bd5e010008f8972e1a7f98.json
new file mode 100644
index 00000000000..9f7de50539b
--- /dev/null
+++ b/core/lib/dal/.sqlx/query-f516657dd48332522a5580e26c509fb7e3baa5ae84bd5e010008f8972e1a7f98.json
@@ -0,0 +1,28 @@
+{
+ "db_name": "PostgreSQL",
+ "query": "\n SELECT\n number, l2_l1_merkle_root\n FROM\n l1_batches\n JOIN eth_txs ON eth_txs.id = l1_batches.eth_execute_tx_id\n WHERE\n batch_chain_merkle_path IS NOT NULL\n AND chain_id = $1\n ORDER BY number\n ",
+ "describe": {
+ "columns": [
+ {
+ "ordinal": 0,
+ "name": "number",
+ "type_info": "Int8"
+ },
+ {
+ "ordinal": 1,
+ "name": "l2_l1_merkle_root",
+ "type_info": "Bytea"
+ }
+ ],
+ "parameters": {
+ "Left": [
+ "Int8"
+ ]
+ },
+ "nullable": [
+ false,
+ true
+ ]
+ },
+ "hash": "f516657dd48332522a5580e26c509fb7e3baa5ae84bd5e010008f8972e1a7f98"
+}
diff --git a/core/lib/dal/migrations/20241011081834_batch_chain_merkle_path.down.sql b/core/lib/dal/migrations/20241011081834_batch_chain_merkle_path.down.sql
new file mode 100644
index 00000000000..da7142b8f81
--- /dev/null
+++ b/core/lib/dal/migrations/20241011081834_batch_chain_merkle_path.down.sql
@@ -0,0 +1,2 @@
+ALTER TABLE l1_batches
+ DROP COLUMN batch_chain_merkle_path;
diff --git a/core/lib/dal/migrations/20241011081834_batch_chain_merkle_path.up.sql b/core/lib/dal/migrations/20241011081834_batch_chain_merkle_path.up.sql
new file mode 100644
index 00000000000..8b133f70904
--- /dev/null
+++ b/core/lib/dal/migrations/20241011081834_batch_chain_merkle_path.up.sql
@@ -0,0 +1,5 @@
+ALTER TABLE l1_batches
+ ADD COLUMN batch_chain_merkle_path BYTEA;
+
+-- Postgres doesn't allow dropping an enum variant, so nothing is done about it in down.sql.
+ALTER TYPE event_type ADD VALUE 'ChainBatchRoot';
diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs
index 943aa12caf7..0935ee245b7 100644
--- a/core/lib/dal/src/blocks_dal.rs
+++ b/core/lib/dal/src/blocks_dal.rs
@@ -21,9 +21,9 @@ use zksync_types::{
},
commitment::{L1BatchCommitmentArtifacts, L1BatchWithMetadata},
fee_model::BatchFeeInput,
- l2_to_l1_log::UserL2ToL1Log,
+ l2_to_l1_log::{BatchAndChainMerklePath, UserL2ToL1Log},
writes::TreeWrite,
- Address, Bloom, L1BatchNumber, L2BlockNumber, ProtocolVersionId, H256, U256,
+ Address, Bloom, L1BatchNumber, L2BlockNumber, ProtocolVersionId, SLChainId, H256, U256,
};
use zksync_vm_interface::CircuitStatistic;
@@ -1621,6 +1621,30 @@ impl BlocksDal<'_, '_> {
.context("map_l1_batches()")
}
+ pub async fn get_batch_first_priority_op_id(
+ &mut self,
+ batch_number: L1BatchNumber,
+ ) -> DalResult