diff --git a/.githooks/pre-push b/.githooks/pre-push index 25312c14c10..73168e08ec4 100755 --- a/.githooks/pre-push +++ b/.githooks/pre-push @@ -7,8 +7,13 @@ RED='\033[0;31m' NC='\033[0m' # No Color # Check that prettier formatting rules are not violated. -if ! zk fmt --check; then - echo -e "${RED}Push error!${NC}" - echo "Please format the code via 'zk fmt', cannot push unformatted code" - exit 1 +if which zk_supervisor >/dev/null; then + if ! zk_supervisor fmt --check; then + echo -e "${RED}Push error!${NC}" + echo "Please format the code via 'zks fmt', cannot push unformatted code" + exit 1 + fi +else + echo "Please install zk_toolbox using zkup from https://github.com/matter-labs/zksync-era/tree/main/zk_toolbox/zkup, and then run ./bin/zkt from the zksync-era repository." + exit 1 fi diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md index dba6efd2fdf..a712db9f75b 100644 --- a/.github/pull_request_template.md +++ b/.github/pull_request_template.md @@ -17,4 +17,4 @@ - [ ] PR title corresponds to the body of PR (we generate changelog entries from PRs). - [ ] Tests for the changes have been added / updated. - [ ] Documentation comments have been added / updated. -- [ ] Code has been formatted via `zk fmt` and `zk lint`. +- [ ] Code has been formatted via `zk_supervisor fmt` and `zk_supervisor lint`. diff --git a/.github/release-please/manifest.json b/.github/release-please/manifest.json index 811c773b6f5..e0e8fbeecf7 100644 --- a/.github/release-please/manifest.json +++ b/.github/release-please/manifest.json @@ -1,5 +1,5 @@ { - "core": "24.24.0", + "core": "24.28.0", "prover": "16.5.0", "zk_toolbox": "0.1.2" } diff --git a/.github/scripts/rate_limit_check.sh b/.github/scripts/rate_limit_check.sh new file mode 100755 index 00000000000..6594c685d84 --- /dev/null +++ b/.github/scripts/rate_limit_check.sh @@ -0,0 +1,43 @@ +#!/bin/bash + +set -o errexit +set -o pipefail + + +api_endpoint="https://api.github.com/users/zksync-era-bot" +wait_time=60 +max_retries=60 +retry_count=0 + +while [[ $retry_count -lt $max_retries ]]; do + response=$(run_retried curl -s -w "%{http_code}" -o temp.json "$api_endpoint") + http_code=$(echo "$response" | tail -n1) + + if [[ "$http_code" == "200" ]]; then + echo "Request successful. Not rate-limited." + cat temp.json + rm temp.json + exit 0 + elif [[ "$http_code" == "403" ]]; then + rate_limit_exceeded=$(jq -r '.message' temp.json | grep -i "API rate limit exceeded") + if [[ -n "$rate_limit_exceeded" ]]; then + retry_count=$((retry_count+1)) + echo "API rate limit exceeded. Retry $retry_count of $max_retries. Retrying in $wait_time seconds..." + sleep $wait_time + else + echo "Request failed with HTTP status $http_code." + cat temp.json + rm temp.json + exit 1 + fi + else + echo "Request failed with HTTP status $http_code." + cat temp.json + rm temp.json + exit 1 + fi +done + +echo "Reached the maximum number of retries ($max_retries). Exiting." 
+rm temp.json +exit 1 diff --git a/.github/workflows/build-base.yml b/.github/workflows/build-base.yml new file mode 100644 index 00000000000..83be44c126f --- /dev/null +++ b/.github/workflows/build-base.yml @@ -0,0 +1,159 @@ +name: Build zksync-build-base Docker image +on: + workflow_dispatch: + inputs: + repo_ref: + description: "git reference of the zksync-era to build" + required: true + default: main +jobs: + build-images: + name: Build and Push Docker Images + runs-on: ${{ fromJSON('["matterlabs-ci-runner-high-performance", "matterlabs-ci-runner-arm"]')[contains(matrix.arch, 'arm')] }} + outputs: + image_tag_sha: ${{ steps.get-sha.outputs.image_tag_sha }} + # Needed to push to GitHub Package Registry + permissions: + packages: write + contents: read + env: + DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }} + DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} + REPO_REF: ${{ github.event.inputs.repo_ref }} + strategy: + matrix: + name: [ build-base ] + repository: [ zksync-build-base ] + arch: [ amd64, arm64 ] + + steps: + - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7 + with: + submodules: "recursive" + + - name: Login to google container registry + run: | + gcloud auth configure-docker us-docker.pkg.dev -q + + - name: Login to DockerHub + uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 + with: + username: ${{ secrets.DOCKERHUB_USER }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Login to GitHub Container Registry + uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Get tag + id: get-sha + run: | + echo IMAGE_TAG_SHA=$(git rev-parse --short HEAD) >> $GITHUB_ENV + echo image_tag_sha=$(git rev-parse --short HEAD) >> $GITHUB_OUTPUT + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 + + - name: Build and push + uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0 + with: + push: true + context: .
+ file: docker/build-base/Dockerfile + labels: | + org.opencontainers.image.source=https://github.com/matter-labs/zksync-era + org.opencontainers.image.licenses="MIT OR Apache-2.0" + tags: | + matterlabs/zksync-build-base:${{ steps.get-sha.outputs.image_tag_sha }}-${{ matrix.arch }} + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/zksync-build-base:${{ steps.get-sha.outputs.image_tag_sha }}-${{ matrix.arch }} + ghcr.io/${{ github.repository_owner }}/zksync-build-base:${{ steps.get-sha.outputs.image_tag_sha }}-${{ matrix.arch }} + + multiarch_manifest: + # Needed to push to GitHub Package Registry + permissions: + packages: write + contents: read + needs: [ build-images ] + env: + IMAGE_TAG_SUFFIX: ${{ needs.build-images.outputs.image_tag_sha }} + runs-on: [ matterlabs-ci-runner-high-performance ] + steps: + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 + + - name: Login to google container registry + run: | + gcloud auth configure-docker us-docker.pkg.dev -q + + - name: Login to DockerHub + uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 + with: + username: ${{ secrets.DOCKERHUB_USER }} + password: ${{ secrets.DOCKERHUB_TOKEN }} + + - name: Login to GitHub Container Registry + uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Create and push multi-arch manifests for Dockerhub + shell: bash + run: | + images=("zksync-build-base") + archs=("amd64" "arm64") + + for img in "${images[@]}"; do + multiarch_tag="matterlabs/zksync-build-base:latest" + individual_images=() + + for arch in "${archs[@]}"; do + TAG="$IMAGE_TAG_SUFFIX" + docker pull matterlabs/zksync-build-base:${TAG}-${arch} --platform linux/${arch} + individual_images+=("matterlabs/zksync-build-base:${TAG}-${arch}") + done + + docker buildx imagetools create --tag "${multiarch_tag}" "${individual_images[@]}" + done + + - name: Create and push multi-arch manifests for GitHub Container Registry + shell: bash + run: | + images=("zksync-build-base") + archs=("amd64" "arm64") + + for img in "${images[@]}"; do + multiarch_tag="ghcr.io/${{ github.repository_owner }}/zksync-build-base:latest" + individual_images=() + + for arch in "${archs[@]}"; do + TAG="$IMAGE_TAG_SUFFIX" + docker pull ghcr.io/${{ github.repository_owner }}/zksync-build-base:${TAG}-${arch} --platform linux/${arch} + individual_images+=("ghcr.io/${{ github.repository_owner }}/zksync-build-base:${TAG}-${arch}") + done + + docker buildx imagetools create --tag "${multiarch_tag}" "${individual_images[@]}" + done + + - name: Create and push multi-arch manifests for Google Artifact Registry + shell: bash + run: | + images=("zksync-build-base") + archs=("amd64" "arm64") + + for img in "${images[@]}"; do + multiarch_tag="us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/zksync-build-base:latest" + individual_images=() + + for arch in "${archs[@]}"; do + TAG="$IMAGE_TAG_SUFFIX" + docker pull us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/zksync-build-base:${TAG}-${arch} --platform linux/${arch} + individual_images+=("us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/zksync-build-base:${TAG}-${arch}") + done + + docker buildx imagetools create --tag "${multiarch_tag}" "${individual_images[@]}" + done diff --git a/.github/workflows/build-core-template.yml b/.github/workflows/build-core-template.yml index dc46c4ba95e..deaf087cd3e 100644
--- a/.github/workflows/build-core-template.yml +++ b/.github/workflows/build-core-template.yml @@ -72,7 +72,7 @@ jobs: echo "No tag found on all pages." echo "BUILD_CONTRACTS=true" >> "$GITHUB_ENV" # TODO Remove it when we migrate to foundry inside contracts repository - mkdir -p contracts/l1-contracts/artifacts/ + mkdir -p contracts/l1-contracts/artifacts/ exit 0 fi filtered_tag=$(jq -r --arg commit_sha "$commit_sha" 'map(select(.commit.sha == $commit_sha)) | .[].name' <<<"$tags") diff --git a/.github/workflows/build-docker-from-tag.yml b/.github/workflows/build-docker-from-tag.yml index 29d26a713d8..206e15bd195 100644 --- a/.github/workflows/build-docker-from-tag.yml +++ b/.github/workflows/build-docker-from-tag.yml @@ -108,10 +108,10 @@ jobs: DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }} DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} - build-gar-prover-fri-gpu: + build-gar-prover-fri-gpu-and-circuit-prover-gpu-gar: name: Build GAR prover FRI GPU needs: [ setup, build-push-prover-images ] - uses: ./.github/workflows/build-prover-fri-gpu-gar.yml + uses: ./.github/workflows/build-prover-fri-gpu-gar-and-circuit-prover-gpu-gar.yml if: contains(github.ref_name, 'prover') with: setup_keys_id: ${{ needs.setup.outputs.prover_fri_gpu_key_id }} diff --git a/.github/workflows/build-prover-fri-gpu-gar.yml b/.github/workflows/build-prover-fri-gpu-gar-and-circuit-prover-gpu-gar.yml similarity index 64% rename from .github/workflows/build-prover-fri-gpu-gar.yml rename to .github/workflows/build-prover-fri-gpu-gar-and-circuit-prover-gpu-gar.yml index c0ea060b07e..b92fb8e8111 100644 --- a/.github/workflows/build-prover-fri-gpu-gar.yml +++ b/.github/workflows/build-prover-fri-gpu-gar-and-circuit-prover-gpu-gar.yml @@ -1,4 +1,4 @@ -name: Build Prover FRI GPU with builtin setup data +name: Build Prover FRI GPU & Circuit Prover GPU with builtin setup data on: workflow_call: @@ -17,7 +17,7 @@ on: type: string jobs: - build-gar-prover-fri-gpu: + build: name: Build prover FRI GPU GAR runs-on: [matterlabs-ci-runner-high-performance] steps: @@ -28,6 +28,7 @@ jobs: - name: Download Setup data run: | gsutil -m rsync -r gs://matterlabs-setup-data-us/${{ inputs.setup_keys_id }} docker/prover-gpu-fri-gar + cp -v docker/prover-gpu-fri-gar/*.bin docker/circuit-prover-gpu-gar/ - name: Login to us-central1 GAR run: | @@ -39,7 +40,15 @@ jobs: - name: Set up Docker Buildx uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 - - name: Build and push + - name: Login to Asia GAR + run: | + gcloud auth print-access-token --lifetime=7200 --impersonate-service-account=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com | docker login -u oauth2accesstoken --password-stdin https://asia-docker.pkg.dev + + - name: Login to Europe GAR + run: | + gcloud auth print-access-token --lifetime=7200 --impersonate-service-account=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com | docker login -u oauth2accesstoken --password-stdin https://europe-docker.pkg.dev + + - name: Build and push prover-gpu-fri-gar uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0 with: context: docker/prover-gpu-fri-gar @@ -49,22 +58,36 @@ jobs: tags: | us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/prover-fri-gpu-gar:2.0-${{ inputs.protocol_version }}-${{ inputs.image_tag_suffix }} - - name: Login to Asia GAR + - name: Build and push prover-gpu-fri-gar to Asia GAR run: | - gcloud auth print-access-token --lifetime=7200 
--impersonate-service-account=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com | docker login -u oauth2accesstoken --password-stdin https://asia-docker.pkg.dev + docker buildx imagetools create \ + --tag asia-docker.pkg.dev/matterlabs-infra/matterlabs-docker/prover-fri-gpu-gar:2.0-${{ inputs.protocol_version }}-${{ inputs.image_tag_suffix }} \ + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/prover-fri-gpu-gar:2.0-${{ inputs.protocol_version }}-${{ inputs.image_tag_suffix }} - - name: Build and push to Asia GAR + - name: Build and push prover-gpu-fri-gar to Europe GAR run: | docker buildx imagetools create \ - --tag asia-docker.pkg.dev/matterlabs-infra/matterlabs-docker/prover-fri-gpu-gar:2.0-${{ inputs.protocol_version }}-${{ inputs.image_tag_suffix }} \ + --tag europe-docker.pkg.dev/matterlabs-infra/matterlabs-docker/prover-fri-gpu-gar:2.0-${{ inputs.protocol_version }}-${{ inputs.image_tag_suffix }} \ us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/prover-fri-gpu-gar:2.0-${{ inputs.protocol_version }}-${{ inputs.image_tag_suffix }} - - name: Login to Europe GAR + - name: Build and push circuit-prover-gpu-gar + uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0 + with: + context: docker/circuit-prover-gpu-gar + build-args: | + PROVER_IMAGE=${{ inputs.protocol_version }}-${{ inputs.image_tag_suffix }} + push: true + tags: | + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/circuit-prover-gpu-gar:2.0-${{ inputs.protocol_version }}-${{ inputs.image_tag_suffix }} + + - name: Build and push circuit-prover-gpu-gar to Asia GAR run: | - gcloud auth print-access-token --lifetime=7200 --impersonate-service-account=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com | docker login -u oauth2accesstoken --password-stdin https://europe-docker.pkg.dev + docker buildx imagetools create \ + --tag asia-docker.pkg.dev/matterlabs-infra/matterlabs-docker/circuit-prover-gpu-gar:2.0-${{ inputs.protocol_version }}-${{ inputs.image_tag_suffix }} \ + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/circuit-prover-gpu-gar:2.0-${{ inputs.protocol_version }}-${{ inputs.image_tag_suffix }} - - name: Build and push to Europe GAR + - name: Build and push circuit-prover-gpu-gar to Europe GAR run: | docker buildx imagetools create \ - --tag europe-docker.pkg.dev/matterlabs-infra/matterlabs-docker/prover-fri-gpu-gar:2.0-${{ inputs.protocol_version }}-${{ inputs.image_tag_suffix }} \ - us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/prover-fri-gpu-gar:2.0-${{ inputs.protocol_version }}-${{ inputs.image_tag_suffix }} + --tag europe-docker.pkg.dev/matterlabs-infra/matterlabs-docker/circuit-prover-gpu-gar:2.0-${{ inputs.protocol_version }}-${{ inputs.image_tag_suffix }} \ + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/circuit-prover-gpu-gar:2.0-${{ inputs.protocol_version }}-${{ inputs.image_tag_suffix }} diff --git a/.github/workflows/build-prover-template.yml b/.github/workflows/build-prover-template.yml index 4f3cad7f1d0..d6ec61114c7 100644 --- a/.github/workflows/build-prover-template.yml +++ b/.github/workflows/build-prover-template.yml @@ -52,6 +52,7 @@ jobs: - witness-generator - prover-gpu-fri - witness-vector-generator + - circuit-prover-gpu - prover-fri-gateway - prover-job-monitor - proof-fri-gpu-compressor diff --git a/.github/workflows/build-runtime-base.yml b/.github/workflows/build-runtime-base.yml new file mode 100644 index 00000000000..eaec05bc6bc --- /dev/null +++ b/.github/workflows/build-runtime-base.yml @@ -0,0 +1,66 @@ +name: Build 
zksync-runtime-base Docker image +on: + workflow_dispatch: + inputs: + repo_ref: + description: "git reference of the zksync-era to build" + required: true + default: main +jobs: + build-images: + name: Build and Push Docker Images + runs-on: matterlabs-ci-runner-high-performance + outputs: + image_tag_sha: ${{ steps.get-sha.outputs.image_tag_sha }} + # Needed to push to GitHub Package Registry + permissions: + packages: write + contents: read + env: + REPO_REF: ${{ github.event.inputs.repo_ref }} + strategy: + matrix: + name: [ runtime-base ] + image_name: [ zksync-runtime-base ] + + steps: + - uses: actions/checkout@6d193bf28034eafb982f37bd894289fe649468fc # v4.1.7 + with: + submodules: "recursive" + + - name: Login to google container registry + run: | + gcloud auth configure-docker us-docker.pkg.dev -q + + - name: Login to GitHub Container Registry + uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Get tag + id: get-sha + run: | + echo IMAGE_TAG_SHA=$(git rev-parse --short HEAD) >> $GITHUB_ENV + echo image_tag_sha=$(git rev-parse --short HEAD) >> $GITHUB_OUTPUT + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 + + - name: Set up QEMU + uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf # v3.2.0 + + - name: Build and push + uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0 + with: + push: true + context: . + platforms: arm64, amd64 + file: docker/${{ matrix.name }}/Dockerfile + labels: | + org.opencontainers.image.source=https://github.com/matter-labs/zksync-era + org.opencontainers.image.licenses="MIT OR Apache-2.0" + tags: | + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.image_name }}:latest + ghcr.io/${{ github.repository_owner }}/${{ matrix.image_name }}:latest diff --git a/.github/workflows/build-witness-generator-template.yml b/.github/workflows/build-witness-generator-template.yml index 9c29297460d..33d78b3cf2f 100644 --- a/.github/workflows/build-witness-generator-template.yml +++ b/.github/workflows/build-witness-generator-template.yml @@ -51,6 +51,7 @@ jobs: ERA_BELLMAN_CUDA_RELEASE: ${{ inputs.ERA_BELLMAN_CUDA_RELEASE }} CUDA_ARCH: ${{ inputs.CUDA_ARCH }} WITNESS_GENERATOR_RUST_FLAGS: ${{ inputs.WITNESS_GENERATOR_RUST_FLAGS }} + ZKSYNC_USE_CUDA_STUBS: true runs-on: [ matterlabs-ci-runner-c3d ] strategy: matrix: diff --git a/.github/workflows/ci-common-reusable.yml b/.github/workflows/ci-common-reusable.yml index 2e5d36feebf..2f51229aeaf 100644 --- a/.github/workflows/ci-common-reusable.yml +++ b/.github/workflows/ci-common-reusable.yml @@ -22,6 +22,7 @@ jobs: echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env echo "RUSTC_WRAPPER=sccache" >> .env + echo "ZKSYNC_USE_CUDA_STUBS=true" >> .env - name: Start services run: | diff --git a/.github/workflows/ci-core-lint-reusable.yml b/.github/workflows/ci-core-lint-reusable.yml index 404f0966b40..6d0785fe46f 100644 --- a/.github/workflows/ci-core-lint-reusable.yml +++ b/.github/workflows/ci-core-lint-reusable.yml @@ -19,6 +19,7 @@ jobs: echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env echo "RUSTC_WRAPPER=sccache" >> .env + echo "ZKSYNC_USE_CUDA_STUBS=true" >> .env echo
"prover_url=postgres://postgres:notsecurepassword@localhost:5432/zksync_local_prover" >> $GITHUB_ENV echo "core_url=postgres://postgres:notsecurepassword@localhost:5432/zksync_local" >> $GITHUB_ENV diff --git a/.github/workflows/ci-core-reusable.yml b/.github/workflows/ci-core-reusable.yml index 53ff6439829..d03e44f8bca 100644 --- a/.github/workflows/ci-core-reusable.yml +++ b/.github/workflows/ci-core-reusable.yml @@ -185,6 +185,7 @@ jobs: SNAPSHOT_RECOVERY_LOGS_DIR=logs/snapshot_recovery/ GENESIS_RECOVERY_LOGS_DIR=logs/genesis_recovery/ EXTERNAL_NODE_LOGS_DIR=logs/external_node + FEES_LOGS_DIR=logs/fees REVERT_LOGS_DIR=logs/revert mkdir -p $SERVER_LOGS_DIR @@ -193,6 +194,7 @@ jobs: mkdir -p $SNAPSHOT_RECOVERY_LOGS_DIR mkdir -p $GENESIS_RECOVERY_LOGS_DIR mkdir -p $EXTERNAL_NODE_LOGS_DIR + mkdir -p $FEES_LOGS_DIR mkdir -p $REVERT_LOGS_DIR echo "SERVER_LOGS_DIR=$SERVER_LOGS_DIR" >> $GITHUB_ENV @@ -201,6 +203,7 @@ jobs: echo "SNAPSHOT_RECOVERY_LOGS_DIR=$SNAPSHOT_RECOVERY_LOGS_DIR" >> $GITHUB_ENV echo "GENESIS_RECOVERY_LOGS_DIR=$GENESIS_RECOVERY_LOGS_DIR" >> $GITHUB_ENV echo "EXTERNAL_NODE_LOGS_DIR=$EXTERNAL_NODE_LOGS_DIR" >> $GITHUB_ENV + echo "FEES_LOGS_DIR=$FEES_LOGS_DIR" >> $GITHUB_ENV echo "REVERT_LOGS_DIR=$REVERT_LOGS_DIR" >> $GITHUB_ENV - name: Initialize ecosystem @@ -213,16 +216,14 @@ jobs: --deploy-ecosystem --l1-rpc-url=http://localhost:8545 \ --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ --server-db-name=zksync_server_localhost_era \ - --prover-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - --prover-db-name=zksync_prover_localhost_era \ --ignore-prerequisites --verbose \ --observability=false - name: Read Custom Token address and set as environment variable run: | - address=$(awk -F": " '/tokens:/ {found_tokens=1} found_tokens && /DAI:/ {found_dai=1} found_dai && /address:/ {print $2; exit}' ./configs/erc20.yaml) - echo "address=$address" - echo "address=$address" >> $GITHUB_ENV + CUSTOM_TOKEN_ADDRESS=$(awk -F": " '/tokens:/ {found_tokens=1} found_tokens && /DAI:/ {found_dai=1} found_dai && /address:/ {print $2; exit}' ./configs/erc20.yaml) + echo "CUSTOM_TOKEN_ADDRESS=$CUSTOM_TOKEN_ADDRESS" + echo "CUSTOM_TOKEN_ADDRESS=$CUSTOM_TOKEN_ADDRESS" >> $GITHUB_ENV - name: Create and initialize Validium chain run: | @@ -243,9 +244,6 @@ jobs: --l1-rpc-url=http://localhost:8545 \ --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ --server-db-name=zksync_server_localhost_validium \ - --prover-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - --prover-db-name=zksync_prover_localhost_validium \ - --port-offset 2000 \ --chain validium - name: Create and initialize chain with Custom Token @@ -256,7 +254,7 @@ jobs: --prover-mode no-proofs \ --wallet-creation localhost \ --l1-batch-commit-data-generator-mode rollup \ - --base-token-address ${{ env.address }} \ + --base-token-address ${{ env.CUSTOM_TOKEN_ADDRESS }} \ --base-token-price-nominator 3 \ --base-token-price-denominator 2 \ --set-as-default false \ @@ -267,9 +265,6 @@ jobs: --l1-rpc-url=http://localhost:8545 \ --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ --server-db-name=zksync_server_localhost_custom_token \ - --prover-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - --prover-db-name=zksync_prover_localhost_custom_token \ - --port-offset 3000 \ --chain custom_token - name: Create and register chain with transactions signed "offline" @@ -315,7 +310,7 @@ jobs: --prover-mode no-proofs \ --wallet-creation localhost \ 
--l1-batch-commit-data-generator-mode validium \ - --base-token-address ${{ env.address }} \ + --base-token-address ${{ env.CUSTOM_TOKEN_ADDRESS }} \ --base-token-price-nominator 3 \ --base-token-price-denominator 2 \ --set-as-default false \ @@ -326,9 +321,6 @@ jobs: --l1-rpc-url=http://localhost:8545 \ --server-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ --server-db-name=zksync_server_localhost_consensus \ - --prover-db-url=postgres://postgres:notsecurepassword@localhost:5432 \ - --prover-db-name=zksync_prover_localhost_consensus \ - --port-offset 4000 \ --chain consensus - name: Build test dependencies @@ -351,6 +343,10 @@ jobs: ci_run sleep 5 + - name: Setup attester committee for the consensus chain + run: | + ci_run zk_inception consensus set-attester-committee --chain consensus &> ${{ env.INTEGRATION_TESTS_LOGS_DIR }}/consensus.log + - name: Run integration tests run: | PASSED_ENV_VARS="RUN_CONTRACT_VERIFICATION_TEST" \ @@ -454,6 +450,27 @@ jobs: wait $PID3 wait $PID4 + - name: Fee projection tests + run: | + ci_run killall -INT zksync_server || true + + ci_run zk_supervisor test fees --no-deps --no-kill --chain era &> ${{ env.FEES_LOGS_DIR }}/era.log & + PID1=$! + + ci_run zk_supervisor test fees --no-deps --no-kill --chain validium &> ${{ env.FEES_LOGS_DIR }}/validium.log & + PID2=$! + + ci_run zk_supervisor test fees --no-deps --no-kill --chain custom_token &> ${{ env.FEES_LOGS_DIR }}/custom_token.log & + PID3=$! + + ci_run zk_supervisor test fees --no-deps --no-kill --chain consensus &> ${{ env.FEES_LOGS_DIR }}/consensus.log & + PID4=$! + + wait $PID1 + wait $PID2 + wait $PID3 + wait $PID4 + - name: Run revert tests run: | ci_run killall -INT zksync_server || true diff --git a/.github/workflows/ci-prover-reusable.yml b/.github/workflows/ci-prover-reusable.yml index 367a86c5f40..3f842b23488 100644 --- a/.github/workflows/ci-prover-reusable.yml +++ b/.github/workflows/ci-prover-reusable.yml @@ -57,6 +57,7 @@ jobs: echo "SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com" >> .env echo "SCCACHE_GCS_RW_MODE=READ_WRITE" >> .env echo "RUSTC_WRAPPER=sccache" >> .env + echo "ZKSYNC_USE_CUDA_STUBS=true" >> .env - name: Start services run: | diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 291f9237ac5..0a27a719aeb 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -111,7 +111,7 @@ jobs: name: Build core images needs: changed_files if: ${{ (needs.changed_files.outputs.core == 'true' || needs.changed_files.outputs.all == 'true') && !contains(github.ref_name, 'release-please--branches') }} - uses: ./.github/workflows/build-core-template.yml + uses: ./.github/workflows/new-build-core-template.yml with: image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }} action: "build" @@ -136,7 +136,7 @@ jobs: name: Build contract verifier needs: changed_files if: ${{ (needs.changed_files.outputs.core == 'true' || needs.changed_files.outputs.all == 'true') && !contains(github.ref_name, 'release-please--branches') }} - uses: ./.github/workflows/build-contract-verifier-template.yml + uses: ./.github/workflows/new-build-contract-verifier-template.yml with: image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }} action: "build" @@ -148,7 +148,7 @@ jobs: name: Build prover images needs: changed_files if: ${{ (needs.changed_files.outputs.prover == 'true' || needs.changed_files.outputs.all == 'true') && !contains(github.ref_name, 'release-please--branches') }} - uses: 
./.github/workflows/build-prover-template.yml + uses: ./.github/workflows/new-build-prover-template.yml with: image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }} action: "build" @@ -162,12 +162,10 @@ jobs: name: Build prover images with avx512 instructions needs: changed_files if: ${{ (needs.changed_files.outputs.prover == 'true' || needs.changed_files.outputs.all == 'true') && !contains(github.ref_name, 'release-please--branches') }} - uses: ./.github/workflows/build-witness-generator-template.yml + uses: ./.github/workflows/new-build-witness-generator-template.yml with: image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }}-avx512 action: "build" - ERA_BELLMAN_CUDA_RELEASE: ${{ vars.ERA_BELLMAN_CUDA_RELEASE }} - is_pr_from_fork: ${{ github.event.pull_request.head.repo.fork == true }} WITNESS_GENERATOR_RUST_FLAGS: "-Ctarget_feature=+avx512bw,+avx512cd,+avx512dq,+avx512f,+avx512vl" secrets: DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }} diff --git a/.github/workflows/new-build-contract-verifier-template.yml b/.github/workflows/new-build-contract-verifier-template.yml new file mode 100644 index 00000000000..42791eab666 --- /dev/null +++ b/.github/workflows/new-build-contract-verifier-template.yml @@ -0,0 +1,271 @@ +name: Build contract verifier +on: + workflow_call: + secrets: + DOCKERHUB_USER: + description: "DOCKERHUB_USER" + required: true + DOCKERHUB_TOKEN: + description: "DOCKERHUB_TOKEN" + required: true + inputs: + image_tag_suffix: + description: "Optional suffix to override tag name generation" + type: string + required: false + compilers: + description: 'JSON of required compilers and their versions' + type: string + required: false + default: '[{ "zksolc": ["1.3.14", "1.3.16", "1.3.17", "1.3.1", "1.3.7", "1.3.18", "1.3.19", "1.3.21"] } , { "zkvyper": ["1.3.13"] }]' + action: + type: string + default: non-push + required: false + +jobs: + prepare-contracts: + name: Prepare contracts + runs-on: matterlabs-ci-runner-high-performance + steps: + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + submodules: "recursive" + + - name: Prepare ENV + shell: bash + run: | + echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV + echo CI=1 >> $GITHUB_ENV + echo $(pwd)/bin >> $GITHUB_PATH + echo CI=1 >> .env + echo IN_DOCKER=1 >> .env + + - name: Download contracts + shell: bash + run: | + commit_sha=$(git submodule status contracts | awk '{print $1}' | tr -d '-') + page=1 + filtered_tag="" + while [ true ]; do + echo "Page: $page" + tags=$(run_retried curl -s -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" -H "Accept: application/vnd.github+json" \ + "https://api.github.com/repos/matter-labs/era-contracts/tags?per_page=100&page=${page}" | jq .) + if [ $(jq length <<<"$tags") -eq 0 ]; then + echo "No tag found on all pages." + echo "BUILD_CONTRACTS=true" >> "$GITHUB_ENV" + exit 0 + fi + filtered_tag=$(jq -r --arg commit_sha "$commit_sha" 'map(select(.commit.sha == $commit_sha)) | .[].name' <<<"$tags") + if [[ ! 
-z "$filtered_tag" ]]; then + echo "BUILD_CONTRACTS=false" >> "$GITHUB_ENV" + break + fi + ((page++)) + done + echo "Contracts tag is: ${filtered_tag}" + mkdir -p ./contracts + run_retried curl -s -LO https://github.com/matter-labs/era-contracts/releases/download/${filtered_tag}/l1-contracts.tar.gz + run_retried curl -s -LO https://github.com/matter-labs/era-contracts/releases/download/${filtered_tag}/l2-contracts.tar.gz + run_retried curl -s -LO https://github.com/matter-labs/era-contracts/releases/download/${filtered_tag}/system-contracts.tar.gz + tar -C ./contracts -zxf l1-contracts.tar.gz + tar -C ./contracts -zxf l2-contracts.tar.gz + tar -C ./contracts -zxf system-contracts.tar.gz + + - name: Install Apt dependencies + shell: bash + run: | + sudo apt-get update && sudo apt-get install -y libssl-dev pkg-config + + - name: Install Node + uses: actions/setup-node@1e60f620b9541d16bece96c5465dc8ee9832be0b # v4.0.3 + with: + node-version: 20 + cache: 'npm' + + - name: Install Yarn + run: npm install -g yarn + + - name: Setup rust + uses: actions-rust-lang/setup-rust-toolchain@1fbea72663f6d4c03efaab13560c8a24cfd2a7cc # v1.9.0 + with: + toolchain: nightly-2024-08-01 + + - name: Install cargo-nextest from crates.io + uses: baptiste0928/cargo-install@904927dbe77864e0f2281519fe9d5bd097a220b3 # v3.1.1 + with: + crate: cargo-nextest + + - name: Install sqlx-cli from crates.io + uses: baptiste0928/cargo-install@904927dbe77864e0f2281519fe9d5bd097a220b3 # v3.1.1 + with: + crate: sqlx-cli + tag: 0.8.1 + + - name: Install Foundry + uses: foundry-rs/foundry-toolchain@8f1998e9878d786675189ef566a2e4bf24869773 # v1.2.0 + + - name: Pre-download compilers + shell: bash + run: | + # Download needed versions of vyper compiler + # Not sanitized due to unconventional path and tags + mkdir -p ./hardhat-nodejs/compilers-v2/vyper/linux + wget -nv -O ./hardhat-nodejs/compilers-v2/vyper/linux/0.3.10 https://github.com/vyperlang/vyper/releases/download/v0.3.10/vyper.0.3.10+commit.91361694.linux + wget -nv -O ./hardhat-nodejs/compilers-v2/vyper/linux/0.3.3 https://github.com/vyperlang/vyper/releases/download/v0.3.3/vyper.0.3.3+commit.48e326f0.linux + chmod +x ./hardhat-nodejs/compilers-v2/vyper/linux/0.3.10 + chmod +x ./hardhat-nodejs/compilers-v2/vyper/linux/0.3.3 + + COMPILERS_JSON='${{ inputs.compilers }}' + echo "$COMPILERS_JSON" | jq -r '.[] | to_entries[] | .key as $compiler | .value[] | "\(.),\($compiler)"' | while IFS=, read -r version compiler; do + mkdir -p "./hardhat-nodejs/compilers-v2/$compiler" + wget -nv -O "./hardhat-nodejs/compilers-v2/$compiler/${compiler}-v${version}" "https://github.com/matter-labs/${compiler}-bin/releases/download/v${version}/${compiler}-linux-amd64-musl-v${version}" + chmod +x "./hardhat-nodejs/compilers-v2/$compiler/${compiler}-v${version}" + done + + - name: init + shell: bash + run: | + mkdir -p ./volumes/postgres + docker compose up -d postgres + zkt || true + + - name: build contracts + shell: bash + run: | + cp etc/tokens/{test,localhost}.json + zk_supervisor contracts + + - name: Upload contracts + uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0 + with: + name: contacts-verifier + path: | + ./contracts + + build-images: + name: Build and Push Docker Images + needs: prepare-contracts + runs-on: ${{ fromJSON('["matterlabs-ci-runner-high-performance", "matterlabs-ci-runner-arm"]')[contains(matrix.platforms, 'arm')] }} + strategy: + matrix: + components: + - contract-verifier + - verified-sources-fetcher + platforms: + - linux/amd64 + + steps: + 
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + submodules: "recursive" + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 + + - name: Setup env + shell: bash + run: | + echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV + echo CI=1 >> $GITHUB_ENV + echo $(pwd)/bin >> $GITHUB_PATH + echo CI=1 >> .env + echo IN_DOCKER=1 >> .env + + - name: Download setup key + shell: bash + run: | + run_retried curl -LO https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2\^26.key + + - name: Set env vars + shell: bash + run: | + echo PLATFORM=$(echo ${{ matrix.platforms }} | tr '/' '-') >> $GITHUB_ENV + echo IMAGE_TAG_SHA=$(git rev-parse --short HEAD) >> $GITHUB_ENV + # Support for custom tag suffix + if [ -n "${{ inputs.image_tag_suffix }}" ]; then + echo IMAGE_TAG_SHA_TS="${{ inputs.image_tag_suffix }}" >> $GITHUB_ENV + else + echo IMAGE_TAG_SHA_TS=$(git rev-parse --short HEAD)-$(date +%s) >> $GITHUB_ENV + fi + + - name: Download contracts + uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 + with: + name: contracts-verifier + path: | + ./contracts + + - name: login to Docker registries + if: ${{ inputs.action == 'push' }} + shell: bash + run: | + docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }} + gcloud auth configure-docker us-docker.pkg.dev -q + + - name: Build and push + uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0 + with: + context: . + push: ${{ inputs.action == 'push' }} + file: docker/${{ matrix.components }}/Dockerfile + build-args: | + SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage + SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com + SCCACHE_GCS_RW_MODE=READ_WRITE + RUSTC_WRAPPER=sccache + tags: | + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:latest + matterlabs/${{ matrix.components }}:latest + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:latest2.0 + matterlabs/${{ matrix.components }}:latest2.0 + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA }} + matterlabs/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA }} + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA }} + matterlabs/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA }} + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }} + matterlabs/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }} + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }} + matterlabs/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }} + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }}-${{ env.PLATFORM }} + matterlabs/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }}-${{ env.PLATFORM }} + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }}-${{ env.PLATFORM }} + matterlabs/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }}-${{ env.PLATFORM }} + + create_manifest: + name: Create release manifest + runs-on: matterlabs-ci-runner + needs: build-images + if: ${{ inputs.action == 'push' }} + strategy: + matrix: + component: + - name: contract-verifier + platform: linux/amd64 + - name: verified-sources-fetcher + platform: linux/amd64 + env: + IMAGE_TAG_SUFFIX:
${{ inputs.image_tag_suffix }} + steps: + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 + + - name: login to Docker registries + run: | + docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }} + gcloud auth configure-docker us-docker.pkg.dev -q + + - name: Create Docker manifest + run: | + docker_repositories=("matterlabs/${{ matrix.component.name }}" "us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component.name }}") + platforms=${{ matrix.component.platform }} + for repo in "${docker_repositories[@]}"; do + platform_tags="" + for platform in ${platforms//,/ }; do + platform=$(echo $platform | tr '/' '-') + platform_tags+=" --amend ${repo}:${IMAGE_TAG_SUFFIX}-${platform}" + done + for manifest in "${repo}:${IMAGE_TAG_SUFFIX}" "${repo}:2.0-${IMAGE_TAG_SUFFIX}" "${repo}:latest" "${repo}:latest2.0"; do + docker manifest create ${manifest} ${platform_tags} + docker manifest push ${manifest} + done + done diff --git a/.github/workflows/new-build-core-template.yml b/.github/workflows/new-build-core-template.yml new file mode 100644 index 00000000000..fba6a68b8ee --- /dev/null +++ b/.github/workflows/new-build-core-template.yml @@ -0,0 +1,287 @@ +name: Build Core images +on: + workflow_call: + secrets: + DOCKERHUB_USER: + description: "DOCKERHUB_USER" + required: true + DOCKERHUB_TOKEN: + description: "DOCKERHUB_TOKEN" + required: true + inputs: + image_tag_suffix: + description: "Optional suffix to override tag name generation" + type: string + required: false + compilers: + description: 'JSON of required compilers and their versions' + type: string + required: false + default: '[{ "zksolc": ["1.3.14", "1.3.16", "1.3.17", "1.3.1", "1.3.7", "1.3.18", "1.3.19", "1.3.21"] } , { "zkvyper": ["1.3.13"] }]' + en_alpha_release: + description: 'Flag that determines if EN release should be marked as alpha' + type: boolean + required: false + default: false + action: + type: string + required: false + default: "do nothing" + +jobs: + prepare-contracts: + name: Prepare contracts + runs-on: matterlabs-ci-runner-high-performance + steps: + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + submodules: "recursive" + + - name: Prepare ENV + shell: bash + run: | + echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV + echo CI=1 >> $GITHUB_ENV + echo $(pwd)/bin >> $GITHUB_PATH + echo CI=1 >> .env + echo IN_DOCKER=1 >> .env + + - name: Download contracts + shell: bash + run: | + commit_sha=$(git submodule status contracts | awk '{print $1}' | tr -d '-') + page=1 + filtered_tag="" + while [ true ]; do + echo "Page: $page" + tags=$(run_retried curl -s -H "Authorization: Bearer ${{ secrets.GITHUB_TOKEN }}" -H "Accept: application/vnd.github+json" \ + "https://api.github.com/repos/matter-labs/era-contracts/tags?per_page=100&page=${page}" | jq .) + if [ $(jq length <<<"$tags") -eq 0 ]; then + echo "No tag found on all pages." + echo "BUILD_CONTRACTS=true" >> "$GITHUB_ENV" + exit 0 + fi + filtered_tag=$(jq -r --arg commit_sha "$commit_sha" 'map(select(.commit.sha == $commit_sha)) | .[].name' <<<"$tags") + if [[ !
-z "$filtered_tag" ]]; then + echo "BUILD_CONTRACTS=false" >> "$GITHUB_ENV" + break + fi + ((page++)) + done + echo "Contracts tag is: ${filtered_tag}" + mkdir -p ./contracts + run_retried curl -s -LO https://github.com/matter-labs/era-contracts/releases/download/${filtered_tag}/l1-contracts.tar.gz + run_retried curl -s -LO https://github.com/matter-labs/era-contracts/releases/download/${filtered_tag}/l2-contracts.tar.gz + run_retried curl -s -LO https://github.com/matter-labs/era-contracts/releases/download/${filtered_tag}/system-contracts.tar.gz + tar -C ./contracts -zxf l1-contracts.tar.gz + tar -C ./contracts -zxf l2-contracts.tar.gz + tar -C ./contracts -zxf system-contracts.tar.gz + + - name: Install Apt dependencies + shell: bash + run: | + sudo apt-get update && sudo apt-get install -y libssl-dev pkg-config + + - name: Install Node + uses: actions/setup-node@1e60f620b9541d16bece96c5465dc8ee9832be0b # v4.0.3 + with: + node-version: 20 + cache: 'npm' + + - name: Install Yarn + run: npm install -g yarn + + - name: Setup rust + uses: actions-rust-lang/setup-rust-toolchain@1fbea72663f6d4c03efaab13560c8a24cfd2a7cc # v1.9.0 + with: + toolchain: nightly-2024-08-01 + + - name: Install cargo-nextest from crates.io + uses: baptiste0928/cargo-install@904927dbe77864e0f2281519fe9d5bd097a220b3 # v3.1.1 + with: + crate: cargo-nextest + + - name: Install sqlx-cli from crates.io + uses: baptiste0928/cargo-install@904927dbe77864e0f2281519fe9d5bd097a220b3 # v3.1.1 + with: + crate: sqlx-cli + tag: 0.8.1 + + - name: Install Foundry + uses: foundry-rs/foundry-toolchain@8f1998e9878d786675189ef566a2e4bf24869773 # v1.2.0 + + - name: Pre-download compilers + shell: bash + run: | + # Download needed versions of vyper compiler + # Not sanitized due to unconventional path and tags + mkdir -p ./hardhat-nodejs/compilers-v2/vyper/linux + wget -nv -O ./hardhat-nodejs/compilers-v2/vyper/linux/0.3.10 https://github.com/vyperlang/vyper/releases/download/v0.3.10/vyper.0.3.10+commit.91361694.linux + wget -nv -O ./hardhat-nodejs/compilers-v2/vyper/linux/0.3.3 https://github.com/vyperlang/vyper/releases/download/v0.3.3/vyper.0.3.3+commit.48e326f0.linux + chmod +x ./hardhat-nodejs/compilers-v2/vyper/linux/0.3.10 + chmod +x ./hardhat-nodejs/compilers-v2/vyper/linux/0.3.3 + + COMPILERS_JSON='${{ inputs.compilers }}' + echo "$COMPILERS_JSON" | jq -r '.[] | to_entries[] | .key as $compiler | .value[] | "\(.),\($compiler)"' | while IFS=, read -r version compiler; do + mkdir -p "./hardhat-nodejs/compilers-v2/$compiler" + wget -nv -O "./hardhat-nodejs/compilers-v2/$compiler/${compiler}-v${version}" "https://github.com/matter-labs/${compiler}-bin/releases/download/v${version}/${compiler}-linux-amd64-musl-v${version}" + chmod +x "./hardhat-nodejs/compilers-v2/$compiler/${compiler}-v${version}" + done + + - name: init + shell: bash + run: | + mkdir -p ./volumes/postgres + docker compose up -d postgres + zkt || true + + - name: build contracts + shell: bash + run: | + cp etc/tokens/{test,localhost}.json + zk_supervisor contracts + + - name: Upload contracts + uses: actions/upload-artifact@50769540e7f4bd5e21e526ee35c689e35e0d6874 # v4.4.0 + with: + name: contacts + path: | + ./contracts + + build-images: + name: Build and Push Docker Images + needs: prepare-contracts + env: + IMAGE_TAG_SUFFIX: ${{ inputs.image_tag_suffix }}${{ (inputs.en_alpha_release && matrix.components == 'external-node') && '-alpha' || '' }} + runs-on: ${{ fromJSON('["matterlabs-ci-runner-high-performance", "matterlabs-ci-runner-arm"]')[contains(matrix.platforms, 
'arm')] }} + strategy: + matrix: + components: + - server-v2 + - external-node + - snapshots-creator + platforms: + - linux/amd64 + include: + - components: external-node + platforms: linux/arm64 + + steps: + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + submodules: "recursive" + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 + + - name: Setup env + shell: bash + run: | + echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV + echo CI=1 >> $GITHUB_ENV + echo $(pwd)/bin >> $GITHUB_PATH + echo CI=1 >> .env + echo IN_DOCKER=1 >> .env + + - name: Download setup key + shell: bash + run: | + run_retried curl -LO https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2\^26.key + + - name: Set env vars + shell: bash + run: | + echo PLATFORM=$(echo ${{ matrix.platforms }} | tr '/' '-') >> $GITHUB_ENV + echo IMAGE_TAG_SHA=$(git rev-parse --short HEAD) >> $GITHUB_ENV + # Support for custom tag suffix + if [ -n "${{ inputs.image_tag_suffix }}" ]; then + echo IMAGE_TAG_SHA_TS="${{ inputs.image_tag_suffix }}" >> $GITHUB_ENV + else + echo IMAGE_TAG_SHA_TS=$(git rev-parse --short HEAD)-$(date +%s) >> $GITHUB_ENV + fi + + - name: Download contracts + uses: actions/download-artifact@fa0a91b85d4f404e444e00e005971372dc801d16 # v4.1.8 + with: + name: contracts + path: | + ./contracts + + - name: login to Docker registries + if: ${{ inputs.action == 'push' }} + shell: bash + run: | + docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }} + gcloud auth configure-docker us-docker.pkg.dev -q + + - name: Build and push + uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0 + with: + context: . + push: ${{ inputs.action == 'push' }} + file: docker/${{ matrix.components }}/Dockerfile + build-args: | + SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage + SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com + SCCACHE_GCS_RW_MODE=READ_WRITE + RUSTC_WRAPPER=sccache + tags: | + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:latest + matterlabs/${{ matrix.components }}:latest + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:latest2.0 + matterlabs/${{ matrix.components }}:latest2.0 + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA }} + matterlabs/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA }} + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA }} + matterlabs/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA }} + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }} + matterlabs/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }} + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }} + matterlabs/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }} + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }}-${{ env.PLATFORM }} + matterlabs/${{ matrix.components }}:2.0-${{ env.IMAGE_TAG_SHA_TS }}-${{ env.PLATFORM }} + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }}-${{ env.PLATFORM }} + matterlabs/${{ matrix.components }}:${{ env.IMAGE_TAG_SHA_TS }}-${{ env.PLATFORM }} + + create_manifest: + name: Create release manifest + runs-on: matterlabs-ci-runner + needs: build-images + if: ${{
inputs.action == 'push' }} + strategy: + matrix: + component: + - name: server-v2 + platform: linux/amd64 + - name: external-node + platform: linux/amd64,linux/arm64 + - name: snapshots-creator + platform: linux/amd64 + + env: + IMAGE_TAG_SUFFIX: ${{ inputs.image_tag_suffix }}${{ (inputs.en_alpha_release && matrix.component.name == 'external-node') && '-alpha' || '' }} + steps: + - uses: actions/checkout@a5ac7e51b41094c92402da3b24376905380afc29 # v4 + + - name: login to Docker registries + shell: bash + run: | + docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }} + gcloud auth configure-docker us-docker.pkg.dev -q + + - name: Create Docker manifest + shell: bash + run: | + docker_repositories=("matterlabs/${{ matrix.component.name }}" "us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.component.name }}") + platforms=${{ matrix.component.platform }} + for repo in "${docker_repositories[@]}"; do + platform_tags="" + for platform in ${platforms//,/ }; do + platform=$(echo $platform | tr '/' '-') + platform_tags+=" --amend ${repo}:${IMAGE_TAG_SUFFIX}-${platform}" + done + for manifest in "${repo}:${IMAGE_TAG_SUFFIX}" "${repo}:2.0-${IMAGE_TAG_SUFFIX}" "${repo}:latest" "${repo}:latest2.0"; do + docker manifest create ${manifest} ${platform_tags} + docker manifest push ${manifest} + done + done diff --git a/.github/workflows/new-build-prover-template.yml b/.github/workflows/new-build-prover-template.yml new file mode 100644 index 00000000000..60c152213e6 --- /dev/null +++ b/.github/workflows/new-build-prover-template.yml @@ -0,0 +1,198 @@ +name: Build Prover images +on: + workflow_call: + secrets: + DOCKERHUB_USER: + description: "DOCKERHUB_USER" + required: true + DOCKERHUB_TOKEN: + description: "DOCKERHUB_TOKEN" + required: true + inputs: + ERA_BELLMAN_CUDA_RELEASE: + description: "ERA_BELLMAN_CUDA_RELEASE" + type: string + required: true + image_tag_suffix: + description: "Optional suffix to override tag name generation" + type: string + required: false + action: + description: "Action with docker image" + type: string + default: "push" + required: false + is_pr_from_fork: + description: "Indicates whether the workflow is invoked from a PR created from fork" + type: boolean + default: false + required: false + CUDA_ARCH: + description: "CUDA Arch to build" + type: string + default: "89" + required: false + outputs: + protocol_version: + description: "Protocol version of the binary" + value: ${{ jobs.get-protocol-version.outputs.protocol_version }} + +jobs: + get-protocol-version: + name: Get protocol version + runs-on: [ matterlabs-ci-runner-high-performance ] + outputs: + protocol_version: ${{ steps.protocolversion.outputs.protocol_version }} + steps: + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + submodules: "recursive" + + - name: setup-env + run: | + echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV + echo CI=1 >> $GITHUB_ENV + echo $(pwd)/bin >> $GITHUB_PATH + echo CI=1 >> .env + echo IN_DOCKER=1 >> .env + + - name: setup rust + uses: actions-rust-lang/setup-rust-toolchain@1fbea72663f6d4c03efaab13560c8a24cfd2a7cc # v1.9.0 + with: + toolchain: nightly-2024-08-01 + + - name: Prepare sccache-cache env vars + shell: bash + run: | + echo SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage >> $GITHUB_ENV + echo SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com >> $GITHUB_ENV + echo SCCACHE_ERROR_LOG=/tmp/sccache_log.txt >> $GITHUB_ENV + echo SCCACHE_GCS_RW_MODE=READ_WRITE >> $GITHUB_ENV + 
echo RUSTC_WRAPPER=sccache >> $GITHUB_ENV + + - name: protocol-version + id: protocolversion + # TODO: use -C flag, when it will become stable. + shell: bash + run: | + cd prover + cargo build --release --bin prover_version + PPV=$(target/release/prover_version) + echo Protocol version is ${PPV} + echo "protocol_version=${PPV}" >> $GITHUB_OUTPUT + + build-images: + name: Build and Push Docker Images + needs: get-protocol-version + env: + PROTOCOL_VERSION: ${{ needs.get-protocol-version.outputs.protocol_version }} + runs-on: [ matterlabs-ci-runner-high-performance ] + strategy: + matrix: + components: + - witness-generator + - prover-gpu-fri + - witness-vector-generator + - prover-fri-gateway + - prover-job-monitor + - proof-fri-gpu-compressor + steps: + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + submodules: "recursive" + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 + + - name: setup-env + run: | + echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV + echo CI=1 >> $GITHUB_ENV + echo $(pwd)/bin >> $GITHUB_PATH + echo CI=1 >> .env + echo IN_DOCKER=1 >> .env + + - name: Set env vars + shell: bash + run: | + # Support for custom tag suffix + if [ -n "${{ inputs.image_tag_suffix }}" ]; then + echo IMAGE_TAG_SHA_TS="${{ inputs.image_tag_suffix }}" >> $GITHUB_ENV + else + echo IMAGE_TAG_SHA_TS=$(git rev-parse --short HEAD)-$(date +%s) >> $GITHUB_ENV + fi + + - name: download CRS for GPU compressor + if: matrix.components == 'proof-fri-gpu-compressor' + run: | + run_retried curl -LO https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2\^24.key + + # We need to run this only when ERA_BELLMAN_CUDA_RELEASE is not available + # In our case it happens only when PR is created from fork + - name: Wait for runner IP to be not rate-limited against GH API + if: ( inputs.is_pr_from_fork == true && matrix.components == 'proof-fri-gpu-compressor' ) + run: ./.github/scripts/rate_limit_check.sh + + - name: Hack to set env vars inside docker container + shell: bash + run: | + sed -i '/^FROM matterlabs\/zksync-build-base:latest as builder/a ENV SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage\nENV SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com\nENV SCCACHE_GCS_RW_MODE=READ_WRITE\nENV RUSTC_WRAPPER=sccache' ./docker/${{ matrix.components }}/Dockerfile + #TODO: remove AS version =) + sed -i '/^FROM matterlabs\/zksync-build-base:latest AS builder/a ENV SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage\nENV SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com\nENV SCCACHE_GCS_RW_MODE=READ_WRITE\nENV RUSTC_WRAPPER=sccache' ./docker/${{ matrix.components }}/Dockerfile + cat ./docker/${{ matrix.components }}/Dockerfile + + - name: login to Docker registries + if: github.event_name != 'pull_request' && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/')) + shell: bash + run: | + docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }} + gcloud auth configure-docker us-docker.pkg.dev -q + + - name: Build and push + uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0 + with: + context: . 
+ push: ${{ inputs.action == 'push' }} + build-args: | + CUDA_ARCH=${{ inputs.CUDA_ARCH }} + SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage + SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com + SCCACHE_GCS_RW_MODE=READ_WRITE + RUSTC_WRAPPER=sccache + file: docker/${{ matrix.components }}/Dockerfile + tags: | + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.PROTOCOL_VERSION }}-${{ env.IMAGE_TAG_SHA_TS }} + matterlabs/${{ matrix.components }}:2.0-${{ env.PROTOCOL_VERSION }}-${{ env.IMAGE_TAG_SHA_TS }} + + copy-images: + name: Copy images between docker registries + needs: [ build-images, get-protocol-version ] + env: + PROTOCOL_VERSION: ${{ needs.get-protocol-version.outputs.protocol_version }} + runs-on: matterlabs-ci-runner + if: ${{ inputs.action == 'push' }} + strategy: + matrix: + components: + - witness-vector-generator + steps: + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1 + + - name: Login to us-central1 GAR + run: | + gcloud auth print-access-token --lifetime=7200 --impersonate-service-account=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com | docker login -u oauth2accesstoken --password-stdin https://us-docker.pkg.dev + + - name: Login and push to Asia GAR + run: | + gcloud auth print-access-token --lifetime=7200 --impersonate-service-account=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com | docker login -u oauth2accesstoken --password-stdin https://asia-docker.pkg.dev + docker buildx imagetools create \ + --tag asia-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.PROTOCOL_VERSION }}-${{ inputs.image_tag_suffix }} \ + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.PROTOCOL_VERSION }}-${{ inputs.image_tag_suffix }} + + - name: Login and push to Europe GAR + run: | + gcloud auth print-access-token --lifetime=7200 --impersonate-service-account=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com | docker login -u oauth2accesstoken --password-stdin https://europe-docker.pkg.dev + docker buildx imagetools create \ + --tag europe-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.PROTOCOL_VERSION }}-${{ inputs.image_tag_suffix }} \ + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.PROTOCOL_VERSION }}-${{ inputs.image_tag_suffix }} diff --git a/.github/workflows/new-build-witness-generator-template.yml b/.github/workflows/new-build-witness-generator-template.yml new file mode 100644 index 00000000000..2f1fc0b2dd8 --- /dev/null +++ b/.github/workflows/new-build-witness-generator-template.yml @@ -0,0 +1,133 @@ +name: Build witness generator image with custom compiler flags +on: + workflow_call: + secrets: + DOCKERHUB_USER: + description: "DOCKERHUB_USER" + required: true + DOCKERHUB_TOKEN: + description: "DOCKERHUB_TOKEN" + required: true + inputs: + image_tag_suffix: + description: "Optional suffix to override tag name generation" + type: string + required: false + action: + type: string + default: non-push + required: false + CUDA_ARCH: + description: "CUDA Arch to build" + type: string + default: "89" + required: false + WITNESS_GENERATOR_RUST_FLAGS: + description: "Rust flags for witness_generator compilation" + type: string + default: "" + required: false + outputs: +
protocol_version: + description: "Protocol version of the binary" + value: ${{ jobs.get-protocol-version.outputs.protocol_version }} + +jobs: + get-protocol-version: + name: Get protocol version + runs-on: [ matterlabs-ci-runner-high-performance ] + outputs: + protocol_version: ${{ steps.protocolversion.outputs.protocol_version }} + steps: + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + submodules: "recursive" + + - name: setup-env + run: | + echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV + echo CI=1 >> $GITHUB_ENV + echo $(pwd)/bin >> $GITHUB_PATH + echo CI=1 >> .env + echo IN_DOCKER=1 >> .env + + - name: setup rust + uses: actions-rust-lang/setup-rust-toolchain@1fbea72663f6d4c03efaab13560c8a24cfd2a7cc # v1.9.0 + with: + toolchain: nightly-2024-08-01 + + - name: Prepare sccache-cache env vars + shell: bash + run: | + echo SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage >> $GITHUB_ENV + echo SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com >> $GITHUB_ENV + echo SCCACHE_ERROR_LOG=/tmp/sccache_log.txt >> $GITHUB_ENV + echo SCCACHE_GCS_RW_MODE=READ_WRITE >> $GITHUB_ENV + echo RUSTC_WRAPPER=sccache >> $GITHUB_ENV + + - name: protocol-version + id: protocolversion + # TODO: use the -C flag once it becomes stable. + shell: bash + run: | + cd prover + cargo build --release --bin prover_version + PPV=$(target/release/prover_version) + echo Protocol version is ${PPV} + echo "protocol_version=${PPV}" >> $GITHUB_OUTPUT + + build-images: + name: Build and Push Docker Images + needs: get-protocol-version + env: + PROTOCOL_VERSION: ${{ needs.get-protocol-version.outputs.protocol_version }} + runs-on: [ matterlabs-ci-runner-c3d ] + strategy: + matrix: + components: + - witness-generator + steps: + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + submodules: "recursive" + + - name: setup-env + run: | + echo ZKSYNC_HOME=$(pwd) >> $GITHUB_ENV + echo CI=1 >> $GITHUB_ENV + echo $(pwd)/bin >> $GITHUB_PATH + echo CI=1 >> .env + echo IN_DOCKER=1 >> .env + + - name: Set env vars + shell: bash + run: | + # Support for custom tag suffix + if [ -n "${{ inputs.image_tag_suffix }}" ]; then + echo IMAGE_TAG_SHA_TS="${{ inputs.image_tag_suffix }}" >> $GITHUB_ENV + else + echo IMAGE_TAG_SHA_TS=$(git rev-parse --short HEAD)-$(date +%s) >> $GITHUB_ENV + fi + + - name: login to Docker registries + if: github.event_name != 'pull_request' && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/')) + shell: bash + run: | + docker login -u ${{ secrets.DOCKERHUB_USER }} -p ${{ secrets.DOCKERHUB_TOKEN }} + gcloud auth configure-docker us-docker.pkg.dev -q + + - name: Build and push + uses: docker/build-push-action@5cd11c3a4ced054e52742c5fd54dca954e0edd85 # v6.7.0 + with: + context: .
+ push: ${{ inputs.action == 'push' }} + build-args: | + CUDA_ARCH=${{ inputs.CUDA_ARCH }} + SCCACHE_GCS_BUCKET=matterlabs-infra-sccache-storage + SCCACHE_GCS_SERVICE_ACCOUNT=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com + SCCACHE_GCS_RW_MODE=READ_WRITE + RUSTC_WRAPPER=sccache + file: docker/${{ matrix.components }}/Dockerfile + tags: | + us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/${{ matrix.components }}:2.0-${{ env.PROTOCOL_VERSION }}-${{ env.IMAGE_TAG_SHA_TS }} + matterlabs/${{ matrix.components }}:2.0-${{ env.PROTOCOL_VERSION }}-${{ env.IMAGE_TAG_SHA_TS }} diff --git a/.github/workflows/release-test-stage.yml b/.github/workflows/release-test-stage.yml index ce74b76a6b7..11a844fdd2b 100644 --- a/.github/workflows/release-test-stage.yml +++ b/.github/workflows/release-test-stage.yml @@ -115,15 +115,15 @@ jobs: image_tag_suffix: ${{ needs.setup.outputs.image_tag_suffix }}-avx512 ERA_BELLMAN_CUDA_RELEASE: ${{ vars.ERA_BELLMAN_CUDA_RELEASE }} CUDA_ARCH: "60;70;75;80;89" - WITNESS_GENERATOR_RUST_FLAGS: "-Ctarget_feature=+avx512bw,+avx512cd,+avx512dq,+avx512f,+avx512vl" + WITNESS_GENERATOR_RUST_FLAGS: "-Ctarget_feature=+avx512bw,+avx512cd,+avx512dq,+avx512f,+avx512vl " secrets: DOCKERHUB_USER: ${{ secrets.DOCKERHUB_USER }} DOCKERHUB_TOKEN: ${{ secrets.DOCKERHUB_TOKEN }} - build-gar-prover-fri-gpu: + build-gar-prover-fri-gpu-and-circuit-prover-gpu-gar: name: Build GAR prover FRI GPU needs: [ setup, build-push-prover-images ] - uses: ./.github/workflows/build-prover-fri-gpu-gar.yml + uses: ./.github/workflows/build-prover-fri-gpu-gar-and-circuit-prover-gpu-gar.yml if: needs.changed_files.outputs.prover == 'true' || needs.changed_files.outputs.all == 'true' with: setup_keys_id: ${{ needs.setup.outputs.prover_fri_gpu_key_id }} diff --git a/.github/workflows/vm-perf-to-prometheus.yml b/.github/workflows/vm-perf-to-prometheus.yml index 04eb23f6c34..4c8c90a0d8f 100644 --- a/.github/workflows/vm-perf-to-prometheus.yml +++ b/.github/workflows/vm-perf-to-prometheus.yml @@ -34,7 +34,7 @@ jobs: run_retried docker compose pull zk docker compose up -d zk ci_run zkt - ci_run zks contracts all + ci_run zk_supervisor contracts - name: run benchmarks run: | diff --git a/.github/workflows/zk-environment-publish.yml b/.github/workflows/zk-environment-publish.yml index 5a08dff178c..73303d15cb3 100644 --- a/.github/workflows/zk-environment-publish.yml +++ b/.github/workflows/zk-environment-publish.yml @@ -4,6 +4,12 @@ on: # Workflow dispatch, to allow building and pushing new environments. # It will NOT mark them as latest. 
workflow_dispatch: + inputs: + build_cuda: + description: "Build CUDA images or not" + type: boolean + required: false + default: false push: branches: @@ -202,25 +208,25 @@ jobs: echo "should_run=$changed_files_output" >> "$GITHUB_OUTPUT" - name: Checkout code - if: steps.condition.outputs.should_run == 'true' + if: ${{ (steps.condition.outputs.should_run == 'true') || (github.event_name == 'workflow_dispatch' && inputs.build_cuda) }} uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 with: submodules: "recursive" - name: Log in to US GAR - if: steps.condition.outputs.should_run == 'true' && github.event_name == 'push' && github.ref == 'refs/heads/main' + if: ${{ (steps.condition.outputs.should_run == 'true' && github.event_name == 'push' && github.ref == 'refs/heads/main') || (github.event_name == 'workflow_dispatch' && inputs.build_cuda) }} run: | gcloud auth print-access-token --lifetime=7200 --impersonate-service-account=gha-ci-runners@matterlabs-infra.iam.gserviceaccount.com | docker login -u oauth2accesstoken --password-stdin https://us-docker.pkg.dev - name: Log in to Docker Hub - if: steps.condition.outputs.should_run == 'true' && github.event_name == 'push' && github.ref == 'refs/heads/main' + if: ${{ (steps.condition.outputs.should_run == 'true' && github.event_name == 'push' && github.ref == 'refs/heads/main') || (github.event_name == 'workflow_dispatch' && inputs.build_cuda) }} uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 with: username: ${{ secrets.DOCKERHUB_USER }} password: ${{ secrets.DOCKERHUB_TOKEN }} - name: Login to GitHub Container Registry - if: steps.condition.outputs.should_run == 'true' && github.event_name == 'push' && github.ref == 'refs/heads/main' + if: ${{ (steps.condition.outputs.should_run == 'true' && github.event_name == 'push' && github.ref == 'refs/heads/main') || (github.event_name == 'workflow_dispatch' && inputs.build_cuda) }} uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0 with: registry: ghcr.io @@ -228,19 +234,19 @@ jobs: password: ${{ secrets.GITHUB_TOKEN }} - name: Set up QEMU - if: steps.condition.outputs.should_run == 'true' + if: ${{ (steps.condition.outputs.should_run == 'true') || (github.event_name == 'workflow_dispatch' && inputs.build_cuda) }} uses: docker/setup-qemu-action@49b3bc8e6bdd4a60e6116a5414239cba5943d3cf # v3.2.0 - name: Set up Docker Buildx - if: steps.condition.outputs.should_run == 'true' + if: ${{ (steps.condition.outputs.should_run == 'true') || (github.event_name == 'workflow_dispatch' && inputs.build_cuda) }} uses: docker/setup-buildx-action@aa33708b10e362ff993539393ff100fa93ed6a27 # v3.5.0 - name: Build and optionally push - if: steps.condition.outputs.should_run == 'true' + if: ${{ (steps.condition.outputs.should_run == 'true') || (github.event_name == 'workflow_dispatch' && inputs.build_cuda) }} uses: docker/build-push-action@5176d81f87c23d6fc96624dfdbcd9f3830bbe445 # v6.5.0 with: file: docker/zk-environment/20.04_amd64_cuda_${{ matrix.cuda_version }}.Dockerfile - push: ${{ github.event_name == 'push' && github.ref == 'refs/heads/main' }} + push: ${{ ( github.event_name == 'push' && github.ref == 'refs/heads/main' ) || (github.event_name == 'workflow_dispatch' && inputs.build_cuda) }} tags: | us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/zk-environment-cuda-${{ matrix.cuda_version }}:latest matterlabs/zk-environment:cuda-${{ matrix.cuda_version }}-latest diff --git a/.gitignore b/.gitignore index f95274b0e36..234ce65e5b4 100644 --- 
a/.gitignore +++ b/.gitignore @@ -114,6 +114,7 @@ prover/data/keys/setup_* # Zk Toolbox chains/era/configs/* +chains/gateway/* configs/* era-observability/ core/tests/ts-integration/deployments-zk diff --git a/Cargo.lock b/Cargo.lock index 7a032bea5e8..57665cbcdbe 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -15,18 +15,18 @@ dependencies = [ [[package]] name = "addr2line" -version = "0.21.0" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" +checksum = "f5fb1d8e4442bd405fdfd1dacb42792696b0cf9cb15882e5d097b742a676d375" dependencies = [ "gimli", ] [[package]] -name = "adler" -version = "1.0.2" +name = "adler2" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" +checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" [[package]] name = "aead" @@ -40,9 +40,9 @@ dependencies = [ [[package]] name = "aes" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac1f845298e95f983ff1944b728ae08b8cebab80d684f0a832ed0fc74dfa27e2" +checksum = "b169f7a6d4742236a0a00c541b845991d0ac43e546831af1249753ab4c3aa3a0" dependencies = [ "cfg-if", "cipher", @@ -65,9 +65,9 @@ dependencies = [ [[package]] name = "ahash" -version = "0.7.7" +version = "0.7.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a824f2aa7e75a0c98c5a504fceb80649e9c35265d44525b5f94de4771a395cd" +checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9" dependencies = [ "getrandom", "once_cell", @@ -76,9 +76,9 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.7" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77c3a9648d43b9cd48db467b3f87fdd6e146bcc88ab0180006cef2179fe11d01" +checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if", "once_cell", @@ -88,18 +88,18 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" dependencies = [ "memchr", ] [[package]] name = "allocator-api2" -version = "0.2.16" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0942ffc6dcaadf03badf6e6a2d0228460359d5e34b57ccdc720b7382dfbd5ec5" +checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f" [[package]] name = "android-tzdata" @@ -133,57 +133,58 @@ dependencies = [ [[package]] name = "anstream" -version = "0.6.4" +version = "0.6.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ab91ebe16eb252986481c5b62f6098f3b698a45e34b5b98200cf20dd2484a44" +checksum = "64e15c1ab1f89faffbf04a634d5e1962e9074f2741eef6d97f3c4e322426d526" dependencies = [ "anstyle", "anstyle-parse", "anstyle-query", "anstyle-wincon", "colorchoice", + "is_terminal_polyfill", "utf8parse", ] [[package]] name = "anstyle" -version = "1.0.4" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7079075b41f533b8c61d2a4d073c4676e1f8b249ff94a393b0595db304e0dd87" +checksum = "1bec1de6f59aedf83baf9ff929c98f2ad654b97c9510f4e70cf6f661d49fd5b1" [[package]] name 
= "anstyle-parse" -version = "0.2.2" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "317b9a89c1868f5ea6ff1d9539a69f45dffc21ce321ac1fd1160dfa48c8e2140" +checksum = "eb47de1e80c2b463c735db5b217a0ddc39d612e7ac9e2e96a5aed1f57616c1cb" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.0.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b" +checksum = "6d36fc52c7f6c869915e99412912f22093507da8d9e942ceaf66fe4b7c14422a" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.1" +version = "3.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0699d10d2f4d628a98ee7b57b289abbc98ff3bad977cb3152709d4bf2330628" +checksum = "5bf74e1b6e971609db8ca7a9ce79fd5768ab6ae46441c572e46cf596f59e57f8" dependencies = [ "anstyle", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] name = "anyhow" -version = "1.0.80" +version = "1.0.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ad32ce52e4161730f7098c077cd2ed6229b5804ccf99e5366be1ab72a98b4e1" +checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6" [[package]] name = "arr_macro" @@ -202,15 +203,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0609c78bd572f4edc74310dfb63a01f5609d53fa8b4dd7c4d98aef3b3e8d72d1" dependencies = [ "proc-macro-hack", - "quote 1.0.36", + "quote 1.0.37", "syn 1.0.109", ] [[package]] name = "arrayref" -version = "0.3.7" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545" +checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" [[package]] name = "arrayvec" @@ -229,9 +230,28 @@ checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" [[package]] name = "arrayvec" -version = "0.7.4" +version = "0.7.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" + +[[package]] +name = "ascii-canvas" +version = "3.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8824ecca2e851cec16968d54a01dd372ef8f95b244fb84b84e70128be347c3c6" +dependencies = [ + "term", +] + +[[package]] +name = "assert-json-diff" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" +checksum = "47e4f2b81832e72834d7518d8487a0396a28cc408186a2e8854c0f98011faf12" +dependencies = [ + "serde", + "serde_json", +] [[package]] name = "assert_matches" @@ -239,6 +259,27 @@ version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b34d609dfbaf33d6889b2b7106d3ca345eacad44200913df5ba02bfd31d2ba9" +[[package]] +name = "async-attributes" +version = "1.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a3203e79f4dd9bdda415ed03cf14dae5a2bf775c683a00f94e9cd1faf0f596e5" +dependencies = [ + "quote 1.0.37", + "syn 1.0.109", +] + +[[package]] +name = "async-channel" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "81953c529336010edd6d8e358f886d9581267795c61b19475b71314bffa46d35" +dependencies = [ + "concurrent-queue", + 
"event-listener 2.5.3", + "futures-core", +] + [[package]] name = "async-channel" version = "2.3.1" @@ -275,6 +316,21 @@ dependencies = [ "futures-lite", ] +[[package]] +name = "async-global-executor" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "05b1b633a2115cd122d73b955eadd9916c18c8f510ec9cd1686404c60ad1c29c" +dependencies = [ + "async-channel 2.3.1", + "async-executor", + "async-io", + "async-lock", + "blocking", + "futures-lite", + "once_cell", +] + [[package]] name = "async-io" version = "2.3.4" @@ -316,13 +372,22 @@ dependencies = [ "futures-lite", ] +[[package]] +name = "async-object-pool" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "333c456b97c3f2d50604e8b2624253b7f787208cb72eb75e64b0ad11b221652c" +dependencies = [ + "async-std", +] + [[package]] name = "async-process" -version = "2.2.4" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8a07789659a4d385b79b18b9127fc27e1a59e1e89117c78c5ea3b806f016374" +checksum = "63255f1dc2381611000436537bbedfe83183faa303a5a0edaf191edef06526bb" dependencies = [ - "async-channel", + "async-channel 2.3.1", "async-io", "async-lock", "async-signal", @@ -333,7 +398,6 @@ dependencies = [ "futures-lite", "rustix", "tracing", - "windows-sys 0.59.0", ] [[package]] @@ -343,8 +407,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.72", + "quote 1.0.37", + "syn 2.0.77", ] [[package]] @@ -365,6 +429,34 @@ dependencies = [ "windows-sys 0.59.0", ] +[[package]] +name = "async-std" +version = "1.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c634475f29802fde2b8f0b505b1bd00dfe4df7d4a000f0b36f7671197d5c3615" +dependencies = [ + "async-attributes", + "async-channel 1.9.0", + "async-global-executor", + "async-io", + "async-lock", + "async-process", + "crossbeam-utils", + "futures-channel", + "futures-core", + "futures-io", + "futures-lite", + "gloo-timers 0.3.0", + "kv-log-macro", + "log", + "memchr", + "once_cell", + "pin-project-lite", + "pin-utils", + "slab", + "wasm-bindgen-futures", +] + [[package]] name = "async-stream" version = "0.3.5" @@ -383,8 +475,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.72", + "quote 1.0.37", + "syn 2.0.77", ] [[package]] @@ -395,13 +487,13 @@ checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" [[package]] name = "async-trait" -version = "0.1.74" +version = "0.1.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a66537f1bb974b254c98ed142ff995236e81b9d0fe4db0575f46612cb15eb0f9" +checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.72", + "quote 1.0.37", + "syn 2.0.77", ] [[package]] @@ -438,15 +530,15 @@ dependencies = [ [[package]] name = "autocfg" -version = "1.1.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" +checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" [[package]] name = "aws-lc-rs" -version = "1.8.0" +version = "1.9.0" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8a47f2fb521b70c11ce7369a6c5fa4bd6af7e5d62ec06303875bafe7c6ba245" +checksum = "2f95446d919226d587817a7d21379e6eb099b97b45110a7f272a444ca5c54070" dependencies = [ "aws-lc-sys", "mirai-annotations", @@ -456,9 +548,9 @@ dependencies = [ [[package]] name = "aws-lc-sys" -version = "0.19.0" +version = "0.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2927c7af777b460b7ccd95f8b67acd7b4c04ec8896bf0c8e80ba30523cffc057" +checksum = "b3ddc4a5b231dd6958b140ff3151b6412b3f4321fab354f399eec8f14b06df62" dependencies = [ "bindgen 0.69.4", "cc", @@ -471,18 +563,18 @@ dependencies = [ [[package]] name = "axum" -version = "0.7.5" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a6c9af12842a67734c9a2e355436e5d03b22383ed60cf13cd0c18fbfe3dcbcf" +checksum = "8f43644eed690f5374f1af436ecd6aea01cd201f6fbdf0178adaf6907afb2cec" dependencies = [ "async-trait", "axum-core", "bytes", "futures-util", "http 1.1.0", - "http-body 1.0.0", + "http-body 1.0.1", "http-body-util", - "hyper 1.3.1", + "hyper 1.4.1", "hyper-util", "itoa", "matchit", @@ -498,7 +590,7 @@ dependencies = [ "serde_urlencoded", "sync_wrapper 1.0.1", "tokio", - "tower", + "tower 0.5.1", "tower-layer", "tower-service", "tracing", @@ -506,20 +598,20 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a15c63fd72d41492dc4f497196f5da1fb04fb7529e631d73630d1b491e47a2e3" +checksum = "5e6b8ba012a258d63c9adfa28b9ddcf66149da6f986c5b5452e629d5ee64bf00" dependencies = [ "async-trait", "bytes", "futures-util", "http 1.1.0", - "http-body 1.0.0", + "http-body 1.0.1", "http-body-util", "mime", "pin-project-lite", "rustversion", - "sync_wrapper 0.1.2", + "sync_wrapper 1.0.1", "tower-layer", "tower-service", "tracing", @@ -539,17 +631,17 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.69" +version = "0.3.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" +checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" dependencies = [ "addr2line", - "cc", "cfg-if", "libc", "miniz_oxide", "object", "rustc-demangle", + "windows-targets 0.52.6", ] [[package]] @@ -578,9 +670,9 @@ checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" [[package]] name = "base64" -version = "0.21.5" +version = "0.21.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35636a1494ede3b646cc98f74f8e62c773a38a659ebc777a2cf26b9b74171df9" +checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567" [[package]] name = "base64" @@ -595,12 +687,14 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" [[package]] -name = "basic-toml" -version = "0.1.4" +name = "basic-cookies" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7bfc506e7a2370ec239e1d072507b2a80c833083699d3c6fa176fbb4de8448c6" +checksum = "67bd8fd42c16bdb08688243dc5f0cc117a3ca9efeeaba3a345a18a6159ad96f7" dependencies = [ - "serde", + "lalrpop", + "lalrpop-util", + "regex", ] [[package]] @@ -648,11 +742,11 @@ dependencies = [ "peeking_take_while", "prettyplease", "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "regex", 
"rustc-hash", "shlex", - "syn 2.0.72", + "syn 2.0.77", ] [[package]] @@ -664,17 +758,17 @@ dependencies = [ "bitflags 2.6.0", "cexpr", "clang-sys", - "itertools 0.12.0", + "itertools 0.12.1", "lazy_static", "lazycell", "log", "prettyplease", "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "regex", "rustc-hash", "shlex", - "syn 2.0.72", + "syn 2.0.77", "which", ] @@ -689,6 +783,15 @@ dependencies = [ "unicode-normalization", ] +[[package]] +name = "bit-set" +version = "0.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0700ddab506f33b20a03b13996eccd309a48e5ff77d0d95926aa0210fb4e95f1" +dependencies = [ + "bit-vec", +] + [[package]] name = "bit-vec" version = "0.6.3" @@ -797,7 +900,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "23285ad32269793932e830392f2fe2f83e26488fd3ec778883a93c8323735780" dependencies = [ "arrayref", - "arrayvec 0.7.4", + "arrayvec 0.7.6", "constant_time_eq 0.3.1", ] @@ -842,7 +945,7 @@ name = "block_reverter" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.4.6", + "clap 4.5.18", "serde_json", "tokio", "zksync_block_reverter", @@ -862,7 +965,7 @@ version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "703f41c54fc768e63e091340b424302bb1c29ef4aa0c7f10fe849dfb114d29ea" dependencies = [ - "async-channel", + "async-channel 2.3.1", "async-task", "futures-io", "futures-lite", @@ -887,13 +990,13 @@ version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "68ec2f007ff8f90cc459f03e9f30ca1065440170f013c868823646e2e48d0234" dependencies = [ - "arrayvec 0.7.4", + "arrayvec 0.7.6", "bincode", "blake2 0.10.6", "const_format", "convert_case 0.6.0", "crossbeam", - "crypto-bigint 0.5.3", + "crypto-bigint 0.5.5", "derivative", "ethereum-types", "firestorm", @@ -914,9 +1017,9 @@ dependencies = [ [[package]] name = "borsh" -version = "1.3.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26d4d6dafc1a3bb54687538972158f07b2c948bc57d5890df22c0739098b3028" +checksum = "a6362ed55def622cddc70a4746a68554d7b687713770de539e59a739b249f8ed" dependencies = [ "borsh-derive", "cfg_aliases", @@ -924,15 +1027,15 @@ dependencies = [ [[package]] name = "borsh-derive" -version = "1.3.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf4918709cc4dd777ad2b6303ed03cb37f3ca0ccede8c1b0d28ac6db8f4710e0" +checksum = "c3ef8005764f53cd4dca619f5bf64cafd4664dada50ece25e4d81de54c80cc0b" dependencies = [ "once_cell", - "proc-macro-crate 2.0.0", + "proc-macro-crate 3.2.0", "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.72", + "quote 1.0.37", + "syn 2.0.77", "syn_derive", ] @@ -953,9 +1056,9 @@ checksum = "225eb82ce9e70dcc0cfa6e404d0f353326b6e163bf500ec4711cec317d11935c" [[package]] name = "bumpalo" -version = "3.14.0" +version = "3.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f30e7476521f6f8af1a1c4c0b8cc94f0bee37d91763d0ca2665f299b6cd8aec" +checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c" [[package]] name = "byte-slice-cast" @@ -965,9 +1068,9 @@ checksum = "c3ac9f8b63eca6fd385229b3675f6cc0dc5c8a5c8a54a59d4f52ffd670d87b0c" [[package]] name = "bytecheck" -version = "0.6.11" +version = "0.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b6372023ac861f6e6dc89c8344a8f398fb42aaba2b5dbc649ca0c0e9dbcb627" +checksum = 
"23cdc57ce23ac53c931e88a43d06d070a6fd142f2617be5855eb75efc9beb1c2" dependencies = [ "bytecheck_derive", "ptr_meta", @@ -976,20 +1079,20 @@ dependencies = [ [[package]] name = "bytecheck_derive" -version = "0.6.11" +version = "0.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7ec4c6f261935ad534c0c22dbef2201b45918860eb1c574b972bd213a76af61" +checksum = "3db406d29fbcd95542e92559bed4d8ad92636d1ca8b3b72ede10b4bcc010e659" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "syn 1.0.109", ] [[package]] name = "bytecount" -version = "0.6.5" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1a12477b7237a01c11a80a51278165f9ba0edd28fa6db00a65ab230320dc58c" +checksum = "5ce89b21cab1437276d2650d57e971f9d548a2d9037cc231abdc0562b97498ce" [[package]] name = "byteorder" @@ -999,9 +1102,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.6.0" +version = "1.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" +checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3" [[package]] name = "bytesize" @@ -1022,18 +1125,18 @@ dependencies = [ [[package]] name = "camino" -version = "1.1.6" +version = "1.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c59e92b5a388f549b863a7bea62612c09f24c8393560709a54558a9abdfb3b9c" +checksum = "8b96ec4966b5813e2c0507c1f86115c8c5abaadc3980879c3424042a02fd1ad3" dependencies = [ "serde", ] [[package]] name = "cargo-platform" -version = "0.1.4" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12024c4645c97566567129c204f65d5815a8c9aecf30fcbe682b2fe034996d36" +checksum = "24b1f0365a6c6bb4020cd05806fd0d33c44d38046b8bd7f0e40814b9763cabfc" dependencies = [ "serde", ] @@ -1059,9 +1162,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.1.14" +version = "1.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50d2eb3cd3d1bf4529e31c215ee6f93ec5a3d536d9f578f93d9d33ee19562932" +checksum = "07b1695e2c7e8fc85310cde85aeaab7e3097f593c91d209d3f9df76c928100f0" dependencies = [ "jobserver", "libc", @@ -1091,9 +1194,9 @@ checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] name = "cfg_aliases" -version = "0.1.1" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd16c4719339c4530435d38e511904438d07cce7950afa3718a84ac36c10e89e" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" [[package]] name = "chacha20" @@ -1136,9 +1239,9 @@ dependencies = [ [[package]] name = "ciborium" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "effd91f6c78e5a4ace8a5d3c0b6bfaec9e2baaef55f3efc00e45fb2e477ee926" +checksum = "42e69ffd6f0917f5c029256a24d0161db17cea3997d185db0d35926308770f0e" dependencies = [ "ciborium-io", "ciborium-ll", @@ -1147,15 +1250,15 @@ dependencies = [ [[package]] name = "ciborium-io" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdf919175532b369853f5d5e20b26b43112613fd6fe7aee757e35f7a44642656" +checksum = "05afea1e0a06c9be33d539b876f1ce3692f4afea2cb41f740e7743225ed1c757" [[package]] name = 
"ciborium-ll" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "defaa24ecc093c77630e6c15e17c51f5e187bf35ee514f4e2d67baaa96dae22b" +checksum = "57663b653d948a338bfb3eeba9bb2fd5fcfaecb9e199e87e1eda4d9e8b240fd9" dependencies = [ "ciborium-io", "half", @@ -1174,38 +1277,38 @@ dependencies = [ [[package]] name = "circuit_encodings" -version = "0.140.1" +version = "0.140.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8438d7af992b730143b679e2c6938cb9e0193897ecaf668c59189af8ac296b7" +checksum = "cf6b7cc842eadb4c250cdc6a8bc1dd97624d9f08bbe54db3e11fb23c3a72be07" dependencies = [ "derivative", "serde", "zk_evm 0.140.0", - "zkevm_circuits 0.140.2", + "zkevm_circuits 0.140.3", ] [[package]] name = "circuit_encodings" -version = "0.141.1" +version = "0.141.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01a2fcc80e97682104f355dd819cb4972583828a6c0f65ec26889a78a84b0c56" +checksum = "7898ffbf3cd413576b4b674fe1545a35488c67eb16bd5a4148425e42c2a2b65b" dependencies = [ "derivative", "serde", "zk_evm 0.141.0", - "zkevm_circuits 0.141.1", + "zkevm_circuits 0.141.2", ] [[package]] name = "circuit_encodings" -version = "0.142.1" +version = "0.142.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94be7afb5ace6024d6e3c105d521b4b9b563bac14a92c2f59c4683e9169a25d8" +checksum = "8364ecafcc4b2c896023f8d3af952c52a500aa55f14fd268bb5d9ab07f837369" dependencies = [ "derivative", "serde", "zk_evm 0.141.0", - "zkevm_circuits 0.141.1", + "zkevm_circuits 0.141.2", ] [[package]] @@ -1239,7 +1342,7 @@ version = "0.140.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fa5f22311ce609d852d7d9f4943535ea4610aeb785129ae6ff83d5201c4fb387" dependencies = [ - "circuit_encodings 0.140.1", + "circuit_encodings 0.140.3", "derivative", "rayon", "serde", @@ -1253,7 +1356,7 @@ version = "0.141.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4c47c71d6ba83a8beb0af13af70beffd627f5497caf3d44c6f96363e788b07ea" dependencies = [ - "circuit_encodings 0.141.1", + "circuit_encodings 0.141.2", "derivative", "rayon", "serde", @@ -1267,7 +1370,7 @@ version = "0.142.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e264723359e6a1aad98110bdccf1ae3ad596e93e7d31da9e40f6adc07e4add54" dependencies = [ - "circuit_encodings 0.142.1", + "circuit_encodings 0.142.2", "derivative", "rayon", "serde", @@ -1290,9 +1393,9 @@ dependencies = [ [[package]] name = "clang-sys" -version = "1.6.1" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c688fc74432808e3eb684cae8830a86be1d66a2bd58e1f248ed0960a590baf6f" +checksum = "0b023947811758c97c59bf9d1c188fd619ad4718dcaa767947df1cadb14f39f4" dependencies = [ "glob", "libc", @@ -1323,14 +1426,14 @@ dependencies = [ "bitflags 1.3.2", "clap_lex 0.2.4", "indexmap 1.9.3", - "textwrap 0.16.0", + "textwrap 0.16.1", ] [[package]] name = "clap" -version = "4.4.6" +version = "4.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d04704f56c2cde07f43e8e2c154b43f216dc5c92fc98ada720177362f953b956" +checksum = "b0956a43b323ac1afaffc053ed5c4b7c1f1800bacd1683c353aabbb752515dd3" dependencies = [ "clap_builder", "clap_derive", @@ -1338,26 +1441,26 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.4.6" +version = "4.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0e231faeaca65ebd1ea3c737966bf858971cd38c3849107aa3ea7de90a804e45" +checksum = "4d72166dd41634086d5803a47eb71ae740e61d84709c36f3c34110173db3961b" dependencies = [ "anstream", "anstyle", - "clap_lex 0.5.1", - "strsim 0.10.0", + "clap_lex 0.7.2", + "strsim 0.11.1", ] [[package]] name = "clap_derive" -version = "4.4.2" +version = "4.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0862016ff20d69b84ef8247369fabf5c008a7417002411897d40ee1f4532b873" +checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" dependencies = [ - "heck 0.4.1", + "heck 0.5.0", "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.72", + "quote 1.0.37", + "syn 2.0.77", ] [[package]] @@ -1371,15 +1474,15 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.5.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd7cc57abe963c6d3b9d8be5b06ba7c8957a930305ca90304f24ef040aa6f961" +checksum = "1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97" [[package]] name = "cmake" -version = "0.1.50" +version = "0.1.51" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31c789563b815f77f4250caee12365734369f942439b7defd71e18a48197130" +checksum = "fb1e43aa7fd152b1f968787f7dbcdeb306d1867ff373c69955211876c053f91a" dependencies = [ "cc", ] @@ -1395,9 +1498,9 @@ dependencies = [ [[package]] name = "colorchoice" -version = "1.0.0" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" +checksum = "d3fd119d74b830634cea2a0f58bbd0d54540518a14397557951e79340abc28c0" [[package]] name = "combine" @@ -1426,40 +1529,40 @@ dependencies = [ [[package]] name = "console" -version = "0.15.7" +version = "0.15.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c926e00cc70edefdc64d3a5ff31cc65bb97a3460097762bd23afb4d8145fccf8" +checksum = "0e1f83fc076bd6dd27517eacdf25fef6c4dfe5f1d7448bafaaf3a26f13b5e4eb" dependencies = [ "encode_unicode", "lazy_static", "libc", - "windows-sys 0.45.0", + "windows-sys 0.52.0", ] [[package]] name = "const-oid" -version = "0.9.5" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28c122c3980598d243d63d9a704629a2d748d101f278052ff068be5a4423ab6f" +checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" [[package]] name = "const_format" -version = "0.2.32" +version = "0.2.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3a214c7af3d04997541b18d432afaff4c455e79e2029079647e72fc2bd27673" +checksum = "50c655d81ff1114fb0dcdea9225ea9f0cc712a6f8d189378e82bdf62a473a64b" dependencies = [ "const_format_proc_macros", ] [[package]] name = "const_format_proc_macros" -version = "0.2.32" +version = "0.2.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7f6ff08fd20f4f299298a28e2dfa8a8ba1036e6cd2460ac1de7b425d76f2500" +checksum = "eff1a44b93f47b1bac19a27932f5c591e43d1ba357ee4f61526c8a25603f0eb1" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", - "unicode-xid 0.2.4", + "quote 1.0.37", + "unicode-xid 0.2.6", ] [[package]] @@ -1501,24 +1604,24 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.6" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" +checksum = 
"773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "cpufeatures" -version = "0.2.10" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fbc60abd742b35f2492f808e1abbb83d45f72db402e14c55057edc9c7b1e9e4" +checksum = "608697df725056feaccfa42cffdaeeec3fccc4ffc38358ecd19b243e716a78e0" dependencies = [ "libc", ] [[package]] name = "crc" -version = "3.0.1" +version = "3.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86ec7a15cbe22e59248fc7eadb1907dab5ba09372595da4d73dd805ed4417dfe" +checksum = "69e6e4d7b33a94f0991c26729976b10ebde1d34c3ee82408fb536164fa10d636" dependencies = [ "crc-catalog", ] @@ -1531,9 +1634,9 @@ checksum = "19d374276b40fb8bbdee95aef7c7fa6b5316ec764510eb64b8dd0e2ed0d7e7f5" [[package]] name = "crc32fast" -version = "1.3.2" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" +checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" dependencies = [ "cfg-if", ] @@ -1650,9 +1753,9 @@ dependencies = [ [[package]] name = "crypto-bigint" -version = "0.5.3" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "740fe28e594155f10cfc383984cbefd529d7396050557148f79cb0f621204124" +checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" dependencies = [ "generic-array", "rand_core 0.6.4", @@ -1681,26 +1784,14 @@ dependencies = [ "subtle", ] -[[package]] -name = "cs_derive" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24cf603ca4299c6e20e644da88897f7b81d688510f4887e818b0bfe0b792081b" -dependencies = [ - "proc-macro-error", - "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 1.0.109", -] - [[package]] name = "ctor" version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "edb49164822f3ee45b17acd4a208cfc1251410cf0cad9a833234c9890774dd9f" dependencies = [ - "quote 1.0.36", - "syn 2.0.72", + "quote 1.0.37", + "syn 2.0.77", ] [[package]] @@ -1714,12 +1805,12 @@ dependencies = [ [[package]] name = "ctrlc" -version = "3.4.1" +version = "3.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "82e95fbd621905b854affdc67943b043a0fbb6ed7385fd5a25650d19a8a6cfdf" +checksum = "90eeab0aa92f3f9b4e87f258c72b139c207d251f9cbc1080a0086b86a8870dd3" dependencies = [ "nix", - "windows-sys 0.48.0", + "windows-sys 0.59.0", ] [[package]] @@ -1745,8 +1836,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.72", + "quote 1.0.37", + "syn 2.0.77", ] [[package]] @@ -1788,7 +1879,7 @@ dependencies = [ "fnv", "ident_case", "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "strsim 0.10.0", "syn 1.0.109", ] @@ -1802,7 +1893,7 @@ dependencies = [ "fnv", "ident_case", "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "strsim 0.10.0", "syn 1.0.109", ] @@ -1816,9 +1907,9 @@ dependencies = [ "fnv", "ident_case", "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "strsim 0.11.1", - "syn 2.0.72", + "syn 2.0.77", ] [[package]] @@ -1828,7 +1919,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c972679f83bdf9c42bd905396b6c3588a843a17f0f16dfcfa3e2c5d57441835" dependencies = [ "darling_core 0.13.4", - 
"quote 1.0.36", + "quote 1.0.37", "syn 1.0.109", ] @@ -1839,7 +1930,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4aab4dbc9f7611d8b55048a3a16d2d010c2c8334e46304b40ac1cc14bf3b48e" dependencies = [ "darling_core 0.14.4", - "quote 1.0.36", + "quote 1.0.37", "syn 1.0.109", ] @@ -1850,8 +1941,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d336a2a514f6ccccaa3e09b02d41d35330c07ddf03a62165fcec10bb561c7806" dependencies = [ "darling_core 0.20.10", - "quote 1.0.36", - "syn 2.0.72", + "quote 1.0.37", + "syn 2.0.77", ] [[package]] @@ -1889,9 +1980,9 @@ dependencies = [ [[package]] name = "der" -version = "0.7.8" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c" +checksum = "f55bf8e7b65898637379c1b74eb1551107c8294ed26d855ceb9fd1a09cfc9bc0" dependencies = [ "const-oid", "pem-rfc7468", @@ -1900,9 +1991,9 @@ dependencies = [ [[package]] name = "deranged" -version = "0.3.9" +version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f32d04922c60427da6f9fef14d042d9edddef64cb9d4ce0d64d0685fbeb1fd3" +checksum = "b42b6fa04a440b495c8b04d0e71b707c585f83cb9cb28cf8cd0d976c315e31b4" dependencies = [ "powerfmt", "serde", @@ -1915,7 +2006,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "syn 1.0.109", ] @@ -1927,30 +2018,30 @@ checksum = "5f33878137e4dafd7fa914ad4e259e18a4e8e532b9617a2d0150262bf53abfce" dependencies = [ "convert_case 0.4.0", "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "rustc_version", - "syn 2.0.72", + "syn 2.0.77", ] [[package]] name = "derive_more" -version = "1.0.0-beta.6" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7abbfc297053be59290e3152f8cbcd52c8642e0728b69ee187d991d4c1af08d" +checksum = "4a9b99b9cbbe49445b21764dc0625032a89b145a2642e67603e1c936f5458d05" dependencies = [ "derive_more-impl", ] [[package]] name = "derive_more-impl" -version = "1.0.0-beta.6" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bba3e9872d7c58ce7ef0fcf1844fcc3e23ef2a58377b50df35dd98e42a5726e" +checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.72", - "unicode-xid 0.2.4", + "quote 1.0.37", + "syn 2.0.77", + "unicode-xid 0.2.6", ] [[package]] @@ -1980,6 +2071,27 @@ dependencies = [ "subtle", ] +[[package]] +name = "dirs-next" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b98cf8ebf19c3d1b223e151f99a4f9f0690dca41414773390fc824184ac833e1" +dependencies = [ + "cfg-if", + "dirs-sys-next", +] + +[[package]] +name = "dirs-sys-next" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ebda144c4fe02d1f7ea1a7d9641b6fc6b580adcfa024ae48797ecdeb6825b4d" +dependencies = [ + "libc", + "redox_users", + "winapi", +] + [[package]] name = "dotenvy" version = "0.15.7" @@ -2000,9 +2112,9 @@ checksum = "dcbb2bf8e87535c23f7a8a321e364ce21462d0ff10cb6407820e8e96dfff6653" [[package]] name = "dunce" -version = "1.0.4" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"56ce8c6da7551ec6c462cbaf3bfbc75131ebbfa1c944aeaa9dab51ca1c5f0c3b" +checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" [[package]] name = "ecdsa" @@ -2022,12 +2134,12 @@ version = "0.16.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" dependencies = [ - "der 0.7.8", + "der 0.7.9", "digest 0.10.7", "elliptic-curve 0.13.8", "rfc6979 0.4.0", "signature 2.2.0", - "spki 0.7.2", + "spki 0.7.3", ] [[package]] @@ -2042,15 +2154,16 @@ dependencies = [ [[package]] name = "ed25519-dalek" -version = "2.0.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7277392b266383ef8396db7fdeb1e77b6c52fed775f5df15bb24f35b72156980" +checksum = "4a3daa8e81a3963a60642bcc1f90a670680bd4a77535faa384e9d1c79d620871" dependencies = [ "curve25519-dalek", "ed25519", "rand_core 0.6.4", "serde", "sha2 0.10.8", + "subtle", "zeroize", ] @@ -2071,9 +2184,9 @@ dependencies = [ [[package]] name = "either" -version = "1.9.0" +version = "1.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a26ae43d7bcc3b814de94796a5e736d4029efb0ee900c12e2d54c993ad1a1e07" +checksum = "60b1af1c220855b6ceac025d3f6ecdd2b7c4894bfe9cd9bda4fbb4bc7c0d4cf0" dependencies = [ "serde", ] @@ -2105,7 +2218,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" dependencies = [ "base16ct 0.2.0", - "crypto-bigint 0.5.3", + "crypto-bigint 0.5.5", "digest 0.10.7", "ff 0.13.0", "generic-array", @@ -2120,13 +2233,22 @@ dependencies = [ [[package]] name = "elsa" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "714f766f3556b44e7e4776ad133fcc3445a489517c25c704ace411bb14790194" +checksum = "d98e71ae4df57d214182a2e5cb90230c0192c6ddfcaa05c36453d46a54713e10" dependencies = [ "stable_deref_trait", ] +[[package]] +name = "ena" +version = "0.14.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d248bdd43ce613d87415282f69b9bb99d947d290b10962dd6c56233312c2ad5" +dependencies = [ + "log", +] + [[package]] name = "encode_unicode" version = "0.3.6" @@ -2135,9 +2257,9 @@ checksum = "a357d28ed41a50f9c765dbfe56cbc04a64e53e5fc58ba79fbc34c10ef3df831f" [[package]] name = "encoding_rs" -version = "0.8.33" +version = "0.8.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7268b386296a025e474d5140678f75d6de9493ae55a5d709eeb9dd08149945e1" +checksum = "b45de904aa0b010bce2ab45264d0631681847fa7b6f2eaa7dab7619943bc4f59" dependencies = [ "cfg-if", ] @@ -2150,25 +2272,27 @@ checksum = "aa18ce2bc66555b3218614519ac839ddb759a7d6720732f979ef8d13be147ecd" dependencies = [ "once_cell", "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.72", + "quote 1.0.37", + "syn 2.0.77", ] [[package]] name = "env_filter" -version = "0.1.0" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a009aa4810eb158359dda09d0c87378e4bbb89b5a801f016885a4707ba24f7ea" +checksum = "4f2c92ceda6ceec50f43169f9ee8424fe2db276791afde7b2cd8bc084cb376ab" dependencies = [ "log", ] [[package]] name = "env_logger" -version = "0.11.3" +version = "0.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38b35839ba51819680ba087cd351788c9a3c476841207e0b8cee0b04722343b9" +checksum = "e13fa619b91fb2381732789fc5de83b45675e882f66623b7d8cb4f643017018d" 
dependencies = [ + "anstream", + "anstyle", "env_filter", "log", ] @@ -2262,6 +2386,12 @@ dependencies = [ "uint", ] +[[package]] +name = "event-listener" +version = "2.5.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0206175f82b8d6bf6652ff7d71a1e27fd2e4efde587fd368662814d6ec1d9ce0" + [[package]] name = "event-listener" version = "4.0.3" @@ -2295,9 +2425,9 @@ dependencies = [ [[package]] name = "fastrand" -version = "2.0.1" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" +checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6" [[package]] name = "ff" @@ -2319,23 +2449,11 @@ dependencies = [ "subtle", ] -[[package]] -name = "ff_ce" -version = "0.14.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b538e4231443a5b9c507caee3356f016d832cf7393d2d90f03ea3180d4e3fbc" -dependencies = [ - "byteorder", - "hex", - "rand 0.4.6", - "serde", -] - [[package]] name = "fiat-crypto" -version = "0.2.3" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f69037fe1b785e84986b4f2cbcf647381876a00671d25ceef715d7812dd7e1dd" +checksum = "28dea519a9695b9977216879a3ebfddf92f1c08c05d984f8996aecd6ecdc811d" [[package]] name = "findshlibs" @@ -2349,12 +2467,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "finl_unicode" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fcfdc7a0362c9f4444381a9e697c79d435fe65b52a37466fc2c1184cee9edc6" - [[package]] name = "firestorm" version = "0.5.1" @@ -2381,9 +2493,9 @@ checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" [[package]] name = "flate2" -version = "1.0.28" +version = "1.0.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46303f565772937ffe1d394a4fac6f411c6013172fadde9dcdb1e147a086940e" +checksum = "a1b589b4dc103969ad3cf85c950899926ec64300a1a46d76c03a6072957036f0" dependencies = [ "crc32fast", "miniz_oxide", @@ -2600,8 +2712,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.72", + "quote 1.0.37", + "syn 2.0.77", ] [[package]] @@ -2618,11 +2730,11 @@ checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" [[package]] name = "futures-timer" -version = "3.0.2" +version = "3.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" +checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" dependencies = [ - "gloo-timers", + "gloo-timers 0.2.6", "send_wrapper", ] @@ -2661,7 +2773,7 @@ name = "genesis_generator" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.4.6", + "clap 4.5.18", "futures 0.3.30", "serde", "serde_json", @@ -2682,9 +2794,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.10" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427" +checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" dependencies = [ "cfg-if", "js-sys", @@ -2705,9 +2817,9 @@ dependencies = [ [[package]] name = "ghash" -version = "0.5.0" +version = "0.5.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d930750de5717d2dd0b8c0d42c076c0e884c81a73e6cab859bbd2339c71e3e40" +checksum = "f0d8a4362ccb29cb0b265253fb0a2728f592895ee6854fd9bc13f2ffda266ff1" dependencies = [ "opaque-debug", "polyval", @@ -2715,9 +2827,9 @@ dependencies = [ [[package]] name = "gimli" -version = "0.28.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fb8d784f27acf97159b40fc4db5ecd8aa23b9ad5ef69cdd136d3bc80665f0c0" +checksum = "32085ea23f3234fc7846555e85283ba4de91e21016dc0455a16286d87a292d64" [[package]] name = "glob" @@ -2735,7 +2847,7 @@ dependencies = [ "futures-core", "futures-sink", "gloo-utils", - "http 0.2.9", + "http 0.2.12", "js-sys", "pin-project", "serde", @@ -2758,6 +2870,18 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "gloo-timers" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bbb143cf96099802033e0d4f4963b19fd2e0b728bcf076cd9cf7f6634f092994" +dependencies = [ + "futures-channel", + "futures-core", + "js-sys", + "wasm-bindgen", +] + [[package]] name = "gloo-utils" version = "0.2.0" @@ -2778,12 +2902,12 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1112c453c2e155b3e683204ffff52bcc6d6495d04b68d9e90cd24161270c5058" dependencies = [ "async-trait", - "base64 0.21.5", + "base64 0.21.7", "google-cloud-metadata", "google-cloud-token", "home", "jsonwebtoken", - "reqwest 0.12.5", + "reqwest 0.12.7", "serde", "serde_json", "thiserror", @@ -2799,7 +2923,7 @@ version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "04f945a208886a13d07636f38fb978da371d0abc3e34bad338124b9f8c135a8f" dependencies = [ - "reqwest 0.12.5", + "reqwest 0.12.7", "thiserror", "tokio", ] @@ -2813,7 +2937,7 @@ dependencies = [ "anyhow", "async-stream", "async-trait", - "base64 0.21.5", + "base64 0.21.7", "bytes", "futures-util", "google-cloud-auth", @@ -2824,7 +2948,7 @@ dependencies = [ "percent-encoding", "pkcs8 0.10.2", "regex", - "reqwest 0.12.5", + "reqwest 0.12.7", "reqwest-middleware", "ring", "serde", @@ -2896,8 +3020,8 @@ dependencies = [ "futures-core", "futures-sink", "futures-util", - "http 0.2.9", - "indexmap 2.1.0", + "http 0.2.12", + "indexmap 2.5.0", "slab", "tokio", "tokio-util", @@ -2906,9 +3030,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa82e28a107a8cc405f0839610bdc9b15f1e25ec7d696aa5cf173edbcb1486ab" +checksum = "524e8ac6999421f49a846c2d4411f337e53497d8ec55d67753beffa43c5d9205" dependencies = [ "atomic-waker", "bytes", @@ -2916,7 +3040,7 @@ dependencies = [ "futures-core", "futures-sink", "http 1.1.0", - "indexmap 2.1.0", + "indexmap 2.5.0", "slab", "tokio", "tokio-util", @@ -2925,9 +3049,13 @@ dependencies = [ [[package]] name = "half" -version = "1.8.2" +version = "2.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eabb4a44450da02c90444cf74558da904edde8fb4e9035a9a6a4e15445af0bd7" +checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888" +dependencies = [ + "cfg-if", + "crunchy", +] [[package]] name = "handlebars" @@ -2949,7 +3077,7 @@ version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" dependencies = [ - "ahash 0.7.7", + "ahash 0.7.8", ] [[package]] @@ -2958,7 +3086,7 @@ version = "0.14.5" source 
= "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" dependencies = [ - "ahash 0.8.7", + "ahash 0.8.11", "allocator-api2", "serde", ] @@ -3022,9 +3150,9 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "hkdf" -version = "0.12.3" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "791a029f6b9fc27657f6f188ec6e5e43f6911f6f878e0dc5501396e09809d437" +checksum = "7b5f8eb2ad728638ea2c7d47a21db23b7b58a72ed6a38256b8a1849f15fbbdf7" dependencies = [ "hmac 0.12.1", ] @@ -3061,11 +3189,11 @@ dependencies = [ [[package]] name = "home" -version = "0.5.5" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5444c27eef6923071f7ebcc33e3444508466a76f7a2b93da00ed6e19f30c1ddb" +checksum = "e3d1354bf6b7235cb4a0576c2619fd4ed18183f689b12b006a0ee7329eeff9a5" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -3081,9 +3209,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.9" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd6effc99afb63425aff9b05836f029929e345a6148a14b7ecd5ab67af944482" +checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" dependencies = [ "bytes", "fnv", @@ -3108,15 +3236,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" dependencies = [ "bytes", - "http 0.2.9", + "http 0.2.12", "pin-project-lite", ] [[package]] name = "http-body" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643" +checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" dependencies = [ "bytes", "http 1.1.0", @@ -3131,15 +3259,15 @@ dependencies = [ "bytes", "futures-util", "http 1.1.0", - "http-body 1.0.0", + "http-body 1.0.1", "pin-project-lite", ] [[package]] name = "httparse" -version = "1.8.0" +version = "1.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" +checksum = "0fcc0b4a115bf80b728eb8ea024ad5bd707b615bfed49e0665b6e0f86fd082d9" [[package]] name = "httpdate" @@ -3147,18 +3275,46 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" +[[package]] +name = "httpmock" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08ec9586ee0910472dec1a1f0f8acf52f0fdde93aea74d70d4a3107b4be0fd5b" +dependencies = [ + "assert-json-diff", + "async-object-pool", + "async-std", + "async-trait", + "base64 0.21.7", + "basic-cookies", + "crossbeam-utils", + "form_urlencoded", + "futures-util", + "hyper 0.14.30", + "lazy_static", + "levenshtein", + "log", + "regex", + "serde", + "serde_json", + "serde_regex", + "similar", + "tokio", + "url", +] + [[package]] name = "hyper" -version = "0.14.29" +version = "0.14.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f361cde2f109281a220d4307746cdfd5ee3f410da58a70377762396775634b33" +checksum = "a152ddd61dfaec7273fe8419ab357f33aee0d914c5f4efbf0d96fa749eea5ec9" dependencies = [ "bytes", "futures-channel", "futures-core", "futures-util", "h2 
0.3.26", - "http 0.2.9", + "http 0.2.12", "http-body 0.4.6", "httparse", "httpdate", @@ -3173,16 +3329,16 @@ dependencies = [ [[package]] name = "hyper" -version = "1.3.1" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe575dd17d0862a9a33781c8c4696a55c320909004a67a00fb286ba8b1bc496d" +checksum = "50dfd22e0e76d0f662d429a5f80fcaf3855009297eab6a0a9f8543834744ba05" dependencies = [ "bytes", "futures-channel", "futures-util", - "h2 0.4.5", + "h2 0.4.6", "http 1.1.0", - "http-body 1.0.0", + "http-body 1.0.1", "httparse", "httpdate", "itoa", @@ -3199,8 +3355,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ "futures-util", - "http 0.2.9", - "hyper 0.14.29", + "http 0.2.12", + "hyper 0.14.30", "log", "rustls 0.21.12", "rustls-native-certs 0.6.3", @@ -3210,16 +3366,16 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.27.2" +version = "0.27.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ee4be2c948921a1a5320b629c4193916ed787a7f7f293fd3f7f5a6c9de74155" +checksum = "08afdbb5c31130e3034af566421053ab03787c640246a446327f550d11bcb333" dependencies = [ "futures-util", "http 1.1.0", - "hyper 1.3.1", + "hyper 1.4.1", "hyper-util", "log", - "rustls 0.23.10", + "rustls 0.23.13", "rustls-pki-types", "tokio", "tokio-rustls 0.26.0", @@ -3232,7 +3388,7 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3203a961e5c83b6f5498933e78b6b263e208c197b63e9c6c53cc82ffd3f63793" dependencies = [ - "hyper 1.3.1", + "hyper 1.4.1", "hyper-util", "pin-project-lite", "tokio", @@ -3246,7 +3402,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" dependencies = [ "bytes", - "hyper 0.14.29", + "hyper 0.14.30", "native-tls", "tokio", "tokio-native-tls", @@ -3260,7 +3416,7 @@ checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" dependencies = [ "bytes", "http-body-util", - "hyper 1.3.1", + "hyper 1.4.1", "hyper-util", "native-tls", "tokio", @@ -3270,20 +3426,19 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.5" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b875924a60b96e5d7b9ae7b066540b1dd1cbd90d1828f54c92e02a283351c56" +checksum = "41296eb09f183ac68eec06e03cdbea2e759633d4067b2f6552fc2e009bcad08b" dependencies = [ "bytes", "futures-channel", "futures-util", "http 1.1.0", - "http-body 1.0.0", - "hyper 1.3.1", + "http-body 1.0.1", + "hyper 1.4.1", "pin-project-lite", "socket2", "tokio", - "tower", "tower-service", "tracing", ] @@ -3296,9 +3451,9 @@ checksum = "71a816c97c42258aa5834d07590b718b4c9a598944cd39a52dc25b351185d678" [[package]] name = "iana-time-zone" -version = "0.1.58" +version = "0.1.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8326b86b6cff230b97d0d312a6c40a60726df3332e721f72a1b035f451663b20" +checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -3381,7 +3536,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "11d7a9f6330b71fea57921c9b61c47ee6e84f72d394754eff6163ae67e7395eb" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "syn 1.0.109", ] @@ -3397,9 +3552,9 @@ dependencies = [ [[package]] 
name = "indexmap" -version = "2.1.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d530e1a18b1cb4c484e6e34556a0d948706958449fca0cab753d649f2bce3d1f" +checksum = "68b900aa2f7301e21c36462b170ee99994de34dff39a4a6a528e80e7376d07e5" dependencies = [ "equivalent", "hashbrown 0.14.5", @@ -3422,16 +3577,15 @@ dependencies = [ [[package]] name = "insta" -version = "1.34.0" +version = "1.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d64600be34b2fcfc267740a243fa7744441bb4947a619ac4e5bb6507f35fbfc" +checksum = "6593a41c7a73841868772495db7dc1e8ecab43bb5c0b6da2059246c4b506ab60" dependencies = [ "console", "lazy_static", "linked-hash-map", "serde", "similar", - "yaml-rust", ] [[package]] @@ -3445,9 +3599,9 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.9.0" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" +checksum = "187674a687eed5fe42285b40c6291f9a01517d415fad1c3cbc6a9f778af7fcd4" [[package]] name = "ipnetwork" @@ -3458,6 +3612,12 @@ dependencies = [ "serde", ] +[[package]] +name = "is_terminal_polyfill" +version = "1.70.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" + [[package]] name = "itertools" version = "0.10.5" @@ -3469,18 +3629,36 @@ dependencies = [ [[package]] name = "itertools" -version = "0.12.0" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57" +dependencies = [ + "either", +] + +[[package]] +name = "itertools" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" +dependencies = [ + "either", +] + +[[package]] +name = "itertools" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25db6b064527c5d482d0423354fcd07a89a2dfe07b67892e62411946db7f07b0" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" dependencies = [ "either", ] [[package]] name = "itoa" -version = "1.0.9" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af150ab688ff2122fcef229be89cb50dd66af9e01a4ff320cc137eecc9bacc38" +checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" [[package]] name = "jni" @@ -3513,9 +3691,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.64" +version = "0.3.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5f195fe497f702db0f318b07fdd68edb16955aed830df8363d837542f8f935a" +checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a" dependencies = [ "wasm-bindgen", ] @@ -3557,10 +3735,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9f9ed46590a8d5681975f126e22531698211b926129a40a2db47cbca429220" dependencies = [ "futures-util", - "http 0.2.9", + "http 0.2.12", "jsonrpsee-core 0.21.0", "pin-project", - "rustls-native-certs 0.7.0", + "rustls-native-certs 0.7.3", "rustls-pki-types", "soketto 0.7.1", "thiserror", @@ -3584,7 +3762,7 @@ dependencies = [ "http 1.1.0", "jsonrpsee-core 0.23.2", "pin-project", - "rustls 0.23.10", + "rustls 0.23.13", "rustls-pki-types", "rustls-platform-verifier", "soketto 0.8.0", @@ -3608,7 +3786,7 
@@ dependencies = [ "beef", "futures-timer", "futures-util", - "hyper 0.14.29", + "hyper 0.14.30", "jsonrpsee-types 0.21.0", "pin-project", "rustc-hash", @@ -3633,7 +3811,7 @@ dependencies = [ "futures-timer", "futures-util", "http 1.1.0", - "http-body 1.0.0", + "http-body 1.0.1", "http-body-util", "jsonrpsee-types 0.23.2", "parking_lot", @@ -3656,7 +3834,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "78b7de9f3219d95985eb77fd03194d7c1b56c19bce1abfcc9d07462574b15572" dependencies = [ "async-trait", - "hyper 0.14.29", + "hyper 0.14.30", "hyper-rustls 0.24.2", "jsonrpsee-core 0.21.0", "jsonrpsee-types 0.21.0", @@ -3664,7 +3842,7 @@ dependencies = [ "serde_json", "thiserror", "tokio", - "tower", + "tower 0.4.13", "tracing", "url", ] @@ -3677,19 +3855,19 @@ checksum = "2d90064e04fb9d7282b1c71044ea94d0bbc6eff5621c66f1a0bce9e9de7cf3ac" dependencies = [ "async-trait", "base64 0.22.1", - "http-body 1.0.0", - "hyper 1.3.1", - "hyper-rustls 0.27.2", + "http-body 1.0.1", + "hyper 1.4.1", + "hyper-rustls 0.27.3", "hyper-util", "jsonrpsee-core 0.23.2", "jsonrpsee-types 0.23.2", - "rustls 0.23.10", + "rustls 0.23.13", "rustls-platform-verifier", "serde", "serde_json", "thiserror", "tokio", - "tower", + "tower 0.4.13", "tracing", "url", ] @@ -3701,10 +3879,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7895f186d5921065d96e16bd795e5ca89ac8356ec423fafc6e3d7cf8ec11aee4" dependencies = [ "heck 0.5.0", - "proc-macro-crate 3.1.0", + "proc-macro-crate 3.2.0", "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.72", + "quote 1.0.37", + "syn 2.0.77", ] [[package]] @@ -3716,9 +3894,9 @@ dependencies = [ "anyhow", "futures-util", "http 1.1.0", - "http-body 1.0.0", + "http-body 1.0.1", "http-body-util", - "hyper 1.3.1", + "hyper 1.4.1", "hyper-util", "jsonrpsee-core 0.23.2", "jsonrpsee-types 0.23.2", @@ -3731,7 +3909,7 @@ dependencies = [ "tokio", "tokio-stream", "tokio-util", - "tower", + "tower 0.4.13", "tracing", ] @@ -3791,7 +3969,7 @@ version = "9.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b9ae10193d25051e74945f1ea2d0b42e03cc3b890f7e4cc5faa44997d808193f" dependencies = [ - "base64 0.21.5", + "base64 0.21.7", "js-sys", "pem", "ring", @@ -3814,9 +3992,9 @@ dependencies = [ [[package]] name = "k256" -version = "0.13.3" +version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "956ff9b67e26e1a6a866cb758f12c6f8746208489e3e4a4b5580802f2f0a587b" +checksum = "f6e3919bbaa2945715f0bb6d3934a173d1e9a59ac23767fbaaef277265a7411b" dependencies = [ "cfg-if", "ecdsa 0.16.9", @@ -3828,13 +4006,53 @@ dependencies = [ [[package]] name = "keccak" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f6d5ed8676d904364de097082f4e7d240b571b67989ced0240f08b7f966f940" +checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654" dependencies = [ "cpufeatures", ] +[[package]] +name = "kv-log-macro" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0de8b303297635ad57c9f5059fd9cee7a47f8e8daa09df0fcd07dd39fb22977f" +dependencies = [ + "log", +] + +[[package]] +name = "lalrpop" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "55cb077ad656299f160924eb2912aa147d7339ea7d69e1b5517326fdcec3c1ca" +dependencies = [ + "ascii-canvas", + "bit-set", + "ena", + "itertools 0.11.0", + "lalrpop-util", + "petgraph", + "pico-args", + 
"regex", + "regex-syntax 0.8.4", + "string_cache", + "term", + "tiny-keccak 2.0.2", + "unicode-xid 0.2.6", + "walkdir", +] + +[[package]] +name = "lalrpop-util" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "507460a910eb7b32ee961886ff48539633b788a36b65692b95f225b844c82553" +dependencies = [ + "regex-automata 0.4.7", +] + [[package]] name = "lazy_static" version = "1.5.0" @@ -3856,20 +4074,26 @@ version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "884e2677b40cc8c339eaefcb701c32ef1fd2493d71118dc0ca4b6a736c93bd67" +[[package]] +name = "levenshtein" +version = "1.0.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db13adb97ab515a3691f56e4dbab09283d0b86cb45abd991d8634a9d6f501760" + [[package]] name = "libc" -version = "0.2.155" +version = "0.2.159" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" +checksum = "561d97a539a36e26a9a5fad1ea11a3039a67714694aaa379433e580854bc3dc5" [[package]] name = "libloading" -version = "0.7.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f" +checksum = "4979f22fdb869068da03c9f7528f8297c6fd2606bc3a4affe42e6a823fdb8da4" dependencies = [ "cfg-if", - "winapi", + "windows-targets 0.52.6", ] [[package]] @@ -3878,6 +4102,16 @@ version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" +[[package]] +name = "libredox" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" +dependencies = [ + "bitflags 2.6.0", + "libc", +] + [[package]] name = "librocksdb-sys" version = "0.11.0+8.1.1" @@ -3955,9 +4189,9 @@ dependencies = [ [[package]] name = "libz-sys" -version = "1.1.12" +version = "1.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d97137b25e321a73eef1418d1d5d2eda4d77e12813f8e6dead84bc52c5870a7b" +checksum = "d2d16453e800a8cf6dd2fc3eb4bc99b786a9b90c663b8559a5b1a041bf89e472" dependencies = [ "cc", "pkg-config", @@ -3989,7 +4223,7 @@ dependencies = [ "once_cell", "rand 0.8.5", "regex", - "reqwest 0.12.5", + "reqwest 0.12.7", "serde", "serde_json", "static_assertions", @@ -4010,9 +4244,9 @@ dependencies = [ [[package]] name = "lock_api" -version = "0.4.11" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c168f8615b12bc01f9c17e2eb0cc07dcae1940121185446edc3744920e8ef45" +checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" dependencies = [ "autocfg", "scopeguard", @@ -4020,9 +4254,12 @@ dependencies = [ [[package]] name = "log" -version = "0.4.21" +version = "0.4.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" +checksum = "a7a70ba024b9dc04c27ea2f0c0548feb474ec5c54bba33a7f72f873a39d07b24" +dependencies = [ + "value-bag", +] [[package]] name = "logos" @@ -4042,9 +4279,9 @@ dependencies = [ "beef", "fnv", "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "regex-syntax 0.6.29", - "syn 2.0.72", + "syn 2.0.77", ] [[package]] @@ -4058,18 +4295,18 @@ dependencies = [ [[package]] name = "lru" -version = "0.12.1" +version = "0.12.4" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2994eeba8ed550fd9b47a0b38f0242bc3344e496483c6180b69139cc2fa5d1d7" +checksum = "37ee39891760e7d94734f6f63fedc29a2e4a152f836120753a72503f09fcf904" dependencies = [ "hashbrown 0.14.5", ] [[package]] name = "lz4-sys" -version = "1.9.4" +version = "1.11.1+lz4-1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57d27b317e207b10f69f5e75494119e391a96f48861ae870d1da6edac98ca900" +checksum = "6bd8c0d6c6ed0cd30b3652886bb8711dc4bb01d637a68105a3d5158039b418e6" dependencies = [ "cc", "libc", @@ -4117,16 +4354,16 @@ dependencies = [ [[package]] name = "memchr" -version = "2.6.4" +version = "2.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" +checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" [[package]] name = "merkle_tree_consistency_checker" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.4.6", + "clap 4.5.18", "tracing", "zksync_config", "zksync_env_config", @@ -4167,8 +4404,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49e7bc1560b95a3c4a25d03de42fe76ca718ab92d1a22a55b9b4cf67b3ae635c" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.72", + "quote 1.0.37", + "syn 2.0.77", ] [[package]] @@ -4179,9 +4416,9 @@ checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" [[package]] name = "mime_guess" -version = "2.0.4" +version = "2.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4192263c238a5f0d0c6bfd21f336a313a4ce1c450542449ca191bb657b4642ef" +checksum = "f7c44f8e672c00fe5308fa235f821cb4198414e1c77935c1ab6948d3fd78550e" dependencies = [ "mime", "unicase", @@ -4189,9 +4426,9 @@ dependencies = [ [[package]] name = "mini-moka" -version = "0.10.2" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23e0b72e7c9042467008b10279fc732326bd605459ae03bda88825909dd19b56" +checksum = "c325dfab65f261f386debee8b0969da215b3fa0037e74c8a1234db7ba986d803" dependencies = [ "crossbeam-channel", "crossbeam-utils", @@ -4210,18 +4447,18 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.7.1" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" +checksum = "e2d80299ef12ff69b16a84bb182e3b9df68b5a91574d3d4fa6e41b65deec4df1" dependencies = [ - "adler", + "adler2", ] [[package]] name = "mio" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4569e456d394deccd22ce1c1913e6ea0e54519f577285001215d33557431afe4" +checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec" dependencies = [ "hermit-abi 0.3.9", "libc", @@ -4254,17 +4491,16 @@ dependencies = [ [[package]] name = "multimap" -version = "0.8.3" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" +checksum = "defc4c55412d89136f966bbb339008b474350e5e6e78d2714439c386b3137a03" [[package]] name = "native-tls" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" +checksum = 
"a8614eb2c83d59d1c8cc974dd3f920198647674a0a035e1af1fa58707e317466" dependencies = [ - "lazy_static", "libc", "log", "openssl", @@ -4276,14 +4512,21 @@ dependencies = [ "tempfile", ] +[[package]] +name = "new_debug_unreachable" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "650eef8c711430f1a879fdd01d4745a7deea475becfb90269c06775983bbf086" + [[package]] name = "nix" -version = "0.27.1" +version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053" +checksum = "71e2746dc3a24dd78b3cfcb7be93368c6de9963d30f43a6a73998a9cf4b17b46" dependencies = [ "bitflags 2.6.0", "cfg-if", + "cfg_aliases", "libc", ] @@ -4484,11 +4727,11 @@ dependencies = [ [[package]] name = "num_enum" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02339744ee7253741199f897151b38e72257d13802d4ee837285cc2990a90845" +checksum = "4e613fc340b2220f734a8595782c551f1250e969d87d3be1ae0579e8d4065179" dependencies = [ - "num_enum_derive 0.7.2", + "num_enum_derive 0.7.3", ] [[package]] @@ -4499,27 +4742,27 @@ checksum = "96667db765a921f7b295ffee8b60472b686a51d4f21c2ee4ffdb94c7013b65a6" dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.72", + "quote 1.0.37", + "syn 2.0.77", ] [[package]] name = "num_enum_derive" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "681030a937600a36906c185595136d26abfebb4aa9c65701cefcaf8578bb982b" +checksum = "af1844ef2428cc3e1cb900be36181049ef3d3193c63e43026cfe202983b27a56" dependencies = [ - "proc-macro-crate 3.1.0", + "proc-macro-crate 3.2.0", "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.72", + "quote 1.0.37", + "syn 2.0.77", ] [[package]] name = "object" -version = "0.32.1" +version = "0.36.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf5f9dd3933bd50a9e1f149ec995f39ae2c496d31fd772c1fd45ebc27e902b0" +checksum = "084f1a5821ac4c651660a94a7153d27ac9d8a53736203f58b31945ded098070a" dependencies = [ "memchr", ] @@ -4532,15 +4775,15 @@ checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" [[package]] name = "oorandom" -version = "11.1.3" +version = "11.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ab1bc2a289d34bd04a330323ac98a1b4bc82c9d9fcb1e66b63caa84da26b575" +checksum = "b410bbe7e14ab526a0e86877eb47c6996a2bd7746f027ba551028c925390e4e9" [[package]] name = "opaque-debug" -version = "0.3.0" +version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" +checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" [[package]] name = "openssl" @@ -4564,8 +4807,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.72", + "quote 1.0.37", + "syn 2.0.77", ] [[package]] @@ -4622,7 +4865,7 @@ dependencies = [ "bytes", "http 1.1.0", "opentelemetry", - "reqwest 0.12.5", + "reqwest 0.12.7", ] [[package]] @@ -4638,8 +4881,8 @@ dependencies = [ "opentelemetry-http", "opentelemetry-proto", "opentelemetry_sdk", - "prost 0.13.1", - "reqwest 0.12.5", + "prost 0.13.3", + "reqwest 0.12.7", "thiserror", "tokio", "tonic", @@ -4653,7 
+4896,7 @@ checksum = "30ee9f20bff9c984511a02f082dc8ede839e4a9bf15cc2487c8d6fea5ad850d9" dependencies = [ "opentelemetry", "opentelemetry_sdk", - "prost 0.13.1", + "prost 0.13.3", "tonic", ] @@ -4695,13 +4938,13 @@ dependencies = [ [[package]] name = "os_info" -version = "3.7.0" +version = "3.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "006e42d5b888366f1880eda20371fedde764ed2213dc8496f49622fa0c99cd5e" +checksum = "ae99c7fa6dd38c7cafe1ec085e804f8f555a2f8659b0dbe03f1f9963a9b51092" dependencies = [ "log", "serde", - "winapi", + "windows-sys 0.52.0", ] [[package]] @@ -4734,7 +4977,7 @@ version = "3.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "306800abfa29c7f16596b5970a588435e3d5b3149683d00c12b699cc19f895ee" dependencies = [ - "arrayvec 0.7.4", + "arrayvec 0.7.6", "bitvec", "byte-slice-cast", "impl-trait-for-tuples", @@ -4748,23 +4991,23 @@ version = "3.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d830939c76d294956402033aee57a6da7b438f2294eb94864c37b0569053a42c" dependencies = [ - "proc-macro-crate 3.1.0", + "proc-macro-crate 3.2.0", "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "syn 1.0.109", ] [[package]] name = "parking" -version = "2.2.0" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" +checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" [[package]] name = "parking_lot" -version = "0.12.1" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" +checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" dependencies = [ "lock_api", "parking_lot_core", @@ -4772,22 +5015,22 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.9" +version = "0.9.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e" +checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", "redox_syscall", "smallvec", - "windows-targets 0.48.5", + "windows-targets 0.52.6", ] [[package]] name = "paste" -version = "1.0.14" +version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" +checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" [[package]] name = "pbkdf2" @@ -4831,9 +5074,9 @@ checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" [[package]] name = "pest" -version = "2.7.6" +version = "2.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f200d8d83c44a45b21764d1916299752ca035d15ecd46faca3e9a2a2bf6ad06" +checksum = "fdbef9d1d47087a895abd220ed25eb4ad973a5e26f6a4367b038c25e28dfc2d9" dependencies = [ "memchr", "thiserror", @@ -4842,9 +5085,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.7.6" +version = "2.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcd6ab1236bbdb3a49027e920e693192ebfe8913f6d60e294de57463a493cfde" +checksum = "4d3a6e3394ec80feb3b6393c725571754c6188490265c61aaf260810d6b95aa0" dependencies = [ "pest", "pest_generator", @@ -4852,22 +5095,22 @@ dependencies = [ [[package]] name = "pest_generator" -version = "2.7.6" 
+version = "2.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a31940305ffc96863a735bef7c7994a00b325a7138fdbc5bda0f1a0476d3275" +checksum = "94429506bde1ca69d1b5601962c73f4172ab4726571a59ea95931218cb0e930e" dependencies = [ "pest", "pest_meta", "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.72", + "quote 1.0.37", + "syn 2.0.77", ] [[package]] name = "pest_meta" -version = "2.7.6" +version = "2.7.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7ff62f5259e53b78d1af898941cdcdccfae7385cf7d793a6e55de5d05bb4b7d" +checksum = "ac8a071862e93690b6e34e9a5fb8e33ff3734473ac0245b27232222c4906a33f" dependencies = [ "once_cell", "pest", @@ -4876,39 +5119,54 @@ dependencies = [ [[package]] name = "petgraph" -version = "0.6.4" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" +checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" dependencies = [ "fixedbitset", - "indexmap 2.1.0", + "indexmap 2.5.0", +] + +[[package]] +name = "phf_shared" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6796ad771acdc0123d2a88dc428b5e38ef24456743ddb1744ed628f9815c096" +dependencies = [ + "siphasher 0.3.11", ] +[[package]] +name = "pico-args" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5be167a7af36ee22fe3115051bc51f6e6c7054c9348e28deb4f49bd6f705a315" + [[package]] name = "pin-project" -version = "1.1.3" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fda4ed1c6c173e3fc7a83629421152e01d7b1f9b7f65fb301e490e8cfc656422" +checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.3" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4359fd9c9171ec6e8c62926d6faaf553a8dc3f64e1507e76da7911b4f6a04405" +checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.72", + "quote 1.0.37", + "syn 2.0.77", ] [[package]] name = "pin-project-lite" -version = "0.2.13" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" +checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" [[package]] name = "pin-utils" @@ -4933,9 +5191,9 @@ version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c8ffb9f10fa047879315e6625af03c164b16962a5368d724ed16323b68ace47f" dependencies = [ - "der 0.7.8", + "der 0.7.9", "pkcs8 0.10.2", - "spki 0.7.2", + "spki 0.7.3", ] [[package]] @@ -4954,21 +5212,21 @@ version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" dependencies = [ - "der 0.7.8", - "spki 0.7.2", + "der 0.7.9", + "spki 0.7.3", ] [[package]] name = "pkg-config" -version = "0.3.27" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26072860ba924cbfa98ea39c8c19b4dd6a4a25423dbdf219c1eca91aa0cf6964" +checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" [[package]] name = "plotters" -version = "0.3.5" +version = 
"0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2c224ba00d7cadd4d5c660deaf2098e5e80e07846537c51f9cfa4be50c1fd45" +checksum = "5aeb6f403d7a4911efb1e33402027fc44f29b5bf6def3effcc22d7bb75f2b747" dependencies = [ "num-traits", "plotters-backend", @@ -4979,15 +5237,15 @@ dependencies = [ [[package]] name = "plotters-backend" -version = "0.3.5" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e76628b4d3a7581389a35d5b6e2139607ad7c75b17aed325f210aa91f4a9609" +checksum = "df42e13c12958a16b3f7f4386b9ab1f3e7933914ecea48da7139435263a4172a" [[package]] name = "plotters-svg" -version = "0.3.5" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38f6d39893cca0701371e3c27294f09797214b86f1fb951b89ade8ec04e2abab" +checksum = "51bae2ac328883f7acdfea3d66a7c35751187f870bc81f94563733a154d7a670" dependencies = [ "plotters-backend", ] @@ -5020,9 +5278,9 @@ dependencies = [ [[package]] name = "polyval" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d52cff9d1d4dee5fe6d03729099f4a310a41179e0a10dbf542039873f2e826fb" +checksum = "9d1fe60d06143b2430aa532c94cfe9e29783047f06c0d7fd359a9a51b729fa25" dependencies = [ "cfg-if", "cpufeatures", @@ -5038,15 +5296,24 @@ checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" [[package]] name = "ppv-lite86" -version = "0.2.17" +version = "0.2.20" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" +dependencies = [ + "zerocopy", +] + +[[package]] +name = "precomputed-hash" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" +checksum = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c" [[package]] name = "pretty_assertions" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af7cee1a6c8a5b9208b3cb1061f10c0cb689087b3d8ce85fb9d2dd7a29b6ba66" +checksum = "3ae130e2f271fbc2ac3a40fb1d07180839cdbbe443c7a27e1e3c13c5cac0116d" dependencies = [ "diff", "yansi", @@ -5054,12 +5321,12 @@ dependencies = [ [[package]] name = "prettyplease" -version = "0.2.15" +version = "0.2.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae005bd773ab59b4725093fd7df83fd7892f7d8eafb48dbd7de6e024e4215f9d" +checksum = "479cf940fbbb3426c32c5d5176f62ad57549a0bb84773423ba8be9d089f5faba" dependencies = [ "proc-macro2 1.0.86", - "syn 2.0.72", + "syn 2.0.77", ] [[package]] @@ -5097,20 +5364,11 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e8366a6159044a37876a2b9817124296703c586a5c92e2c53751fa06d8d43e8" -dependencies = [ - "toml_edit 0.20.2", -] - -[[package]] -name = "proc-macro-crate" -version = "3.1.0" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d37c51ca738a55da99dc0c4a34860fd675453b8b36209178c2249bb13651284" +checksum = "8ecf48c7ca261d60b74ab1a7b20da18bede46776b2e55535cb958eb595c5fa7b" dependencies = [ - "toml_edit 0.21.1", + "toml_edit 0.22.22", ] [[package]] @@ -5121,7 +5379,7 @@ checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" dependencies = [ "proc-macro-error-attr", "proc-macro2 1.0.86", - 
"quote 1.0.36", + "quote 1.0.37", "syn 1.0.109", "version_check", ] @@ -5133,7 +5391,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "version_check", ] @@ -5163,9 +5421,9 @@ dependencies = [ [[package]] name = "prometheus-client" -version = "0.22.2" +version = "0.22.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1ca959da22a332509f2a73ae9e5f23f9dcfc31fd3a54d71f159495bd5909baa" +checksum = "504ee9ff529add891127c4827eb481bd69dc0ebc72e9a682e187db4caa60c3ca" dependencies = [ "dtoa", "itoa", @@ -5180,76 +5438,75 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.72", + "quote 1.0.37", + "syn 2.0.77", ] [[package]] name = "prost" -version = "0.12.1" +version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4fdd22f3b9c31b53c060df4a0613a1c7f062d4115a2b984dd15b1858f7e340d" +checksum = "deb1435c188b76130da55f17a466d252ff7b1418b2ad3e037d127b94e3411f29" dependencies = [ "bytes", - "prost-derive 0.12.1", + "prost-derive 0.12.6", ] [[package]] name = "prost" -version = "0.13.1" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e13db3d3fde688c61e2446b4d843bc27a7e8af269a69440c0308021dc92333cc" +checksum = "7b0487d90e047de87f984913713b85c601c05609aad5b0df4b4573fbf69aa13f" dependencies = [ "bytes", - "prost-derive 0.13.1", + "prost-derive 0.13.3", ] [[package]] name = "prost-build" -version = "0.12.1" +version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8bdf592881d821b83d471f8af290226c8d51402259e9bb5be7f9f8bdebbb11ac" +checksum = "22505a5c94da8e3b7c2996394d1c933236c4d743e81a410bcca4e6989fc066a4" dependencies = [ "bytes", - "heck 0.4.1", - "itertools 0.10.5", + "heck 0.5.0", + "itertools 0.12.1", "log", "multimap", "once_cell", "petgraph", "prettyplease", - "prost 0.12.1", + "prost 0.12.6", "prost-types", "regex", - "syn 2.0.72", + "syn 2.0.77", "tempfile", - "which", ] [[package]] name = "prost-derive" -version = "0.12.1" +version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "265baba7fabd416cf5078179f7d2cbeca4ce7a9041111900675ea7c4cb8a4c32" +checksum = "81bddcdb20abf9501610992b6759a4c888aef7d1a7247ef75e2404275ac24af1" dependencies = [ "anyhow", - "itertools 0.10.5", + "itertools 0.12.1", "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.72", + "quote 1.0.37", + "syn 2.0.77", ] [[package]] name = "prost-derive" -version = "0.13.1" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18bec9b0adc4eba778b33684b7ba3e7137789434769ee3ce3930463ef904cfca" +checksum = "e9552f850d5f0964a4e4d0bf306459ac29323ddfbae05e35a7c0d35cb0803cc5" dependencies = [ "anyhow", - "itertools 0.12.0", + "itertools 0.13.0", "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.72", + "quote 1.0.37", + "syn 2.0.77", ] [[package]] @@ -5258,11 +5515,11 @@ version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "057237efdb71cf4b3f9396302a3d6599a92fa94063ba537b66130980ea9909f3" dependencies = [ - "base64 0.21.5", + "base64 0.21.7", "logos", "miette", "once_cell", - "prost 0.12.1", + "prost 0.12.6", "prost-types", "serde", 
"serde-value", @@ -5270,11 +5527,11 @@ dependencies = [ [[package]] name = "prost-types" -version = "0.12.1" +version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e081b29f63d83a4bc75cfc9f3fe424f9156cf92d8a4f0c9407cce9a1b67327cf" +checksum = "9091c90b0a32608e984ff2fa4091273cbdd755d54935c51d520887f4a1dbd5b0" dependencies = [ - "prost 0.12.1", + "prost 0.12.6", ] [[package]] @@ -5285,7 +5542,7 @@ checksum = "00bb76c5f6221de491fe2c8f39b106330bbd9762c6511119c07940e10eb9ff11" dependencies = [ "bytes", "miette", - "prost 0.12.1", + "prost 0.12.6", "prost-reflect", "prost-types", "protox-parse", @@ -5320,17 +5577,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16b845dbfca988fa33db069c0e230574d15a3088f147a87b64c7589eb662c9ac" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "syn 1.0.109", ] [[package]] name = "pulldown-cmark" -version = "0.9.3" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77a1a2f1f0a7ecff9c31abbe177637be0e97a0aef46cf8738ece09327985d998" +checksum = "57206b407293d2bcd3af849ce869d52068623f19e1b5ff8e8778e3309439682b" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.6.0", "memchr", "unicase", ] @@ -5377,9 +5634,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.36" +version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" +checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" dependencies = [ "proc-macro2 1.0.86", ] @@ -5497,11 +5754,22 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.4.1" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" +checksum = "355ae415ccd3a04315d3f8246e86d67689ea74d88d915576e1589a351062a13b" dependencies = [ - "bitflags 1.3.2", + "bitflags 2.6.0", +] + +[[package]] +name = "redox_users" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" +dependencies = [ + "getrandom", + "libredox", + "thiserror", ] [[package]] @@ -5513,7 +5781,7 @@ dependencies = [ "aho-corasick", "memchr", "regex-automata 0.4.7", - "regex-syntax 0.8.2", + "regex-syntax 0.8.4", ] [[package]] @@ -5533,7 +5801,7 @@ checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.2", + "regex-syntax 0.8.4", ] [[package]] @@ -5544,34 +5812,34 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.8.2" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" +checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b" [[package]] name = "rend" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2571463863a6bd50c32f94402933f03457a3fbaf697a707c5be741e459f08fd" +checksum = "71fe3824f5629716b1589be05dacd749f6aa084c87e00e016714a8cdfccc997c" dependencies = [ "bytecheck", ] [[package]] name = "reqwest" -version = "0.11.22" +version = "0.11.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"046cd98826c46c2ac8ddecae268eb5c2e58628688a5fc7a2643704a73faba95b" +checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62" dependencies = [ - "base64 0.21.5", + "base64 0.21.7", "bytes", "encoding_rs", "futures-core", "futures-util", "h2 0.3.26", - "http 0.2.9", + "http 0.2.12", "http-body 0.4.6", - "hyper 0.14.29", + "hyper 0.14.30", "hyper-tls 0.5.0", "ipnet", "js-sys", @@ -5581,10 +5849,12 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite", + "rustls-pemfile 1.0.4", "serde", "serde_json", "serde_urlencoded", - "system-configuration", + "sync_wrapper 0.1.2", + "system-configuration 0.5.1", "tokio", "tokio-native-tls", "tower-service", @@ -5592,14 +5862,14 @@ dependencies = [ "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "winreg 0.50.0", + "winreg", ] [[package]] name = "reqwest" -version = "0.12.5" +version = "0.12.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7d6d2a27d57148378eb5e111173f4276ad26340ecc5c49a4a2152167a2d6a37" +checksum = "f8f4955649ef5c38cc7f9e8aa41761d48fb9677197daea9984dc54f56aad5e63" dependencies = [ "base64 0.22.1", "bytes", @@ -5607,12 +5877,12 @@ dependencies = [ "futures-channel", "futures-core", "futures-util", - "h2 0.4.5", + "h2 0.4.6", "http 1.1.0", - "http-body 1.0.0", + "http-body 1.0.1", "http-body-util", - "hyper 1.3.1", - "hyper-rustls 0.27.2", + "hyper 1.4.1", + "hyper-rustls 0.27.3", "hyper-tls 0.6.0", "hyper-util", "ipnet", @@ -5624,12 +5894,12 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite", - "rustls-pemfile 2.0.0", + "rustls-pemfile 2.1.3", "serde", "serde_json", "serde_urlencoded", "sync_wrapper 1.0.1", - "system-configuration", + "system-configuration 0.6.1", "tokio", "tokio-native-tls", "tokio-util", @@ -5639,19 +5909,19 @@ dependencies = [ "wasm-bindgen-futures", "wasm-streams", "web-sys", - "winreg 0.52.0", + "windows-registry", ] [[package]] name = "reqwest-middleware" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "39346a33ddfe6be00cbc17a34ce996818b97b230b87229f10114693becca1268" +checksum = "562ceb5a604d3f7c885a792d42c199fd8af239d0a51b2fa6a78aafa092452b04" dependencies = [ "anyhow", "async-trait", "http 1.1.0", - "reqwest 0.12.5", + "reqwest 0.12.7", "serde", "thiserror", "tower-service", @@ -5664,7 +5934,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "82900c877a0ba5362ac5756efbd82c5b795dc509011c1253e2389d8708f1389d" dependencies = [ "addchain", - "arrayvec 0.7.4", + "arrayvec 0.7.6", "blake2 0.10.6", "byteorder", "derivative", @@ -5705,23 +5975,24 @@ dependencies = [ [[package]] name = "ring" -version = "0.17.7" +version = "0.17.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "688c63d65483050968b2a8937f7995f443e27041a0f7700aa59b0822aedebb74" +checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" dependencies = [ "cc", + "cfg-if", "getrandom", "libc", "spin", "untrusted", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] name = "rkyv" -version = "0.7.43" +version = "0.7.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "527a97cdfef66f65998b5f3b637c26f5a5ec09cc52a3f9932313ac645f4190f5" +checksum = "9008cd6385b9e161d8229e1f6549dd23c3d022f132a2ea37ac3a10ac4935779b" dependencies = [ "bitvec", "bytecheck", @@ -5737,12 +6008,12 @@ dependencies = [ [[package]] name = "rkyv_derive" -version = "0.7.43" +version = "0.7.45" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5c462a1328c8e67e4d6dbad1eb0355dd43e8ab432c6e227a43657f16ade5033" +checksum = "503d1d27590a2b0a3a4ca4c94755aa2875657196ecbf401a42eff41d7de532c0" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "syn 1.0.109", ] @@ -5774,9 +6045,9 @@ checksum = "afab94fb28594581f62d981211a9a4d53cc8130bbcbbb89a0440d9b8e81a7746" [[package]] name = "rsa" -version = "0.9.5" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af6c4b23d99685a1408194da11270ef8e9809aff951cc70ec9b17350b087e474" +checksum = "5d0e5124fcb30e76a7e79bfee683a2746db83784b86289f6251b54b7950a0dfc" dependencies = [ "const-oid", "digest 0.10.7", @@ -5787,18 +6058,18 @@ dependencies = [ "pkcs8 0.10.2", "rand_core 0.6.4", "signature 2.2.0", - "spki 0.7.2", + "spki 0.7.3", "subtle", "zeroize", ] [[package]] name = "rust_decimal" -version = "1.33.1" +version = "1.36.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06676aec5ccb8fc1da723cc8c0f9a46549f21ebb8753d3915c6c41db1e7f1dc4" +checksum = "b082d80e3e3cc52b2ed634388d436fe1f4de6af5786cc2de9ba9737527bdf555" dependencies = [ - "arrayvec 0.7.4", + "arrayvec 0.7.6", "borsh", "bytes", "num-traits", @@ -5810,9 +6081,9 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" +checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" [[package]] name = "rustc-hash" @@ -5828,18 +6099,18 @@ checksum = "3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" [[package]] name = "rustc_version" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" dependencies = [ "semver", ] [[package]] name = "rustix" -version = "0.38.34" +version = "0.38.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" +checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811" dependencies = [ "bitflags 2.6.0", "errno", @@ -5869,23 +6140,23 @@ dependencies = [ "log", "ring", "rustls-pki-types", - "rustls-webpki 0.102.4", + "rustls-webpki 0.102.8", "subtle", "zeroize", ] [[package]] name = "rustls" -version = "0.23.10" +version = "0.23.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05cff451f60db80f490f3c182b77c35260baace73209e9cdbbe526bfe3a4d402" +checksum = "f2dabaac7466917e566adb06783a81ca48944c6898a1b08b9374106dd671f4c8" dependencies = [ "aws-lc-rs", "log", "once_cell", "ring", "rustls-pki-types", - "rustls-webpki 0.102.4", + "rustls-webpki 0.102.8", "subtle", "zeroize", ] @@ -5904,12 +6175,12 @@ dependencies = [ [[package]] name = "rustls-native-certs" -version = "0.7.0" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f1fb85efa936c42c6d5fc28d2629bb51e4b2f4b8a5211e297d599cc5a093792" +checksum = "e5bfb394eeed242e909609f56089eecfe5fda225042e8b171791b9c95f5931e5" dependencies = [ "openssl-probe", - "rustls-pemfile 2.0.0", + "rustls-pemfile 2.1.3", "rustls-pki-types", "schannel", "security-framework", @@ -5921,40 +6192,40 @@ version = "1.0.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "1c74cae0a4cf6ccbbf5f359f08efdf8ee7e1dc532573bf0db71968cb56b1448c" dependencies = [ - "base64 0.21.5", + "base64 0.21.7", ] [[package]] name = "rustls-pemfile" -version = "2.0.0" +version = "2.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35e4980fa29e4c4b212ffb3db068a564cbf560e51d3944b7c88bd8bf5bec64f4" +checksum = "196fe16b00e106300d3e45ecfcb764fa292a535d7326a29a5875c579c7417425" dependencies = [ - "base64 0.21.5", + "base64 0.22.1", "rustls-pki-types", ] [[package]] name = "rustls-pki-types" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "976295e77ce332211c0d24d92c0e83e50f5c5f046d11082cea19f3df13a3562d" +checksum = "fc0a2ce646f8655401bb81e7927b812614bd5d91dbc968696be50603510fcaf0" [[package]] name = "rustls-platform-verifier" -version = "0.3.2" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e3beb939bcd33c269f4bf946cc829fcd336370267c4a927ac0399c84a3151a1" +checksum = "afbb878bdfdf63a336a5e63561b1835e7a8c91524f51621db870169eac84b490" dependencies = [ "core-foundation", "core-foundation-sys", "jni", "log", "once_cell", - "rustls 0.23.10", - "rustls-native-certs 0.7.0", + "rustls 0.23.13", + "rustls-native-certs 0.7.3", "rustls-platform-verifier-android", - "rustls-webpki 0.102.4", + "rustls-webpki 0.102.8", "security-framework", "security-framework-sys", "webpki-roots", @@ -5963,9 +6234,9 @@ dependencies = [ [[package]] name = "rustls-platform-verifier-android" -version = "0.1.0" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84e217e7fdc8466b5b35d30f8c0a30febd29173df4a3a0c2115d306b9c4117ad" +checksum = "f87165f0995f63a9fbeea62b64d10b4d9d8e78ec6d7d51fb2125fda7bb36788f" [[package]] name = "rustls-webpki" @@ -5979,9 +6250,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.102.4" +version = "0.102.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff448f7e92e913c4b7d4c6d8e4540a1724b319b4152b8aef6d4cf8339712b33e" +checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" dependencies = [ "aws-lc-rs", "ring", @@ -5991,9 +6262,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.14" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ffc183a10b4478d04cbbbfc96d0873219d962dd5accaff2ffbd4ceb7df837f4" +checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6" [[package]] name = "ruzstd" @@ -6008,9 +6279,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.15" +version = "1.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ad4cc8da4ef723ed60bced201181d83791ad433213d8c24efffda1eec85d741" +checksum = "f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" [[package]] name = "same-file" @@ -6056,7 +6327,7 @@ dependencies = [ "darling 0.14.4", "proc-macro-crate 1.3.1", "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "syn 1.0.109", ] @@ -6084,7 +6355,7 @@ dependencies = [ "darling 0.14.4", "proc-macro-crate 1.3.1", "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "syn 1.0.109", ] @@ -6108,9 +6379,9 @@ version = "2.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d35494501194174bda522a32605929eefc9ecf7e0a326c26db1fdd85881eb62" dependencies = [ - "proc-macro-crate 3.1.0", + 
"proc-macro-crate 3.2.0", "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "syn 1.0.109", ] @@ -6121,9 +6392,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "00860983481ac590ac87972062909bef0d6a658013b592ccc0f2feb272feab11" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "scale-info", - "syn 2.0.72", + "syn 2.0.77", "thiserror", ] @@ -6149,11 +6420,11 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.22" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c3733bf4cf7ea0880754e19cb5a462007c4a8c1914bff372ccc95b464f1df88" +checksum = "e9aaafd5a2b6e3d657ff009d82fbd630b6bd54dd4eb06f21693925cdf80f9b8b" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.59.0", ] [[package]] @@ -6164,7 +6435,7 @@ checksum = "8de18f6d8ba0aad7045f5feae07ec29899c1112584a38509a84ad7b04451eaa0" dependencies = [ "aead", "arrayref", - "arrayvec 0.7.4", + "arrayvec 0.7.6", "curve25519-dalek", "getrandom_or_panic", "merlin", @@ -6218,7 +6489,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" dependencies = [ "base16ct 0.2.0", - "der 0.7.8", + "der 0.7.9", "generic-array", "pkcs8 0.10.2", "subtle", @@ -6255,9 +6526,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.11.0" +version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c627723fd09706bacdb5cf41499e95098555af3c3c29d014dc3c458ef6be11c0" +checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ "bitflags 2.6.0", "core-foundation", @@ -6269,9 +6540,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.11.0" +version = "2.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "317936bbbd05227752583946b9e66d7ce3b489f84e11a94a510b4437fef407d7" +checksum = "ea4a292869320c0272d7bc55a5a6aafaff59b4f63404a003887b679a2e05b4b6" dependencies = [ "core-foundation-sys", "libc", @@ -6282,7 +6553,7 @@ name = "selector_generator" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.4.6", + "clap 4.5.18", "ethabi", "glob", "hex", @@ -6308,13 +6579,13 @@ checksum = "f638d531eccd6e23b980caf34876660d38e265409d8e99b397ab71eb3612fad0" [[package]] name = "sentry" -version = "0.31.7" +version = "0.31.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0097a48cd1999d983909f07cb03b15241c5af29e5e679379efac1c06296abecc" +checksum = "6ce4b57f1b521f674df7a1d200be8ff5d74e3712020ee25b553146657b5377d5" dependencies = [ "httpdate", "native-tls", - "reqwest 0.11.22", + "reqwest 0.11.27", "sentry-backtrace", "sentry-contexts", "sentry-core", @@ -6327,9 +6598,9 @@ dependencies = [ [[package]] name = "sentry-backtrace" -version = "0.31.7" +version = "0.31.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18a7b80fa1dd6830a348d38a8d3a9761179047757b7dca29aef82db0118b9670" +checksum = "58cc8d4e04a73de8f718dc703943666d03f25d3e9e4d0fb271ca0b8c76dfa00e" dependencies = [ "backtrace", "once_cell", @@ -6339,9 +6610,9 @@ dependencies = [ [[package]] name = "sentry-contexts" -version = "0.31.7" +version = "0.31.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7615dc588930f1fd2e721774f25844ae93add2dbe2d3c2f995ce5049af898147" +checksum = "6436c1bad22cdeb02179ea8ef116ffc217797c028927def303bc593d9320c0d1" dependencies = [ "hostname", "libc", @@ 
-6353,9 +6624,9 @@ dependencies = [ [[package]] name = "sentry-core" -version = "0.31.7" +version = "0.31.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f51264e4013ed9b16558cce43917b983fa38170de2ca480349ceb57d71d6053" +checksum = "901f761681f97db3db836ef9e094acdd8756c40215326c194201941947164ef1" dependencies = [ "once_cell", "rand 0.8.5", @@ -6366,9 +6637,9 @@ dependencies = [ [[package]] name = "sentry-debug-images" -version = "0.31.7" +version = "0.31.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fe6180fa564d40bb942c9f0084ffb5de691c7357ead6a2b7a3154fae9e401dd" +checksum = "afdb263e73d22f39946f6022ed455b7561b22ff5553aca9be3c6a047fa39c328" dependencies = [ "findshlibs", "once_cell", @@ -6377,9 +6648,9 @@ dependencies = [ [[package]] name = "sentry-panic" -version = "0.31.7" +version = "0.31.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "323160213bba549f9737317b152af116af35c0410f4468772ee9b606d3d6e0fa" +checksum = "74fbf1c163f8b6a9d05912e1b272afa27c652e8b47ea60cb9a57ad5e481eea99" dependencies = [ "sentry-backtrace", "sentry-core", @@ -6387,9 +6658,9 @@ dependencies = [ [[package]] name = "sentry-tracing" -version = "0.31.7" +version = "0.31.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38033822128e73f7b6ca74c1631cef8868890c6cb4008a291cf73530f87b4eac" +checksum = "82eabcab0a047040befd44599a1da73d3adb228ff53b5ed9795ae04535577704" dependencies = [ "sentry-backtrace", "sentry-core", @@ -6399,9 +6670,9 @@ dependencies = [ [[package]] name = "sentry-types" -version = "0.31.7" +version = "0.31.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e663b3eb62ddfc023c9cf5432daf5f1a4f6acb1df4d78dd80b740b32dd1a740" +checksum = "da956cca56e0101998c8688bc65ce1a96f00673a0e58e663664023d4c7911e82" dependencies = [ "debugid", "hex", @@ -6455,8 +6726,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.72", + "quote 1.0.37", + "syn 2.0.77", ] [[package]] @@ -6473,14 +6744,33 @@ dependencies = [ [[package]] name = "serde_path_to_error" -version = "0.1.14" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4beec8bce849d58d06238cb50db2e1c417cfeafa4c63f692b15c82b7c80f8335" +checksum = "af99884400da37c88f5e9146b7f1fd0fbcae8f6eec4e9da38b67d05486f814a6" dependencies = [ "itoa", "serde", ] +[[package]] +name = "serde_regex" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8136f1a4ea815d7eac4101cfd0b16dc0cb5e1fe1b8609dfd728058656b7badf" +dependencies = [ + "regex", + "serde", +] + +[[package]] +name = "serde_spanned" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" +dependencies = [ + "serde", +] + [[package]] name = "serde_urlencoded" version = "0.7.1" @@ -6513,17 +6803,17 @@ checksum = "e182d6ec6f05393cc0e5ed1bf81ad6db3a8feedf8ee515ecdd369809bcce8082" dependencies = [ "darling 0.13.4", "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "syn 1.0.109", ] [[package]] name = "serde_yaml" -version = "0.9.25" +version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a49e178e4452f45cb61d0cd8cebc1b0fafd3e41929e996cef79aa3aca91f574" 
+checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.1.0", + "indexmap 2.5.0", "itoa", "ryu", "serde", @@ -6638,9 +6928,9 @@ checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "signal-hook-registry" -version = "1.4.1" +version = "1.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8229b473baa5980ac72ef434c4415e70c4b5e71b423043adb4ba059f89c99a1" +checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" dependencies = [ "libc", ] @@ -6667,15 +6957,15 @@ dependencies = [ [[package]] name = "simdutf8" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f27f6278552951f1f2b8cf9da965d10969b2efdea95a6ec47987ab46edfe263a" +checksum = "e3a9fe34e3e7a50316060351f37187a3f546bce95496156754b601a5fa71b76e" [[package]] name = "similar" -version = "2.3.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2aeaf503862c419d66959f5d7ca015337d864e9c49485d771b732e2a20453597" +checksum = "1de1d4f81173b03af4c0cbed3c898f6bff5b870e4a7f5d6f4057d62a7a4b686e" [[package]] name = "simple_asn1" @@ -6689,6 +6979,12 @@ dependencies = [ "time", ] +[[package]] +name = "siphasher" +version = "0.3.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d" + [[package]] name = "siphasher" version = "1.0.1" @@ -6731,9 +7027,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.13.1" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6ecd384b10a64542d77071bd64bd7b231f4ed5940fba55e98c3de13824cf3d7" +checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" dependencies = [ "serde", ] @@ -6744,7 +7040,7 @@ version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a33bd3e260892199c3ccfc487c88b2da2265080acb316cd920da72fdfd7c599f" dependencies = [ - "async-channel", + "async-channel 2.3.1", "async-executor", "async-fs", "async-io", @@ -6761,10 +7057,10 @@ version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6d1eaa97d77be4d026a1e7ffad1bb3b78448763b357ea6f8188d3e6f736a9b9" dependencies = [ - "arrayvec 0.7.4", + "arrayvec 0.7.6", "async-lock", "atomic-take", - "base64 0.21.5", + "base64 0.21.7", "bip39", "blake2-rfc", "bs58", @@ -6780,7 +7076,7 @@ dependencies = [ "hashbrown 0.14.5", "hex", "hmac 0.12.1", - "itertools 0.12.0", + "itertools 0.12.1", "libm", "libsecp256k1", "merlin", @@ -6800,7 +7096,7 @@ dependencies = [ "serde_json", "sha2 0.10.8", "sha3 0.10.8", - "siphasher", + "siphasher 1.0.1", "slab", "smallvec", "soketto 0.7.1", @@ -6816,9 +7112,9 @@ version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5496f2d116b7019a526b1039ec2247dd172b8670633b1a64a614c9ea12c9d8c7" dependencies = [ - "async-channel", + "async-channel 2.3.1", "async-lock", - "base64 0.21.5", + "base64 0.21.7", "blake2-rfc", "derive_more 0.99.18", "either", @@ -6829,7 +7125,7 @@ dependencies = [ "futures-util", "hashbrown 0.14.5", "hex", - "itertools 0.12.0", + "itertools 0.12.1", "log", "lru", "no-std-net", @@ -6839,7 +7135,7 @@ dependencies = [ "rand_chacha", "serde", "serde_json", - "siphasher", + "siphasher 1.0.1", "slab", "smol", "smoldot", @@ -6885,12 +7181,12 @@ dependencies = [ [[package]] name = "socket2" 
-version = "0.5.5" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" +checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" dependencies = [ "libc", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -6959,12 +7255,12 @@ dependencies = [ [[package]] name = "spki" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d1e996ef02c474957d681f1b05213dfb0abab947b446a62d37770b23500184a" +checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" dependencies = [ "base64ct", - "der 0.7.8", + "der 0.7.9", ] [[package]] @@ -6975,20 +7271,19 @@ checksum = "c85070f382340e8b23a75808e83573ddf65f9ad9143df9573ca37c1ed2ee956a" [[package]] name = "sqlformat" -version = "0.2.3" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce81b7bd7c4493975347ef60d8c7e8b742d4694f4c49f93e0a12ea263938176c" +checksum = "7bba3a93db0cc4f7bdece8bb09e77e2e785c20bfebf79eb8340ed80708048790" dependencies = [ - "itertools 0.12.0", "nom", "unicode_categories", ] [[package]] name = "sqlx" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcfa89bea9500db4a0d038513d7a060566bfc51d46d1c014847049a45cce85e8" +checksum = "93334716a037193fac19df402f8571269c84a00852f6a7066b5d2616dcd64d3e" dependencies = [ "sqlx-core", "sqlx-macros", @@ -6999,9 +7294,9 @@ dependencies = [ [[package]] name = "sqlx-core" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d06e2f2bd861719b1f3f0c7dbe1d80c30bf59e76cf019f07d9014ed7eefb8e08" +checksum = "d4d8060b456358185f7d50c55d9b5066ad956956fddec42ee2e8567134a8936e" dependencies = [ "atoi", "bigdecimal", @@ -7020,7 +7315,7 @@ dependencies = [ "hashbrown 0.14.5", "hashlink", "hex", - "indexmap 2.1.0", + "indexmap 2.5.0", "ipnetwork", "log", "memchr", @@ -7043,22 +7338,22 @@ dependencies = [ [[package]] name = "sqlx-macros" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f998a9defdbd48ed005a89362bd40dd2117502f15294f61c8d47034107dbbdc" +checksum = "cac0692bcc9de3b073e8d747391827297e075c7710ff6276d9f7a1f3d58c6657" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "sqlx-core", "sqlx-macros-core", - "syn 2.0.72", + "syn 2.0.77", ] [[package]] name = "sqlx-macros-core" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d100558134176a2629d46cec0c8891ba0be8910f7896abfdb75ef4ab6f4e7ce" +checksum = "1804e8a7c7865599c9c79be146dc8a9fd8cc86935fa641d3ea58e5f0688abaa5" dependencies = [ "dotenvy", "either", @@ -7066,7 +7361,7 @@ dependencies = [ "hex", "once_cell", "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "serde", "serde_json", "sha2 0.10.8", @@ -7074,7 +7369,7 @@ dependencies = [ "sqlx-mysql", "sqlx-postgres", "sqlx-sqlite", - "syn 2.0.72", + "syn 2.0.77", "tempfile", "tokio", "url", @@ -7082,9 +7377,9 @@ dependencies = [ [[package]] name = "sqlx-mysql" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "936cac0ab331b14cb3921c62156d913e4c15b74fb6ec0f3146bd4ef6e4fb3c12" +checksum = "64bb4714269afa44aef2755150a0fc19d756fb580a67db8885608cf02f47d06a" dependencies = [ "atoi", "base64 0.22.1", @@ 
-7127,9 +7422,9 @@ dependencies = [ [[package]] name = "sqlx-postgres" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9734dbce698c67ecf67c442f768a5e90a49b2a4d61a9f1d59f73874bd4cf0710" +checksum = "6fa91a732d854c5d7726349bb4bb879bb9478993ceb764247660aee25f67c2f8" dependencies = [ "atoi", "base64 0.22.1", @@ -7170,9 +7465,9 @@ dependencies = [ [[package]] name = "sqlx-sqlite" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a75b419c3c1b1697833dd927bdc4c6545a620bc1bbafabd44e1efbe9afcd337e" +checksum = "d5b2cf34a45953bfd3daaf3db0f7a7878ab9b7a6b91b422d24a7a9e4c857b680" dependencies = [ "atoi", "chrono", @@ -7204,15 +7499,28 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" +[[package]] +name = "string_cache" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f91138e76242f575eb1d3b38b4f1362f10d3a43f47d182a5b359af488a02293b" +dependencies = [ + "new_debug_unreachable", + "once_cell", + "parking_lot", + "phf_shared", + "precomputed-hash", +] + [[package]] name = "stringprep" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb41d74e231a107a1b4ee36bd1214b11285b77768d2e3824aedafa988fd36ee6" +checksum = "7b4df3d392d81bd458a8a621b8bffbd2302a12ffe288a9d931670948749463b1" dependencies = [ - "finl_unicode", "unicode-bidi", "unicode-normalization", + "unicode-properties", ] [[package]] @@ -7253,7 +7561,7 @@ dependencies = [ "heck 0.3.3", "proc-macro-error", "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "syn 1.0.109", ] @@ -7274,16 +7582,16 @@ checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" dependencies = [ "heck 0.5.0", "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "rustversion", - "syn 2.0.72", + "syn 2.0.77", ] [[package]] name = "subtle" -version = "2.5.0" +version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "subxt" @@ -7333,11 +7641,11 @@ dependencies = [ "jsonrpsee 0.21.0", "parity-scale-codec", "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "scale-info", "scale-typegen", "subxt-metadata", - "syn 2.0.72", + "syn 2.0.77", "thiserror", "tokio", ] @@ -7368,10 +7676,10 @@ dependencies = [ "darling 0.20.10", "parity-scale-codec", "proc-macro-error", - "quote 1.0.36", + "quote 1.0.37", "scale-typegen", "subxt-codegen", - "syn 2.0.72", + "syn 2.0.77", ] [[package]] @@ -7426,18 +7734,18 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "unicode-ident", ] [[package]] name = "syn" -version = "2.0.72" +version = "2.0.77" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc4b9b9bf2add8093d3f2c0204471e951b2285580335de42f9d2534f3ae7a8af" +checksum = "9f35bcdf61fd8e7be6caf75f429fdca8beb3ed76584befb503b1569faee373ed" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "unicode-ident", ] @@ -7449,8 +7757,8 @@ checksum = 
"1329189c02ff984e9736652b1631330da25eaa6bc639089ed4915d25446cbe7b" dependencies = [ "proc-macro-error", "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.72", + "quote 1.0.37", + "syn 2.0.77", ] [[package]] @@ -7464,6 +7772,9 @@ name = "sync_wrapper" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" +dependencies = [ + "futures-core", +] [[package]] name = "system-configuration" @@ -7473,7 +7784,18 @@ checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" dependencies = [ "bitflags 1.3.2", "core-foundation", - "system-configuration-sys", + "system-configuration-sys 0.5.0", +] + +[[package]] +name = "system-configuration" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" +dependencies = [ + "bitflags 2.6.0", + "core-foundation", + "system-configuration-sys 0.6.0", ] [[package]] @@ -7486,6 +7808,16 @@ dependencies = [ "libc", ] +[[package]] +name = "system-configuration-sys" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e1d1b10ced5ca923a1fcb8d03e96b8d3268065d724548c0211415ff6ac6bac4" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "system-constants-generator" version = "0.1.0" @@ -7514,14 +7846,26 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" -version = "3.10.1" +version = "3.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" +checksum = "04cbcdd0c794ebb0d4cf35e88edd2f7d2c4c3e9a5a6dab322839b321c6a87a64" dependencies = [ "cfg-if", "fastrand", + "once_cell", "rustix", - "windows-sys 0.52.0", + "windows-sys 0.59.0", +] + +[[package]] +name = "term" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c59df8ac95d96ff9bede18eb7300b0fda5e5d8d90960e76f8e14ae765eedbf1f" +dependencies = [ + "dirs-next", + "rustversion", + "winapi", ] [[package]] @@ -7535,43 +7879,44 @@ dependencies = [ [[package]] name = "test-casing" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2378d657757969a2cec9ec4eb616be8f01be98c21c6467991f91cb182e4653b" +checksum = "f4d233764420cbfe244e6a50177798a01b20184df210eb626898cd1b20c06633" dependencies = [ "test-casing-macro", ] [[package]] name = "test-casing-macro" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cfbe7811249c4c914b06141b8ac0f2cee2733fb883d05eb19668a45fc60c3d5" +checksum = "f9b53c7124dd88026d5d98a1eb1fd062a578b7d783017c9298825526c7fb6427" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.72", + "quote 1.0.37", + "syn 2.0.77", ] [[package]] name = "test-log" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b319995299c65d522680decf80f2c108d85b861d81dfe340a10d16cee29d9e6" +checksum = "3dffced63c2b5c7be278154d76b479f9f9920ed34e7574201407f0b14e2bbb93" dependencies = [ "env_logger", "test-log-macros", + "tracing-subscriber", ] [[package]] name = "test-log-macros" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c8f546451eaa38373f549093fe9fd05e7d2bade739e2ddf834b9968621d60107" +checksum = "5999e24eaa32083191ba4e425deb75cdf25efefabe5aaccb7446dd0d4122a3f5" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.72", + "quote 1.0.37", + "syn 2.0.77", ] [[package]] @@ -7585,35 +7930,35 @@ dependencies = [ [[package]] name = "textwrap" -version = "0.16.0" +version = "0.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "222a222a5bfe1bba4a77b45ec488a741b3cb8872e5e499451fd7d0129c9c7c3d" +checksum = "23d434d3f8967a09480fb04132ebe0a3e088c173e6d0ee7897abbdf4eab0f8b9" [[package]] name = "thiserror" -version = "1.0.63" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0342370b38b6a11b6cc11d6a805569958d54cfa061a29969c3b5ce2ea405724" +checksum = "d50af8abc119fb8bb6dbabcfa89656f46f84aa0ac7688088608076ad2b459a84" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.63" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4558b58466b9ad7ca0f102865eccc95938dca1a74a856f2b57b6629050da261" +checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.72", + "quote 1.0.37", + "syn 2.0.77", ] [[package]] name = "thread_local" -version = "1.1.7" +version = "1.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" +checksum = "8b9ef9bad013ada3808854ceac7b46812a6465ba368859a37e2100283d2d719c" dependencies = [ "cfg-if", "once_cell", @@ -7709,9 +8054,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.6.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" +checksum = "445e881f4f6d382d5f27c034e25eb92edd7c784ceab92a0937db7f2e9471b938" dependencies = [ "tinyvec_macros", ] @@ -7737,9 +8082,9 @@ dependencies = [ [[package]] name = "tokio" -version = "1.39.1" +version = "1.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d040ac2b29ab03b09d4129c2f5bbd012a3ac2f79d38ff506a4bf8dd34b0eac8a" +checksum = "e2b070231665d27ad9ec9b8df639893f46727666c6767db40317fbe920a5d998" dependencies = [ "backtrace", "bytes", @@ -7760,8 +8105,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.72", + "quote 1.0.37", + "syn 2.0.77", ] [[package]] @@ -7801,16 +8146,16 @@ version = "0.26.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.23.10", + "rustls 0.23.13", "rustls-pki-types", "tokio", ] [[package]] name = "tokio-stream" -version = "0.1.14" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" +checksum = "4f4e6ce100d0eb49a2734f8c0812bcd324cf357d21810932c5df6b96ef2b86f1" dependencies = [ "futures-core", "pin-project-lite", @@ -7833,69 +8178,75 @@ dependencies = [ ] [[package]] -name = "toml_datetime" -version = "0.6.6" +name = "toml" +version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"4badfd56924ae69bcc9039335b2e017639ce3f9b001c393c1b2d1ef846ce2cbf" +checksum = "a1ed1f98e3fdc28d6d910e6737ae6ab1a93bf1985935a1193e68f93eeb68d24e" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit 0.22.22", +] [[package]] -name = "toml_edit" -version = "0.19.15" +name = "toml_datetime" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" +checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" dependencies = [ - "indexmap 2.1.0", - "toml_datetime", - "winnow", + "serde", ] [[package]] name = "toml_edit" -version = "0.20.2" +version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "396e4d48bbb2b7554c944bde63101b5ae446cff6ec4a24227428f15eb72ef338" +checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.1.0", + "indexmap 2.5.0", "toml_datetime", - "winnow", + "winnow 0.5.40", ] [[package]] name = "toml_edit" -version = "0.21.1" +version = "0.22.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" +checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ - "indexmap 2.1.0", + "indexmap 2.5.0", + "serde", + "serde_spanned", "toml_datetime", - "winnow", + "winnow 0.6.20", ] [[package]] name = "tonic" -version = "0.12.1" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38659f4a91aba8598d27821589f5db7dddd94601e7a01b1e485a50e5484c7401" +checksum = "877c5b330756d856ffcc4553ab34a5684481ade925ecc54bcd1bf02b1d0d4d52" dependencies = [ "async-stream", "async-trait", "axum", "base64 0.22.1", "bytes", - "h2 0.4.5", + "h2 0.4.6", "http 1.1.0", - "http-body 1.0.0", + "http-body 1.0.1", "http-body-util", - "hyper 1.3.1", + "hyper 1.4.1", "hyper-timeout", "hyper-util", "percent-encoding", "pin-project", - "prost 0.13.1", + "prost 0.13.3", "socket2", "tokio", "tokio-stream", - "tower", + "tower 0.4.13", "tower-layer", "tower-service", "tracing", @@ -7921,6 +8272,22 @@ dependencies = [ "tracing", ] +[[package]] +name = "tower" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2873938d487c3cfb9aed7546dc9f2711d867c9f90c46b889989a2cb84eba6b4f" +dependencies = [ + "futures-core", + "futures-util", + "pin-project-lite", + "sync_wrapper 0.1.2", + "tokio", + "tower-layer", + "tower-service", + "tracing", +] + [[package]] name = "tower-http" version = "0.5.2" @@ -7930,7 +8297,7 @@ dependencies = [ "bitflags 2.6.0", "bytes", "http 1.1.0", - "http-body 1.0.0", + "http-body 1.0.1", "http-body-util", "pin-project-lite", "tokio", @@ -7940,15 +8307,15 @@ dependencies = [ [[package]] name = "tower-layer" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" [[package]] name = "tower-service" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" @@ -7969,8 +8336,8 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.72", + "quote 1.0.37", + "syn 2.0.77", ] [[package]] @@ -7983,17 +8350,6 @@ dependencies = [ "valuable", ] -[[package]] -name = "tracing-log" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f751112709b4e791d8ce53e32c4ed2d353565a795ce84da2285393f41557bdf2" -dependencies = [ - "log", - "once_cell", - "tracing-core", -] - [[package]] name = "tracing-log" version = "0.2.0" @@ -8018,7 +8374,7 @@ dependencies = [ "smallvec", "tracing", "tracing-core", - "tracing-log 0.2.0", + "tracing-log", "tracing-subscriber", "web-time", ] @@ -8035,9 +8391,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.17" +version = "0.3.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30a651bc37f915e81f087d86e62a18eec5f79550c7faff886f7090b4ea757c77" +checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" dependencies = [ "matchers", "nu-ansi-term", @@ -8051,35 +8407,34 @@ dependencies = [ "time", "tracing", "tracing-core", - "tracing-log 0.1.4", + "tracing-log", "tracing-serde", ] [[package]] name = "triomphe" -version = "0.1.9" +version = "0.1.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0eee8098afad3fb0c54a9007aab6804558410503ad676d4633f9c2559a00ac0f" +checksum = "e6631e42e10b40c0690bf92f404ebcfe6e1fdb480391d15f17cc8e96eeed5369" [[package]] name = "try-lock" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3528ecfd12c466c6f163363caf2d02a71161dd5e1cc6ae7b34207ea2d42d81ed" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" [[package]] name = "trybuild" -version = "1.0.86" +version = "1.0.99" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8419ecd263363827c5730386f418715766f584e2f874d32c23c5b00bd9727e7e" +checksum = "207aa50d36c4be8d8c6ea829478be44a372c6a77669937bb39c698e52f1491e8" dependencies = [ - "basic-toml", "glob", - "once_cell", "serde", "serde_derive", "serde_json", "termcolor", + "toml", ] [[package]] @@ -8143,15 +8498,15 @@ dependencies = [ [[package]] name = "unicode-bidi" -version = "0.3.13" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "92888ba5573ff080736b3648696b70cafad7d250551175acbaa4e0385b3e1460" +checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" [[package]] name = "unicode-ident" -version = "1.0.12" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" +checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" [[package]] name = "unicode-normalization" @@ -8162,17 +8517,23 @@ dependencies = [ "tinyvec", ] +[[package]] +name = "unicode-properties" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52ea75f83c0137a9b98608359a5f1af8144876eb67bcb1ce837368e906a9f524" + [[package]] name = "unicode-segmentation" -version = "1.10.1" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1dd624098567895118886609431a7c3b8f516e41d30e0643f03d94592a147e36" +checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" 
[[package]] name = "unicode-width" -version = "0.1.11" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e51733f11c9c4f72aa0c160008246859e340b00807569a0da0e7a1079b27ba85" +checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" [[package]] name = "unicode-xid" @@ -8182,9 +8543,9 @@ checksum = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc" [[package]] name = "unicode-xid" -version = "0.2.4" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" +checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" [[package]] name = "unicode_categories" @@ -8208,15 +8569,15 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5ad948c1cb799b1a70f836077721a92a35ac177d4daddf4c20a633786d4cf618" dependencies = [ - "quote 1.0.36", + "quote 1.0.37", "syn 1.0.109", ] [[package]] name = "unsafe-libyaml" -version = "0.2.10" +version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab4c90930b95a82d00dc9e9ac071b4991924390d46cbd0dfe566148667605e4b" +checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861" [[package]] name = "untrusted" @@ -8226,11 +8587,11 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "ureq" -version = "2.8.0" +version = "2.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5ccd538d4a604753ebc2f17cd9946e89b77bf87f6a8e2309667c6f2e87855e3" +checksum = "b74fc6b57825be3373f7054754755f03ac3a8f5d70015ccad699ba2029956f4a" dependencies = [ - "base64 0.21.5", + "base64 0.22.1", "log", "native-tls", "once_cell", @@ -8257,15 +8618,15 @@ checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da" [[package]] name = "utf8parse" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" +checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.5.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88ad59a7560b41a70d191093a945f0b87bc1deeda46fb237479708a1d6b6cdfc" +checksum = "81dfa00651efa65069b0b6b651f4aaa31ba9e3c3ce0137aaad053604ee7e0314" dependencies = [ "serde", ] @@ -8276,6 +8637,12 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" +[[package]] +name = "value-bag" +version = "1.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a84c137d37ab0142f0f2ddfe332651fdbf252e7b7dbb4e67b6c1f1b2e925101" + [[package]] name = "vcpkg" version = "0.2.15" @@ -8303,9 +8670,9 @@ dependencies = [ [[package]] name = "version_check" -version = "0.9.4" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" [[package]] name = "vise" @@ -8327,7 +8694,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "671d3b894d5d0849f0a597f56bf071f42d4f2a1cbcf2f78ca21f870ab7c0cc2b" dependencies = [ - "hyper 0.14.29", + 
"hyper 0.14.30", "once_cell", "tokio", "tracing", @@ -8341,8 +8708,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a511871dc5de990a3b2a0e715facfbc5da848c0c0395597a1415029fb7c250a" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.72", + "quote 1.0.37", + "syn 2.0.77", ] [[package]] @@ -8365,9 +8732,9 @@ dependencies = [ [[package]] name = "walkdir" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d71d857dc86794ca4c280d616f7da00d2dbfd8cd788846559a6813e6aa4b54ee" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" dependencies = [ "same-file", "winapi-util", @@ -8402,34 +8769,35 @@ checksum = "b8dad83b4f25e74f184f64c43b150b91efe7647395b42289f38e50566d82855b" [[package]] name = "wasm-bindgen" -version = "0.2.87" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7706a72ab36d8cb1f80ffbf0e071533974a60d0a308d01a5d0375bf60499a342" +checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5" dependencies = [ "cfg-if", + "once_cell", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.87" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ef2b6d3c510e9625e5fe6f509ab07d66a760f0885d858736483c32ed7809abd" +checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.72", + "quote 1.0.37", + "syn 2.0.77", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.37" +version = "0.4.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c02dbc21516f9f1f04f187958890d7e6026df8d16540b7ad9492bc34a67cea03" +checksum = "61e9300f63a621e96ed275155c108eb6f843b6a26d053f122ab69724559dc8ed" dependencies = [ "cfg-if", "js-sys", @@ -8439,32 +8807,32 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.87" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dee495e55982a3bd48105a7b947fd2a9b4a8ae3010041b9e0faab3f9cd028f1d" +checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf" dependencies = [ - "quote 1.0.36", + "quote 1.0.37", "wasm-bindgen-macro-support", ] [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.87" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54681b18a46765f095758388f2d0cf16eb8d4169b639ab575a8f5693af210c7b" +checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.72", + "quote 1.0.37", + "syn 2.0.77", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.87" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca6ad05a4870b2bf5fe995117d3728437bd27d7cd5f06f13c17443ef369775a1" +checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484" [[package]] name = "wasm-streams" @@ -8521,9 +8889,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.64" +version = "0.3.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b85cbef8c220a6abc02aefd892dfc0fc23afb1c6a426316ec33253a3877249b" +checksum = "26fdeaafd9bd129f65e7c031593c24d62186301e0c72c8978fa1678be7d532c0" 
dependencies = [ "js-sys", "wasm-bindgen", @@ -8541,9 +8909,9 @@ dependencies = [ [[package]] name = "webpki-roots" -version = "0.26.0" +version = "0.26.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0de2cfda980f21be5a7ed2eadb3e6fe074d56022bea2cdeb1a62eb220fc04188" +checksum = "841c67bff177718f1d4dfefde8d8f0e78f9b6589319ba88312f567fc5841a958" dependencies = [ "rustls-pki-types", ] @@ -8562,9 +8930,9 @@ dependencies = [ [[package]] name = "whoami" -version = "1.5.0" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fec781d48b41f8163426ed18e8fc2864c12937df9ce54c88ede7bd47270893e" +checksum = "372d5b87f58ec45c384ba03563b03544dc5fadc3983e434b286913f5b4a9bb6d" dependencies = [ "redox_syscall", "wasite", @@ -8588,11 +8956,11 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.6" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f29e6f9198ba0d26b4c9f07dbe6f9ed633e1f3d5b8b414090084349e46a52596" +checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" dependencies = [ - "winapi", + "windows-sys 0.59.0", ] [[package]] @@ -8603,20 +8971,41 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows-core" -version = "0.51.1" +version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1f8cf84f35d2db49a46868f947758c7a1138116f7fac3bc844f43ade1292e64" +checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-targets 0.48.5", + "windows-targets 0.52.6", ] [[package]] -name = "windows-sys" -version = "0.45.0" +name = "windows-registry" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e400001bb720a623c1c69032f8e3e4cf09984deec740f007dd2b03ec864804b0" +dependencies = [ + "windows-result", + "windows-strings", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-result" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-strings" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0" +checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" dependencies = [ - "windows-targets 0.42.2", + "windows-result", + "windows-targets 0.52.6", ] [[package]] @@ -8646,21 +9035,6 @@ dependencies = [ "windows-targets 0.52.6", ] -[[package]] -name = "windows-targets" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e5180c00cd44c9b1c88adb3693291f1cd93605ded80c250a75d472756b4d071" -dependencies = [ - "windows_aarch64_gnullvm 0.42.2", - "windows_aarch64_msvc 0.42.2", - "windows_i686_gnu 0.42.2", - "windows_i686_msvc 0.42.2", - "windows_x86_64_gnu 0.42.2", - "windows_x86_64_gnullvm 0.42.2", - "windows_x86_64_msvc 0.42.2", -] - [[package]] name = "windows-targets" version = "0.48.5" @@ -8692,12 +9066,6 @@ dependencies = [ "windows_x86_64_msvc 0.52.6", ] -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"597a5118570b68bc08d8d59125332c54f1ba9d9adeedeef5b99b02ba2b0698f8" - [[package]] name = "windows_aarch64_gnullvm" version = "0.48.5" @@ -8710,12 +9078,6 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" -[[package]] -name = "windows_aarch64_msvc" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e08e8864a60f06ef0d0ff4ba04124db8b0fb3be5776a5cd47641e942e58c4d43" - [[package]] name = "windows_aarch64_msvc" version = "0.48.5" @@ -8728,12 +9090,6 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" -[[package]] -name = "windows_i686_gnu" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c61d927d8da41da96a81f029489353e68739737d3beca43145c8afec9a31a84f" - [[package]] name = "windows_i686_gnu" version = "0.48.5" @@ -8752,12 +9108,6 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" -[[package]] -name = "windows_i686_msvc" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44d840b6ec649f480a41c8d80f9c65108b92d89345dd94027bfe06ac444d1060" - [[package]] name = "windows_i686_msvc" version = "0.48.5" @@ -8770,12 +9120,6 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" -[[package]] -name = "windows_x86_64_gnu" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8de912b8b8feb55c064867cf047dda097f92d51efad5b491dfb98f6bbb70cb36" - [[package]] name = "windows_x86_64_gnu" version = "0.48.5" @@ -8788,12 +9132,6 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" -[[package]] -name = "windows_x86_64_gnullvm" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "26d41b46a36d453748aedef1486d5c7a85db22e56aff34643984ea85514e94a3" - [[package]] name = "windows_x86_64_gnullvm" version = "0.48.5" @@ -8806,12 +9144,6 @@ version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" -[[package]] -name = "windows_x86_64_msvc" -version = "0.42.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9aec5da331524158c6d1a4ac0ab1541149c0b9505fde06423b02f5ef0106b9f0" - [[package]] name = "windows_x86_64_msvc" version = "0.48.5" @@ -8826,28 +9158,27 @@ checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" -version = "0.5.17" +version = "0.5.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3b801d0e0a6726477cc207f60162da452f3a95adb368399bef20a946e06f65c" +checksum = "f593a95398737aeed53e489c785df13f3618e41dbcd6718c6addbf1395aa6876" dependencies = [ "memchr", ] [[package]] -name = "winreg" -version = "0.50.0" +name = "winnow" +version = "0.6.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" +checksum = 
"36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b" dependencies = [ - "cfg-if", - "windows-sys 0.48.0", + "memchr", ] [[package]] name = "winreg" -version = "0.52.0" +version = "0.50.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a277a57398d4bfa075df44f501a17cfdf8542d224f0d36095a2adc7aee4ef0a5" +checksum = "524e57b2c537c0f9b1e69f1965311ec12182b4122e45035b1508cd24d2adadb1" dependencies = [ "cfg-if", "windows-sys 0.48.0", @@ -8874,20 +9205,11 @@ dependencies = [ "zeroize", ] -[[package]] -name = "yaml-rust" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" -dependencies = [ - "linked-hash-map", -] - [[package]] name = "yansi" -version = "0.5.1" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" +checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" [[package]] name = "yap" @@ -8897,29 +9219,30 @@ checksum = "ff4524214bc4629eba08d78ceb1d6507070cc0bcbbed23af74e19e6e924a24cf" [[package]] name = "zerocopy" -version = "0.7.31" +version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1c4061bedbb353041c12f413700357bec76df2c7e2ca8e4df8bac24c6bf68e3d" +checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" dependencies = [ + "byteorder", "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.31" +version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3c129550b3e6de3fd0ba67ba5c81818f9805e58b8d7fee80a3a59d2c9fc601a" +checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.72", + "quote 1.0.37", + "syn 2.0.77", ] [[package]] name = "zeroize" -version = "1.7.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "525b4ec142c6b68a2d10f01f7bbf6755599ca3f81ea53b8431b7dd348f5fdb2d" +checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" dependencies = [ "zeroize_derive", ] @@ -8931,8 +9254,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.72", + "quote 1.0.37", + "syn 2.0.77", ] [[package]] @@ -9057,14 +9380,13 @@ dependencies = [ [[package]] name = "zkevm_circuits" -version = "0.140.2" +version = "0.140.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8beed4cc1ab1f9d99a694506d18705e10059534b30742832be49637c4775e1f8" +checksum = "e3c365c801e0c6eda83fbd153df45575172beb406bfb663d386f9154b4325eda" dependencies = [ - "arrayvec 0.7.4", + "arrayvec 0.7.6", "bincode", "boojum", - "cs_derive", "derivative", "hex", "itertools 0.10.5", @@ -9075,18 +9397,18 @@ dependencies = [ "serde_json", "smallvec", "zkevm_opcode_defs 0.132.0", + "zksync_cs_derive", ] [[package]] name = "zkevm_circuits" -version = "0.141.1" +version = "0.141.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20f1a64d256cc5f5c58d19cf976cb45973df54e4e3010ca4a3e6fafe9f06075e" +checksum = "2ccd0352e122a4e6f0046d2163b7e692e627b23fc3264faa77331a21b65833fd" dependencies = [ - "arrayvec 0.7.4", + "arrayvec 0.7.6", "bincode", "boojum", - "cs_derive", 
"derivative", "hex", "itertools 0.10.5", @@ -9097,6 +9419,7 @@ dependencies = [ "serde_json", "smallvec", "zkevm_opcode_defs 0.141.0", + "zksync_cs_derive", ] [[package]] @@ -9105,7 +9428,7 @@ version = "0.150.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "784fa7cfb51e17c5ced112bca43da30b3468b2347b7af0427ad9638759fb140e" dependencies = [ - "arrayvec 0.7.4", + "arrayvec 0.7.6", "boojum", "derivative", "hex", @@ -9155,7 +9478,7 @@ dependencies = [ "bitflags 2.6.0", "blake2 0.10.6", "ethereum-types", - "k256 0.13.3", + "k256 0.13.4", "lazy_static", "sha2 0.10.8", "sha3 0.10.8", @@ -9170,7 +9493,7 @@ dependencies = [ "bitflags 2.6.0", "blake2 0.10.6", "ethereum-types", - "k256 0.13.3", + "k256 0.13.4", "lazy_static", "p256", "serde", @@ -9210,7 +9533,8 @@ dependencies = [ "chrono", "ethabi", "hex", - "num_enum 0.7.2", + "num_enum 0.7.3", + "secrecy", "serde", "serde_json", "serde_with", @@ -9226,7 +9550,7 @@ version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5ffa03efe9bdb137a4b36b97d1a74237e18c9ae42b755163d903a9d48c1a5d80" dependencies = [ - "arrayvec 0.7.4", + "arrayvec 0.7.6", "bit-vec", "blake2s_simd", "byteorder", @@ -9316,9 +9640,9 @@ dependencies = [ [[package]] name = "zksync_concurrency" -version = "0.1.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1c8cf6c689ab5922b52d81b775cd2d9cffbfc8fb8da65985e11b06546dfb3bf" +checksum = "a4724d51934e475c846ba9e6ed169e25587385188b928a9ecfbbf616092a1c17" dependencies = [ "anyhow", "once_cell", @@ -9353,9 +9677,9 @@ dependencies = [ [[package]] name = "zksync_consensus_bft" -version = "0.1.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45c409ae915056cf9cadd9304dbc8718fa38edfcb346d06e5b3582dcd2489ef9" +checksum = "a1e7199c07aa14d9c3319839b98ad0496aac6e72327e70ded77ddb66329766db" dependencies = [ "anyhow", "async-trait", @@ -9375,20 +9699,18 @@ dependencies = [ [[package]] name = "zksync_consensus_crypto" -version = "0.1.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc7baced4e811015038322dad10239f2d631d9e339e8d6b7b6e6b146bee30f41" +checksum = "a7760e7a140f16f0435fbf2ad9a4b09feaad74568d05b553751d222f4803a42e" dependencies = [ "anyhow", "blst", "ed25519-dalek", "elliptic-curve 0.13.8", - "ff_ce", "hex", - "k256 0.13.3", + "k256 0.13.4", "num-bigint 0.4.6", "num-traits", - "rand 0.4.6", "rand 0.8.5", "sha3 0.10.8", "thiserror", @@ -9398,9 +9720,9 @@ dependencies = [ [[package]] name = "zksync_consensus_executor" -version = "0.1.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b018b8a76fc2cbecb51683ce97532501c45d44cbc8bb856d1956e5998259335" +checksum = "db07f7329b29737d8fd6860b350c809ae1b56ad53e26a7d0eddf3664ccb9dacb" dependencies = [ "anyhow", "async-trait", @@ -9420,9 +9742,9 @@ dependencies = [ [[package]] name = "zksync_consensus_network" -version = "0.1.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5bb2988e41af3083cebfc11f47f2615adae8d829bf9237aa084dede9629a687" +checksum = "a89a2d60db1ccd41438d29724a8d0d57fcf9506eb4443ea4b9205fd78c9c8e59" dependencies = [ "anyhow", "async-trait", @@ -9430,12 +9752,12 @@ dependencies = [ "build_html", "bytesize", "http-body-util", - "hyper 1.3.1", + "hyper 1.4.1", "hyper-util", "im", "once_cell", "pin-project", - "prost 0.12.1", + "prost 0.12.6", "rand 0.8.5", "semver", "snow", 
@@ -9456,15 +9778,15 @@ dependencies = [ [[package]] name = "zksync_consensus_roles" -version = "0.1.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0aab4ddf62f6001903c5fe9f65afb1bdc42464928c9d1c6ce52e4d7e9944f5dc" +checksum = "96f903187836210602beba27655e111e22efb229ef90bd2a95a3d6799b31685c" dependencies = [ "anyhow", "bit-vec", "hex", "num-bigint 0.4.6", - "prost 0.12.1", + "prost 0.12.6", "rand 0.8.5", "serde", "thiserror", @@ -9478,13 +9800,13 @@ dependencies = [ [[package]] name = "zksync_consensus_storage" -version = "0.1.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b9dbcb923fa201af03f49f70c11a923b416915d2ddf8b2de3a2e861f22898a4" +checksum = "ff43cfd03ea205c763e74362dc6ec5a4d74b6b1baef0fb134dde92a8880397f7" dependencies = [ "anyhow", "async-trait", - "prost 0.12.1", + "prost 0.12.6", "rand 0.8.5", "thiserror", "tracing", @@ -9498,9 +9820,9 @@ dependencies = [ [[package]] name = "zksync_consensus_utils" -version = "0.1.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29e69dffc0fbc7c096548c997f5ca157a490b34b3d49fd524fa3d51840f7fb22" +checksum = "1020308512c01ab80327fb874b5b61c6fd513a6b26c8a5fce3e077600da04e4b" dependencies = [ "anyhow", "rand 0.8.5", @@ -9648,7 +9970,7 @@ checksum = "5939e2df4288c263c706ff18ac718e984149223ad4289d6d957d767dcfc04c81" dependencies = [ "proc-macro-error", "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "syn 1.0.109", ] @@ -9676,7 +9998,7 @@ dependencies = [ "http 1.1.0", "jsonrpsee 0.23.2", "parity-scale-codec", - "reqwest 0.12.5", + "reqwest 0.12.7", "scale-encode", "serde", "serde_json", @@ -9719,7 +10041,7 @@ dependencies = [ "chrono", "hex", "itertools 0.10.5", - "prost 0.12.1", + "prost 0.12.6", "rand 0.8.5", "serde", "serde_json", @@ -9831,8 +10153,8 @@ dependencies = [ "async-trait", "rlp", "thiserror", - "tokio", - "zksync_types", + "zksync_basic_types", + "zksync_crypto_primitives", ] [[package]] @@ -9842,6 +10164,7 @@ dependencies = [ "anyhow", "async-recursion", "async-trait", + "test-log", "thiserror", "tokio", "tracing", @@ -9857,12 +10180,12 @@ dependencies = [ [[package]] name = "zksync_external_node" -version = "24.24.0" +version = "24.28.0" dependencies = [ "anyhow", "assert_matches", "async-trait", - "clap 4.4.6", + "clap 4.5.18", "envy", "futures 0.3.30", "rustc_version", @@ -9920,8 +10243,9 @@ dependencies = [ "bigdecimal", "chrono", "fraction", + "httpmock", "rand 0.8.5", - "reqwest 0.12.5", + "reqwest 0.12.7", "serde", "tokio", "url", @@ -9970,7 +10294,7 @@ dependencies = [ "num-integer", "num-traits", "proc-macro2 1.0.86", - "quote 1.0.36", + "quote 1.0.37", "serde", "syn 1.0.109", ] @@ -10065,7 +10389,7 @@ version = "0.1.0" dependencies = [ "anyhow", "assert_matches", - "clap 4.4.6", + "clap 4.5.18", "insta", "leb128", "once_cell", @@ -10100,7 +10424,7 @@ dependencies = [ "futures 0.3.30", "itertools 0.10.5", "once_cell", - "reqwest 0.12.5", + "reqwest 0.12.7", "serde", "serde_json", "tempfile", @@ -10151,7 +10475,6 @@ dependencies = [ "once_cell", "pretty_assertions", "thiserror", - "tokio", "tracing", "vise", "zk_evm 0.131.0-rc.2", @@ -10194,10 +10517,11 @@ dependencies = [ "thiserror", "thread_local", "tokio", - "tower", + "tower 0.4.13", "tower-http", "tracing", "vise", + "zk_evm 0.150.5", "zksync_config", "zksync_consensus_roles", "zksync_contracts", @@ -10227,7 +10551,6 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", - "hex", "rand 0.8.5", 
"secrecy", "semver", @@ -10261,6 +10584,7 @@ dependencies = [ "zksync_test_account", "zksync_types", "zksync_utils", + "zksync_vm_executor", "zksync_vm_interface", "zksync_web3_decl", ] @@ -10372,8 +10696,8 @@ name = "zksync_node_framework_derive" version = "0.1.0" dependencies = [ "proc-macro2 1.0.86", - "quote 1.0.36", - "syn 2.0.72", + "quote 1.0.37", + "syn 2.0.77", ] [[package]] @@ -10479,9 +10803,9 @@ dependencies = [ "google-cloud-auth", "google-cloud-storage", "http 1.1.0", - "prost 0.12.1", + "prost 0.12.6", "rand 0.8.5", - "reqwest 0.12.5", + "reqwest 0.12.7", "serde_json", "tempfile", "tokio", @@ -10512,10 +10836,10 @@ dependencies = [ "anyhow", "axum", "chrono", - "hyper 1.3.1", + "hyper 1.4.1", "serde_json", "tokio", - "tower", + "tower 0.4.13", "tracing", "vise", "zksync_basic_types", @@ -10530,14 +10854,14 @@ dependencies = [ [[package]] name = "zksync_protobuf" -version = "0.1.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df5467dfe2f845ca1fd6ceec623bbd32187589793d3c4023dcd2f5172369d198" +checksum = "1d2d9ce9b9697daae6023c8da5cfe8764690a9d9c91ff32b8e1e54a7c8301fb3" dependencies = [ "anyhow", "bit-vec", "once_cell", - "prost 0.12.1", + "prost 0.12.6", "prost-reflect", "quick-protobuf", "rand 0.8.5", @@ -10551,9 +10875,9 @@ dependencies = [ [[package]] name = "zksync_protobuf_build" -version = "0.1.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33d35280660b11be2a4ebdf531184eb729acebfdc3368d27176ec104f8bf9c5f" +checksum = "903c23a12e160a703f9b68d0dd961daa24156af912ca1bc9efb74969f3acc645" dependencies = [ "anyhow", "heck 0.5.0", @@ -10562,8 +10886,8 @@ dependencies = [ "prost-build", "prost-reflect", "protox", - "quote 1.0.36", - "syn 2.0.72", + "quote 1.0.37", + "syn 2.0.77", ] [[package]] @@ -10572,7 +10896,7 @@ version = "0.1.0" dependencies = [ "anyhow", "hex", - "prost 0.12.1", + "prost 0.12.6", "rand 0.8.5", "secrecy", "serde_json", @@ -10640,7 +10964,7 @@ name = "zksync_server" version = "0.1.0" dependencies = [ "anyhow", - "clap 4.4.6", + "clap 4.5.18", "futures 0.3.30", "serde_json", "tikv-jemallocator", @@ -10754,6 +11078,7 @@ dependencies = [ "hex", "itertools 0.10.5", "once_cell", + "rand 0.8.5", "tempfile", "test-casing", "thiserror", @@ -10810,7 +11135,7 @@ dependencies = [ "anyhow", "async-trait", "envy", - "reqwest 0.12.5", + "reqwest 0.12.7", "secp256k1", "serde", "thiserror", @@ -10889,17 +11214,18 @@ dependencies = [ "bincode", "blake2 0.10.6", "chrono", - "derive_more 1.0.0-beta.6", + "derive_more 1.0.0", "hex", "itertools 0.10.5", "num", - "num_enum 0.7.2", + "num_enum 0.7.3", "once_cell", - "prost 0.12.1", + "prost 0.12.6", "rlp", "secp256k1", "serde", "serde_json", + "serde_with", "strum", "thiserror", "tokio", @@ -10928,7 +11254,7 @@ dependencies = [ "num", "once_cell", "rand 0.8.5", - "reqwest 0.12.5", + "reqwest 0.12.7", "serde", "serde_json", "thiserror", @@ -10966,8 +11292,8 @@ dependencies = [ [[package]] name = "zksync_vm2" -version = "0.1.0" -source = "git+https://github.com/matter-labs/vm2.git?rev=cd6136c42ec56856e0abcf2a98d1a9e120161482#cd6136c42ec56856e0abcf2a98d1a9e120161482" +version = "0.2.1" +source = "git+https://github.com/matter-labs/vm2.git?rev=a233d44bbe61dc6a758a754c3b78fe4f83e56699#a233d44bbe61dc6a758a754c3b78fe4f83e56699" dependencies = [ "enum_dispatch", "primitive-types", @@ -10978,8 +11304,8 @@ dependencies = [ [[package]] name = "zksync_vm2_interface" -version = "0.1.0" -source = 
"git+https://github.com/matter-labs/vm2.git?rev=cd6136c42ec56856e0abcf2a98d1a9e120161482#cd6136c42ec56856e0abcf2a98d1a9e120161482" +version = "0.2.1" +source = "git+https://github.com/matter-labs/vm2.git?rev=a233d44bbe61dc6a758a754c3b78fe4f83e56699#a233d44bbe61dc6a758a754c3b78fe4f83e56699" dependencies = [ "primitive-types", ] @@ -11009,6 +11335,7 @@ dependencies = [ "assert_matches", "async-trait", "hex", + "pretty_assertions", "serde", "serde_json", "thiserror", @@ -11031,6 +11358,7 @@ dependencies = [ "once_cell", "rand 0.8.5", "serde", + "serde_json", "tempfile", "test-casing", "tokio", @@ -11064,7 +11392,7 @@ dependencies = [ "pin-project-lite", "rand 0.8.5", "rlp", - "rustls 0.23.10", + "rustls 0.23.13", "serde", "serde_json", "test-casing", @@ -11078,9 +11406,9 @@ dependencies = [ [[package]] name = "zstd-sys" -version = "2.0.9+zstd.1.5.5" +version = "2.0.13+zstd.1.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e16efa8a874a0481a574084d34cc26fdb3b99627480f785888deb6386506656" +checksum = "38ff0f21cfee8f97d94cef41359e0c89aa6113028ab0291aa8ca0038995a95aa" dependencies = [ "cc", "pkg-config", diff --git a/Cargo.toml b/Cargo.toml index 8498751a599..31f61f2b2d5 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -117,7 +117,7 @@ codegen = "0.2.0" criterion = "0.4.0" ctrlc = "3.1" dashmap = "5.5.3" -derive_more = "=1.0.0-beta.6" +derive_more = "1.0.0" envy = "0.4" ethabi = "18.0.0" flate2 = "1.0.28" @@ -128,6 +128,7 @@ google-cloud-storage = "0.20.0" governor = "0.4.2" hex = "0.4" http = "1.1" +httpmock = "0.7.0" hyper = "1.3" iai = "0.1" insta = "1.29.0" @@ -227,19 +228,19 @@ zk_evm_1_4_1 = { package = "zk_evm", version = "0.141" } zk_evm_1_5_0 = { package = "zk_evm", version = "=0.150.5" } # New VM; pinned to a specific commit because of instability -zksync_vm2 = { git = "https://github.com/matter-labs/vm2.git", rev = "cd6136c42ec56856e0abcf2a98d1a9e120161482" } +zksync_vm2 = { git = "https://github.com/matter-labs/vm2.git", rev = "a233d44bbe61dc6a758a754c3b78fe4f83e56699" } # Consensus dependencies. -zksync_concurrency = "=0.1.1" -zksync_consensus_bft = "=0.1.1" -zksync_consensus_crypto = "=0.1.1" -zksync_consensus_executor = "=0.1.1" -zksync_consensus_network = "=0.1.1" -zksync_consensus_roles = "=0.1.1" -zksync_consensus_storage = "=0.1.1" -zksync_consensus_utils = "=0.1.1" -zksync_protobuf = "=0.1.1" -zksync_protobuf_build = "=0.1.1" +zksync_concurrency = "=0.3.0" +zksync_consensus_bft = "=0.3.0" +zksync_consensus_crypto = "=0.3.0" +zksync_consensus_executor = "=0.3.0" +zksync_consensus_network = "=0.3.0" +zksync_consensus_roles = "=0.3.0" +zksync_consensus_storage = "=0.3.0" +zksync_consensus_utils = "=0.3.0" +zksync_protobuf = "=0.3.0" +zksync_protobuf_build = "=0.3.0" # "Local" dependencies zksync_multivm = { version = "0.1.0", path = "core/lib/multivm" } diff --git a/README.md b/README.md index 013d932aa1a..ce73242f11e 100644 --- a/README.md +++ b/README.md @@ -16,6 +16,7 @@ The following questions will be answered by the following resources: | What do I need to develop the project locally? | [development.md](docs/guides/development.md) | | How can I set up my dev environment? | [setup-dev.md](docs/guides/setup-dev.md) | | How can I run the project? | [launch.md](docs/guides/launch.md) | +| How can I build Docker images? | [build-docker.md](docs/guides/build-docker.md) | | What is the logical project structure and architecture? | [architecture.md](docs/guides/architecture.md) | | Where can I find protocol specs? 
| [specs.md](docs/specs/README.md) |
| Where can I find developer docs? | [docs](https://docs.zksync.io) |
diff --git a/bin/zkt b/bin/zkt
index 4736401a29d..f781ca67528 100755
--- a/bin/zkt
+++ b/bin/zkt
@@ -5,9 +5,11 @@ cd $(dirname $0)
 if which zkup >/dev/null; then
   cargo uninstall zk_inception
   cargo uninstall zk_supervisor
+  git config --local core.hooksPath ||
+    git config --local core.hooksPath ./.githooks
   zkup -p .. --alias
 else
-  echo zkup does not installed, please install it https://github.com/matter-labs/zksync-era/tree/main/zk_toolbox/zkup
+  echo zkup is not installed, please install it https://github.com/matter-labs/zksync-era/tree/main/zk_toolbox/zkup
   cd ../zk_toolbox
   cargo install --path ./crates/zk_inception --force
   cargo install --path ./crates/zk_supervisor --force
diff --git a/contracts b/contracts
index 071aa0e4e9d..3ec30fa71cb 160000
--- a/contracts
+++ b/contracts
@@ -1 +1 @@
-Subproject commit 071aa0e4e9d98f7869c38d88792386bf639b901d
+Subproject commit 3ec30fa71cb123d2b25ade27a8248d0b320e505f
diff --git a/core/CHANGELOG.md b/core/CHANGELOG.md
index 7d4381b09be..b2f27a6630c 100644
--- a/core/CHANGELOG.md
+++ b/core/CHANGELOG.md
@@ -1,5 +1,86 @@
 # Changelog
 
+## [24.28.0](https://github.com/matter-labs/zksync-era/compare/core-v24.27.0...core-v24.28.0) (2024-10-02)
+
+
+### Features
+
+* **da-clients:** add secrets ([#2954](https://github.com/matter-labs/zksync-era/issues/2954)) ([f4631e4](https://github.com/matter-labs/zksync-era/commit/f4631e4466de620cc1401b326d864cdb8b48a05d))
+* **eth-sender:** add a cap to time_in_mempool ([#2978](https://github.com/matter-labs/zksync-era/issues/2978)) ([650d42f](https://github.com/matter-labs/zksync-era/commit/650d42fea6124d80b60a8270a303d72ad6ac741e))
+* **eth-watch:** redesign to support multiple chains ([#2867](https://github.com/matter-labs/zksync-era/issues/2867)) ([aa72d84](https://github.com/matter-labs/zksync-era/commit/aa72d849c24a664acd083eba73795ddc5d31d55f))
+* Expose http debug page ([#2952](https://github.com/matter-labs/zksync-era/issues/2952)) ([e0b6488](https://github.com/matter-labs/zksync-era/commit/e0b64888aae7324aec2d40fa0cd51ea7e1450cd9))
+* **zk_toolbox:** add fees integration test to toolbox ([#2898](https://github.com/matter-labs/zksync-era/issues/2898)) ([e7ead76](https://github.com/matter-labs/zksync-era/commit/e7ead760ce0417dd36af3839ac557f7e9ab238a4))
+* **zk_toolbox:** Add SQL format for zk supervisor ([#2950](https://github.com/matter-labs/zksync-era/issues/2950)) ([540e5d7](https://github.com/matter-labs/zksync-era/commit/540e5d7554f54e80d52f1bfae37e03ca8f787baf))
+
+
+### Bug Fixes
+
+* **api:** Fix batch fee input for `debug` namespace ([#2948](https://github.com/matter-labs/zksync-era/issues/2948)) ([79b6fcf](https://github.com/matter-labs/zksync-era/commit/79b6fcf8b5d10a0ccdceb846370dd6870b6a32b5))
+* chainstack block limit exceeded ([#2974](https://github.com/matter-labs/zksync-era/issues/2974)) ([4ffbf42](https://github.com/matter-labs/zksync-era/commit/4ffbf426de166c11aaf5d7b5ed7d199644fba229))
+* **eth-watch:** add missing check that from_block is not larger than finalized_block ([#2969](https://github.com/matter-labs/zksync-era/issues/2969)) ([3f406c7](https://github.com/matter-labs/zksync-era/commit/3f406c7d0c0e76d798c2d838abde57ca692822c0))
+* ignore unknown fields in rpc json response ([#2962](https://github.com/matter-labs/zksync-era/issues/2962)) ([692ea73](https://github.com/matter-labs/zksync-era/commit/692ea73f75a5fb9db2b4ac33ad24d20568638742))
+
+
+### Performance Improvements
+
+* **api:** More efficient gas estimation ([#2937](https://github.com/matter-labs/zksync-era/issues/2937)) ([3b69e37](https://github.com/matter-labs/zksync-era/commit/3b69e37e470dab859a55787f6cc971e7083de2fd))
+
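The gas-estimation entry above (#2937) concerns the API server's `eth_estimateGas`, which binary-searches for the smallest gas limit under which the transaction still succeeds. A minimal sketch of that search pattern, with an illustrative `succeeds` callback standing in for the real VM execution (not the actual zksync-era API):

```rust
/// Minimal sketch of binary-search gas estimation. `succeeds` stands in for
/// running the transaction in the VM with a given gas limit; a real estimator
/// would also pad the result and tighten the initial bounds.
fn estimate_gas(mut lower: u64, mut upper: u64, succeeds: impl Fn(u64) -> bool) -> Option<u64> {
    if !succeeds(upper) {
        return None; // fails even with the maximum allowed gas limit
    }
    while lower < upper {
        let mid = lower + (upper - lower) / 2;
        if succeeds(mid) {
            upper = mid; // `mid` gas is enough; try to go lower
        } else {
            lower = mid + 1; // `mid` gas is not enough; search above it
        }
    }
    Some(upper)
}
```

Each search iteration costs one VM execution, so shaving iterations (for example via tighter initial bounds) is where the speedup in such a change would come from.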
+## [24.27.0](https://github.com/matter-labs/zksync-era/compare/core-v24.26.0...core-v24.27.0) (2024-09-25)
+
+
+### Features
+
+* **vm:** Split old and new VM implementations ([#2915](https://github.com/matter-labs/zksync-era/issues/2915)) ([93bc66f](https://github.com/matter-labs/zksync-era/commit/93bc66f21f9f67a440f06f1c4402e0d687698741))
+
+
+### Bug Fixes
+
+* **api:** Return correct flat call tracer ([#2917](https://github.com/matter-labs/zksync-era/issues/2917)) ([218646a](https://github.com/matter-labs/zksync-era/commit/218646aa1c56200f4ffee99b7f83366e2689354f))
+
+## [24.26.0](https://github.com/matter-labs/zksync-era/compare/core-v24.25.0...core-v24.26.0) (2024-09-23)
+
+
+### Features
+
+* added seed_peers to consensus global config ([#2920](https://github.com/matter-labs/zksync-era/issues/2920)) ([e9d1d90](https://github.com/matter-labs/zksync-era/commit/e9d1d905f1ce86f9de2cf39d79be4b5aada4a81d))
+* **circuit_prover:** Add circuit prover ([#2908](https://github.com/matter-labs/zksync-era/issues/2908)) ([48317e6](https://github.com/matter-labs/zksync-era/commit/48317e640a00b016bf7bf782cc94fccaf077ed6d))
+* **prover:** Add endpoint to PJM to get queue reports ([#2918](https://github.com/matter-labs/zksync-era/issues/2918)) ([2cec83f](https://github.com/matter-labs/zksync-era/commit/2cec83f26e0b9309387135ca43718af4fcd6f6b1))
+* **vm:** Do not panic on VM divergence ([#2705](https://github.com/matter-labs/zksync-era/issues/2705)) ([7aa5721](https://github.com/matter-labs/zksync-era/commit/7aa5721d22e253d05d369a60d5bcacbf52021c48))
+* **vm:** Extract oneshot VM executor – environment types ([#2885](https://github.com/matter-labs/zksync-era/issues/2885)) ([a2d4126](https://github.com/matter-labs/zksync-era/commit/a2d4126f9e0c9dc46f49a861549d076fdbcf66d3))
+
+
+### Bug Fixes
+
+* **api_server:** fix blob_gas length ([#2673](https://github.com/matter-labs/zksync-era/issues/2673)) ([44a8f79](https://github.com/matter-labs/zksync-era/commit/44a8f79739704e1216a7b34a0580ad7ba1cce3bd))
+* **eth-sender:** print better error message in case of missing blob prices ([#2927](https://github.com/matter-labs/zksync-era/issues/2927)) ([38fc824](https://github.com/matter-labs/zksync-era/commit/38fc824f75e8b0e84f10348d1502fc8a26d12015))
+
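The divergence entry above (#2705) relates to running a new VM shadowed by the old one and comparing results; the fix amounts to reporting a mismatch instead of crashing the process on it. A rough sketch of the shadowing pattern, with placeholder `Vm`/`TxOutcome` types (not the real zksync-era interfaces):

```rust
/// Placeholder VM interface for this sketch; the real executor traits differ.
trait Vm {
    fn run(&mut self, tx: &[u8]) -> TxOutcome;
}

#[derive(Debug, PartialEq)]
struct TxOutcome {
    success: bool,
    gas_used: u64,
}

/// Runs the main VM and a shadow VM on the same transaction. Divergence is
/// surfaced as an error (to be logged by the caller) rather than a panic,
/// so a mismatch cannot bring the node down.
fn run_shadowed(main: &mut dyn Vm, shadow: &mut dyn Vm, tx: &[u8]) -> Result<TxOutcome, String> {
    let main_out = main.run(tx);
    let shadow_out = shadow.run(tx);
    if main_out != shadow_out {
        return Err(format!(
            "VM divergence: main={main_out:?}, shadow={shadow_out:?}"
        ));
    }
    Ok(main_out)
}
```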
([#2886](https://github.com/matter-labs/zksync-era/issues/2886)) ([f095b4a](https://github.com/matter-labs/zksync-era/commit/f095b4a3223222ac712de53592fe1e68f766600f)) +* make `to` address optional for transaction data ([#2852](https://github.com/matter-labs/zksync-era/issues/2852)) ([8363c1d](https://github.com/matter-labs/zksync-era/commit/8363c1d8697ad9bd2fe5d326218476bc3dad38af)) +* **prover:** Optimize setup keys loading ([#2847](https://github.com/matter-labs/zksync-era/issues/2847)) ([19887ef](https://github.com/matter-labs/zksync-era/commit/19887ef21a8bbd26977353f8ee277b711850dfd2)) +* Selector generator tool ([#2844](https://github.com/matter-labs/zksync-era/issues/2844)) ([b359b08](https://github.com/matter-labs/zksync-era/commit/b359b085895da6582f1d28722107bc5b25f1232c)) +* **tee:** use hex serialization for RPC responses ([#2887](https://github.com/matter-labs/zksync-era/issues/2887)) ([abe0440](https://github.com/matter-labs/zksync-era/commit/abe0440811ae4daf4a0f307922a282e9664308e0)) +* **utils:** Rework locate_workspace, introduce Workspace type ([#2830](https://github.com/matter-labs/zksync-era/issues/2830)) ([d256092](https://github.com/matter-labs/zksync-era/commit/d2560928cc67b40a97a5497ac8542915bf6f91a9)) +* **zk_toolbox:** Add external_node consensus support ([#2821](https://github.com/matter-labs/zksync-era/issues/2821)) ([4a10d7d](https://github.com/matter-labs/zksync-era/commit/4a10d7d9554d6c1aa2f4fc46557d40baaad8ff2f)) + + +### Bug Fixes + +* count SECP256 precompile to account validation gas limit as well ([#2859](https://github.com/matter-labs/zksync-era/issues/2859)) ([fee0c2a](https://github.com/matter-labs/zksync-era/commit/fee0c2ad08a5ab4a04252765b367eb9fbb1f3db7)) +* **en:** Fix connection starvation during snapshot recovery ([#2836](https://github.com/matter-labs/zksync-era/issues/2836)) ([52f4f76](https://github.com/matter-labs/zksync-era/commit/52f4f763674d25f8a5e7f3a111354a559f798d52)) +* **eth_watch:** fix `get_events_inner` ([#2882](https://github.com/matter-labs/zksync-era/issues/2882)) ([c957dd8](https://github.com/matter-labs/zksync-era/commit/c957dd8011213e0e95fa5962e2310321b29a0d16)) +* handling of HTTP 403 thrown by proxyd ([#2835](https://github.com/matter-labs/zksync-era/issues/2835)) ([2d71c74](https://github.com/matter-labs/zksync-era/commit/2d71c7408a0eed3662fc51f70fa9f525d66e4c6f)) +* **state-keeper:** Restore processed tx metrics in state keeper ([#2815](https://github.com/matter-labs/zksync-era/issues/2815)) ([4d8862b](https://github.com/matter-labs/zksync-era/commit/4d8862b76a55ac78edd481694fefd2107736ffd9)) +* **tee-prover:** fix deserialization of `std::time::Duration` in `envy` config ([#2817](https://github.com/matter-labs/zksync-era/issues/2817)) ([df8641a](https://github.com/matter-labs/zksync-era/commit/df8641a912a8d480ceecff58b0bfaef05e04f0c8)) + ## [24.24.0](https://github.com/matter-labs/zksync-era/compare/core-v24.23.0...core-v24.24.0) (2024-09-05) diff --git a/core/bin/block_reverter/src/main.rs b/core/bin/block_reverter/src/main.rs index 65810a6e9b6..64f3a4825e8 100644 --- a/core/bin/block_reverter/src/main.rs +++ b/core/bin/block_reverter/src/main.rs @@ -20,7 +20,7 @@ use zksync_config::{ }, ContractsConfig, DBConfig, EthConfig, GenesisConfig, PostgresConfig, }; -use zksync_core_leftovers::temp_config_store::decode_yaml_repr; +use zksync_core_leftovers::temp_config_store::read_yaml_repr; use zksync_dal::{ConnectionPool, Core}; use zksync_env_config::{object_store::SnapshotsObjectStoreConfig, FromEnv}; use 
zksync_object_store::ObjectStoreFactory; @@ -127,27 +127,26 @@ async fn main() -> anyhow::Result<()> { .build(); let general_config: Option<GeneralConfig> = if let Some(path) = opts.config_path { - let yaml = std::fs::read_to_string(&path).with_context(|| path.display().to_string())?; - let config = - decode_yaml_repr::<proto::general::GeneralConfig>(&yaml) - .context("failed decoding general YAML config")?; - Some(config) + Some( + read_yaml_repr::<proto::general::GeneralConfig>(&path) + .context("failed decoding general YAML config")?, + ) } else { None }; let wallets_config: Option<Wallets> = if let Some(path) = opts.wallets_path { - let yaml = std::fs::read_to_string(&path).with_context(|| path.display().to_string())?; - let config = decode_yaml_repr::<proto::wallets::Wallets>(&yaml) - .context("failed decoding wallets YAML config")?; - Some(config) + Some( + read_yaml_repr::<proto::wallets::Wallets>(&path) + .context("failed decoding wallets YAML config")?, + ) } else { None }; let genesis_config: Option<GenesisConfig> = if let Some(path) = opts.genesis_path { - let yaml = std::fs::read_to_string(&path).with_context(|| path.display().to_string())?; - let config = decode_yaml_repr::<proto::genesis::Genesis>(&yaml) - .context("failed decoding genesis YAML config")?; - Some(config) + Some( + read_yaml_repr::<proto::genesis::Genesis>(&path) + .context("failed decoding genesis YAML config")?, + ) } else { None }; @@ -183,19 +182,15 @@ async fn main() -> anyhow::Result<()> { .context("BasicWitnessInputProducerConfig::from_env()")?, }; let contracts = match opts.contracts_config_path { - Some(path) => { - let yaml = - std::fs::read_to_string(&path).with_context(|| path.display().to_string())?; - decode_yaml_repr::<proto::contracts::Contracts>(&yaml) - .context("failed decoding contracts YAML config")? - } + Some(path) => read_yaml_repr::<proto::contracts::Contracts>(&path) + .context("failed decoding contracts YAML config")?, None => ContractsConfig::from_env().context("ContractsConfig::from_env()")?, }; let secrets_config = if let Some(path) = opts.secrets_path { - let yaml = std::fs::read_to_string(&path).with_context(|| path.display().to_string())?; - let config = decode_yaml_repr::<proto::secrets::Secrets>(&yaml) - .context("failed decoding secrets YAML config")?; - Some(config) + Some( + read_yaml_repr::<proto::secrets::Secrets>(&path) + .context("failed decoding secrets YAML config")?, + ) } else { None };
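The recurring change across these binaries folds the old two-step "read the file, then decode the string" pattern into the single `read_yaml_repr` helper from `zksync_core_leftovers::temp_config_store`, which takes the path directly. A self-contained analogue of the refactor, using serde instead of the repo's protobuf-backed `ProtoRepr` machinery (an assumption made here purely so the sketch is runnable):

```rust
use std::path::Path;

use anyhow::Context as _;
use serde::de::DeserializeOwned;

// Analogue of `read_yaml_repr`: one call site instead of a read + decode pair,
// with the file path baked into the error message.
fn read_yaml_repr<T: DeserializeOwned>(path: &Path) -> anyhow::Result<T> {
    let yaml = std::fs::read_to_string(path).with_context(|| path.display().to_string())?;
    serde_yaml::from_str(&yaml).with_context(|| format!("failed decoding {}", path.display()))
}

#[derive(serde::Deserialize, Debug)]
struct DemoConfig {
    name: String,
}

fn main() -> anyhow::Result<()> {
    std::fs::write("/tmp/demo.yaml", "name: example\n")?;
    let cfg: DemoConfig = read_yaml_repr(Path::new("/tmp/demo.yaml"))?;
    println!("{cfg:?}");
    Ok(())
}
```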
diff --git a/core/bin/external_node/Cargo.toml b/core/bin/external_node/Cargo.toml index a1d3951ff3d..086d381ecc3 100644 --- a/core/bin/external_node/Cargo.toml +++ b/core/bin/external_node/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "zksync_external_node" description = "Non-validator ZKsync node" -version = "24.24.0" # x-release-please-version +version = "24.28.0" # x-release-please-version edition.workspace = true authors.workspace = true homepage.workspace = true diff --git a/core/bin/external_node/src/config/mod.rs b/core/bin/external_node/src/config/mod.rs index f8241deae26..56ee3edfd25 100644 --- a/core/bin/external_node/src/config/mod.rs +++ b/core/bin/external_node/src/config/mod.rs @@ -19,7 +19,7 @@ use zksync_config::{ }; use zksync_consensus_crypto::TextFmt; use zksync_consensus_roles as roles; -use zksync_core_leftovers::temp_config_store::{decode_yaml_repr, read_yaml_repr}; +use zksync_core_leftovers::temp_config_store::read_yaml_repr; #[cfg(test)] use zksync_dal::{ConnectionPool, Core}; use zksync_metadata_calculator::MetadataCalculatorRecoveryConfig; @@ -327,6 +327,10 @@ pub(crate) struct OptionalENConfig { /// The max possible number of gas that `eth_estimateGas` is allowed to overestimate. #[serde(default = "OptionalENConfig::default_estimate_gas_acceptable_overestimation")] pub estimate_gas_acceptable_overestimation: u32, + /// Enables optimizations for the binary search of the gas limit in `eth_estimateGas`. These optimizations are currently + /// considered experimental. + #[serde(default)] + pub estimate_gas_optimize_search: bool, /// The multiplier to use when suggesting gas price. Should be higher than one, /// otherwise if the L1 prices soar, the suggested gas price won't be sufficient to be included in block. #[serde(default = "OptionalENConfig::default_gas_price_scale_factor")] @@ -441,6 +445,8 @@ pub(crate) struct OptionalENConfig { /// Gateway RPC URL, needed for operating during migration. #[allow(dead_code)] pub gateway_url: Option<SensitiveUrl>, + /// Interval for bridge addresses refreshing in seconds. + bridge_addresses_refresh_interval_sec: Option<NonZeroU64>, } impl OptionalENConfig { @@ -558,6 +564,11 @@ impl OptionalENConfig { web3_json_rpc.estimate_gas_acceptable_overestimation, default_estimate_gas_acceptable_overestimation ), + estimate_gas_optimize_search: general_config + .api_config + .as_ref() + .map(|a| a.web3_json_rpc.estimate_gas_optimize_search) + .unwrap_or_default(), gas_price_scale_factor: load_config_or_default!( general_config.api_config, web3_json_rpc.gas_price_scale_factor, @@ -666,6 +677,7 @@ impl OptionalENConfig { api_namespaces, contracts_diamond_proxy_addr: None, gateway_url: enconfig.gateway_url.clone(), + bridge_addresses_refresh_interval_sec: enconfig.bridge_addresses_refresh_interval_sec, }) } @@ -892,6 +904,11 @@ impl OptionalENConfig { Duration::from_secs(self.pruning_data_retention_sec) } + pub fn bridge_addresses_refresh_interval(&self) -> Option<Duration> { + self.bridge_addresses_refresh_interval_sec + .map(|n| Duration::from_secs(n.get())) + } + #[cfg(test)] fn mock() -> Self { // Set all values to their defaults @@ -1149,9 +1166,8 @@ pub(crate) fn read_consensus_secrets() -> anyhow::Result<Option<ConsensusSecrets>> { let Ok(path) = env::var("EN_CONSENSUS_SECRETS_PATH") else { return Ok(None); }; - let cfg = std::fs::read_to_string(&path).context(path)?; Ok(Some( - decode_yaml_repr::<proto::secrets::ConsensusSecrets>(&cfg) + read_yaml_repr::<proto::secrets::ConsensusSecrets>(&path.into()) .context("failed decoding YAML")?, )) } @@ -1160,9 +1176,8 @@ pub(crate) fn read_consensus_config() -> anyhow::Result<Option<ConsensusConfig>> { let Ok(path) = env::var("EN_CONSENSUS_CONFIG_PATH") else { return Ok(None); }; - let cfg = std::fs::read_to_string(&path).context(path)?; Ok(Some( - decode_yaml_repr::<proto::consensus::Config>(&cfg).context("failed decoding YAML")?, + read_yaml_repr::<proto::consensus::Config>(&path.into()).context("failed decoding YAML")?, )) } @@ -1253,16 +1268,16 @@ impl ExternalNodeConfig<()> { secrets_configs_path: PathBuf, consensus_config_path: Option<PathBuf>, ) -> anyhow::Result<Self> { - let general_config = read_yaml_repr::<proto::general::GeneralConfig>(general_config_path) + let general_config = read_yaml_repr::<proto::general::GeneralConfig>(&general_config_path) .context("failed decoding general YAML config")?; let external_node_config = - read_yaml_repr::<proto::en::ExternalNode>(external_node_config_path) + read_yaml_repr::<proto::en::ExternalNode>(&external_node_config_path) .context("failed decoding external node YAML config")?; - let secrets_config = read_yaml_repr::<proto::secrets::Secrets>(secrets_configs_path) + let secrets_config = read_yaml_repr::<proto::secrets::Secrets>(&secrets_configs_path) .context("failed decoding secrets YAML config")?; let consensus = consensus_config_path - .map(read_yaml_repr::<proto::consensus::Config>) + .map(|path| read_yaml_repr::<proto::consensus::Config>(&path)) .transpose() .context("failed decoding consensus YAML config")?; let consensus_secrets = secrets_config.consensus.clone(); @@ -1382,6 +1397,7 @@ impl From<&ExternalNodeConfig> for InternalApiConfig { estimate_gas_acceptable_overestimation: config .optional .estimate_gas_acceptable_overestimation, + estimate_gas_optimize_search: config.optional.estimate_gas_optimize_search, bridge_addresses:
BridgeAddresses { l1_erc20_default_bridge: config.remote.l1_erc20_bridge_proxy_addr, l2_erc20_default_bridge: config.remote.l2_erc20_bridge_addr, diff --git a/core/bin/external_node/src/config/observability.rs b/core/bin/external_node/src/config/observability.rs index 0dd83f3bd35..91b721bf77c 100644 --- a/core/bin/external_node/src/config/observability.rs +++ b/core/bin/external_node/src/config/observability.rs @@ -95,11 +95,10 @@ impl ObservabilityENConfig { ) }) .transpose()?; - let guard = zksync_vlog::ObservabilityBuilder::new() + zksync_vlog::ObservabilityBuilder::new() .with_logs(Some(logs)) .with_sentry(sentry) - .build(); - Ok(guard) + .try_build() } pub(crate) fn from_configs(general_config: &GeneralConfig) -> anyhow::Result { diff --git a/core/bin/external_node/src/config/tests.rs b/core/bin/external_node/src/config/tests.rs index 43210a76572..a32be3eff72 100644 --- a/core/bin/external_node/src/config/tests.rs +++ b/core/bin/external_node/src/config/tests.rs @@ -63,7 +63,10 @@ fn parsing_observability_config() { fn using_unset_sentry_url() { let env_vars = MockEnvironment::new(&[("MISC_SENTRY_URL", "unset")]); let config = ObservabilityENConfig::new(&env_vars).unwrap(); - config.build_observability().unwrap(); + if let Err(err) = config.build_observability() { + // Global tracer may be installed by another test, but the logic shouldn't fail before that. + assert!(format!("{err:?}").contains("global tracer"), "{err:?}"); + } } #[test] diff --git a/core/bin/external_node/src/node_builder.rs b/core/bin/external_node/src/node_builder.rs index 98e286c253a..14e09b9c2a7 100644 --- a/core/bin/external_node/src/node_builder.rs +++ b/core/bin/external_node/src/node_builder.rs @@ -12,7 +12,7 @@ use zksync_config::{ PostgresConfig, }; use zksync_metadata_calculator::{MetadataCalculatorConfig, MetadataCalculatorRecoveryConfig}; -use zksync_node_api_server::{tx_sender::ApiContracts, web3::Namespace}; +use zksync_node_api_server::web3::Namespace; use zksync_node_framework::{ implementations::layers::{ batch_status_updater::BatchStatusUpdaterLayer, @@ -380,12 +380,10 @@ impl ExternalNodeBuilder { latest_values_cache_size: self.config.optional.latest_values_cache_size() as u64, }; let max_vm_concurrency = self.config.optional.vm_concurrency_limit; - let api_contracts = ApiContracts::load_from_disk_blocking(); // TODO (BFT-138): Allow to dynamically reload API contracts; let tx_sender_layer = TxSenderLayer::new( (&self.config).into(), postgres_storage_config, max_vm_concurrency, - api_contracts, ) .with_whitelisted_tokens_for_aa_cache(true); @@ -432,6 +430,10 @@ impl ExternalNodeBuilder { response_body_size_limit: Some(self.config.optional.max_response_body_size()), with_extended_tracing: self.config.optional.extended_rpc_tracing, pruning_info_refresh_interval: Some(pruning_info_refresh_interval), + bridge_addresses_refresh_interval: self + .config + .optional + .bridge_addresses_refresh_interval(), polling_interval: Some(self.config.optional.polling_interval()), websocket_requests_per_minute_limit: None, // To be set by WS server layer method if required. 
replication_lag_limit: None, // TODO: Support replication lag limit diff --git a/core/bin/external_node/src/tests/mod.rs b/core/bin/external_node/src/tests/mod.rs index 5e9e7b3eeb3..b21dbd0db9a 100644 --- a/core/bin/external_node/src/tests/mod.rs +++ b/core/bin/external_node/src/tests/mod.rs @@ -21,7 +21,7 @@ const POLL_INTERVAL: Duration = Duration::from_millis(100); #[tokio::test] #[tracing::instrument] // Add args to the test logs async fn external_node_basics(components_str: &'static str) { - let _guard = zksync_vlog::ObservabilityBuilder::new().build(); // Enable logging to simplify debugging + let _guard = zksync_vlog::ObservabilityBuilder::new().try_build().ok(); // Enable logging to simplify debugging let (env, env_handles) = utils::TestEnvironment::with_genesis_block(components_str).await; @@ -92,7 +92,7 @@ async fn external_node_basics(components_str: &'static str) { #[tokio::test] async fn node_reacts_to_stop_signal_during_initial_reorg_detection() { - let _guard = zksync_vlog::ObservabilityBuilder::new().build(); // Enable logging to simplify debugging + let _guard = zksync_vlog::ObservabilityBuilder::new().try_build().ok(); // Enable logging to simplify debugging let (env, env_handles) = utils::TestEnvironment::with_genesis_block("core").await; let l2_client = utils::mock_l2_client_hanging(); @@ -128,7 +128,7 @@ async fn node_reacts_to_stop_signal_during_initial_reorg_detection() { #[tokio::test] async fn running_tree_without_core_is_not_allowed() { - let _guard = zksync_vlog::ObservabilityBuilder::new().build(); // Enable logging to simplify debugging + let _guard = zksync_vlog::ObservabilityBuilder::new().try_build().ok(); // Enable logging to simplify debugging let (env, _env_handles) = utils::TestEnvironment::with_genesis_block("tree").await; let l2_client = utils::mock_l2_client(&env); @@ -165,7 +165,7 @@ async fn running_tree_without_core_is_not_allowed() { #[tokio::test] async fn running_tree_api_without_tree_is_not_allowed() { - let _guard = zksync_vlog::ObservabilityBuilder::new().build(); // Enable logging to simplify debugging + let _guard = zksync_vlog::ObservabilityBuilder::new().try_build().ok(); // Enable logging to simplify debugging let (env, _env_handles) = utils::TestEnvironment::with_genesis_block("core,tree_api").await; let l2_client = utils::mock_l2_client(&env);
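These test changes (and the `build_observability` change above) follow from `ObservabilityBuilder::try_build` now returning a `Result`: the global tracing subscriber can only be installed once per process, so tests that may share a process install it best-effort via `.try_build().ok()`. A self-contained illustration of the underlying once-only behavior, using `tracing_subscriber` directly rather than the repo's `zksync_vlog` wrapper:

```rust
// The global tracing subscriber can be set only once per process; a second
// attempt fails. Tests therefore ignore the error instead of unwrapping it.
fn main() {
    assert!(tracing_subscriber::fmt().try_init().is_ok());
    assert!(tracing_subscriber::fmt().try_init().is_err());
}
```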
diff --git a/core/bin/genesis_generator/src/main.rs b/core/bin/genesis_generator/src/main.rs index abdd6091ed7..4f8200b3af7 100644 --- a/core/bin/genesis_generator/src/main.rs +++ b/core/bin/genesis_generator/src/main.rs @@ -9,7 +9,7 @@ use clap::Parser; use serde_yaml::Serializer; use zksync_config::{configs::DatabaseSecrets, GenesisConfig}; use zksync_contracts::BaseSystemContracts; -use zksync_core_leftovers::temp_config_store::decode_yaml_repr; +use zksync_core_leftovers::temp_config_store::read_yaml_repr; use zksync_dal::{ConnectionPool, Core, CoreDal}; use zksync_env_config::FromEnv; use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; @@ -40,17 +40,13 @@ async fn main() -> anyhow::Result<()> { let database_secrets = match opt.config_path { None => DatabaseSecrets::from_env()?, Some(path) => { - let yaml = - std::fs::read_to_string(&path).with_context(|| path.display().to_string())?; - let config = decode_yaml_repr::<proto::secrets::Secrets>(&yaml) - .context("failed decoding general YAML config")?; + let config = read_yaml_repr::<proto::secrets::Secrets>(&path) + .context("failed decoding secrets YAML")?; config.database.context("Database secrets must exist")? } }; - let yaml = std::fs::read_to_string(DEFAULT_GENESIS_FILE_PATH) - .with_context(|| DEFAULT_GENESIS_FILE_PATH.to_string())?; - let original_genesis = decode_yaml_repr::<proto::genesis::Genesis>(&yaml)?; + let original_genesis = read_yaml_repr::<proto::genesis::Genesis>(&DEFAULT_GENESIS_FILE_PATH.into())?; let db_url = database_secrets.master_url()?; let new_genesis = generate_new_config(db_url, original_genesis.clone()).await?; if opt.check { diff --git a/core/bin/system-constants-generator/src/utils.rs b/core/bin/system-constants-generator/src/utils.rs index e596208a794..43ac9841c40 100644 --- a/core/bin/system-constants-generator/src/utils.rs +++ b/core/bin/system-constants-generator/src/utils.rs @@ -89,7 +89,7 @@ pub(super) fn get_l2_tx( pubdata_price: u32, ) -> L2Tx { L2Tx::new_signed( - contract_address, + Some(contract_address), vec![], Nonce(0), Fee { @@ -134,7 +134,7 @@ pub(super) fn get_l1_tx( ) -> L1Tx { L1Tx { execute: Execute { - contract_address, + contract_address: Some(contract_address), calldata: custom_calldata.unwrap_or_default(), value: U256::from(0), factory_deps, @@ -262,7 +262,7 @@ pub(super) fn execute_internal_transfer_test() -> u32 { } .into_tracer_pointer(); let mut vm: Vm<_, HistoryEnabled> = Vm::new(l1_batch, system_env, storage_view.to_rc_ptr()); - let result = vm.inspect(tracer.into(), VmExecutionMode::Bootloader); + let result = vm.inspect(&mut tracer.into(), VmExecutionMode::Bootloader); assert!(!result.result.is_failed(), "The internal call has reverted"); tracer_result.take()
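The `Some(contract_address)` wrappers here follow from the "make `to` address optional for transaction data" change listed in the 24.25.0 notes above: the `contract_address` (`to`) field is now an `Option`, so deployment-style transactions without a recipient become representable. A minimal sketch of the shape change, with simplified stand-ins for the repo's `Execute` and `Address` types:

```rust
// Simplified stand-ins; the real types live in the repo's type crates.
type Address = [u8; 20];

struct Execute {
    contract_address: Option<Address>, // `None` models a deployment (no `to`)
    calldata: Vec<u8>,
}

fn describe(tx: &Execute) -> &'static str {
    match tx.contract_address {
        Some(_) => "call to an existing contract",
        None => "deployment without a `to` address",
    }
}

fn main() {
    let call = Execute { contract_address: Some([0x11; 20]), calldata: vec![] };
    let deploy = Execute { contract_address: None, calldata: vec![0x60, 0x60] };
    println!("{} / {}", describe(&call), describe(&deploy));
}
```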
diff --git a/core/bin/zksync_server/src/config.rs b/core/bin/zksync_server/src/config.rs index 8c865b61d5b..c55cc779634 100644 --- a/core/bin/zksync_server/src/config.rs +++ b/core/bin/zksync_server/src/config.rs @@ -1,6 +1,6 @@ use anyhow::Context as _; use zksync_config::configs::consensus::{ConsensusConfig, ConsensusSecrets}; -use zksync_core_leftovers::temp_config_store::decode_yaml_repr; +use zksync_core_leftovers::temp_config_store::read_yaml_repr; use zksync_protobuf_config::proto; pub(crate) fn read_consensus_secrets() -> anyhow::Result<Option<ConsensusSecrets>> { @@ -8,9 +8,8 @@ pub(crate) fn read_consensus_secrets() -> anyhow::Result<Option<ConsensusSecrets>> { let Ok(path) = std::env::var("CONSENSUS_SECRETS_PATH") else { return Ok(None); }; - let secrets = std::fs::read_to_string(&path).context(path)?; Ok(Some( - decode_yaml_repr::<proto::secrets::ConsensusSecrets>(&secrets) + read_yaml_repr::<proto::secrets::ConsensusSecrets>(&path.into()) .context("failed decoding YAML")?, )) } @@ -20,8 +19,7 @@ pub(crate) fn read_consensus_config() -> anyhow::Result<Option<ConsensusConfig>> { let Ok(path) = std::env::var("CONSENSUS_CONFIG_PATH") else { return Ok(None); }; - let cfg = std::fs::read_to_string(&path).context(path)?; Ok(Some( - decode_yaml_repr::<proto::consensus::Config>(&cfg).context("failed decoding YAML")?, + read_yaml_repr::<proto::consensus::Config>(&path.into()).context("failed decoding YAML")?, )) } diff --git a/core/bin/zksync_server/src/main.rs b/core/bin/zksync_server/src/main.rs index 84898d6da06..da0a93f624d 100644 --- a/core/bin/zksync_server/src/main.rs +++ b/core/bin/zksync_server/src/main.rs @@ -11,6 +11,7 @@ use zksync_config::{ }, fri_prover_group::FriProverGroupConfig, house_keeper::HouseKeeperConfig, + secrets::DataAvailabilitySecrets, BasicWitnessInputProducerConfig, ContractsConfig, DatabaseSecrets, ExperimentalVmConfig, ExternalPriceApiClientConfig, FriProofCompressorConfig, FriProverConfig, FriProverGatewayConfig, FriWitnessGeneratorConfig, FriWitnessVectorGeneratorConfig, @@ -22,7 +23,7 @@ use zksync_config::{ GenesisConfig, ObjectStoreConfig, PostgresConfig, SnapshotsCreatorConfig, }; use zksync_core_leftovers::{ - temp_config_store::{decode_yaml_repr, TempConfigStore}, + temp_config_store::{read_yaml_repr, TempConfigStore}, Component, Components, }; use zksync_env_config::FromEnv; @@ -98,55 +99,38 @@ fn main() -> anyhow::Result<()> { configs } Some(path) => { - let yaml = - std::fs::read_to_string(&path).with_context(|| path.display().to_string())?; - decode_yaml_repr::<proto::general::GeneralConfig>(&yaml) + read_yaml_repr::<proto::general::GeneralConfig>(&path) .context("failed decoding general YAML config")? } }; let wallets = match opt.wallets_path { None => tmp_config.wallets(), - Some(path) => { - let yaml = - std::fs::read_to_string(&path).with_context(|| path.display().to_string())?; - decode_yaml_repr::<proto::wallets::Wallets>(&yaml) - .context("failed decoding wallets YAML config")? - } + Some(path) => read_yaml_repr::<proto::wallets::Wallets>(&path) + .context("failed decoding wallets YAML config")?, }; let secrets: Secrets = match opt.secrets_path { - Some(path) => { - let yaml = - std::fs::read_to_string(&path).with_context(|| path.display().to_string())?; - decode_yaml_repr::<proto::secrets::Secrets>(&yaml) - .context("failed decoding secrets YAML config")? - } + Some(path) => read_yaml_repr::<proto::secrets::Secrets>(&path) + .context("failed decoding secrets YAML config")?, None => Secrets { consensus: config::read_consensus_secrets().context("read_consensus_secrets()")?, database: DatabaseSecrets::from_env().ok(), l1: L1Secrets::from_env().ok(), + data_availability: DataAvailabilitySecrets::from_env().ok(), }, }; let contracts_config = match opt.contracts_config_path { None => ContractsConfig::from_env().context("contracts_config")?, - Some(path) => { - let yaml = - std::fs::read_to_string(&path).with_context(|| path.display().to_string())?; - decode_yaml_repr::<proto::contracts::Contracts>(&yaml) - .context("failed decoding contracts YAML config")? - } + Some(path) => read_yaml_repr::<proto::contracts::Contracts>(&path) + .context("failed decoding contracts YAML config")?, }; let genesis = match opt.genesis_path { None => GenesisConfig::from_env().context("Genesis config")?, - Some(path) => { - let yaml = - std::fs::read_to_string(&path).with_context(|| path.display().to_string())?; - decode_yaml_repr::<proto::genesis::Genesis>(&yaml) - .context("failed decoding genesis YAML config")?
- } + Some(path) => read_yaml_repr::<proto::genesis::Genesis>(&path) + .context("failed decoding genesis YAML config")?, }; let observability_config = configs .observability diff --git a/core/bin/zksync_server/src/node_builder.rs b/core/bin/zksync_server/src/node_builder.rs index cfc499a7cdc..894272dad84 100644 --- a/core/bin/zksync_server/src/node_builder.rs +++ b/core/bin/zksync_server/src/node_builder.rs @@ -4,15 +4,15 @@ use anyhow::Context; use zksync_config::{ configs::{ - da_client::DAClient, eth_sender::PubdataSendingMode, wallets::Wallets, GeneralConfig, - Secrets, + da_client::DAClientConfig, eth_sender::PubdataSendingMode, + secrets::DataAvailabilitySecrets, wallets::Wallets, GeneralConfig, Secrets, }, ContractsConfig, GenesisConfig, }; use zksync_core_leftovers::Component; use zksync_metadata_calculator::MetadataCalculatorConfig; use zksync_node_api_server::{ - tx_sender::{ApiContracts, TxSenderConfig}, + tx_sender::TxSenderConfig, web3::{state::InternalApiConfig, Namespace}, }; use zksync_node_framework::{ @@ -322,7 +322,6 @@ impl MainNodeBuilder { ), postgres_storage_caches_config, rpc_config.vm_concurrency_limit(), - ApiContracts::load_from_disk_blocking(), // TODO (BFT-138): Allow to dynamically reload API contracts )); Ok(self) } @@ -510,15 +509,21 @@ impl MainNodeBuilder { return Ok(self); }; - match da_client_config.client { - DAClient::Avail(config) => { - self.node.add_layer(AvailWiringLayer::new(config)); + let secrets = self.secrets.data_availability.clone(); + + match (da_client_config, secrets) { + (DAClientConfig::Avail(_), None) => { + anyhow::bail!("Data availability secrets are required for the Avail client"); + } + + (DAClientConfig::Avail(config), Some(DataAvailabilitySecrets::Avail(secret))) => { + self.node.add_layer(AvailWiringLayer::new(config, secret)); } - DAClient::ObjectStore(config) => { + (DAClientConfig::ObjectStore(config), _) => { self.node .add_layer(ObjectStorageClientWiringLayer::new(config)); } - DAClient::EigenDA(config) => { + (DAClientConfig::EigenDA(config), _) => { self.node.add_layer(EigenDAWiringLayer::new(config)); } }
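The wiring now pairs the flattened `DAClientConfig` enum with the new `DataAvailabilitySecrets`, refusing to start when Avail is configured without its secrets. A stand-alone sketch of that validation rule (the enum bodies are simplified stand-ins for the real config structs):

```rust
#[derive(Debug)]
enum DAClientConfig { Avail, ObjectStore, EigenDA }

#[derive(Debug)]
enum DataAvailabilitySecrets { Avail(String) } // seed phrase stand-in

// Mirrors the (config, secrets) match in node_builder.rs: only Avail
// requires secrets; the other clients ignore them.
fn wire_da_client(
    config: DAClientConfig,
    secrets: Option<DataAvailabilitySecrets>,
) -> anyhow::Result<&'static str> {
    match (config, secrets) {
        (DAClientConfig::Avail, None) => {
            anyhow::bail!("Data availability secrets are required for the Avail client")
        }
        (DAClientConfig::Avail, Some(DataAvailabilitySecrets::Avail(_seed))) => Ok("avail"),
        (DAClientConfig::ObjectStore, _) => Ok("object_store"),
        (DAClientConfig::EigenDA, _) => Ok("eigen_da"),
    }
}

fn main() {
    assert!(wire_da_client(DAClientConfig::Avail, None).is_err());
    assert_eq!(wire_da_client(DAClientConfig::ObjectStore, None).unwrap(), "object_store");
}
```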
diff --git a/core/lib/basic_types/Cargo.toml b/core/lib/basic_types/Cargo.toml index 84411405c2a..616b959b078 100644 --- a/core/lib/basic_types/Cargo.toml +++ b/core/lib/basic_types/Cargo.toml @@ -23,6 +23,7 @@ num_enum.workspace = true anyhow.workspace = true url = { workspace = true, features = ["serde"] } serde_with.workspace = true +secrecy.workspace = true [dev-dependencies] bincode.workspace = true diff --git a/core/lib/basic_types/src/lib.rs b/core/lib/basic_types/src/lib.rs index 6e73d9f5fac..8b6a7f949dd 100644 --- a/core/lib/basic_types/src/lib.rs +++ b/core/lib/basic_types/src/lib.rs @@ -28,6 +28,7 @@ pub mod commitment; pub mod network; pub mod protocol_version; pub mod prover_dal; +pub mod seed_phrase; pub mod settlement; pub mod tee_types; pub mod url; diff --git a/core/lib/basic_types/src/prover_dal.rs b/core/lib/basic_types/src/prover_dal.rs index 7eb67144860..bec5a55ced1 100644 --- a/core/lib/basic_types/src/prover_dal.rs +++ b/core/lib/basic_types/src/prover_dal.rs @@ -2,13 +2,14 @@ use std::{net::IpAddr, ops::Add, str::FromStr}; use chrono::{DateTime, Duration, NaiveDateTime, NaiveTime, Utc}; +use serde::{Deserialize, Serialize}; use strum::{Display, EnumString}; use crate::{ basic_fri_types::AggregationRound, protocol_version::ProtocolVersionId, L1BatchNumber, }; -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Copy)] pub struct FriProverJobMetadata { pub id: u32, pub block_number: L1BatchNumber, @@ -27,7 +28,7 @@ pub struct ExtendedJobCountStatistics { pub successful: usize, } -#[derive(Debug, Clone, Copy, Default)] +#[derive(Debug, Clone, Copy, Default, Serialize, Deserialize)] pub struct JobCountStatistics { pub queued: usize, pub in_progress: usize, diff --git a/core/lib/basic_types/src/seed_phrase.rs b/core/lib/basic_types/src/seed_phrase.rs new file mode 100644 index 00000000000..332bfd58594 --- /dev/null +++ b/core/lib/basic_types/src/seed_phrase.rs @@ -0,0 +1,20 @@ +use std::str::FromStr; + +use secrecy::{ExposeSecret, Secret}; + +#[derive(Debug, Clone)] +pub struct SeedPhrase(pub Secret<String>); + +impl PartialEq for SeedPhrase { + fn eq(&self, other: &Self) -> bool { + self.0.expose_secret().eq(other.0.expose_secret()) + } +} + +impl FromStr for SeedPhrase { + type Err = anyhow::Error; + + fn from_str(s: &str) -> Result<Self, Self::Err> { + Ok(SeedPhrase(s.parse()?)) + } +}
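The new `SeedPhrase` wrapper leans on the `secrecy` crate: `Debug` output is redacted, and the value is only reachable through an explicit `expose_secret()` call, which is also why `PartialEq` is implemented by hand (`Secret` deliberately does not derive it). A short illustration of what the wrapper buys:

```rust
use secrecy::{ExposeSecret, Secret};

#[derive(Debug, Clone)]
struct SeedPhrase(Secret<String>);

fn main() {
    let phrase = SeedPhrase(Secret::new("test test test junk".to_string()));
    // Prints a redacted placeholder (something like
    // `SeedPhrase(Secret([REDACTED alloc::string::String]))`), not the phrase.
    println!("{phrase:?}");
    // Reading the value is explicit and easy to grep for.
    assert_eq!(phrase.0.expose_secret().as_str(), "test test test junk");
}
```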
diff --git a/core/lib/basic_types/src/vm.rs b/core/lib/basic_types/src/vm.rs index c178c853b2d..c753bbfc818 100644 --- a/core/lib/basic_types/src/vm.rs +++ b/core/lib/basic_types/src/vm.rs @@ -32,8 +32,9 @@ pub enum FastVmMode { /// Run only the old VM. #[default] Old, - /// Run only the new Vm. + /// Run only the new VM. New, /// Run both the new and old VM and compare their outputs for each transaction execution. + /// The VM will panic on divergence. Shadow, } diff --git a/core/lib/config/src/configs/api.rs b/core/lib/config/src/configs/api.rs index ca42cd5e5f8..86c9ebd074d 100644 --- a/core/lib/config/src/configs/api.rs +++ b/core/lib/config/src/configs/api.rs @@ -169,6 +169,10 @@ pub struct Web3JsonRpcConfig { pub estimate_gas_scale_factor: f64, /// The max possible number of gas that `eth_estimateGas` is allowed to overestimate. pub estimate_gas_acceptable_overestimation: u32, + /// Enables optimizations for the binary search of the gas limit in `eth_estimateGas`. These optimizations are currently + /// considered experimental. + #[serde(default)] + pub estimate_gas_optimize_search: bool, /// Max possible size of an ABI encoded tx (in bytes). pub max_tx_size: usize, /// Max number of cache misses during one VM execution. If the number of cache misses exceeds this value, the API server panics. @@ -237,6 +241,7 @@ impl Web3JsonRpcConfig { gas_price_scale_factor: 1.2, estimate_gas_scale_factor: 1.2, estimate_gas_acceptable_overestimation: 1000, + estimate_gas_optimize_search: false, max_tx_size: 1000000, vm_execution_cache_misses_limit: Default::default(), vm_concurrency_limit: Default::default(), diff --git a/core/lib/config/src/configs/consensus.rs b/core/lib/config/src/configs/consensus.rs index 759e1312833..918d8f4adab 100644 --- a/core/lib/config/src/configs/consensus.rs +++ b/core/lib/config/src/configs/consensus.rs @@ -92,6 +92,8 @@ pub struct GenesisSpec { pub leader: ValidatorPublicKey, /// Address of the registry contract. pub registry_address: Option<ethabi::Address>, + /// Recommended list of peers to connect to. + pub seed_peers: BTreeMap<NodePublicKey, Host>, } #[derive(Clone, Debug, PartialEq, Default)] @@ -146,6 +148,9 @@ pub struct ConsensusConfig { /// Rate limiting configuration for the p2p RPCs. pub rpc: Option<RpcConfig>, + + /// Local socket address to expose the node debug page. + pub debug_page_addr: Option<std::net::SocketAddr>, } impl ConsensusConfig { diff --git a/core/lib/config/src/configs/da_client/avail.rs b/core/lib/config/src/configs/da_client/avail.rs index e8d11978791..590dc5fef18 100644 --- a/core/lib/config/src/configs/da_client/avail.rs +++ b/core/lib/config/src/configs/da_client/avail.rs @@ -1,11 +1,16 @@ use serde::Deserialize; +use zksync_basic_types::seed_phrase::SeedPhrase; #[derive(Clone, Debug, PartialEq, Deserialize)] pub struct AvailConfig { pub api_node_url: String, pub bridge_api_url: String, - pub seed: String, pub app_id: u32, pub timeout: usize, pub max_retries: usize, } + +#[derive(Clone, Debug, PartialEq)] +pub struct AvailSecrets { + pub seed_phrase: Option<SeedPhrase>, +} diff --git a/core/lib/config/src/configs/da_client/mod.rs b/core/lib/config/src/configs/da_client/mod.rs index c8fd81ca471..7157aa48916 100644 --- a/core/lib/config/src/configs/da_client/mod.rs +++ b/core/lib/config/src/configs/da_client/mod.rs @@ -1,5 +1,4 @@ use eigen_da::EigenDAConfig; -use serde::Deserialize; use crate::{AvailConfig, ObjectStoreConfig}; @@ -12,13 +11,7 @@ pub const AVAIL_CLIENT_CONFIG_NAME: &str = "Avail"; pub const OBJECT_STORE_CLIENT_CONFIG_NAME: &str = "ObjectStore"; #[derive(Debug, Clone, PartialEq)] -pub struct DAClientConfig { - pub client: DAClient, -} - -#[derive(Debug, Clone, PartialEq, Deserialize)] -#[serde(tag = "client")] -pub enum DAClient { +pub enum DAClientConfig { Avail(AvailConfig), ObjectStore(ObjectStoreConfig), EigenDA(EigenDAConfig), } diff --git a/core/lib/config/src/configs/en_config.rs b/core/lib/config/src/configs/en_config.rs index 7f130e3539a..4cab47b0779 100644 --- a/core/lib/config/src/configs/en_config.rs +++ b/core/lib/config/src/configs/en_config.rs @@ -1,4 +1,4 @@ -use std::num::NonZeroUsize; +use std::num::{NonZeroU64, NonZeroUsize}; use serde::Deserialize; use zksync_basic_types::{ @@ -19,4 +19,5 @@ pub struct ENConfig { pub main_node_rate_limit_rps: Option<NonZeroUsize>, pub gateway_url: Option<SensitiveUrl>, + pub bridge_addresses_refresh_interval_sec: Option<NonZeroU64>, } diff --git a/core/lib/config/src/configs/eth_sender.rs b/core/lib/config/src/configs/eth_sender.rs index 7e6ef2244cb..3a1a0505728 100644 --- a/core/lib/config/src/configs/eth_sender.rs +++ b/core/lib/config/src/configs/eth_sender.rs @@ -42,6 +42,7 @@ impl EthConfig { pubdata_sending_mode: PubdataSendingMode::Calldata, tx_aggregation_paused: false, tx_aggregation_only_prove_and_execute: false, + time_in_mempool_in_l1_blocks_cap: 1800, }), gas_adjuster: Some(GasAdjusterConfig { default_priority_fee_per_gas: 1000000000, @@ -127,6 +128,10 @@ pub struct SenderConfig { /// special mode specifically for gateway migration to decrease number of non-executed batches #[serde(default = "SenderConfig::default_tx_aggregation_only_prove_and_execute")] pub tx_aggregation_only_prove_and_execute: bool, + + /// Cap of time in mempool for price calculations + #[serde(default = "SenderConfig::default_time_in_mempool_in_l1_blocks_cap")] + pub time_in_mempool_in_l1_blocks_cap: u32, } impl SenderConfig { @@ -168,6 +173,13 @@ impl SenderConfig { const fn default_tx_aggregation_only_prove_and_execute() -> bool { false } + + pub const fn default_time_in_mempool_in_l1_blocks_cap() -> u32 { + let blocks_per_hour = 3600 / 12; + // we cap it at 6h to not allow nearly infinite values when a tx is stuck for a long time + // 1.001 ^ 1800 ~= 6, so by default we cap the exponential price formula at roughly median * 6 + blocks_per_hour * 6 + } } #[derive(Debug, Deserialize, Copy, Clone, PartialEq, Default)] @@ -177,8 +189,10 @@ pub struct
GasAdjusterConfig { /// Number of blocks collected by GasAdjuster from which base_fee median is taken pub max_base_fee_samples: usize, /// Parameter of the transaction base_fee_per_gas pricing formula + #[serde(default = "GasAdjusterConfig::default_pricing_formula_parameter_a")] pub pricing_formula_parameter_a: f64, /// Parameter of the transaction base_fee_per_gas pricing formula + #[serde(default = "GasAdjusterConfig::default_pricing_formula_parameter_b")] pub pricing_formula_parameter_b: f64, /// Parameter by which the base fee will be multiplied for internal purposes pub internal_l1_pricing_multiplier: f64, @@ -225,4 +239,12 @@ impl GasAdjusterConfig { pub const fn default_internal_pubdata_pricing_multiplier() -> f64 { 1.0 } + + pub const fn default_pricing_formula_parameter_a() -> f64 { + 1.1 + } + + pub const fn default_pricing_formula_parameter_b() -> f64 { + 1.001 + } }
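The numbers above fit together as follows: with roughly 12-second L1 blocks, 3600 / 12 = 300 blocks per hour, so the default cap of 300 * 6 = 1800 blocks is six hours; and because the escalation formula multiplies the fee by `pricing_formula_parameter_b` = 1.001 per block spent in the mempool, capping the exponent at 1800 bounds the multiplier at about 1.001^1800 ≈ 6. A quick check of that arithmetic (assuming the 12-second block time the comment implies):

```rust
fn main() {
    let blocks_per_hour = 3600 / 12; // ~12s L1 blocks => 300 blocks/hour
    let cap = blocks_per_hour * 6; // 6 hours => 1800 blocks
    assert_eq!(cap, 1800);

    // Exponential escalation bounded by the cap: 1.001^1800 ~= 6.04,
    // i.e. the suggested fee tops out at roughly 6x the median.
    let max_multiplier = 1.001f64.powi(cap);
    assert!(max_multiplier > 6.0 && max_multiplier < 6.1);
    println!("max multiplier ~= {max_multiplier:.2}");
}
```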
diff --git a/core/lib/config/src/configs/prover_job_monitor.rs b/core/lib/config/src/configs/prover_job_monitor.rs index c16b1db81b7..d60a0e90c20 100644 --- a/core/lib/config/src/configs/prover_job_monitor.rs +++ b/core/lib/config/src/configs/prover_job_monitor.rs @@ -61,6 +61,8 @@ pub struct ProverJobMonitorConfig { /// The interval between runs for Witness Job Queuer. #[serde(default = "ProverJobMonitorConfig::default_witness_job_queuer_run_interval_ms")] pub witness_job_queuer_run_interval_ms: u64, + /// HTTP port of the ProverJobMonitor to send requests to. + pub http_port: u16, } impl ProverJobMonitorConfig { diff --git a/core/lib/config/src/configs/secrets.rs b/core/lib/config/src/configs/secrets.rs index 71197f5d930..779bad37065 100644 --- a/core/lib/config/src/configs/secrets.rs +++ b/core/lib/config/src/configs/secrets.rs @@ -1,7 +1,7 @@ use anyhow::Context; use zksync_basic_types::url::SensitiveUrl; -use crate::configs::consensus::ConsensusSecrets; +use crate::configs::{consensus::ConsensusSecrets, da_client::avail::AvailSecrets}; #[derive(Debug, Clone, PartialEq)] pub struct DatabaseSecrets { @@ -15,11 +15,17 @@ pub struct L1Secrets { pub l1_rpc_url: SensitiveUrl, } +#[derive(Debug, Clone, PartialEq)] +pub enum DataAvailabilitySecrets { + Avail(AvailSecrets), +} + #[derive(Debug, Clone, PartialEq)] pub struct Secrets { pub consensus: Option<ConsensusSecrets>, pub database: Option<DatabaseSecrets>, pub l1: Option<L1Secrets>, + pub data_availability: Option<DataAvailabilitySecrets>, } impl DatabaseSecrets { diff --git a/core/lib/config/src/testonly.rs b/core/lib/config/src/testonly.rs index a106acd5a2f..e25f67abb06 100644 --- a/core/lib/config/src/testonly.rs +++ b/core/lib/config/src/testonly.rs @@ -1,11 +1,13 @@ use std::num::NonZeroUsize; use rand::{distributions::Distribution, Rng}; +use secrecy::Secret; use zksync_basic_types::{ basic_fri_types::CircuitIdRoundTuple, commitment::L1BatchCommitmentMode, network::Network, protocol_version::{ProtocolSemanticVersion, ProtocolVersionId, VersionPatch}, + seed_phrase::SeedPhrase, vm::FastVmMode, L1BatchNumber, L1ChainId, L2ChainId, }; @@ -14,7 +16,7 @@ use zksync_crypto_primitives::K256PrivateKey; use crate::{ configs::{ - self, da_client::DAClient::Avail, eth_sender::PubdataSendingMode, + self, da_client::DAClientConfig::Avail, eth_sender::PubdataSendingMode, external_price_api_client::ForcedPriceClientConfig, }, AvailConfig, @@ -77,6 +79,7 @@ impl Distribution for EncodeDist { gas_price_scale_factor: self.sample(rng), estimate_gas_scale_factor: self.sample(rng), estimate_gas_acceptable_overestimation: self.sample(rng), + estimate_gas_optimize_search: self.sample(rng), max_tx_size: self.sample(rng), vm_execution_cache_misses_limit: self.sample(rng), vm_concurrency_limit: self.sample(rng), @@ -416,6 +419,7 @@ impl Distribution for EncodeDist { pubdata_sending_mode: PubdataSendingMode::Calldata, tx_aggregation_paused: false, tx_aggregation_only_prove_and_execute: false, + time_in_mempool_in_l1_blocks_cap: self.sample(rng), } } } @@ -774,7 +778,9 @@ impl Distribution for EncodeDist { impl Distribution<configs::consensus::GenesisSpec> for EncodeDist { fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> configs::consensus::GenesisSpec { - use configs::consensus::{GenesisSpec, ProtocolVersion, ValidatorPublicKey}; + use configs::consensus::{ + GenesisSpec, Host, NodePublicKey, ProtocolVersion, ValidatorPublicKey, + }; GenesisSpec { chain_id: L2ChainId::default(), protocol_version: ProtocolVersion(self.sample(rng)), @@ -782,6 +788,10 @@ impl Distribution for EncodeDist { attesters: self.sample_collect(rng), leader: ValidatorPublicKey(self.sample(rng)), registry_address: self.sample_opt(|| rng.gen()), + seed_peers: self + .sample_range(rng) + .map(|_| (NodePublicKey(self.sample(rng)), Host(self.sample(rng)))) + .collect(), } } } @@ -805,6 +815,7 @@ impl Distribution for EncodeDist { .collect(), genesis_spec: self.sample(rng), rpc: self.sample(rng), + debug_page_addr: self.sample(rng), } } } @@ -857,6 +868,7 @@ impl Distribution for EncodeDist { consensus: self.sample_opt(|| self.sample(rng)), database: self.sample_opt(|| self.sample(rng)), l1: self.sample_opt(|| self.sample(rng)), + data_availability: self.sample_opt(|| self.sample(rng)), } } } @@ -922,22 +934,28 @@ impl Distribution for EncodeDist { main_node_rate_limit_rps: self.sample_opt(|| rng.gen()), gateway_url: self .sample_opt(|| format!("localhost:{}", rng.gen::<u16>()).parse().unwrap()), + bridge_addresses_refresh_interval_sec: self.sample_opt(|| rng.gen()), } } } impl Distribution<configs::da_client::DAClientConfig> for EncodeDist { fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> configs::da_client::DAClientConfig { - configs::da_client::DAClientConfig { - client: Avail(AvailConfig { - api_node_url: self.sample(rng), - bridge_api_url: self.sample(rng), - seed: self.sample(rng), - app_id: self.sample(rng), - timeout: self.sample(rng), - max_retries: self.sample(rng), - }), - } + Avail(AvailConfig { + api_node_url: self.sample(rng), + bridge_api_url: self.sample(rng), + app_id: self.sample(rng), + timeout: self.sample(rng), + max_retries: self.sample(rng), + }) + } +} + +impl Distribution<configs::secrets::DataAvailabilitySecrets> for EncodeDist { + fn sample<R: Rng + ?Sized>(&self, rng: &mut R) -> configs::secrets::DataAvailabilitySecrets { + configs::secrets::DataAvailabilitySecrets::Avail(configs::da_client::avail::AvailSecrets { + seed_phrase: Some(SeedPhrase(Secret::new(self.sample(rng)))), + }) } } @@ -1114,6 +1132,7 @@ impl Distribution for EncodeDist { prover_queue_reporter_run_interval_ms: self.sample(rng), witness_generator_queue_reporter_run_interval_ms: self.sample(rng), witness_job_queuer_run_interval_ms: self.sample(rng), + http_port: self.sample(rng), } } } diff --git a/core/lib/contracts/src/lib.rs b/core/lib/contracts/src/lib.rs index a60d9fbf181..a72b5c95d1b 100644 --- a/core/lib/contracts/src/lib.rs +++ b/core/lib/contracts/src/lib.rs @@ -49,12 +49,13 @@ const DIAMOND_INIT_CONTRACT_FILE: (&str, &str) = ( const GOVERNANCE_CONTRACT_FILE: (&str, &str) = ("governance", "IGovernance.sol/IGovernance.json"); const CHAIN_ADMIN_CONTRACT_FILE: (&str, &str) = ("governance", "IChainAdmin.sol/IChainAdmin.json"); const GETTERS_FACET_CONTRACT_FILE: (&str, &str) = ( - "state-transition/chain-deps/facets", - "Getters.sol/GettersFacet.json", + "state-transition/chain-interfaces",
"IGetters.sol/IGetters.json", ); const MULTICALL3_CONTRACT_FILE: (&str, &str) = ("dev-contracts", "Multicall3.sol/Multicall3.json"); const VERIFIER_CONTRACT_FILE: (&str, &str) = ("state-transition", "Verifier.sol/Verifier.json"); + const _IERC20_CONTRACT_FILE: &str = "contracts/l1-contracts/artifacts/contracts/common/interfaces/IERC20.sol/IERC20.json"; const _FAIL_ON_RECEIVE_CONTRACT_FILE: &str = diff --git a/core/lib/dal/.sqlx/query-f4e7e2dd109aab2714845b028eba780c75468aa36615bcfcf7f7d8ace23cd1aa.json b/core/lib/dal/.sqlx/query-0458bb8eb595e470b3283847d6c89fd9567bb72dd8b1c604503050b8dab91521.json similarity index 80% rename from core/lib/dal/.sqlx/query-f4e7e2dd109aab2714845b028eba780c75468aa36615bcfcf7f7d8ace23cd1aa.json rename to core/lib/dal/.sqlx/query-0458bb8eb595e470b3283847d6c89fd9567bb72dd8b1c604503050b8dab91521.json index a8e5dbf66d5..c1400f955b9 100644 --- a/core/lib/dal/.sqlx/query-f4e7e2dd109aab2714845b028eba780c75468aa36615bcfcf7f7d8ace23cd1aa.json +++ b/core/lib/dal/.sqlx/query-0458bb8eb595e470b3283847d6c89fd9567bb72dd8b1c604503050b8dab91521.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n PG_LAST_WAL_RECEIVE_LSN() = PG_LAST_WAL_REPLAY_LSN() AS synced,\n EXTRACT(\n SECONDS\n FROM\n NOW() - PG_LAST_XACT_REPLAY_TIMESTAMP()\n )::INT AS LAG\n ", + "query": "\n SELECT\n PG_LAST_WAL_RECEIVE_LSN() = PG_LAST_WAL_REPLAY_LSN() AS synced,\n EXTRACT(\n seconds\n FROM\n NOW() - PG_LAST_XACT_REPLAY_TIMESTAMP()\n )::INT AS LAG\n ", "describe": { "columns": [ { @@ -22,5 +22,5 @@ null ] }, - "hash": "f4e7e2dd109aab2714845b028eba780c75468aa36615bcfcf7f7d8ace23cd1aa" + "hash": "0458bb8eb595e470b3283847d6c89fd9567bb72dd8b1c604503050b8dab91521" } diff --git a/core/lib/dal/.sqlx/query-c6d523c6ae857022318350a2f210d7eaeeb4549ed59b58f8d984be2a22a80355.json b/core/lib/dal/.sqlx/query-13701f55aab0a278a29e21ac2326852faf20600198aa78f66ad8ba9a37ac455f.json similarity index 85% rename from core/lib/dal/.sqlx/query-c6d523c6ae857022318350a2f210d7eaeeb4549ed59b58f8d984be2a22a80355.json rename to core/lib/dal/.sqlx/query-13701f55aab0a278a29e21ac2326852faf20600198aa78f66ad8ba9a37ac455f.json index ebae1f42fbb..86393dce8ab 100644 --- a/core/lib/dal/.sqlx/query-c6d523c6ae857022318350a2f210d7eaeeb4549ed59b58f8d984be2a22a80355.json +++ b/core/lib/dal/.sqlx/query-13701f55aab0a278a29e21ac2326852faf20600198aa78f66ad8ba9a37ac455f.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n MAX(l1_batches.number)\n FROM\n l1_batches\n JOIN eth_txs ON (l1_batches.eth_commit_tx_id = eth_txs.id)\n JOIN eth_txs_history AS commit_tx ON (eth_txs.confirmed_eth_tx_history_id = commit_tx.id)\n WHERE\n commit_tx.confirmed_at IS NOT NULL\n AND eth_prove_tx_id IS NOT NULL\n AND eth_execute_tx_id IS NULL\n AND EXTRACT(\n epoch\n FROM\n commit_tx.confirmed_at\n ) < $1\n ", + "query": "\n SELECT\n MAX(l1_batches.number)\n FROM\n l1_batches\n JOIN eth_txs ON (l1_batches.eth_commit_tx_id = eth_txs.id)\n JOIN eth_txs_history AS commit_tx ON (eth_txs.confirmed_eth_tx_history_id = commit_tx.id)\n WHERE\n commit_tx.confirmed_at IS NOT NULL\n AND eth_prove_tx_id IS NOT NULL\n AND eth_execute_tx_id IS NULL\n AND EXTRACT(\n EPOCH\n FROM\n commit_tx.confirmed_at\n ) < $1\n ", "describe": { "columns": [ { @@ -18,5 +18,5 @@ null ] }, - "hash": "c6d523c6ae857022318350a2f210d7eaeeb4549ed59b58f8d984be2a22a80355" + "hash": "13701f55aab0a278a29e21ac2326852faf20600198aa78f66ad8ba9a37ac455f" } diff --git a/core/lib/dal/.sqlx/query-e1e8ab0cb11c6081d3525228eacbad74e1bab808c744fa14bf24332b39120767.json 
b/core/lib/dal/.sqlx/query-139c7ea1d6ccfb7d0f56d00cde196f9dd5372b5ef41eaa39af58eed5af777df1.json similarity index 65% rename from core/lib/dal/.sqlx/query-e1e8ab0cb11c6081d3525228eacbad74e1bab808c744fa14bf24332b39120767.json rename to core/lib/dal/.sqlx/query-139c7ea1d6ccfb7d0f56d00cde196f9dd5372b5ef41eaa39af58eed5af777df1.json index de9937ef7b9..305bb493651 100644 --- a/core/lib/dal/.sqlx/query-e1e8ab0cb11c6081d3525228eacbad74e1bab808c744fa14bf24332b39120767.json +++ b/core/lib/dal/.sqlx/query-139c7ea1d6ccfb7d0f56d00cde196f9dd5372b5ef41eaa39af58eed5af777df1.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n WITH\n events AS (\n SELECT DISTINCT\n ON (events.tx_hash) *\n FROM\n events\n WHERE\n events.address = $1\n AND events.topic1 = $2\n AND events.tx_hash = ANY ($3)\n ORDER BY\n events.tx_hash,\n events.event_index_in_tx DESC\n )\n SELECT\n transactions.hash AS tx_hash,\n transactions.index_in_block AS index_in_block,\n transactions.l1_batch_tx_index AS l1_batch_tx_index,\n transactions.miniblock_number AS \"block_number!\",\n transactions.error AS error,\n transactions.effective_gas_price AS effective_gas_price,\n transactions.initiator_address AS initiator_address,\n transactions.data -> 'to' AS \"transfer_to?\",\n transactions.data -> 'contractAddress' AS \"execute_contract_address?\",\n transactions.tx_format AS \"tx_format?\",\n transactions.refunded_gas AS refunded_gas,\n transactions.gas_limit AS gas_limit,\n miniblocks.hash AS \"block_hash\",\n miniblocks.l1_batch_number AS \"l1_batch_number?\",\n events.topic4 AS \"contract_address?\",\n miniblocks.timestamp AS \"block_timestamp?\"\n FROM\n transactions\n JOIN miniblocks ON miniblocks.number = transactions.miniblock_number\n LEFT JOIN events ON events.tx_hash = transactions.hash\n WHERE\n transactions.hash = ANY ($3)\n AND transactions.data != '{}'::jsonb\n ", + "query": "\n WITH\n events AS (\n SELECT DISTINCT\n ON (events.tx_hash) *\n FROM\n events\n WHERE\n events.address = $1\n AND events.topic1 = $2\n AND events.tx_hash = ANY ($3)\n ORDER BY\n events.tx_hash,\n events.event_index_in_tx DESC\n )\n SELECT\n transactions.hash AS tx_hash,\n transactions.index_in_block,\n transactions.l1_batch_tx_index,\n transactions.miniblock_number AS \"block_number!\",\n transactions.error,\n transactions.effective_gas_price,\n transactions.initiator_address,\n transactions.data -> 'to' AS \"transfer_to?\",\n transactions.data -> 'contractAddress' AS \"execute_contract_address?\",\n transactions.tx_format AS \"tx_format?\",\n transactions.refunded_gas,\n transactions.gas_limit,\n miniblocks.hash AS \"block_hash\",\n miniblocks.l1_batch_number AS \"l1_batch_number?\",\n events.topic4 AS \"contract_address?\",\n miniblocks.timestamp AS \"block_timestamp?\"\n FROM\n transactions\n JOIN miniblocks ON miniblocks.number = transactions.miniblock_number\n LEFT JOIN events ON events.tx_hash = transactions.hash\n WHERE\n transactions.hash = ANY ($3)\n AND transactions.data != '{}'::jsonb\n ", "describe": { "columns": [ { @@ -110,5 +110,5 @@ false ] }, - "hash": "e1e8ab0cb11c6081d3525228eacbad74e1bab808c744fa14bf24332b39120767" + "hash": "139c7ea1d6ccfb7d0f56d00cde196f9dd5372b5ef41eaa39af58eed5af777df1" } diff --git a/core/lib/dal/.sqlx/query-26c80e9bafcf7989e7d40c6e424240b34ce29aad3ac6571116e084d45574c448.json b/core/lib/dal/.sqlx/query-26c80e9bafcf7989e7d40c6e424240b34ce29aad3ac6571116e084d45574c448.json new file mode 100644 index 00000000000..ee5de53d6e6 --- /dev/null +++ 
b/core/lib/dal/.sqlx/query-26c80e9bafcf7989e7d40c6e424240b34ce29aad3ac6571116e084d45574c448.json @@ -0,0 +1,33 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n next_block_to_process\n FROM\n processed_events\n WHERE\n TYPE = $1\n AND chain_id = $2\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "next_block_to_process", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + { + "Custom": { + "name": "event_type", + "kind": { + "Enum": [ + "ProtocolUpgrades", + "PriorityTransactions" + ] + } + } + }, + "Int8" + ] + }, + "nullable": [ + false + ] + }, + "hash": "26c80e9bafcf7989e7d40c6e424240b34ce29aad3ac6571116e084d45574c448" +} diff --git a/core/lib/dal/.sqlx/query-66b3f476cf4487239190175d2940fd5e1330393d8b04c7cf03b8c7ab2edad5d1.json b/core/lib/dal/.sqlx/query-28e03d23835e86f77a27735ac0ec801788d834bc1ea9b8194e4461c3df979f8b.json similarity index 88% rename from core/lib/dal/.sqlx/query-66b3f476cf4487239190175d2940fd5e1330393d8b04c7cf03b8c7ab2edad5d1.json rename to core/lib/dal/.sqlx/query-28e03d23835e86f77a27735ac0ec801788d834bc1ea9b8194e4461c3df979f8b.json index 0cf4a1e3200..c61184d56c7 100644 --- a/core/lib/dal/.sqlx/query-66b3f476cf4487239190175d2940fd5e1330393d8b04c7cf03b8c7ab2edad5d1.json +++ b/core/lib/dal/.sqlx/query-28e03d23835e86f77a27735ac0ec801788d834bc1ea9b8194e4461c3df979f8b.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n transactions (\n hash,\n is_priority,\n initiator_address,\n gas_limit,\n max_fee_per_gas,\n gas_per_pubdata_limit,\n data,\n upgrade_id,\n contract_address,\n l1_block_number,\n value,\n paymaster,\n paymaster_input,\n tx_format,\n l1_tx_mint,\n l1_tx_refund_recipient,\n miniblock_number,\n index_in_block,\n error,\n execution_info,\n refunded_gas,\n effective_gas_price,\n received_at,\n created_at,\n updated_at\n )\n SELECT\n data_table.hash,\n TRUE,\n data_table.initiator_address,\n data_table.gas_limit,\n data_table.max_fee_per_gas,\n data_table.gas_per_pubdata_limit,\n data_table.data,\n data_table.upgrade_id,\n data_table.contract_address,\n data_table.l1_block_number,\n data_table.value,\n '\\x0000000000000000000000000000000000000000'::bytea,\n '\\x'::bytea,\n data_table.tx_format,\n data_table.l1_tx_mint,\n data_table.l1_tx_refund_recipient,\n $19,\n data_table.index_in_block,\n NULLIF(data_table.error, ''),\n data_table.execution_info,\n data_table.refunded_gas,\n data_table.effective_gas_price,\n NOW(),\n NOW(),\n NOW()\n FROM\n (\n SELECT\n UNNEST($1::BYTEA[]) AS hash,\n UNNEST($2::BYTEA[]) AS initiator_address,\n UNNEST($3::NUMERIC[]) AS gas_limit,\n UNNEST($4::NUMERIC[]) AS max_fee_per_gas,\n UNNEST($5::NUMERIC[]) AS gas_per_pubdata_limit,\n UNNEST($6::JSONB[]) AS data,\n UNNEST($7::INT[]) AS upgrade_id,\n UNNEST($8::BYTEA[]) AS contract_address,\n UNNEST($9::INT[]) AS l1_block_number,\n UNNEST($10::NUMERIC[]) AS value,\n UNNEST($11::INTEGER[]) AS tx_format,\n UNNEST($12::NUMERIC[]) AS l1_tx_mint,\n UNNEST($13::BYTEA[]) AS l1_tx_refund_recipient,\n UNNEST($14::INT[]) AS index_in_block,\n UNNEST($15::VARCHAR[]) AS error,\n UNNEST($16::JSONB[]) AS execution_info,\n UNNEST($17::BIGINT[]) AS refunded_gas,\n UNNEST($18::NUMERIC[]) AS effective_gas_price\n ) AS data_table\n ", + "query": "\n INSERT INTO\n transactions (\n hash,\n is_priority,\n initiator_address,\n gas_limit,\n max_fee_per_gas,\n gas_per_pubdata_limit,\n data,\n upgrade_id,\n contract_address,\n l1_block_number,\n value,\n paymaster,\n paymaster_input,\n tx_format,\n l1_tx_mint,\n l1_tx_refund_recipient,\n 
miniblock_number,\n index_in_block,\n error,\n execution_info,\n refunded_gas,\n effective_gas_price,\n received_at,\n created_at,\n updated_at\n )\n SELECT\n data_table.hash,\n TRUE,\n data_table.initiator_address,\n data_table.gas_limit,\n data_table.max_fee_per_gas,\n data_table.gas_per_pubdata_limit,\n data_table.data,\n data_table.upgrade_id,\n data_table.contract_address,\n data_table.l1_block_number,\n data_table.value,\n '\\x0000000000000000000000000000000000000000'::bytea,\n '\\x'::bytea,\n data_table.tx_format,\n data_table.l1_tx_mint,\n data_table.l1_tx_refund_recipient,\n $19,\n data_table.index_in_block,\n NULLIF(data_table.error, ''),\n data_table.execution_info,\n data_table.refunded_gas,\n data_table.effective_gas_price,\n NOW(),\n NOW(),\n NOW()\n FROM\n (\n SELECT\n UNNEST($1::bytea[]) AS hash,\n UNNEST($2::bytea[]) AS initiator_address,\n UNNEST($3::NUMERIC[]) AS gas_limit,\n UNNEST($4::NUMERIC[]) AS max_fee_per_gas,\n UNNEST($5::NUMERIC[]) AS gas_per_pubdata_limit,\n UNNEST($6::jsonb[]) AS data,\n UNNEST($7::INT[]) AS upgrade_id,\n UNNEST($8::bytea[]) AS contract_address,\n UNNEST($9::INT[]) AS l1_block_number,\n UNNEST($10::NUMERIC[]) AS value,\n UNNEST($11::INTEGER[]) AS tx_format,\n UNNEST($12::NUMERIC[]) AS l1_tx_mint,\n UNNEST($13::bytea[]) AS l1_tx_refund_recipient,\n UNNEST($14::INT[]) AS index_in_block,\n UNNEST($15::VARCHAR[]) AS error,\n UNNEST($16::jsonb[]) AS execution_info,\n UNNEST($17::BIGINT[]) AS refunded_gas,\n UNNEST($18::NUMERIC[]) AS effective_gas_price\n ) AS data_table\n ", "describe": { "columns": [], "parameters": { @@ -28,5 +28,5 @@ }, "nullable": [] }, - "hash": "66b3f476cf4487239190175d2940fd5e1330393d8b04c7cf03b8c7ab2edad5d1" + "hash": "28e03d23835e86f77a27735ac0ec801788d834bc1ea9b8194e4461c3df979f8b" } diff --git a/core/lib/dal/.sqlx/query-294005d0b9445cc8b9c8e4ce7453f71664dcb5ebbc35005a18c5251c3d902f62.json b/core/lib/dal/.sqlx/query-294005d0b9445cc8b9c8e4ce7453f71664dcb5ebbc35005a18c5251c3d902f62.json deleted file mode 100644 index e2aeb15b19a..00000000000 --- a/core/lib/dal/.sqlx/query-294005d0b9445cc8b9c8e4ce7453f71664dcb5ebbc35005a18c5251c3d902f62.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n MAX(INDEX) AS \"max?\"\n FROM\n initial_writes\n WHERE\n l1_batch_number = (\n SELECT\n MAX(l1_batch_number) AS \"max?\"\n FROM\n initial_writes\n WHERE\n l1_batch_number <= $1\n )\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "max?", - "type_info": "Int8" - } - ], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [ - null - ] - }, - "hash": "294005d0b9445cc8b9c8e4ce7453f71664dcb5ebbc35005a18c5251c3d902f62" -} diff --git a/core/lib/dal/.sqlx/query-6c6b8045f9dd7bdb9ada0d71974f28f7b515cf894e63fc95165c9211f71daa36.json b/core/lib/dal/.sqlx/query-30e7d640a731e5c7dccc6d34b94f7116562896314c4d413847854c09c4a14d4e.json similarity index 69% rename from core/lib/dal/.sqlx/query-6c6b8045f9dd7bdb9ada0d71974f28f7b515cf894e63fc95165c9211f71daa36.json rename to core/lib/dal/.sqlx/query-30e7d640a731e5c7dccc6d34b94f7116562896314c4d413847854c09c4a14d4e.json index 1a538831c96..09e0a9360ee 100644 --- a/core/lib/dal/.sqlx/query-6c6b8045f9dd7bdb9ada0d71974f28f7b515cf894e63fc95165c9211f71daa36.json +++ b/core/lib/dal/.sqlx/query-30e7d640a731e5c7dccc6d34b94f7116562896314c4d413847854c09c4a14d4e.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n VERSION,\n l1_batch_number,\n factory_deps_filepath,\n storage_logs_filepaths\n FROM\n snapshots\n WHERE\n l1_batch_number = $1\n ", + 
"query": "\n SELECT\n VERSION,\n L1_BATCH_NUMBER,\n FACTORY_DEPS_FILEPATH,\n STORAGE_LOGS_FILEPATHS\n FROM\n SNAPSHOTS\n WHERE\n L1_BATCH_NUMBER = $1\n ", "describe": { "columns": [ { @@ -36,5 +36,5 @@ false ] }, - "hash": "6c6b8045f9dd7bdb9ada0d71974f28f7b515cf894e63fc95165c9211f71daa36" + "hash": "30e7d640a731e5c7dccc6d34b94f7116562896314c4d413847854c09c4a14d4e" } diff --git a/core/lib/dal/.sqlx/query-8ce0812420fbf35cbbbb0f77f850a54653a5c06c665433718f7ccbbf936f76ca.json b/core/lib/dal/.sqlx/query-33bfa40b6519f05ec2ee8c7a2372b9764b5b4b840573c35b57e011330bd132a3.json similarity index 61% rename from core/lib/dal/.sqlx/query-8ce0812420fbf35cbbbb0f77f850a54653a5c06c665433718f7ccbbf936f76ca.json rename to core/lib/dal/.sqlx/query-33bfa40b6519f05ec2ee8c7a2372b9764b5b4b840573c35b57e011330bd132a3.json index 7ad183d57e5..b17fcff6d61 100644 --- a/core/lib/dal/.sqlx/query-8ce0812420fbf35cbbbb0f77f850a54653a5c06c665433718f7ccbbf936f76ca.json +++ b/core/lib/dal/.sqlx/query-33bfa40b6519f05ec2ee8c7a2372b9764b5b4b840573c35b57e011330bd132a3.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n COUNT(*) AS COUNT\n FROM\n storage_logs\n WHERE\n miniblock_number <= $1\n ", + "query": "\n SELECT\n COUNT(*) AS COUNT\n FROM\n STORAGE_LOGS\n WHERE\n MINIBLOCK_NUMBER <= $1\n ", "describe": { "columns": [ { @@ -18,5 +18,5 @@ null ] }, - "hash": "8ce0812420fbf35cbbbb0f77f850a54653a5c06c665433718f7ccbbf936f76ca" + "hash": "33bfa40b6519f05ec2ee8c7a2372b9764b5b4b840573c35b57e011330bd132a3" } diff --git a/core/lib/dal/.sqlx/query-44ec5b4d2218099da30aa8d654f7445aa3148d8abdd4e5fb50947ce512f53690.json b/core/lib/dal/.sqlx/query-44ec5b4d2218099da30aa8d654f7445aa3148d8abdd4e5fb50947ce512f53690.json new file mode 100644 index 00000000000..43b00071bd4 --- /dev/null +++ b/core/lib/dal/.sqlx/query-44ec5b4d2218099da30aa8d654f7445aa3148d8abdd4e5fb50947ce512f53690.json @@ -0,0 +1,14 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE consensus_replica_state\n SET\n global_config = $1\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Jsonb" + ] + }, + "nullable": [] + }, + "hash": "44ec5b4d2218099da30aa8d654f7445aa3148d8abdd4e5fb50947ce512f53690" +} diff --git a/core/lib/dal/.sqlx/query-5d1f588b2b652105b4452be1cc6c7573fe040d47d17d9bd4e2a2b6d3131a4f9c.json b/core/lib/dal/.sqlx/query-477b6d651c309ed2ab1a361b7f7816c233e16da7330c9327f4020c528f670a33.json similarity index 82% rename from core/lib/dal/.sqlx/query-5d1f588b2b652105b4452be1cc6c7573fe040d47d17d9bd4e2a2b6d3131a4f9c.json rename to core/lib/dal/.sqlx/query-477b6d651c309ed2ab1a361b7f7816c233e16da7330c9327f4020c528f670a33.json index 8d566e0c6cf..68d2f046a17 100644 --- a/core/lib/dal/.sqlx/query-5d1f588b2b652105b4452be1cc6c7573fe040d47d17d9bd4e2a2b6d3131a4f9c.json +++ b/core/lib/dal/.sqlx/query-477b6d651c309ed2ab1a361b7f7816c233e16da7330c9327f4020c528f670a33.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE transactions\n SET\n l1_batch_number = $3,\n l1_batch_tx_index = data_table.l1_batch_tx_index,\n updated_at = NOW()\n FROM\n (\n SELECT\n UNNEST($1::INT[]) AS l1_batch_tx_index,\n UNNEST($2::bytea[]) AS hash\n ) AS data_table\n WHERE\n transactions.hash = data_table.hash\n ", + "query": "\n UPDATE transactions\n SET\n l1_batch_number = $3,\n l1_batch_tx_index = data_table.l1_batch_tx_index,\n updated_at = NOW()\n FROM\n (\n SELECT\n UNNEST($1::INT[]) AS l1_batch_tx_index,\n UNNEST($2::BYTEA[]) AS hash\n ) AS data_table\n WHERE\n transactions.hash = data_table.hash\n ", "describe": { "columns": [], "parameters": { @@ 
-12,5 +12,5 @@ }, "nullable": [] }, - "hash": "5d1f588b2b652105b4452be1cc6c7573fe040d47d17d9bd4e2a2b6d3131a4f9c" + "hash": "477b6d651c309ed2ab1a361b7f7816c233e16da7330c9327f4020c528f670a33" } diff --git a/core/lib/dal/.sqlx/query-2d862097cfae49a1fb28ec0a05176085385c3a79d72f49669b4215a9454323c2.json b/core/lib/dal/.sqlx/query-5b25851e01f30307391412ced12ee7053fe3ba7ec852cfb43b249eed9af8209b.json similarity index 58% rename from core/lib/dal/.sqlx/query-2d862097cfae49a1fb28ec0a05176085385c3a79d72f49669b4215a9454323c2.json rename to core/lib/dal/.sqlx/query-5b25851e01f30307391412ced12ee7053fe3ba7ec852cfb43b249eed9af8209b.json index e08f0c85517..02b638851db 100644 --- a/core/lib/dal/.sqlx/query-2d862097cfae49a1fb28ec0a05176085385c3a79d72f49669b4215a9454323c2.json +++ b/core/lib/dal/.sqlx/query-5b25851e01f30307391412ced12ee7053fe3ba7ec852cfb43b249eed9af8209b.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n INDEX\n FROM\n initial_writes\n WHERE\n l1_batch_number <= $1\n ORDER BY\n l1_batch_number DESC,\n INDEX DESC\n LIMIT\n 1;\n ", + "query": "\n SELECT\n INDEX\n FROM\n INITIAL_WRITES\n WHERE\n L1_BATCH_NUMBER <= $1\n ORDER BY\n L1_BATCH_NUMBER DESC,\n INDEX DESC\n LIMIT\n 1;\n ", "describe": { "columns": [ { @@ -18,5 +18,5 @@ false ] }, - "hash": "2d862097cfae49a1fb28ec0a05176085385c3a79d72f49669b4215a9454323c2" + "hash": "5b25851e01f30307391412ced12ee7053fe3ba7ec852cfb43b249eed9af8209b" } diff --git a/core/lib/dal/.sqlx/query-6171f2d631f69dba52cd913742a46bd1b7b9269375f11f050099cb6d3c1427aa.json b/core/lib/dal/.sqlx/query-6171f2d631f69dba52cd913742a46bd1b7b9269375f11f050099cb6d3c1427aa.json new file mode 100644 index 00000000000..84ff845b0d0 --- /dev/null +++ b/core/lib/dal/.sqlx/query-6171f2d631f69dba52cd913742a46bd1b7b9269375f11f050099cb6d3c1427aa.json @@ -0,0 +1,34 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n transactions.hash AS tx_hash,\n transactions.index_in_block AS tx_index_in_block,\n call_trace\n FROM\n call_traces\n INNER JOIN transactions ON tx_hash = transactions.hash\n WHERE\n transactions.miniblock_number = $1\n ORDER BY\n transactions.index_in_block\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "tx_hash", + "type_info": "Bytea" + }, + { + "ordinal": 1, + "name": "tx_index_in_block", + "type_info": "Int4" + }, + { + "ordinal": 2, + "name": "call_trace", + "type_info": "Bytea" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + false, + true, + false + ] + }, + "hash": "6171f2d631f69dba52cd913742a46bd1b7b9269375f11f050099cb6d3c1427aa" +} diff --git a/core/lib/dal/.sqlx/query-a8d55f14de2f489fdc12c8da540e0d51f86d30d5ad92496e4aa087a3c4aae09f.json b/core/lib/dal/.sqlx/query-785865beac89aff53fe5493d92cfa3245a75736d14b1ff0799f5f05cd4a247a4.json similarity index 89% rename from core/lib/dal/.sqlx/query-a8d55f14de2f489fdc12c8da540e0d51f86d30d5ad92496e4aa087a3c4aae09f.json rename to core/lib/dal/.sqlx/query-785865beac89aff53fe5493d92cfa3245a75736d14b1ff0799f5f05cd4a247a4.json index b00999f2144..e10f32b85eb 100644 --- a/core/lib/dal/.sqlx/query-a8d55f14de2f489fdc12c8da540e0d51f86d30d5ad92496e4aa087a3c4aae09f.json +++ b/core/lib/dal/.sqlx/query-785865beac89aff53fe5493d92cfa3245a75736d14b1ff0799f5f05cd4a247a4.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n transactions (\n hash,\n is_priority,\n initiator_address,\n gas_limit,\n max_fee_per_gas,\n gas_per_pubdata_limit,\n data,\n priority_op_id,\n full_fee,\n layer_2_tip_fee,\n contract_address,\n l1_block_number,\n value,\n 
paymaster,\n paymaster_input,\n tx_format,\n l1_tx_mint,\n l1_tx_refund_recipient,\n miniblock_number,\n index_in_block,\n error,\n execution_info,\n refunded_gas,\n effective_gas_price,\n received_at,\n created_at,\n updated_at\n )\n SELECT\n data_table.hash,\n TRUE,\n data_table.initiator_address,\n data_table.gas_limit,\n data_table.max_fee_per_gas,\n data_table.gas_per_pubdata_limit,\n data_table.data,\n data_table.priority_op_id,\n data_table.full_fee,\n data_table.layer_2_tip_fee,\n data_table.contract_address,\n data_table.l1_block_number,\n data_table.value,\n '\\x0000000000000000000000000000000000000000'::bytea,\n '\\x'::bytea,\n data_table.tx_format,\n data_table.l1_tx_mint,\n data_table.l1_tx_refund_recipient,\n $21,\n data_table.index_in_block,\n NULLIF(data_table.error, ''),\n data_table.execution_info,\n data_table.refunded_gas,\n data_table.effective_gas_price,\n NOW(),\n NOW(),\n NOW()\n FROM\n (\n SELECT\n UNNEST($1::BYTEA[]) AS hash,\n UNNEST($2::BYTEA[]) AS initiator_address,\n UNNEST($3::NUMERIC[]) AS gas_limit,\n UNNEST($4::NUMERIC[]) AS max_fee_per_gas,\n UNNEST($5::NUMERIC[]) AS gas_per_pubdata_limit,\n UNNEST($6::JSONB[]) AS data,\n UNNEST($7::BIGINT[]) AS priority_op_id,\n UNNEST($8::NUMERIC[]) AS full_fee,\n UNNEST($9::NUMERIC[]) AS layer_2_tip_fee,\n UNNEST($10::BYTEA[]) AS contract_address,\n UNNEST($11::INT[]) AS l1_block_number,\n UNNEST($12::NUMERIC[]) AS value,\n UNNEST($13::INTEGER[]) AS tx_format,\n UNNEST($14::NUMERIC[]) AS l1_tx_mint,\n UNNEST($15::BYTEA[]) AS l1_tx_refund_recipient,\n UNNEST($16::INT[]) AS index_in_block,\n UNNEST($17::VARCHAR[]) AS error,\n UNNEST($18::JSONB[]) AS execution_info,\n UNNEST($19::BIGINT[]) AS refunded_gas,\n UNNEST($20::NUMERIC[]) AS effective_gas_price\n ) AS data_table\n ", + "query": "\n INSERT INTO\n transactions (\n hash,\n is_priority,\n initiator_address,\n gas_limit,\n max_fee_per_gas,\n gas_per_pubdata_limit,\n data,\n priority_op_id,\n full_fee,\n layer_2_tip_fee,\n contract_address,\n l1_block_number,\n value,\n paymaster,\n paymaster_input,\n tx_format,\n l1_tx_mint,\n l1_tx_refund_recipient,\n miniblock_number,\n index_in_block,\n error,\n execution_info,\n refunded_gas,\n effective_gas_price,\n received_at,\n created_at,\n updated_at\n )\n SELECT\n data_table.hash,\n TRUE,\n data_table.initiator_address,\n data_table.gas_limit,\n data_table.max_fee_per_gas,\n data_table.gas_per_pubdata_limit,\n data_table.data,\n data_table.priority_op_id,\n data_table.full_fee,\n data_table.layer_2_tip_fee,\n data_table.contract_address,\n data_table.l1_block_number,\n data_table.value,\n '\\x0000000000000000000000000000000000000000'::bytea,\n '\\x'::bytea,\n data_table.tx_format,\n data_table.l1_tx_mint,\n data_table.l1_tx_refund_recipient,\n $21,\n data_table.index_in_block,\n NULLIF(data_table.error, ''),\n data_table.execution_info,\n data_table.refunded_gas,\n data_table.effective_gas_price,\n NOW(),\n NOW(),\n NOW()\n FROM\n (\n SELECT\n UNNEST($1::bytea[]) AS hash,\n UNNEST($2::bytea[]) AS initiator_address,\n UNNEST($3::NUMERIC[]) AS gas_limit,\n UNNEST($4::NUMERIC[]) AS max_fee_per_gas,\n UNNEST($5::NUMERIC[]) AS gas_per_pubdata_limit,\n UNNEST($6::jsonb[]) AS data,\n UNNEST($7::BIGINT[]) AS priority_op_id,\n UNNEST($8::NUMERIC[]) AS full_fee,\n UNNEST($9::NUMERIC[]) AS layer_2_tip_fee,\n UNNEST($10::bytea[]) AS contract_address,\n UNNEST($11::INT[]) AS l1_block_number,\n UNNEST($12::NUMERIC[]) AS value,\n UNNEST($13::INTEGER[]) AS tx_format,\n UNNEST($14::NUMERIC[]) AS l1_tx_mint,\n UNNEST($15::bytea[]) AS 
l1_tx_refund_recipient,\n UNNEST($16::INT[]) AS index_in_block,\n UNNEST($17::VARCHAR[]) AS error,\n UNNEST($18::jsonb[]) AS execution_info,\n UNNEST($19::BIGINT[]) AS refunded_gas,\n UNNEST($20::NUMERIC[]) AS effective_gas_price\n ) AS data_table\n ", "describe": { "columns": [], "parameters": { @@ -30,5 +30,5 @@ }, "nullable": [] }, - "hash": "a8d55f14de2f489fdc12c8da540e0d51f86d30d5ad92496e4aa087a3c4aae09f" + "hash": "785865beac89aff53fe5493d92cfa3245a75736d14b1ff0799f5f05cd4a247a4" } diff --git a/core/lib/dal/.sqlx/query-39849c30cdfa1827d69313357f92c495462e5aa430fe135d1db1f9bad1e72156.json b/core/lib/dal/.sqlx/query-96936c827caf26f93532bab309020b623d73e1b33024023f4bce7985e58be99e.json similarity index 67% rename from core/lib/dal/.sqlx/query-39849c30cdfa1827d69313357f92c495462e5aa430fe135d1db1f9bad1e72156.json rename to core/lib/dal/.sqlx/query-96936c827caf26f93532bab309020b623d73e1b33024023f4bce7985e58be99e.json index e196502056c..3d6663efb96 100644 --- a/core/lib/dal/.sqlx/query-39849c30cdfa1827d69313357f92c495462e5aa430fe135d1db1f9bad1e72156.json +++ b/core/lib/dal/.sqlx/query-96936c827caf26f93532bab309020b623d73e1b33024023f4bce7985e58be99e.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n VERSION,\n l1_batch_number,\n factory_deps_filepath,\n storage_logs_filepaths\n FROM\n snapshots\n ORDER BY\n l1_batch_number DESC\n LIMIT\n 1\n ", + "query": "\n SELECT\n VERSION,\n L1_BATCH_NUMBER,\n FACTORY_DEPS_FILEPATH,\n STORAGE_LOGS_FILEPATHS\n FROM\n SNAPSHOTS\n ORDER BY\n L1_BATCH_NUMBER DESC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -34,5 +34,5 @@ false ] }, - "hash": "39849c30cdfa1827d69313357f92c495462e5aa430fe135d1db1f9bad1e72156" + "hash": "96936c827caf26f93532bab309020b623d73e1b33024023f4bce7985e58be99e" } diff --git a/core/lib/dal/.sqlx/query-443b5c62c2c274369764ac5279d8f6f962ff3f07aa1cf7f0ffcefffccb633cdd.json b/core/lib/dal/.sqlx/query-99915c717193ff872b9cf8f5e9d1b84a6dfe1976b4ddb64cc288a846a0a7fda4.json similarity index 60% rename from core/lib/dal/.sqlx/query-443b5c62c2c274369764ac5279d8f6f962ff3f07aa1cf7f0ffcefffccb633cdd.json rename to core/lib/dal/.sqlx/query-99915c717193ff872b9cf8f5e9d1b84a6dfe1976b4ddb64cc288a846a0a7fda4.json index 3e2a8752dd2..4fe387aaa5b 100644 --- a/core/lib/dal/.sqlx/query-443b5c62c2c274369764ac5279d8f6f962ff3f07aa1cf7f0ffcefffccb633cdd.json +++ b/core/lib/dal/.sqlx/query-99915c717193ff872b9cf8f5e9d1b84a6dfe1976b4ddb64cc288a846a0a7fda4.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n INDEX\n FROM\n initial_writes\n WHERE\n hashed_key = $1\n AND l1_batch_number <= $2\n ", + "query": "\n SELECT\n INDEX\n FROM\n INITIAL_WRITES\n WHERE\n HASHED_KEY = $1\n AND L1_BATCH_NUMBER <= $2\n ", "describe": { "columns": [ { @@ -19,5 +19,5 @@ false ] }, - "hash": "443b5c62c2c274369764ac5279d8f6f962ff3f07aa1cf7f0ffcefffccb633cdd" + "hash": "99915c717193ff872b9cf8f5e9d1b84a6dfe1976b4ddb64cc288a846a0a7fda4" } diff --git a/core/lib/dal/.sqlx/query-afdeecb78e3af802c2b8ffb0f5ab9863082ccd1ce45b2d20e1119f1e78171d66.json b/core/lib/dal/.sqlx/query-afdeecb78e3af802c2b8ffb0f5ab9863082ccd1ce45b2d20e1119f1e78171d66.json new file mode 100644 index 00000000000..bb0d73ee6c8 --- /dev/null +++ b/core/lib/dal/.sqlx/query-afdeecb78e3af802c2b8ffb0f5ab9863082ccd1ce45b2d20e1119f1e78171d66.json @@ -0,0 +1,26 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n processed_events (\n TYPE,\n chain_id,\n next_block_to_process\n )\n VALUES\n ($1, $2, $3)\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + { + 
"Custom": { + "name": "event_type", + "kind": { + "Enum": [ + "ProtocolUpgrades", + "PriorityTransactions" + ] + } + } + }, + "Int8", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "afdeecb78e3af802c2b8ffb0f5ab9863082ccd1ce45b2d20e1119f1e78171d66" +} diff --git a/core/lib/dal/.sqlx/query-b98e3790de305017c8fa5fba4c0c783b3710ee47f88edce1b17c2b8fa21dadd3.json b/core/lib/dal/.sqlx/query-b98e3790de305017c8fa5fba4c0c783b3710ee47f88edce1b17c2b8fa21dadd3.json deleted file mode 100644 index 81981683e89..00000000000 --- a/core/lib/dal/.sqlx/query-b98e3790de305017c8fa5fba4c0c783b3710ee47f88edce1b17c2b8fa21dadd3.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n protocol_version\n FROM\n transactions\n INNER JOIN miniblocks ON transactions.miniblock_number = miniblocks.number\n WHERE\n transactions.hash = $1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "protocol_version", - "type_info": "Int4" - } - ], - "parameters": { - "Left": [ - "Bytea" - ] - }, - "nullable": [ - true - ] - }, - "hash": "b98e3790de305017c8fa5fba4c0c783b3710ee47f88edce1b17c2b8fa21dadd3" -} diff --git a/core/lib/dal/.sqlx/query-bdfd7e9d4462ac9cf6f91fced84355e6aec05ba4af297a03169e3122a67ae53e.json b/core/lib/dal/.sqlx/query-bdfd7e9d4462ac9cf6f91fced84355e6aec05ba4af297a03169e3122a67ae53e.json new file mode 100644 index 00000000000..0b1f56ef9f3 --- /dev/null +++ b/core/lib/dal/.sqlx/query-bdfd7e9d4462ac9cf6f91fced84355e6aec05ba4af297a03169e3122a67ae53e.json @@ -0,0 +1,28 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n protocol_version,\n index_in_block\n FROM\n transactions\n INNER JOIN miniblocks ON transactions.miniblock_number = miniblocks.number\n WHERE\n transactions.hash = $1\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "protocol_version", + "type_info": "Int4" + }, + { + "ordinal": 1, + "name": "index_in_block", + "type_info": "Int4" + } + ], + "parameters": { + "Left": [ + "Bytea" + ] + }, + "nullable": [ + true, + true + ] + }, + "hash": "bdfd7e9d4462ac9cf6f91fced84355e6aec05ba4af297a03169e3122a67ae53e" +} diff --git a/core/lib/dal/.sqlx/query-c37432fabd092fa235fc70e11430fb28594859564a0f888eae748ad1f9fcede5.json b/core/lib/dal/.sqlx/query-c37432fabd092fa235fc70e11430fb28594859564a0f888eae748ad1f9fcede5.json deleted file mode 100644 index 906cd108140..00000000000 --- a/core/lib/dal/.sqlx/query-c37432fabd092fa235fc70e11430fb28594859564a0f888eae748ad1f9fcede5.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n call_trace\n FROM\n call_traces\n INNER JOIN transactions ON tx_hash = transactions.hash\n WHERE\n transactions.miniblock_number = $1\n ORDER BY\n transactions.index_in_block\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "call_trace", - "type_info": "Bytea" - } - ], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [ - false - ] - }, - "hash": "c37432fabd092fa235fc70e11430fb28594859564a0f888eae748ad1f9fcede5" -} diff --git a/core/lib/dal/.sqlx/query-c61682ed92c1a43855a991598d593639a63047f46dc8e605ff37b78f43f5cef5.json b/core/lib/dal/.sqlx/query-c61682ed92c1a43855a991598d593639a63047f46dc8e605ff37b78f43f5cef5.json new file mode 100644 index 00000000000..b797ccb4604 --- /dev/null +++ b/core/lib/dal/.sqlx/query-c61682ed92c1a43855a991598d593639a63047f46dc8e605ff37b78f43f5cef5.json @@ -0,0 +1,26 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE processed_events\n SET\n next_block_to_process = $3\n WHERE\n TYPE = $1\n AND chain_id = $2\n ", + "describe": { + 
"columns": [], + "parameters": { + "Left": [ + { + "Custom": { + "name": "event_type", + "kind": { + "Enum": [ + "ProtocolUpgrades", + "PriorityTransactions" + ] + } + } + }, + "Int8", + "Int8" + ] + }, + "nullable": [] + }, + "hash": "c61682ed92c1a43855a991598d593639a63047f46dc8e605ff37b78f43f5cef5" +} diff --git a/core/lib/dal/.sqlx/query-4aeb7dcd79000540c03fb12c0608ab71005d70f13ed32172e47166f71b3aef80.json b/core/lib/dal/.sqlx/query-d08cb5418fe30fd7417f8bf0505c7fbb13c7719f73dba603053044b794d786eb.json similarity index 56% rename from core/lib/dal/.sqlx/query-4aeb7dcd79000540c03fb12c0608ab71005d70f13ed32172e47166f71b3aef80.json rename to core/lib/dal/.sqlx/query-d08cb5418fe30fd7417f8bf0505c7fbb13c7719f73dba603053044b794d786eb.json index feb8f29855e..677563873e0 100644 --- a/core/lib/dal/.sqlx/query-4aeb7dcd79000540c03fb12c0608ab71005d70f13ed32172e47166f71b3aef80.json +++ b/core/lib/dal/.sqlx/query-d08cb5418fe30fd7417f8bf0505c7fbb13c7719f73dba603053044b794d786eb.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n nonce\n FROM\n eth_txs\n WHERE\n from_addr IS NOT DISTINCT FROM $1 -- can't just use equality as NULL != NULL\\\n AND is_gateway = $2\n ORDER BY\n id DESC\n LIMIT\n 1\n ", + "query": "\n SELECT\n nonce\n FROM\n eth_txs\n WHERE\n -- can't just use equality as NULL != NULL\n from_addr IS NOT DISTINCT FROM $1\n AND is_gateway = $2\n ORDER BY\n id DESC\n LIMIT\n 1\n ", "describe": { "columns": [ { @@ -19,5 +19,5 @@ false ] }, - "hash": "4aeb7dcd79000540c03fb12c0608ab71005d70f13ed32172e47166f71b3aef80" + "hash": "d08cb5418fe30fd7417f8bf0505c7fbb13c7719f73dba603053044b794d786eb" } diff --git a/core/lib/dal/.sqlx/query-b75e3d2fecbf5d85e93848b7a35180abbd76956e073432af8d8500327b74e488.json b/core/lib/dal/.sqlx/query-d46fcdcb100b240aa5f2b23d07e6abe3514e755546ac39a5fddae03a3a435e3d.json similarity index 64% rename from core/lib/dal/.sqlx/query-b75e3d2fecbf5d85e93848b7a35180abbd76956e073432af8d8500327b74e488.json rename to core/lib/dal/.sqlx/query-d46fcdcb100b240aa5f2b23d07e6abe3514e755546ac39a5fddae03a3a435e3d.json index 91d7b677959..f9c72f3d34f 100644 --- a/core/lib/dal/.sqlx/query-b75e3d2fecbf5d85e93848b7a35180abbd76956e073432af8d8500327b74e488.json +++ b/core/lib/dal/.sqlx/query-d46fcdcb100b240aa5f2b23d07e6abe3514e755546ac39a5fddae03a3a435e3d.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n VERSION\n FROM\n compiler_versions\n WHERE\n compiler = $1\n ORDER BY\n VERSION\n ", + "query": "\n SELECT\n VERSION\n FROM\n COMPILER_VERSIONS\n WHERE\n COMPILER = $1\n ORDER BY\n VERSION\n ", "describe": { "columns": [ { @@ -18,5 +18,5 @@ false ] }, - "hash": "b75e3d2fecbf5d85e93848b7a35180abbd76956e073432af8d8500327b74e488" + "hash": "d46fcdcb100b240aa5f2b23d07e6abe3514e755546ac39a5fddae03a3a435e3d" } diff --git a/core/lib/dal/.sqlx/query-f1a90090c192d68367e799188356efe8d41759bbdcdd6d39db93208f2664f03a.json b/core/lib/dal/.sqlx/query-d7bd14e6550df54657c37d389c4ed79cde6d6d0a75702953b16df1a9748b11f5.json similarity index 64% rename from core/lib/dal/.sqlx/query-f1a90090c192d68367e799188356efe8d41759bbdcdd6d39db93208f2664f03a.json rename to core/lib/dal/.sqlx/query-d7bd14e6550df54657c37d389c4ed79cde6d6d0a75702953b16df1a9748b11f5.json index 616173355a9..5d47321965c 100644 --- a/core/lib/dal/.sqlx/query-f1a90090c192d68367e799188356efe8d41759bbdcdd6d39db93208f2664f03a.json +++ b/core/lib/dal/.sqlx/query-d7bd14e6550df54657c37d389c4ed79cde6d6d0a75702953b16df1a9748b11f5.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n INDEX\n FROM\n 
initial_writes\n WHERE\n hashed_key = $1\n ", + "query": "\n SELECT\n INDEX\n FROM\n INITIAL_WRITES\n WHERE\n HASHED_KEY = $1\n ", "describe": { "columns": [ { @@ -18,5 +18,5 @@ false ] }, - "hash": "f1a90090c192d68367e799188356efe8d41759bbdcdd6d39db93208f2664f03a" + "hash": "d7bd14e6550df54657c37d389c4ed79cde6d6d0a75702953b16df1a9748b11f5" } diff --git a/core/lib/dal/.sqlx/query-dcfe6ef35a598098276d2cc69a05320b4081c66e965f359d70fba7670b88a5f6.json b/core/lib/dal/.sqlx/query-dcfe6ef35a598098276d2cc69a05320b4081c66e965f359d70fba7670b88a5f6.json new file mode 100644 index 00000000000..183fe4a0ab2 --- /dev/null +++ b/core/lib/dal/.sqlx/query-dcfe6ef35a598098276d2cc69a05320b4081c66e965f359d70fba7670b88a5f6.json @@ -0,0 +1,22 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n MAX(INDEX) AS \"max?\"\n FROM\n INITIAL_WRITES\n WHERE\n L1_BATCH_NUMBER = (\n SELECT\n MAX(L1_BATCH_NUMBER) AS \"max?\"\n FROM\n INITIAL_WRITES\n WHERE\n L1_BATCH_NUMBER <= $1\n )\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "max?", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [ + "Int8" + ] + }, + "nullable": [ + null + ] + }, + "hash": "dcfe6ef35a598098276d2cc69a05320b4081c66e965f359d70fba7670b88a5f6" +} diff --git a/core/lib/dal/.sqlx/query-a48c92f557e5e3a2674ce0dee9cd92f5a547150590b8c221c4065eab11175c7a.json b/core/lib/dal/.sqlx/query-ea4a4f5e0b2abe5e8f2745cf42e51063eae38da5d4b874f74c57b26f188fdeea.json similarity index 69% rename from core/lib/dal/.sqlx/query-a48c92f557e5e3a2674ce0dee9cd92f5a547150590b8c221c4065eab11175c7a.json rename to core/lib/dal/.sqlx/query-ea4a4f5e0b2abe5e8f2745cf42e51063eae38da5d4b874f74c57b26f188fdeea.json index 49e547e5564..53ba11c6dba 100644 --- a/core/lib/dal/.sqlx/query-a48c92f557e5e3a2674ce0dee9cd92f5a547150590b8c221c4065eab11175c7a.json +++ b/core/lib/dal/.sqlx/query-ea4a4f5e0b2abe5e8f2745cf42e51063eae38da5d4b874f74c57b26f188fdeea.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n MAX(INDEX) AS \"max?\"\n FROM\n initial_writes\n ", + "query": "\n SELECT\n MAX(INDEX) AS \"max?\"\n FROM\n INITIAL_WRITES\n ", "describe": { "columns": [ { @@ -16,5 +16,5 @@ null ] }, - "hash": "a48c92f557e5e3a2674ce0dee9cd92f5a547150590b8c221c4065eab11175c7a" + "hash": "ea4a4f5e0b2abe5e8f2745cf42e51063eae38da5d4b874f74c57b26f188fdeea" } diff --git a/core/lib/dal/Cargo.toml b/core/lib/dal/Cargo.toml index 9c13eeb3014..ccca49525e4 100644 --- a/core/lib/dal/Cargo.toml +++ b/core/lib/dal/Cargo.toml @@ -19,6 +19,7 @@ zksync_utils.workspace = true zksync_system_constants.workspace = true zksync_contracts.workspace = true zksync_types.workspace = true +zksync_concurrency.workspace = true zksync_consensus_roles.workspace = true zksync_consensus_storage.workspace = true zksync_protobuf.workspace = true diff --git a/core/lib/dal/migrations/20240912085550_add_eth_watcher_progress_table.down.sql b/core/lib/dal/migrations/20240912085550_add_eth_watcher_progress_table.down.sql new file mode 100644 index 00000000000..79331481f58 --- /dev/null +++ b/core/lib/dal/migrations/20240912085550_add_eth_watcher_progress_table.down.sql @@ -0,0 +1,4 @@ +DROP TABLE IF EXISTS processed_events; + +DROP TYPE IF EXISTS event_type; + diff --git a/core/lib/dal/migrations/20240912085550_add_eth_watcher_progress_table.up.sql b/core/lib/dal/migrations/20240912085550_add_eth_watcher_progress_table.up.sql new file mode 100644 index 00000000000..be8a2b89a02 --- /dev/null +++ b/core/lib/dal/migrations/20240912085550_add_eth_watcher_progress_table.up.sql @@ -0,0 +1,9 @@ +CREATE TYPE 
event_type AS ENUM ('ProtocolUpgrades', 'PriorityTransactions'); + +CREATE TABLE processed_events +( + type event_type NOT NULL, + chain_id BIGINT NOT NULL, + next_block_to_process BIGINT NOT NULL, + PRIMARY KEY (chain_id, type) +) diff --git a/core/lib/dal/src/blocks_dal.rs b/core/lib/dal/src/blocks_dal.rs index 0eebde70164..ac381a0d9bc 100644 --- a/core/lib/dal/src/blocks_dal.rs +++ b/core/lib/dal/src/blocks_dal.rs @@ -994,8 +994,11 @@ impl BlocksDal<'_, '_> { ON CONFLICT (l1_batch_number) DO NOTHING "#, i64::from(number.0), - commitment_artifacts.aux_commitments.map(|a| a.events_queue_commitment.0.to_vec()), - commitment_artifacts.aux_commitments + commitment_artifacts + .aux_commitments + .map(|a| a.events_queue_commitment.0.to_vec()), + commitment_artifacts + .aux_commitments .map(|a| a.bootloader_initial_content_commitment.0.to_vec()), ) .instrument("save_batch_aux_commitments") @@ -1464,7 +1467,7 @@ impl BlocksDal<'_, '_> { AND eth_prove_tx_id IS NOT NULL AND eth_execute_tx_id IS NULL AND EXTRACT( - epoch + EPOCH FROM commit_tx.confirmed_at ) < $1 diff --git a/core/lib/dal/src/blocks_web3_dal.rs b/core/lib/dal/src/blocks_web3_dal.rs index 36a4acc0a6d..3d17a919a07 100644 --- a/core/lib/dal/src/blocks_web3_dal.rs +++ b/core/lib/dal/src/blocks_web3_dal.rs @@ -527,7 +527,7 @@ impl BlocksWeb3Dal<'_, '_> { pub async fn get_traces_for_l2_block( &mut self, block_number: L2BlockNumber, - ) -> DalResult> { + ) -> DalResult> { let protocol_version = sqlx::query!( r#" SELECT @@ -554,6 +554,8 @@ impl BlocksWeb3Dal<'_, '_> { CallTrace, r#" SELECT + transactions.hash AS tx_hash, + transactions.index_in_block AS tx_index_in_block, call_trace FROM call_traces @@ -570,7 +572,11 @@ impl BlocksWeb3Dal<'_, '_> { .fetch_all(self.storage) .await? .into_iter() - .map(|call_trace| call_trace.into_call(protocol_version)) + .map(|call_trace| { + let hash = H256::from_slice(&call_trace.tx_hash); + let index = call_trace.tx_index_in_block.unwrap_or_default() as usize; + (call_trace.into_call(protocol_version), hash, index) + }) .collect()) } @@ -1084,8 +1090,9 @@ mod tests { .await .unwrap(); assert_eq!(traces.len(), 2); - for (trace, tx_result) in traces.iter().zip(&tx_results) { + for ((trace, hash, _index), tx_result) in traces.iter().zip(&tx_results) { let expected_trace = tx_result.call_trace().unwrap(); + assert_eq!(&tx_result.hash, hash); assert_eq!(*trace, expected_trace); } } diff --git a/core/lib/dal/src/consensus/mod.rs b/core/lib/dal/src/consensus/mod.rs index f0ef336bc54..f01655d56a9 100644 --- a/core/lib/dal/src/consensus/mod.rs +++ b/core/lib/dal/src/consensus/mod.rs @@ -5,8 +5,11 @@ mod testonly; #[cfg(test)] mod tests; +use std::collections::BTreeMap; + use anyhow::{anyhow, Context as _}; -use zksync_consensus_roles::{attester, validator}; +use zksync_concurrency::net; +use zksync_consensus_roles::{attester, node, validator}; use zksync_protobuf::{read_required, required, ProtoFmt, ProtoRepr}; use zksync_types::{ abi, ethabi, @@ -27,6 +30,23 @@ use crate::models::{parse_h160, parse_h256}; pub struct GlobalConfig { pub genesis: validator::Genesis, pub registry_address: Option, + pub seed_peers: BTreeMap, +} + +impl ProtoRepr for proto::NodeAddr { + type Type = (node::PublicKey, net::Host); + fn read(&self) -> anyhow::Result { + Ok(( + read_required(&self.key).context("key")?, + net::Host(required(&self.addr).context("addr")?.clone()), + )) + } + fn build(this: &Self::Type) -> Self { + Self { + key: Some(this.0.build()), + addr: Some(this.1 .0.clone()), + } + } } impl ProtoFmt for GlobalConfig { 
@@ -41,6 +61,13 @@ impl ProtoFmt for GlobalConfig { .map(|a| parse_h160(a)) .transpose() .context("registry_address")?, + seed_peers: r + .seed_peers + .iter() + .enumerate() + .map(|(i, e)| e.read().context(i)) + .collect::>() + .context("seed_peers")?, }) } @@ -48,6 +75,11 @@ impl ProtoFmt for GlobalConfig { Self::Proto { genesis: Some(self.genesis.build()), registry_address: self.registry_address.map(|a| a.as_bytes().to_vec()), + seed_peers: self + .seed_peers + .iter() + .map(|(k, v)| ProtoRepr::build(&(k.clone(), v.clone()))) + .collect(), } } } @@ -401,9 +433,10 @@ impl ProtoRepr for proto::Transaction { } }, execute: Execute { - contract_address: required(&execute.contract_address) - .and_then(|x| parse_h160(x)) - .context("execute.contract_address")?, + contract_address: execute + .contract_address + .as_ref() + .and_then(|x| parse_h160(x).ok()), calldata: required(&execute.calldata).context("calldata")?.clone(), value: required(&execute.value) .and_then(|x| parse_h256(x)) @@ -487,7 +520,7 @@ impl ProtoRepr for proto::Transaction { } }; let execute = proto::Execute { - contract_address: Some(this.execute.contract_address.as_bytes().into()), + contract_address: this.execute.contract_address.map(|x| x.as_bytes().into()), calldata: Some(this.execute.calldata.clone()), value: Some(u256_to_h256(this.execute.value).as_bytes().into()), factory_deps: this.execute.factory_deps.clone(), diff --git a/core/lib/dal/src/consensus/proto/mod.proto b/core/lib/dal/src/consensus/proto/mod.proto index da9151f10f4..ab1245f3ef6 100644 --- a/core/lib/dal/src/consensus/proto/mod.proto +++ b/core/lib/dal/src/consensus/proto/mod.proto @@ -4,6 +4,7 @@ package zksync.dal; import "zksync/roles/validator.proto"; import "zksync/roles/attester.proto"; +import "zksync/roles/node.proto"; message Payload { // zksync-era ProtocolVersionId @@ -102,7 +103,7 @@ message ProtocolUpgradeTxCommonData { } message Execute { - optional bytes contract_address = 1; // required; H160 + optional bytes contract_address = 1; // optional; H160 optional bytes calldata = 2; // required optional bytes value = 3; // required; U256 repeated bytes factory_deps = 4; @@ -122,9 +123,15 @@ message AttesterCommittee { repeated roles.attester.WeightedAttester members = 1; // required } +message NodeAddr { + optional roles.node.PublicKey key = 1; // required + optional string addr = 2; // required; Host +} + message GlobalConfig { optional roles.validator.Genesis genesis = 1; // required optional bytes registry_address = 2; // optional; H160 + repeated NodeAddr seed_peers = 3; } message AttestationStatus { diff --git a/core/lib/dal/src/consensus/tests.rs b/core/lib/dal/src/consensus/tests.rs index f21d09290a2..7059f1a74ea 100644 --- a/core/lib/dal/src/consensus/tests.rs +++ b/core/lib/dal/src/consensus/tests.rs @@ -17,7 +17,7 @@ use crate::tests::mock_protocol_upgrade_transaction; fn execute(rng: &mut impl Rng) -> Execute { Execute { - contract_address: rng.gen(), + contract_address: Some(rng.gen()), value: rng.gen::().into(), calldata: (0..10 * 32).map(|_| rng.gen()).collect(), // TODO: find a way to generate valid random bytecode. 
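Note on the `Execute` change running through the hunks above: `contract_address` moves from a required `H160` to an `Option<H160>` (the proto field comment flips from `required` to `optional`), so every constructor in this diff now wraps the target in `Some(..)`, and transactions without a concrete target can pass `None`. A minimal sketch of the new call-site shape, using only the `Execute` fields visible in this diff; the `mock_execute` helper name is illustrative and not part of the PR:

    use zksync_types::{Execute, H160, U256};

    // Hypothetical helper: builds an `Execute` for a plain contract call.
    // `contract_address: Option<H160>` is the new shape introduced by this PR.
    fn mock_execute(target: Option<H160>) -> Execute {
        Execute {
            // `Some(addr)` for ordinary calls; `None` for transactions
            // without a concrete target, which this PR starts to allow.
            contract_address: target,
            calldata: vec![],
            value: U256::zero(),
            factory_deps: vec![],
        }
    }
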
diff --git a/core/lib/dal/src/consensus_dal.rs b/core/lib/dal/src/consensus_dal.rs index 2dca58e2a6a..f2e499ce562 100644 --- a/core/lib/dal/src/consensus_dal.rs +++ b/core/lib/dal/src/consensus_dal.rs @@ -6,7 +6,6 @@ use zksync_db_connection::{ error::{DalError, DalResult, SqlxContext}, instrument::{InstrumentExt, Instrumented}, }; -use zksync_protobuf::ProtoRepr as _; use zksync_types::L2BlockNumber; pub use crate::consensus::{proto, AttestationStatus, GlobalConfig, Payload}; @@ -55,17 +54,18 @@ impl ConsensusDal<'_, '_> { else { return Ok(None); }; + let d = zksync_protobuf::serde::Deserialize { + deny_unknown_fields: true, + }; if let Some(global_config) = row.global_config { - return Ok(Some( - zksync_protobuf::serde::deserialize(&global_config).context("global_config")?, - )); + return Ok(Some(d.proto_fmt(&global_config).context("global_config")?)); } if let Some(genesis) = row.genesis { - let genesis: validator::Genesis = - zksync_protobuf::serde::deserialize(&genesis).context("genesis")?; + let genesis: validator::Genesis = d.proto_fmt(&genesis).context("genesis")?; return Ok(Some(GlobalConfig { genesis, registry_address: None, + seed_peers: [].into(), })); } Ok(None) @@ -79,35 +79,65 @@ impl ConsensusDal<'_, '_> { /// Resets the stored consensus state otherwise and purges all certificates. pub async fn try_update_global_config(&mut self, want: &GlobalConfig) -> anyhow::Result<()> { let mut txn = self.storage.start_transaction().await?; - if let Some(got) = txn.consensus_dal().global_config().await? { - // Exit if the genesis didn't change. - if &got == want { + let got = txn.consensus_dal().global_config().await?; + if let Some(got) = &got { + // Exit if the global config didn't change. + if got == want { + return Ok(()); + } + // If genesis didn't change, just update the config. + if got.genesis == want.genesis { + let s = zksync_protobuf::serde::Serialize; + let global_config = s.proto_fmt(want, serde_json::value::Serializer).unwrap(); + sqlx::query!( + r#" + UPDATE consensus_replica_state + SET + global_config = $1 + "#, + global_config, + ) + .instrument("try_update_global_config#UPDATE consensus_replica_state") + .execute(&mut txn) + .await?; + txn.commit().await?; return Ok(()); } + + // Verify the genesis change. anyhow::ensure!( got.genesis.chain_id == want.genesis.chain_id, "changing chain_id is not allowed: old = {:?}, new = {:?}", got.genesis.chain_id, want.genesis.chain_id, ); + // Note that it may happen that the fork number didn't change, + // in case the binary was updated to support more fields in genesis struct. + // In such a case, the old binary was not able to connect to the consensus network, + // because of the genesis hash mismatch. + // TODO: Perhaps it would be better to deny unknown fields in the genesis instead. + // It would require embedding the genesis either as a json string or protobuf bytes within + // the global config, so that the global config can be parsed with + // `deny_unknown_fields:false` while genesis would be parsed with + // `deny_unknown_fields:true`. 
anyhow::ensure!( - got.genesis.fork_number < want.genesis.fork_number, + got.genesis.fork_number <= want.genesis.fork_number, "transition to a past fork is not allowed: old = {:?}, new = {:?}", got.genesis.fork_number, want.genesis.fork_number, ); want.genesis.verify().context("genesis.verify()")?; } - let genesis = - zksync_protobuf::serde::serialize(&want.genesis, serde_json::value::Serializer) - .unwrap(); - let global_config = - zksync_protobuf::serde::serialize(want, serde_json::value::Serializer).unwrap(); - let state = zksync_protobuf::serde::serialize( - &ReplicaState::default(), - serde_json::value::Serializer, - ) - .unwrap(); + + // Reset the consensus state. + let s = zksync_protobuf::serde::Serialize; + let genesis = s + .proto_fmt(&want.genesis, serde_json::value::Serializer) + .unwrap(); + let global_config = s.proto_fmt(want, serde_json::value::Serializer).unwrap(); + let state = s + .proto_fmt(&ReplicaState::default(), serde_json::value::Serializer) + .unwrap(); sqlx::query!( r#" DELETE FROM l1_batches_consensus @@ -184,6 +214,7 @@ impl ConsensusDal<'_, '_> { } .with_hash(), registry_address: old.registry_address, + seed_peers: old.seed_peers, }; txn.consensus_dal().try_update_global_config(&new).await?; txn.commit().await?; @@ -202,7 +233,13 @@ impl ConsensusDal<'_, '_> { fake_key "# ) - .try_map(|row| zksync_protobuf::serde::deserialize(row.state).decode_column("state")) + .try_map(|row| { + zksync_protobuf::serde::Deserialize { + deny_unknown_fields: true, + } + .proto_fmt(row.state) + .decode_column("state") + }) .instrument("replica_state") .fetch_one(self.storage) .await @@ -210,8 +247,9 @@ impl ConsensusDal<'_, '_> { /// Sets the current BFT replica state. pub async fn set_replica_state(&mut self, state: &ReplicaState) -> DalResult<()> { - let state_json = - zksync_protobuf::serde::serialize(state, serde_json::value::Serializer).unwrap(); + let state_json = zksync_protobuf::serde::Serialize + .proto_fmt(state, serde_json::value::Serializer) + .unwrap(); sqlx::query!( r#" UPDATE consensus_replica_state @@ -301,7 +339,12 @@ impl ConsensusDal<'_, '_> { Ok(BlockStoreState { first: start, last: row - .map(|row| zksync_protobuf::serde::deserialize(row.certificate)) + .map(|row| { + zksync_protobuf::serde::Deserialize { + deny_unknown_fields: true, + } + .proto_fmt(row.certificate) + }) .transpose()?, }) } @@ -329,7 +372,12 @@ impl ConsensusDal<'_, '_> { else { return Ok(None); }; - Ok(Some(zksync_protobuf::serde::deserialize(row.certificate)?)) + Ok(Some( + zksync_protobuf::serde::Deserialize { + deny_unknown_fields: true, + } + .proto_fmt(row.certificate)?, + )) } /// Fetches the attester certificate for the L1 batch with the given `batch_number`. @@ -355,7 +403,12 @@ impl ConsensusDal<'_, '_> { else { return Ok(None); }; - Ok(Some(zksync_protobuf::serde::deserialize(row.certificate)?)) + Ok(Some( + zksync_protobuf::serde::Deserialize { + deny_unknown_fields: true, + } + .proto_fmt(row.certificate)?, + )) } /// Fetches a range of L2 blocks from storage and converts them to `Payload`s. 
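The hunks above also switch this file from the free functions `zksync_protobuf::serde::serialize`/`deserialize` to the `Serialize`/`Deserialize` helper structs, with deserialization made strict via `deny_unknown_fields: true`. A rough sketch of the resulting round-trip pattern, assuming `ReplicaState` comes from `zksync_consensus_storage` as used elsewhere in this file:

    use zksync_consensus_storage::ReplicaState;
    use zksync_protobuf::serde::{Deserialize, Serialize};

    // Serialize a ProtoFmt value to JSON and parse it back strictly,
    // mirroring how this file now stores and reads consensus state.
    fn roundtrip(state: &ReplicaState) -> anyhow::Result<ReplicaState> {
        let json = Serialize.proto_fmt(state, serde_json::value::Serializer)?;
        Ok(Deserialize { deny_unknown_fields: true }.proto_fmt(json)?)
    }
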
@@ -431,7 +484,9 @@ impl ConsensusDal<'_, '_> { ($1, $2) "#, i64::try_from(header.number.0).context("overflow")?, - zksync_protobuf::serde::serialize(cert, serde_json::value::Serializer).unwrap(), + zksync_protobuf::serde::Serialize + .proto_fmt(cert, serde_json::value::Serializer) + .unwrap(), ) .instrument("insert_block_certificate") .report_latency() @@ -446,10 +501,9 @@ impl ConsensusDal<'_, '_> { number: attester::BatchNumber, committee: &attester::Committee, ) -> anyhow::Result<()> { - let committee = proto::AttesterCommittee::build(committee); - let committee = - zksync_protobuf::serde::serialize_proto(&committee, serde_json::value::Serializer) - .unwrap(); + let committee = zksync_protobuf::serde::Serialize + .proto_repr::(committee, serde_json::value::Serializer) + .unwrap(); sqlx::query!( r#" INSERT INTO @@ -496,10 +550,11 @@ impl ConsensusDal<'_, '_> { else { return Ok(None); }; - let raw = zksync_protobuf::serde::deserialize_proto(&row.attesters) - .context("deserialize_proto()")?; Ok(Some( - proto::AttesterCommittee::read(&raw).context("read()")?, + zksync_protobuf::serde::Deserialize { + deny_unknown_fields: true, + } + .proto_repr::(row.attesters)?, )) } @@ -533,7 +588,9 @@ impl ConsensusDal<'_, '_> { "#, i64::try_from(cert.message.number.0).context("overflow")?, // Unwrap is ok, because serialization should always succeed. - zksync_protobuf::serde::serialize(cert, serde_json::value::Serializer).unwrap(), + zksync_protobuf::serde::Serialize + .proto_fmt(cert, serde_json::value::Serializer) + .unwrap(), ) .instrument("insert_batch_certificate") .report_latency() @@ -681,6 +738,7 @@ mod tests { let cfg = GlobalConfig { genesis: genesis.with_hash(), registry_address: Some(rng.gen()), + seed_peers: [].into(), // TODO: rng.gen() for Host }; conn.consensus_dal() .try_update_global_config(&cfg) @@ -715,6 +773,7 @@ mod tests { let cfg = GlobalConfig { genesis: setup.genesis.clone(), registry_address: Some(rng.gen()), + seed_peers: [].into(), }; conn.consensus_dal() .try_update_global_config(&cfg) diff --git a/core/lib/dal/src/contract_verification_dal.rs b/core/lib/dal/src/contract_verification_dal.rs index 194d85323b6..97af7880d16 100644 --- a/core/lib/dal/src/contract_verification_dal.rs +++ b/core/lib/dal/src/contract_verification_dal.rs @@ -375,9 +375,9 @@ impl ContractVerificationDal<'_, '_> { SELECT VERSION FROM - compiler_versions + COMPILER_VERSIONS WHERE - compiler = $1 + COMPILER = $1 ORDER BY VERSION "#, diff --git a/core/lib/dal/src/eth_sender_dal.rs b/core/lib/dal/src/eth_sender_dal.rs index 2266d6fb60f..a66a513175b 100644 --- a/core/lib/dal/src/eth_sender_dal.rs +++ b/core/lib/dal/src/eth_sender_dal.rs @@ -636,7 +636,8 @@ impl EthSenderDal<'_, '_> { FROM eth_txs WHERE - from_addr IS NOT DISTINCT FROM $1 -- can't just use equality as NULL != NULL\ + -- can't just use equality as NULL != NULL + from_addr IS NOT DISTINCT FROM $1 AND is_gateway = $2 ORDER BY id DESC diff --git a/core/lib/dal/src/eth_watcher_dal.rs b/core/lib/dal/src/eth_watcher_dal.rs new file mode 100644 index 00000000000..3220868decf --- /dev/null +++ b/core/lib/dal/src/eth_watcher_dal.rs @@ -0,0 +1,154 @@ +use zksync_db_connection::{connection::Connection, error::DalResult, instrument::InstrumentExt}; +use zksync_types::SLChainId; + +use crate::Core; + +pub struct EthWatcherDal<'a, 'c> { + pub(crate) storage: &'a mut Connection<'c, Core>, +} + +#[derive(Debug, Copy, Clone, sqlx::Type)] +#[sqlx(type_name = "event_type")] +pub enum EventType { + ProtocolUpgrades, + PriorityTransactions, +} + +impl 
EthWatcherDal<'_, '_> { + // Returns last set value of next_block_to_process for given event_type and chain_id. + // If the value was missing, initializes it with provided next_block_to_process value + pub async fn get_or_set_next_block_to_process( + &mut self, + event_type: EventType, + chain_id: SLChainId, + next_block_to_process: u64, + ) -> DalResult { + let result = sqlx::query!( + r#" + SELECT + next_block_to_process + FROM + processed_events + WHERE + TYPE = $1 + AND chain_id = $2 + "#, + event_type as EventType, + chain_id.0 as i64 + ) + .instrument("get_or_set_next_block_to_process") + .with_arg("event_type", &event_type) + .with_arg("chain_id", &chain_id) + .fetch_optional(self.storage) + .await?; + + if let Some(row) = result { + Ok(row.next_block_to_process as u64) + } else { + sqlx::query!( + r#" + INSERT INTO + processed_events ( + TYPE, + chain_id, + next_block_to_process + ) + VALUES + ($1, $2, $3) + "#, + event_type as EventType, + chain_id.0 as i64, + next_block_to_process as i64 + ) + .instrument("get_or_set_next_block_to_process - insert") + .with_arg("event_type", &event_type) + .with_arg("chain_id", &chain_id) + .execute(self.storage) + .await?; + + Ok(next_block_to_process) + } + } + + pub async fn update_next_block_to_process( + &mut self, + event_type: EventType, + chain_id: SLChainId, + next_block_to_process: u64, + ) -> DalResult<()> { + sqlx::query!( + r#" + UPDATE processed_events + SET + next_block_to_process = $3 + WHERE + TYPE = $1 + AND chain_id = $2 + "#, + event_type as EventType, + chain_id.0 as i64, + next_block_to_process as i64 + ) + .instrument("update_next_block_to_process") + .with_arg("event_type", &event_type) + .with_arg("chain_id", &chain_id) + .execute(self.storage) + .await?; + Ok(()) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ConnectionPool, Core, CoreDal}; + + #[tokio::test] + async fn test_get_or_set_next_block_to_process_with_different_event_types() { + let pool = ConnectionPool::::test_pool().await; + let mut conn = pool.connection().await.unwrap(); + let mut dal = conn.processed_events_dal(); + + // Test with ProtocolUpgrades + let next_block = dal + .get_or_set_next_block_to_process(EventType::ProtocolUpgrades, SLChainId(1), 100) + .await + .expect("Failed to get or set next block to process"); + assert_eq!(next_block, 100); + + // Test with PriorityTransactions + let next_block = dal + .get_or_set_next_block_to_process(EventType::PriorityTransactions, SLChainId(1), 200) + .await + .expect("Failed to get or set next block to process"); + assert_eq!(next_block, 200); + + // Test with PriorityTransactions + let next_block = dal + .get_or_set_next_block_to_process(EventType::PriorityTransactions, SLChainId(2), 300) + .await + .expect("Failed to get or set next block to process"); + assert_eq!(next_block, 300); + + // Verify that the initial block is not updated for ProtocolUpgrades + let next_block = dal + .get_or_set_next_block_to_process(EventType::ProtocolUpgrades, SLChainId(1), 150) + .await + .expect("Failed to get or set next block to process"); + assert_eq!(next_block, 100); + + // Verify that the initial block is not updated for PriorityTransactions + let next_block = dal + .get_or_set_next_block_to_process(EventType::PriorityTransactions, SLChainId(1), 250) + .await + .expect("Failed to get or set next block to process"); + assert_eq!(next_block, 200); + + // Verify that the initial block is not updated for PriorityTransactions + let next_block = dal + 
.get_or_set_next_block_to_process(EventType::PriorityTransactions, SLChainId(2), 350) + .await + .expect("Failed to get or set next block to process"); + assert_eq!(next_block, 300); + } +} diff --git a/core/lib/dal/src/lib.rs b/core/lib/dal/src/lib.rs index 0e1badb9af7..f0d2f0c1671 100644 --- a/core/lib/dal/src/lib.rs +++ b/core/lib/dal/src/lib.rs @@ -15,8 +15,9 @@ use crate::{ base_token_dal::BaseTokenDal, blocks_dal::BlocksDal, blocks_web3_dal::BlocksWeb3Dal, consensus_dal::ConsensusDal, contract_verification_dal::ContractVerificationDal, data_availability_dal::DataAvailabilityDal, eth_sender_dal::EthSenderDal, - events_dal::EventsDal, events_web3_dal::EventsWeb3Dal, factory_deps_dal::FactoryDepsDal, - proof_generation_dal::ProofGenerationDal, protocol_versions_dal::ProtocolVersionsDal, + eth_watcher_dal::EthWatcherDal, events_dal::EventsDal, events_web3_dal::EventsWeb3Dal, + factory_deps_dal::FactoryDepsDal, proof_generation_dal::ProofGenerationDal, + protocol_versions_dal::ProtocolVersionsDal, protocol_versions_web3_dal::ProtocolVersionsWeb3Dal, pruning_dal::PruningDal, snapshot_recovery_dal::SnapshotRecoveryDal, snapshots_creator_dal::SnapshotsCreatorDal, snapshots_dal::SnapshotsDal, storage_logs_dal::StorageLogsDal, @@ -35,6 +36,7 @@ pub mod consensus_dal; pub mod contract_verification_dal; mod data_availability_dal; pub mod eth_sender_dal; +pub mod eth_watcher_dal; pub mod events_dal; pub mod events_web3_dal; pub mod factory_deps_dal; @@ -132,6 +134,8 @@ where fn vm_runner_dal(&mut self) -> VmRunnerDal<'_, 'a>; fn base_token_dal(&mut self) -> BaseTokenDal<'_, 'a>; + + fn processed_events_dal(&mut self) -> EthWatcherDal<'_, 'a>; } #[derive(Clone, Debug)] @@ -258,4 +262,8 @@ impl<'a> CoreDal<'a> for Connection<'a, Core> { fn base_token_dal(&mut self) -> BaseTokenDal<'_, 'a> { BaseTokenDal { storage: self } } + + fn processed_events_dal(&mut self) -> EthWatcherDal<'_, 'a> { + EthWatcherDal { storage: self } + } } diff --git a/core/lib/dal/src/models/storage_block.rs b/core/lib/dal/src/models/storage_block.rs index ee1e70390a1..11ff2a83b7b 100644 --- a/core/lib/dal/src/models/storage_block.rs +++ b/core/lib/dal/src/models/storage_block.rs @@ -250,7 +250,7 @@ impl TryFrom for L1BatchMetadata { bootloader_initial_content_commitment: batch .bootloader_initial_content_commitment .map(|v| H256::from_slice(&v)), - da_blob_id: batch.blob_id.map(|s| s.into_bytes()) + da_blob_id: batch.blob_id.map(|s| s.into_bytes()), }) } } diff --git a/core/lib/dal/src/models/storage_transaction.rs b/core/lib/dal/src/models/storage_transaction.rs index 9f67e9025e0..bb219ee1d61 100644 --- a/core/lib/dal/src/models/storage_transaction.rs +++ b/core/lib/dal/src/models/storage_transaction.rs @@ -561,32 +561,38 @@ impl StorageApiTransaction { #[derive(Debug, Clone, sqlx::FromRow)] pub(crate) struct CallTrace { pub call_trace: Vec, + pub tx_hash: Vec, + pub tx_index_in_block: Option, } impl CallTrace { pub(crate) fn into_call(self, protocol_version: ProtocolVersionId) -> Call { - if protocol_version.is_pre_1_5_0() { - if let Ok(legacy_call_trace) = bincode::deserialize::(&self.call_trace) { - legacy_call_trace.into() - } else { - let legacy_mixed_call_trace = - bincode::deserialize::(&self.call_trace) - .expect("Failed to deserialize call trace"); - legacy_mixed_call_trace.into() - } - } else { - bincode::deserialize(&self.call_trace).unwrap() - } + parse_call_trace(&self.call_trace, protocol_version) } +} - pub(crate) fn from_call(call: Call, protocol_version: ProtocolVersionId) -> Self { - let call_trace = if 
protocol_version.is_pre_1_5_0() { - bincode::serialize(&LegacyCall::try_from(call).unwrap()) +pub(crate) fn parse_call_trace(call_trace: &[u8], protocol_version: ProtocolVersionId) -> Call { + if protocol_version.is_pre_1_5_0() { + if let Ok(legacy_call_trace) = bincode::deserialize::(call_trace) { + legacy_call_trace.into() } else { - bincode::serialize(&call) + let legacy_mixed_call_trace = bincode::deserialize::(call_trace) + .expect("Failed to deserialize call trace"); + legacy_mixed_call_trace.into() } - .unwrap(); + } else { + bincode::deserialize(call_trace).unwrap() + } +} - Self { call_trace } +pub(crate) fn serialize_call_into_bytes( + call: Call, + protocol_version: ProtocolVersionId, +) -> Vec { + if protocol_version.is_pre_1_5_0() { + bincode::serialize(&LegacyCall::try_from(call).unwrap()) + } else { + bincode::serialize(&call) } + .unwrap() } diff --git a/core/lib/dal/src/models/tests.rs b/core/lib/dal/src/models/tests.rs index 34cfde108f1..b4949dc101d 100644 --- a/core/lib/dal/src/models/tests.rs +++ b/core/lib/dal/src/models/tests.rs @@ -13,7 +13,7 @@ use crate::{models::storage_transaction::StorageTransaction, BigDecimal}; fn default_execute() -> Execute { Execute { - contract_address: H160::random(), + contract_address: Some(H160::random()), value: U256::from(10i32), calldata: hex::decode( "a9059cbb00000000000000000000000058d595f318167d5af45d9e44ade4348dd4e\ diff --git a/core/lib/dal/src/snapshots_creator_dal.rs b/core/lib/dal/src/snapshots_creator_dal.rs index b076240173b..700e3812018 100644 --- a/core/lib/dal/src/snapshots_creator_dal.rs +++ b/core/lib/dal/src/snapshots_creator_dal.rs @@ -21,11 +21,11 @@ impl SnapshotsCreatorDal<'_, '_> { SELECT INDEX FROM - initial_writes + INITIAL_WRITES WHERE - l1_batch_number <= $1 + L1_BATCH_NUMBER <= $1 ORDER BY - l1_batch_number DESC, + L1_BATCH_NUMBER DESC, INDEX DESC LIMIT 1; diff --git a/core/lib/dal/src/snapshots_dal.rs b/core/lib/dal/src/snapshots_dal.rs index 009efd3da07..950fb6501a4 100644 --- a/core/lib/dal/src/snapshots_dal.rs +++ b/core/lib/dal/src/snapshots_dal.rs @@ -141,13 +141,13 @@ impl SnapshotsDal<'_, '_> { r#" SELECT VERSION, - l1_batch_number, - factory_deps_filepath, - storage_logs_filepaths + L1_BATCH_NUMBER, + FACTORY_DEPS_FILEPATH, + STORAGE_LOGS_FILEPATHS FROM - snapshots + SNAPSHOTS ORDER BY - l1_batch_number DESC + L1_BATCH_NUMBER DESC LIMIT 1 "# @@ -168,13 +168,13 @@ impl SnapshotsDal<'_, '_> { r#" SELECT VERSION, - l1_batch_number, - factory_deps_filepath, - storage_logs_filepaths + L1_BATCH_NUMBER, + FACTORY_DEPS_FILEPATH, + STORAGE_LOGS_FILEPATHS FROM - snapshots + SNAPSHOTS WHERE - l1_batch_number = $1 + L1_BATCH_NUMBER = $1 "#, l1_batch_number.0 as i32 ) diff --git a/core/lib/dal/src/storage_logs_dal.rs b/core/lib/dal/src/storage_logs_dal.rs index d5de66037b4..50f41f31333 100644 --- a/core/lib/dal/src/storage_logs_dal.rs +++ b/core/lib/dal/src/storage_logs_dal.rs @@ -688,9 +688,9 @@ impl StorageLogsDal<'_, '_> { SELECT COUNT(*) AS COUNT FROM - storage_logs + STORAGE_LOGS WHERE - miniblock_number <= $1 + MINIBLOCK_NUMBER <= $1 "#, i64::from(at_l2_block.0) ) diff --git a/core/lib/dal/src/storage_logs_dedup_dal.rs b/core/lib/dal/src/storage_logs_dedup_dal.rs index 02049f3e9ad..f02ac6c4cf4 100644 --- a/core/lib/dal/src/storage_logs_dedup_dal.rs +++ b/core/lib/dal/src/storage_logs_dedup_dal.rs @@ -154,7 +154,7 @@ impl StorageLogsDedupDal<'_, '_> { SELECT MAX(INDEX) AS "max?" 
FROM - initial_writes + INITIAL_WRITES "#, ) .instrument("max_enumeration_index") @@ -174,15 +174,15 @@ impl StorageLogsDedupDal<'_, '_> { SELECT MAX(INDEX) AS "max?" FROM - initial_writes + INITIAL_WRITES WHERE - l1_batch_number = ( + L1_BATCH_NUMBER = ( SELECT - MAX(l1_batch_number) AS "max?" + MAX(L1_BATCH_NUMBER) AS "max?" FROM - initial_writes + INITIAL_WRITES WHERE - l1_batch_number <= $1 + L1_BATCH_NUMBER <= $1 ) "#, i64::from(l1_batch_number.0) @@ -230,9 +230,9 @@ impl StorageLogsDedupDal<'_, '_> { SELECT INDEX FROM - initial_writes + INITIAL_WRITES WHERE - hashed_key = $1 + HASHED_KEY = $1 "#, hashed_key.as_bytes() ) @@ -253,10 +253,10 @@ impl StorageLogsDedupDal<'_, '_> { SELECT INDEX FROM - initial_writes + INITIAL_WRITES WHERE - hashed_key = $1 - AND l1_batch_number <= $2 + HASHED_KEY = $1 + AND L1_BATCH_NUMBER <= $2 "#, hashed_key.as_bytes(), l1_batch_number.0 as i32, diff --git a/core/lib/dal/src/system_dal.rs b/core/lib/dal/src/system_dal.rs index f6dda53b158..9ef375a8d56 100644 --- a/core/lib/dal/src/system_dal.rs +++ b/core/lib/dal/src/system_dal.rs @@ -28,7 +28,7 @@ impl SystemDal<'_, '_> { SELECT PG_LAST_WAL_RECEIVE_LSN() = PG_LAST_WAL_REPLAY_LSN() AS synced, EXTRACT( - SECONDS + seconds FROM NOW() - PG_LAST_XACT_REPLAY_TIMESTAMP() )::INT AS LAG diff --git a/core/lib/dal/src/tests/mod.rs b/core/lib/dal/src/tests/mod.rs index c17e8c5d1fe..dc672fa1f80 100644 --- a/core/lib/dal/src/tests/mod.rs +++ b/core/lib/dal/src/tests/mod.rs @@ -74,7 +74,7 @@ pub(crate) fn mock_l2_transaction() -> L2Tx { gas_per_pubdata_limit: U256::from(DEFAULT_GAS_PER_PUBDATA), }; let mut l2_tx = L2Tx::new_signed( - Address::random(), + Some(Address::random()), vec![], zksync_types::Nonce(0), fee, @@ -110,7 +110,7 @@ pub(crate) fn mock_l1_execute() -> L1Tx { }; let execute = Execute { - contract_address: H160::random(), + contract_address: Some(H160::random()), value: Default::default(), calldata: vec![], factory_deps: vec![], @@ -138,7 +138,7 @@ pub(crate) fn mock_protocol_upgrade_transaction() -> ProtocolUpgradeTx { }; let execute = Execute { - contract_address: H160::random(), + contract_address: Some(H160::random()), value: Default::default(), calldata: vec![], factory_deps: vec![], diff --git a/core/lib/dal/src/transactions_dal.rs b/core/lib/dal/src/transactions_dal.rs index 49791f776e0..9f63427491a 100644 --- a/core/lib/dal/src/transactions_dal.rs +++ b/core/lib/dal/src/transactions_dal.rs @@ -20,7 +20,9 @@ use zksync_vm_interface::{ }; use crate::{ - models::storage_transaction::{CallTrace, StorageTransaction}, + models::storage_transaction::{ + parse_call_trace, serialize_call_into_bytes, StorageTransaction, + }, Core, CoreDal, }; @@ -58,7 +60,8 @@ impl TransactionsDal<'_, '_> { tx: &L1Tx, l1_block_number: L1BlockNumber, ) -> DalResult<()> { - let contract_address = tx.execute.contract_address.as_bytes(); + let contract_address = tx.execute.contract_address; + let contract_address_as_bytes = contract_address.map(|addr| addr.as_bytes().to_vec()); let tx_hash = tx.hash(); let tx_hash_bytes = tx_hash.as_bytes(); let json_data = serde_json::to_value(&tx.execute) @@ -143,7 +146,7 @@ impl TransactionsDal<'_, '_> { serial_id, full_fee, layer_2_tip_fee, - contract_address, + contract_address_as_bytes, l1_block_number.0 as i32, value, empty_address.as_bytes(), @@ -161,7 +164,8 @@ impl TransactionsDal<'_, '_> { } pub async fn insert_system_transaction(&mut self, tx: &ProtocolUpgradeTx) -> DalResult<()> { - let contract_address = tx.execute.contract_address.as_bytes().to_vec(); + let contract_address = 
tx.execute.contract_address;
+        let contract_address_as_bytes = contract_address.map(|addr| addr.as_bytes().to_vec());
         let tx_hash = tx.common_data.hash().0.to_vec();
         let json_data = serde_json::to_value(&tx.execute)
             .unwrap_or_else(|_| panic!("cannot serialize tx {:?} to json", tx.common_data.hash()));
@@ -238,7 +242,7 @@ impl TransactionsDal<'_, '_> {
             gas_per_pubdata_limit,
             json_data,
             upgrade_id,
-            contract_address,
+            contract_address_as_bytes,
             l1_block_number,
             value,
             &Address::default().0.to_vec(),
@@ -284,7 +288,8 @@ impl TransactionsDal<'_, '_> {
         }

         let initiator_address = tx.initiator_account();
-        let contract_address = tx.execute.contract_address.as_bytes();
+        let contract_address = tx.execute.contract_address;
+        let contract_address_as_bytes = contract_address.map(|addr| addr.as_bytes().to_vec());
         let json_data = serde_json::to_value(&tx.execute)
             .unwrap_or_else(|_| panic!("cannot serialize tx {:?} to json", tx.hash()));
         let gas_limit = u256_to_big_decimal(tx.common_data.fee.gas_limit);
@@ -413,7 +418,7 @@ impl TransactionsDal<'_, '_> {
             input_data,
             &json_data,
             tx_format,
-            contract_address,
+            contract_address_as_bytes,
             value,
             &paymaster,
             &paymaster_input,
@@ -485,7 +490,7 @@ impl TransactionsDal<'_, '_> {
                 (
                     SELECT
                         UNNEST($1::INT[]) AS l1_batch_tx_index,
-                        UNNEST($2::bytea[]) AS hash
+                        UNNEST($2::BYTEA[]) AS hash
                 ) AS data_table
             WHERE
                 transactions.hash = data_table.hash
@@ -518,8 +523,7 @@ impl TransactionsDal<'_, '_> {
         let mut bytea_call_traces = Vec::with_capacity(transactions.len());
         for tx_res in transactions {
             if let Some(call_trace) = tx_res.call_trace() {
-                bytea_call_traces
-                    .push(CallTrace::from_call(call_trace, protocol_version).call_trace);
+                bytea_call_traces.push(serialize_call_into_bytes(call_trace, protocol_version));
                 call_traces_tx_hashes.push(tx_res.hash.as_bytes());
             }
         }
@@ -697,8 +701,10 @@ impl TransactionsDal<'_, '_> {
                     .arg_error(&format!("transactions[{index_in_block}].refunded_gas"), err)
             })?;

+            let contract_address = transaction.execute.contract_address;
+            let contract_address_as_bytes = contract_address.map(|addr| addr.as_bytes().to_vec());
             l2_values.push(u256_to_big_decimal(transaction.execute.value));
-            l2_contract_addresses.push(transaction.execute.contract_address.as_bytes());
+            l2_contract_addresses.push(contract_address_as_bytes);
             l2_paymaster_input.push(&common_data.paymaster_params.paymaster_input[..]);
             l2_paymaster.push(common_data.paymaster_params.paymaster.as_bytes());
             l2_hashes.push(tx_res.hash.as_bytes());
@@ -818,7 +824,7 @@ impl TransactionsDal<'_, '_> {
            &l2_inputs as &[&[u8]],
            &l2_datas,
            &l2_tx_formats,
-           &l2_contract_addresses as &[&[u8]],
+           &l2_contract_addresses as &[Option<Vec<u8>>],
            &l2_values,
            &l2_paymaster as &[&[u8]],
            &l2_paymaster_input as &[&[u8]],
@@ -901,8 +907,10 @@ impl TransactionsDal<'_, '_> {
                     .arg_error(&format!("transactions[{index_in_block}].refunded_gas"), err)
             })?;

+            let contract_address = transaction.execute.contract_address;
+            let contract_address_as_bytes = contract_address.map(|addr| addr.as_bytes().to_vec());
             l2_values.push(u256_to_big_decimal(transaction.execute.value));
-            l2_contract_addresses.push(transaction.execute.contract_address.as_bytes());
+            l2_contract_addresses.push(contract_address_as_bytes);
             l2_paymaster_input.push(&common_data.paymaster_params.paymaster_input[..]);
             l2_paymaster.push(common_data.paymaster_params.paymaster.as_bytes());
             l2_hashes.push(tx_res.hash.as_bytes());
@@ -1013,7 +1021,7 @@ impl TransactionsDal<'_, '_> {
            &l2_datas,
            &l2_refunded_gas,
            &l2_values,
-           &l2_contract_addresses as &[&[u8]],
+           &l2_contract_addresses as &[Option<Vec<u8>>],
            &l2_paymaster as &[&[u8]],
            &l2_paymaster_input as &[&[u8]],
            l2_block_number.0 as i32,
@@ -1083,6 +1091,8 @@ impl TransactionsDal<'_, '_> {
                     .arg_error(&format!("transactions[{index_in_block}].refunded_gas"), err)
             })?;

+            let contract_address = transaction.execute.contract_address;
+            let contract_address_as_bytes = contract_address.map(|addr| addr.as_bytes().to_vec());
             let tx = &tx_res.transaction;
             l1_hashes.push(tx_res.hash.as_bytes());
             l1_initiator_address.push(common_data.sender.as_bytes());
@@ -1096,7 +1106,7 @@ impl TransactionsDal<'_, '_> {
             l1_priority_op_id.push(common_data.serial_id.0 as i64);
             l1_full_fee.push(u256_to_big_decimal(common_data.full_fee));
             l1_layer_2_tip_fee.push(u256_to_big_decimal(common_data.layer_2_tip_fee));
-            l1_contract_address.push(tx.execute.contract_address.as_bytes());
+            l1_contract_address.push(contract_address_as_bytes);
             l1_l1_block_number.push(common_data.eth_block as i32);
             l1_value.push(u256_to_big_decimal(tx.execute.value));
             l1_tx_format.push(common_data.tx_format() as i32);
@@ -1172,24 +1182,24 @@ impl TransactionsDal<'_, '_> {
             FROM
                 (
                     SELECT
-                        UNNEST($1::BYTEA[]) AS hash,
-                        UNNEST($2::BYTEA[]) AS initiator_address,
+                        UNNEST($1::bytea[]) AS hash,
+                        UNNEST($2::bytea[]) AS initiator_address,
                         UNNEST($3::NUMERIC[]) AS gas_limit,
                         UNNEST($4::NUMERIC[]) AS max_fee_per_gas,
                         UNNEST($5::NUMERIC[]) AS gas_per_pubdata_limit,
-                        UNNEST($6::JSONB[]) AS data,
+                        UNNEST($6::jsonb[]) AS data,
                         UNNEST($7::BIGINT[]) AS priority_op_id,
                         UNNEST($8::NUMERIC[]) AS full_fee,
                         UNNEST($9::NUMERIC[]) AS layer_2_tip_fee,
-                        UNNEST($10::BYTEA[]) AS contract_address,
+                        UNNEST($10::bytea[]) AS contract_address,
                         UNNEST($11::INT[]) AS l1_block_number,
                         UNNEST($12::NUMERIC[]) AS value,
                         UNNEST($13::INTEGER[]) AS tx_format,
                         UNNEST($14::NUMERIC[]) AS l1_tx_mint,
-                        UNNEST($15::BYTEA[]) AS l1_tx_refund_recipient,
+                        UNNEST($15::bytea[]) AS l1_tx_refund_recipient,
                         UNNEST($16::INT[]) AS index_in_block,
                         UNNEST($17::VARCHAR[]) AS error,
-                        UNNEST($18::JSONB[]) AS execution_info,
+                        UNNEST($18::jsonb[]) AS execution_info,
                         UNNEST($19::BIGINT[]) AS refunded_gas,
                         UNNEST($20::NUMERIC[]) AS effective_gas_price
                 ) AS data_table
@@ -1203,7 +1213,7 @@ impl TransactionsDal<'_, '_> {
            &l1_priority_op_id,
            &l1_full_fee,
            &l1_layer_2_tip_fee,
-           &l1_contract_address as &[&[u8]],
+           &l1_contract_address as &[Option<Vec<u8>>],
            &l1_l1_block_number,
            &l1_value,
            &l1_tx_format,
@@ -1373,6 +1383,8 @@ impl TransactionsDal<'_, '_> {
                     .arg_error(&format!("transactions[{index_in_block}].refunded_gas"), err)
             })?;

+            let contract_address = transaction.execute.contract_address;
+            let contract_address_as_bytes = contract_address.map(|addr| addr.as_bytes().to_vec());
             let tx = &tx_res.transaction;
             upgrade_hashes.push(tx_res.hash.as_bytes());
             upgrade_initiator_address.push(common_data.sender.as_bytes());
@@ -1385,7 +1397,7 @@ impl TransactionsDal<'_, '_> {
                     .unwrap_or_else(|_| panic!("cannot serialize tx {:?} to json", tx.hash())),
             );
             upgrade_upgrade_id.push(common_data.upgrade_id as i32);
-            upgrade_contract_address.push(tx.execute.contract_address.as_bytes());
+            upgrade_contract_address.push(contract_address_as_bytes);
             upgrade_l1_block_number.push(common_data.eth_block as i32);
             upgrade_value.push(u256_to_big_decimal(tx.execute.value));
             upgrade_tx_format.push(common_data.tx_format() as i32);
@@ -1457,22 +1469,22 @@ impl TransactionsDal<'_, '_> {
             FROM
                 (
                     SELECT
-                        UNNEST($1::BYTEA[]) AS hash,
-                        UNNEST($2::BYTEA[]) AS initiator_address,
+                        UNNEST($1::bytea[]) AS hash,
+                        UNNEST($2::bytea[]) AS initiator_address,
                         UNNEST($3::NUMERIC[]) AS gas_limit,
                         UNNEST($4::NUMERIC[]) AS max_fee_per_gas,
                         UNNEST($5::NUMERIC[]) AS gas_per_pubdata_limit,
-                        UNNEST($6::JSONB[]) AS data,
+                        UNNEST($6::jsonb[]) AS data,
                         UNNEST($7::INT[]) AS upgrade_id,
-                        UNNEST($8::BYTEA[]) AS contract_address,
+                        UNNEST($8::bytea[]) AS contract_address,
                         UNNEST($9::INT[]) AS l1_block_number,
                         UNNEST($10::NUMERIC[]) AS value,
                         UNNEST($11::INTEGER[]) AS tx_format,
                         UNNEST($12::NUMERIC[]) AS l1_tx_mint,
-                        UNNEST($13::BYTEA[]) AS l1_tx_refund_recipient,
+                        UNNEST($13::bytea[]) AS l1_tx_refund_recipient,
                         UNNEST($14::INT[]) AS index_in_block,
                         UNNEST($15::VARCHAR[]) AS error,
-                        UNNEST($16::JSONB[]) AS execution_info,
+                        UNNEST($16::jsonb[]) AS execution_info,
                         UNNEST($17::BIGINT[]) AS refunded_gas,
                         UNNEST($18::NUMERIC[]) AS effective_gas_price
                 ) AS data_table
@@ -1484,7 +1496,7 @@ impl TransactionsDal<'_, '_> {
            &upgrade_gas_per_pubdata_limit,
            &upgrade_data,
            &upgrade_upgrade_id,
-           &upgrade_contract_address as &[&[u8]],
+           &upgrade_contract_address as &[Option<Vec<u8>>],
            &upgrade_l1_block_number,
            &upgrade_value,
            &upgrade_tx_format,
@@ -2101,11 +2113,12 @@ impl TransactionsDal<'_, '_> {
         Ok(data)
     }

-    pub async fn get_call_trace(&mut self, tx_hash: H256) -> DalResult<Option<Call>> {
+    pub async fn get_call_trace(&mut self, tx_hash: H256) -> DalResult<Option<(Call, usize)>> {
         let row = sqlx::query!(
             r#"
             SELECT
-                protocol_version
+                protocol_version,
+                index_in_block
             FROM
                 transactions
                 INNER JOIN miniblocks ON transactions.miniblock_number = miniblocks.number
@@ -2128,8 +2141,7 @@ impl TransactionsDal<'_, '_> {
             .map(|v| (v as u16).try_into().unwrap())
             .unwrap_or_else(ProtocolVersionId::last_potentially_undefined);

-        Ok(sqlx::query_as!(
-            CallTrace,
+        Ok(sqlx::query!(
             r#"
             SELECT
                 call_trace
             FROM
                 call_traces
             WHERE
                 tx_hash = $1
             "#,
             tx_hash.as_bytes()
         )
         .instrument("get_call_trace")
         .with_arg("tx_hash", &tx_hash)
         .fetch_optional(self.storage)
         .await?
-        .map(|call_trace| call_trace.into_call(protocol_version)))
+        .map(|call_trace| {
+            (
+                parse_call_trace(&call_trace.call_trace, protocol_version),
+                row.index_in_block.unwrap_or_default() as usize,
+            )
+        }))
     }

     pub(crate) async fn get_tx_by_hash(&mut self, hash: H256) -> DalResult<Option<Transaction>> {
@@ -2216,7 +2233,7 @@ mod tests {
             .await
             .unwrap();

-        let call_trace = conn
+        let (call_trace, _) = conn
             .transactions_dal()
             .get_call_trace(tx_hash)
             .await
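The mapping repeated throughout the hunks above is the core of this change: `Execute::contract_address` is now an `Option<Address>`, so each DAL method first converts it into an `Option<Vec<u8>>`, which sqlx binds to the nullable BYTEA `contract_address` columns (`None` becomes SQL `NULL`). A minimal sketch of that conversion; the free-function form is illustrative, not part of the diff:

use zksync_types::Address;

// `None` binds as SQL NULL; `Some(addr)` binds as a 20-byte BYTEA value.
fn contract_address_as_bytes(contract_address: Option<Address>) -> Option<Vec<u8>> {
    contract_address.map(|addr| addr.as_bytes().to_vec())
}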
diff --git a/core/lib/dal/src/transactions_web3_dal.rs b/core/lib/dal/src/transactions_web3_dal.rs
index f5a3c492f8a..3ec1f7e6d0c 100644
--- a/core/lib/dal/src/transactions_web3_dal.rs
+++ b/core/lib/dal/src/transactions_web3_dal.rs
@@ -64,17 +64,17 @@ impl TransactionsWeb3Dal<'_, '_> {
             )
             SELECT
                 transactions.hash AS tx_hash,
-                transactions.index_in_block AS index_in_block,
-                transactions.l1_batch_tx_index AS l1_batch_tx_index,
+                transactions.index_in_block,
+                transactions.l1_batch_tx_index,
                 transactions.miniblock_number AS "block_number!",
-                transactions.error AS error,
-                transactions.effective_gas_price AS effective_gas_price,
-                transactions.initiator_address AS initiator_address,
+                transactions.error,
+                transactions.effective_gas_price,
+                transactions.initiator_address,
                 transactions.data -> 'to' AS "transfer_to?",
                 transactions.data -> 'contractAddress' AS "execute_contract_address?",
                 transactions.tx_format AS "tx_format?",
-                transactions.refunded_gas AS refunded_gas,
-                transactions.gas_limit AS gas_limit,
+                transactions.refunded_gas,
+                transactions.gas_limit,
                 miniblocks.hash AS "block_hash",
                 miniblocks.l1_batch_number AS "l1_batch_number?",
                 events.topic4 AS "contract_address?",
diff --git a/core/lib/env_config/src/api.rs b/core/lib/env_config/src/api.rs
index c6485d54d6b..53efea9a784 100644
--- a/core/lib/env_config/src/api.rs
+++ b/core/lib/env_config/src/api.rs
@@ -69,6 +69,7 @@ mod tests {
             estimate_gas_scale_factor: 1.0f64,
             gas_price_scale_factor: 1.2,
             estimate_gas_acceptable_overestimation: 1000,
+            estimate_gas_optimize_search: false,
             max_tx_size: 1000000,
             vm_execution_cache_misses_limit: None,
             vm_concurrency_limit: Some(512),
diff --git a/core/lib/env_config/src/da_client.rs b/core/lib/env_config/src/da_client.rs
index 32c47dfc62b..ac88da6dc03 100644
--- a/core/lib/env_config/src/da_client.rs
+++ b/core/lib/env_config/src/da_client.rs
@@ -1,6 +1,11 @@
-use zksync_config::configs::da_client::{
-    DAClient, DAClientConfig, AVAIL_CLIENT_CONFIG_NAME, EIGENDA_CLIENT_CONFIG_NAME,
-    OBJECT_STORE_CLIENT_CONFIG_NAME,
+use std::env;
+
+use zksync_config::configs::{
+    da_client::{
+        avail::AvailSecrets, DAClientConfig, AVAIL_CLIENT_CONFIG_NAME, EIGENDA_CLIENT_CONFIG_NAME,
+        OBJECT_STORE_CLIENT_CONFIG_NAME,
+    },
+    secrets::DataAvailabilitySecrets,
 };

 use crate::{envy_load, FromEnv};
@@ -8,18 +13,36 @@ use crate::{envy_load, FromEnv};
 impl FromEnv for DAClientConfig {
     fn from_env() -> anyhow::Result<Self> {
         let client_tag = std::env::var("DA_CLIENT")?;
-        let client = match client_tag.as_str() {
-            AVAIL_CLIENT_CONFIG_NAME => DAClient::Avail(envy_load("da_avail_config", "DA_")?),
+        let config = match client_tag.as_str() {
+            AVAIL_CLIENT_CONFIG_NAME => Self::Avail(envy_load("da_avail_config", "DA_")?),
             OBJECT_STORE_CLIENT_CONFIG_NAME => {
-                DAClient::ObjectStore(envy_load("da_object_store", "DA_")?)
+                Self::ObjectStore(envy_load("da_object_store", "DA_")?)
             }
             EIGENDA_CLIENT_CONFIG_NAME => {
-                DAClient::EigenDA(envy_load("eigen_da_client", "EIGEN_DA_CLIENT_")?)
+                DAClientConfig::EigenDA(envy_load("eigen_da_client", "EIGEN_DA_CLIENT_")?)
+            }
+            _ => anyhow::bail!("Unknown DA client name: {}", client_tag),
+        };
+
+        Ok(config)
+    }
+}
+
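The secrets loader that follows dispatches on the same `DA_CLIENT` tag and reads the optional seed phrase with the `Option<Result<..>>`-to-`Result<Option<..>>` `transpose` idiom. A standalone sketch of that idiom; the helper name and generic bound are illustrative:

use std::{env, str::FromStr};

// Missing variable => Ok(None); present but unparsable => Err; otherwise Ok(Some(value)).
fn optional_env<T: FromStr>(var: &str) -> Result<Option<T>, T::Err> {
    env::var(var).ok().map(|raw| raw.parse()).transpose()
}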
+impl FromEnv for DataAvailabilitySecrets {
+    fn from_env() -> anyhow::Result<Self> {
+        let client_tag = std::env::var("DA_CLIENT")?;
+        let secrets = match client_tag.as_str() {
+            AVAIL_CLIENT_CONFIG_NAME => {
+                let seed_phrase = env::var("DA_SECRETS_SEED_PHRASE")
+                    .ok()
+                    .map(|s| s.parse())
+                    .transpose()?;
+                Self::Avail(AvailSecrets { seed_phrase })
+            }
             _ => anyhow::bail!("Unknown DA client name: {}", client_tag),
         };

-        Ok(Self { client })
+        Ok(secrets)
     }
 }

@@ -27,10 +50,10 @@ impl FromEnv for DAClientConfig {
 mod tests {
     use zksync_config::{
         configs::{
-            da_client::{DAClient, DAClient::ObjectStore},
+            da_client::{DAClientConfig, DAClientConfig::ObjectStore},
             object_store::ObjectStoreMode::GCS,
         },
-        AvailConfig, DAClientConfig, ObjectStoreConfig,
+        AvailConfig, ObjectStoreConfig,
     };

     use super::*;
@@ -39,15 +62,13 @@ mod tests {
     static MUTEX: EnvMutex = EnvMutex::new();

     fn expected_object_store_da_client_config(url: String, max_retries: u16) -> DAClientConfig {
-        DAClientConfig {
-            client: ObjectStore(ObjectStoreConfig {
-                mode: GCS {
-                    bucket_base_url: url,
-                },
-                max_retries,
-                local_mirror_path: None,
-            }),
-        }
+        ObjectStore(ObjectStoreConfig {
+            mode: GCS {
+                bucket_base_url: url,
+            },
+            max_retries,
+            local_mirror_path: None,
+        })
     }

     #[test]
@@ -71,21 +92,17 @@ mod tests {
     fn expected_avail_da_layer_config(
         api_node_url: &str,
         bridge_api_url: &str,
-        seed: &str,
         app_id: u32,
         timeout: usize,
         max_retries: usize,
     ) -> DAClientConfig {
-        DAClientConfig {
-            client: DAClient::Avail(AvailConfig {
-                api_node_url: api_node_url.to_string(),
-                bridge_api_url: bridge_api_url.to_string(),
-                seed: seed.to_string(),
-                app_id,
-                timeout,
-                max_retries,
-            }),
-        }
+        DAClientConfig::Avail(AvailConfig {
+            api_node_url: api_node_url.to_string(),
+            bridge_api_url: bridge_api_url.to_string(),
+            app_id,
+            timeout,
+            max_retries,
+        })
     }

     #[test]
@@ -95,7 +112,6 @@ mod tests {
             DA_CLIENT="Avail"
             DA_API_NODE_URL="localhost:12345"
             DA_BRIDGE_API_URL="localhost:54321"
-            DA_SEED="bottom drive obey lake curtain smoke basket hold race lonely fit walk"
             DA_APP_ID="1"
             DA_TIMEOUT="2"
             DA_MAX_RETRIES="3"
@@ -109,11 +125,32 @@ mod tests {
             expected_avail_da_layer_config(
                 "localhost:12345",
                 "localhost:54321",
-                "bottom drive obey lake curtain smoke basket hold race lonely fit walk",
                 "1".parse::<u32>().unwrap(),
                 "2".parse::<usize>().unwrap(),
                 "3".parse::<usize>().unwrap(),
             )
         );
     }
+
+    #[test]
+    fn from_env_avail_secrets() {
+        let mut lock = MUTEX.lock();
+        let config = r#"
+            DA_CLIENT="Avail"
+            DA_SECRETS_SEED_PHRASE="bottom drive obey lake curtain smoke basket hold race lonely fit walk"
+        "#;
+
+        lock.set_env(config);
+
+        let actual = match DataAvailabilitySecrets::from_env().unwrap() {
+            DataAvailabilitySecrets::Avail(avail) => avail.seed_phrase,
+        };
+
+        assert_eq!(
+            actual.unwrap(),
+            "bottom drive obey lake curtain smoke basket hold race lonely fit walk"
+                .parse()
+                .unwrap()
+        );
+    }
 }
diff --git a/core/lib/env_config/src/eth_sender.rs b/core/lib/env_config/src/eth_sender.rs
index 64e0a89d5a4..e5132eb7d91 100644
--- a/core/lib/env_config/src/eth_sender.rs
+++ b/core/lib/env_config/src/eth_sender.rs
@@ -72,6 +72,7 @@ mod tests {
                 pubdata_sending_mode: PubdataSendingMode::Calldata,
                 tx_aggregation_only_prove_and_execute: false,
                 tx_aggregation_paused: false,
+                time_in_mempool_in_l1_blocks_cap: 2000,
             }),
             gas_adjuster: Some(GasAdjusterConfig {
                 default_priority_fee_per_gas: 20000000000,
@@ -131,6 +132,7 @@ mod tests {
ETH_SENDER_SENDER_TIMESTAMP_CRITERIA_MAX_ALLOWED_LAG="30" ETH_SENDER_SENDER_MAX_AGGREGATED_TX_GAS="4000000" ETH_SENDER_SENDER_MAX_ETH_TX_DATA_SIZE="120000" + ETH_SENDER_SENDER_TIME_IN_MEMPOOL_IN_L1_BLOCKS_CAP="2000" ETH_SENDER_SENDER_L1_BATCH_MIN_AGE_BEFORE_EXECUTE_SECONDS="1000" ETH_SENDER_SENDER_MAX_ACCEPTABLE_PRIORITY_FEE_IN_GWEI="100000000000" ETH_SENDER_SENDER_PUBDATA_SENDING_MODE="Calldata" diff --git a/core/lib/env_config/src/prover_job_monitor.rs b/core/lib/env_config/src/prover_job_monitor.rs index 3a8f80473eb..884ebecacbb 100644 --- a/core/lib/env_config/src/prover_job_monitor.rs +++ b/core/lib/env_config/src/prover_job_monitor.rs @@ -31,6 +31,7 @@ mod tests { prover_queue_reporter_run_interval_ms: 10000, witness_generator_queue_reporter_run_interval_ms: 10000, witness_job_queuer_run_interval_ms: 10000, + http_port: 3074, } } @@ -55,6 +56,7 @@ mod tests { fn from_env_with_default() { let config = r#" PROVER_JOB_MONITOR_PROMETHEUS_PORT=3317 + PROVER_JOB_MONITOR_HTTP_PORT=3074 PROVER_JOB_MONITOR_MAX_DB_CONNECTIONS=9 "#; let mut lock = MUTEX.lock(); @@ -80,6 +82,7 @@ mod tests { PROVER_JOB_MONITOR_PROVER_QUEUE_REPORTER_RUN_INTERVAL_MS=10001 PROVER_JOB_MONITOR_WITNESS_GENERATOR_QUEUE_REPORTER_RUN_INTERVAL_MS=10001 PROVER_JOB_MONITOR_WITNESS_JOB_QUEUER_RUN_INTERVAL_MS=10001 + PROVER_JOB_MONITOR_HTTP_PORT=3074 "#; let mut lock = MUTEX.lock(); lock.set_env(config); diff --git a/core/lib/eth_client/src/types.rs b/core/lib/eth_client/src/types.rs index 59fb1cdeddc..dd332351afb 100644 --- a/core/lib/eth_client/src/types.rs +++ b/core/lib/eth_client/src/types.rs @@ -320,7 +320,7 @@ pub struct FailureInfo { #[cfg(test)] mod tests { - use zksync_eth_signer::{EthereumSigner, PrivateKeySigner, TransactionParameters}; + use zksync_eth_signer::{PrivateKeySigner, TransactionParameters}; use zksync_types::{ eth_sender::{EthTxBlobSidecarV1, SidecarBlobV1}, web3, K256PrivateKey, EIP_4844_TX_TYPE, H256, U256, U64, @@ -384,10 +384,7 @@ mod tests { .as_ref(), )]), }; - let raw_tx = signer - .sign_transaction(raw_transaction.clone()) - .await - .unwrap(); + let raw_tx = signer.sign_transaction(raw_transaction.clone()); let hash = web3::keccak256(&raw_tx).into(); // Transaction generated with https://github.com/inphi/blob-utils with @@ -493,10 +490,7 @@ mod tests { blob_versioned_hashes: Some(vec![versioned_hash_1, versioned_hash_2]), }; - let raw_tx = signer - .sign_transaction(raw_transaction.clone()) - .await - .unwrap(); + let raw_tx = signer.sign_transaction(raw_transaction); let hash = web3::keccak256(&raw_tx).into(); // Transaction generated with https://github.com/inphi/blob-utils with diff --git a/core/lib/eth_signer/Cargo.toml b/core/lib/eth_signer/Cargo.toml index f760134e09b..92bb47824f3 100644 --- a/core/lib/eth_signer/Cargo.toml +++ b/core/lib/eth_signer/Cargo.toml @@ -11,10 +11,9 @@ keywords.workspace = true categories.workspace = true [dependencies] -zksync_types.workspace = true +zksync_basic_types.workspace = true +zksync_crypto_primitives.workspace = true + +async-trait.workspace = true rlp.workspace = true thiserror.workspace = true -async-trait.workspace = true - -[dev-dependencies] -tokio = { workspace = true, features = ["full"] } diff --git a/core/lib/eth_signer/src/error.rs b/core/lib/eth_signer/src/error.rs deleted file mode 100644 index 8b137891791..00000000000 --- a/core/lib/eth_signer/src/error.rs +++ /dev/null @@ -1 +0,0 @@ - diff --git a/core/lib/eth_signer/src/lib.rs b/core/lib/eth_signer/src/lib.rs index 3a92d47b062..8b6025eb15d 100644 --- a/core/lib/eth_signer/src/lib.rs 
+++ b/core/lib/eth_signer/src/lib.rs
@@ -1,5 +1,6 @@
 use async_trait::async_trait;
-use zksync_types::{Address, EIP712TypedStructure, Eip712Domain, PackedEthSignature};
+use zksync_basic_types::Address;
+use zksync_crypto_primitives::{EIP712TypedStructure, Eip712Domain, PackedEthSignature};

 pub use crate::{pk_signer::PrivateKeySigner, raw_ethereum_tx::TransactionParameters};

diff --git a/core/lib/eth_signer/src/pk_signer.rs b/core/lib/eth_signer/src/pk_signer.rs
index 47b0e110991..0f55425a0d5 100644
--- a/core/lib/eth_signer/src/pk_signer.rs
+++ b/core/lib/eth_signer/src/pk_signer.rs
@@ -1,5 +1,7 @@
-use zksync_types::{
-    Address, EIP712TypedStructure, Eip712Domain, K256PrivateKey, PackedEthSignature,
+use async_trait::async_trait;
+use zksync_basic_types::Address;
+use zksync_crypto_primitives::{
+    EIP712TypedStructure, Eip712Domain, K256PrivateKey, PackedEthSignature,
 };

 use crate::{
@@ -12,22 +14,20 @@ pub struct PrivateKeySigner {
     private_key: K256PrivateKey,
 }

+// We define inherent methods duplicating `EthereumSigner` ones because they are sync and (other than `sign_typed_data`) infallible.
 impl PrivateKeySigner {
     pub fn new(private_key: K256PrivateKey) -> Self {
         Self { private_key }
     }
-}

-#[async_trait::async_trait]
-impl EthereumSigner for PrivateKeySigner {
-    /// Get Ethereum address that matches the private key.
-    async fn get_address(&self) -> Result<Address, SignerError> {
-        Ok(self.private_key.address())
+    /// Gets an Ethereum address that matches this private key.
+    pub fn address(&self) -> Address {
+        self.private_key.address()
     }

     /// Signs typed struct using Ethereum private key by EIP-712 signature standard.
     /// Result of this function is the equivalent of RPC calling `eth_signTypedData`.
-    async fn sign_typed_data<S: EIP712TypedStructure + Sync>(
+    pub fn sign_typed_data<S: EIP712TypedStructure + Sync>(
         &self,
         domain: &Eip712Domain,
         typed_struct: &S,
@@ -39,16 +39,11 @@ impl EthereumSigner for PrivateKeySigner {
     }

     /// Signs and returns the RLP-encoded transaction.
-    async fn sign_transaction(
-        &self,
-        raw_tx: TransactionParameters,
-    ) -> Result<Vec<u8>, SignerError> {
+    pub fn sign_transaction(&self, raw_tx: TransactionParameters) -> Vec<u8> {
         // According to the code in web3
         // We should use `max_fee_per_gas` as `gas_price` if we use EIP1559
         let gas_price = raw_tx.max_fee_per_gas;
-        let max_priority_fee_per_gas = raw_tx.max_priority_fee_per_gas;
-
         let tx = Transaction {
             to: raw_tx.to,
             nonce: raw_tx.nonce,
@@ -62,21 +57,42 @@ impl EthereumSigner for PrivateKeySigner {
             max_fee_per_blob_gas: raw_tx.max_fee_per_blob_gas,
             blob_versioned_hashes: raw_tx.blob_versioned_hashes,
         };
-
         let signed = tx.sign(&self.private_key, raw_tx.chain_id);
-        Ok(signed.raw_transaction.0)
+        signed.raw_transaction.0
+    }
+}
+
+#[async_trait]
+impl EthereumSigner for PrivateKeySigner {
+    async fn get_address(&self) -> Result<Address, SignerError> {
+        Ok(self.address())
+    }
+
+    async fn sign_typed_data<S: EIP712TypedStructure + Sync>(
+        &self,
+        domain: &Eip712Domain,
+        typed_struct: &S,
+    ) -> Result<PackedEthSignature, SignerError> {
+        self.sign_typed_data(domain, typed_struct)
+    }
+
+    async fn sign_transaction(
+        &self,
+        raw_tx: TransactionParameters,
+    ) -> Result<Vec<u8>, SignerError> {
+        Ok(self.sign_transaction(raw_tx))
+    }
 }

 #[cfg(test)]
 mod test {
-    use zksync_types::{K256PrivateKey, H160, H256, U256, U64};
+    use zksync_basic_types::{H160, H256, U256, U64};
+    use zksync_crypto_primitives::K256PrivateKey;

-    use super::PrivateKeySigner;
-    use crate::{raw_ethereum_tx::TransactionParameters, EthereumSigner};
+    use super::*;

-    #[tokio::test]
-    async fn test_generating_signed_raw_transaction() {
+    #[test]
+    fn test_generating_signed_raw_transaction() {
         let private_key = K256PrivateKey::from_bytes(H256::from([5; 32])).unwrap();
         let signer = PrivateKeySigner::new(private_key);
         let raw_transaction = TransactionParameters {
@@ -94,10 +110,7 @@ mod test {
             blob_versioned_hashes: None,
             max_fee_per_blob_gas: None,
         };
-        let raw_tx = signer
-            .sign_transaction(raw_transaction.clone())
-            .await
-            .unwrap();
+        let raw_tx = signer.sign_transaction(raw_transaction);
         assert_ne!(raw_tx.len(), 1);
         // pre-calculated signature with right algorithm implementation
         let precalculated_raw_tx: Vec<u8> = vec![
diff --git a/core/lib/eth_signer/src/raw_ethereum_tx.rs b/core/lib/eth_signer/src/raw_ethereum_tx.rs
index 9479b5bd9d7..bea64305b47 100644
--- a/core/lib/eth_signer/src/raw_ethereum_tx.rs
+++ b/core/lib/eth_signer/src/raw_ethereum_tx.rs
@@ -10,11 +10,11 @@
 //! Link to @Deniallugo's PR to web3: https://github.com/tomusdrw/rust-web3/pull/630

 use rlp::RlpStream;
-use zksync_types::{
-    ethabi::Address,
+use zksync_basic_types::{
     web3::{keccak256, AccessList, Signature, SignedTransaction},
-    K256PrivateKey, H256, U256, U64,
+    Address, H256, U256, U64,
 };
+use zksync_crypto_primitives::K256PrivateKey;

 const LEGACY_TX_ID: u64 = 0;
 const ACCESSLISTS_TX_ID: u64 = 1;
diff --git a/core/lib/external_price_api/Cargo.toml b/core/lib/external_price_api/Cargo.toml
index 9539aa3fdc3..3eee675b4e6 100644
--- a/core/lib/external_price_api/Cargo.toml
+++ b/core/lib/external_price_api/Cargo.toml
@@ -24,3 +24,4 @@ rand.workspace = true
 zksync_config.workspace = true
 zksync_types.workspace = true
 tokio.workspace = true
+httpmock.workspace = true
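The `pk_signer.rs` rewrite above inverts the usual layering: the synchronous, (mostly) infallible inherent methods become the source of truth, and the async `EthereumSigner` impl merely forwards to them. The shape of that pattern, reduced to a toy trait with illustrative names:

use async_trait::async_trait;

#[async_trait]
trait Signer {
    async fn sign(&self, payload: &[u8]) -> Result<Vec<u8>, String>;
}

struct InherentSigner;

impl InherentSigner {
    // Inherent method: sync and infallible, callable without an async runtime.
    fn sign(&self, payload: &[u8]) -> Vec<u8> {
        payload.to_vec() // stand-in for real signing logic
    }
}

#[async_trait]
impl Signer for InherentSigner {
    async fn sign(&self, payload: &[u8]) -> Result<Vec<u8>, String> {
        // Forward to the inherent method (inherent methods win name resolution).
        Ok(InherentSigner::sign(self, payload))
    }
}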
diff --git a/core/lib/external_price_api/src/coingecko_api.rs b/core/lib/external_price_api/src/coingecko_api.rs
index 8fa7514b368..cc882db95c3 100644
--- a/core/lib/external_price_api/src/coingecko_api.rs
+++ b/core/lib/external_price_api/src/coingecko_api.rs
@@ -49,6 +49,7 @@ impl CoinGeckoPriceAPIClient {
         }
     }

+    /// returns token price in ETH by token address. Returned value is X such that 1 TOKEN = X ETH.
     async fn get_token_price_by_address(&self, address: Address) -> anyhow::Result<f64> {
         let address_str = address_to_string(&address);
         let price_url = self
@@ -87,11 +88,13 @@ impl CoinGeckoPriceAPIClient {
 impl PriceAPIClient for CoinGeckoPriceAPIClient {
     async fn fetch_ratio(&self, token_address: Address) -> anyhow::Result<BaseTokenAPIRatio> {
         let base_token_in_eth = self.get_token_price_by_address(token_address).await?;
-        let (numerator, denominator) = get_fraction(base_token_in_eth);
+        let (num_in_eth, denom_in_eth) = get_fraction(base_token_in_eth)?;
+        // take reciprocal of price as returned price is ETH/BaseToken and BaseToken/ETH is needed
+        let (num_in_base, denom_in_base) = (denom_in_eth, num_in_eth);

         return Ok(BaseTokenAPIRatio {
-            numerator,
-            denominator,
+            numerator: num_in_base,
+            denominator: denom_in_base,
             ratio_timestamp: Utc::now(),
         });
     }
@@ -110,3 +113,160 @@ impl CoinGeckoPriceResponse {
             .and_then(|price| price.get(currency))
     }
 }
+
+#[cfg(test)]
+mod test {
+    use httpmock::MockServer;
+    use zksync_config::configs::external_price_api_client::DEFAULT_TIMEOUT_MS;
+
+    use super::*;
+    use crate::tests::*;
+
+    fn get_mock_response(address: &str, price: f64) -> String {
+        format!("{{\"{}\":{{\"eth\":{}}}}}", address, price)
+    }
+
+    #[test]
+    fn test_mock_response() {
+        // curl "https://api.coingecko.com/api/v3/simple/token_price/ethereum?contract_addresses=0x1f9840a85d5af5bf1d1762f925bdaddc4201f984&vs_currencies=eth"
+        // {"0x1f9840a85d5af5bf1d1762f925bdaddc4201f984":{"eth":0.00269512}}
+        assert_eq!(
+            get_mock_response("0x1f9840a85d5af5bf1d1762f925bdaddc4201f984", 0.00269512),
+            r#"{"0x1f9840a85d5af5bf1d1762f925bdaddc4201f984":{"eth":0.00269512}}"#
+        )
+    }
+
+    fn add_mock_by_address(
+        server: &MockServer,
+        // use string explicitly to verify that conversion of the address to string works as expected
+        address: String,
+        price: Option<f64>,
+        api_key: Option<String>,
+    ) {
+        server.mock(|mut when, then| {
+            when = when
+                .method(httpmock::Method::GET)
+                .path("/api/v3/simple/token_price/ethereum");
+
+            when = when.query_param("contract_addresses", address.clone());
+            when = when.query_param("vs_currencies", ETH_ID);
+            api_key.map(|key| when.header(COINGECKO_AUTH_HEADER, key));
+
+            if let Some(p) = price {
+                then.status(200).body(get_mock_response(&address, p));
+            } else {
+                // requesting with invalid/unknown address results in empty json
+                // example:
+                // $ curl "https://api.coingecko.com/api/v3/simple/token_price/ethereum?contract_addresses=0x000000000000000000000000000000000000dead&vs_currencies=eth"
+                // {}
+                then.status(200).body("{}");
+            };
+        });
+    }
+
+    fn get_config(base_url: String, api_key: Option<String>) -> ExternalPriceApiClientConfig {
+        ExternalPriceApiClientConfig {
+            base_url: Some(base_url),
+            api_key,
+            source: "coingecko".to_string(),
+            client_timeout_ms: DEFAULT_TIMEOUT_MS,
+            forced: None,
+        }
+    }
+
+    fn happy_day_setup(
+        api_key: Option<String>,
+        server: &MockServer,
+        address: Address,
+        base_token_price: f64,
+    ) -> SetupResult {
+        add_mock_by_address(
+            server,
+            address_to_string(&address),
+            Some(base_token_price),
+            api_key.clone(),
+        );
+        SetupResult {
+            client: Box::new(CoinGeckoPriceAPIClient::new(get_config(
+                server.url(""),
+                api_key,
+            ))),
+        }
+    }
+
+    #[tokio::test]
+    async fn test_happy_day_with_api_key() {
+        happy_day_test(
+            |server: &MockServer, address: Address, base_token_price: f64| {
+                happy_day_setup(
+                    Some("test-key".to_string()),
+                    server,
+                    address,
+                    base_token_price,
+                )
+            },
+        )
+        .await
+    }
+
+    #[tokio::test]
+    async fn
test_happy_day_with_no_api_key() { + happy_day_test( + |server: &MockServer, address: Address, base_token_price: f64| { + happy_day_setup(None, server, address, base_token_price) + }, + ) + .await + } + + fn error_404_setup( + server: &MockServer, + _address: Address, + _base_token_price: f64, + ) -> SetupResult { + // just don't add mock + SetupResult { + client: Box::new(CoinGeckoPriceAPIClient::new(get_config( + server.url(""), + Some("FILLER".to_string()), + ))), + } + } + + #[tokio::test] + async fn test_error_404() { + let error_string = error_test(error_404_setup).await.to_string(); + assert!( + error_string + .starts_with("Http error while fetching token price. Status: 404 Not Found"), + "Error was: {}", + &error_string + ) + } + + fn error_missing_setup( + server: &MockServer, + address: Address, + _base_token_price: f64, + ) -> SetupResult { + let api_key = Some("FILLER".to_string()); + + add_mock_by_address(server, address_to_string(&address), None, api_key.clone()); + SetupResult { + client: Box::new(CoinGeckoPriceAPIClient::new(get_config( + server.url(""), + api_key, + ))), + } + } + + #[tokio::test] + async fn test_error_missing() { + let error_string = error_test(error_missing_setup).await.to_string(); + assert!( + error_string.starts_with("Price not found for token"), + "Error was: {}", + error_string + ) + } +} diff --git a/core/lib/external_price_api/src/forced_price_client.rs b/core/lib/external_price_api/src/forced_price_client.rs index fd166cdfd2d..a18c03fd8ca 100644 --- a/core/lib/external_price_api/src/forced_price_client.rs +++ b/core/lib/external_price_api/src/forced_price_client.rs @@ -7,7 +7,7 @@ use zksync_types::{base_token_ratio::BaseTokenAPIRatio, Address}; use crate::PriceAPIClient; -// Struct for a a forced price "client" (conversion ratio is always a configured "forced" ratio). +// Struct for a forced price "client" (conversion ratio is always a configured "forced" ratio). #[derive(Debug, Clone)] pub struct ForcedPriceClient { ratio: BaseTokenAPIRatio, diff --git a/core/lib/external_price_api/src/lib.rs b/core/lib/external_price_api/src/lib.rs index e86279dbe85..7a068f9b1cb 100644 --- a/core/lib/external_price_api/src/lib.rs +++ b/core/lib/external_price_api/src/lib.rs @@ -1,5 +1,7 @@ pub mod coingecko_api; pub mod forced_price_client; +#[cfg(test)] +mod tests; mod utils; use std::fmt; @@ -11,6 +13,8 @@ use zksync_types::{base_token_ratio::BaseTokenAPIRatio, Address}; #[async_trait] pub trait PriceAPIClient: Sync + Send + fmt::Debug + 'static { /// Returns the BaseToken<->ETH ratio for the input token address. + /// The returned value is rational number X such that X BaseToken = 1 ETH. + /// Example if 1 BaseToken = 0.002 ETH, then ratio is 500/1 (500 BaseToken = 1ETH) async fn fetch_ratio(&self, token_address: Address) -> anyhow::Result; } diff --git a/core/lib/external_price_api/src/tests.rs b/core/lib/external_price_api/src/tests.rs new file mode 100644 index 00000000000..bb2af866cf5 --- /dev/null +++ b/core/lib/external_price_api/src/tests.rs @@ -0,0 +1,56 @@ +use std::str::FromStr; + +use chrono::Utc; +use httpmock::MockServer; +use zksync_types::Address; + +use crate::PriceAPIClient; + +const TIME_TOLERANCE_MS: i64 = 100; +/// Uniswap (UNI) +const TEST_TOKEN_ADDRESS: &str = "0x1f9840a85d5af5bf1d1762f925bdaddc4201f984"; +/// 1UNI = 0.00269ETH +const TEST_TOKEN_PRICE_ETH: f64 = 0.00269; +/// 1ETH = 371.74UNI; When converting gas price from ETH to UNI +/// you need to multiply by this value. Thus, this should be equal to the ratio. 
+const TEST_BASE_PRICE: f64 = 371.74;
+const PRICE_FLOAT_COMPARE_TOLERANCE: f64 = 0.1;
+
+pub(crate) struct SetupResult {
+    pub(crate) client: Box<dyn PriceAPIClient>,
+}
+
+pub(crate) type SetupFn =
+    fn(server: &MockServer, address: Address, base_token_price: f64) -> SetupResult;
+
+pub(crate) async fn happy_day_test(setup: SetupFn) {
+    let server = MockServer::start();
+    let address_str = TEST_TOKEN_ADDRESS;
+    let address = Address::from_str(address_str).unwrap();
+
+    // APIs return token price in ETH (ETH per 1 token)
+    let SetupResult { client } = setup(&server, address, TEST_TOKEN_PRICE_ETH);
+    let api_price = client.fetch_ratio(address).await.unwrap();
+
+    // we expect the returned ratio to be such that when multiplying gas price in ETH you get gas
+    // price in base token. So we expect such ratio X that X Base = 1ETH
+    assert!(
+        ((api_price.numerator.get() as f64) / (api_price.denominator.get() as f64)
+            - TEST_BASE_PRICE)
+            .abs()
+            < PRICE_FLOAT_COMPARE_TOLERANCE
+    );
+    assert!((Utc::now() - api_price.ratio_timestamp).num_milliseconds() <= TIME_TOLERANCE_MS);
+}
+
+pub(crate) async fn error_test(setup: SetupFn) -> anyhow::Error {
+    let server = MockServer::start();
+    let address_str = TEST_TOKEN_ADDRESS;
+    let address = Address::from_str(address_str).unwrap();
+
+    let SetupResult { client } = setup(&server, address, 1.0);
+    let api_price = client.fetch_ratio(address).await;
+
+    assert!(api_price.is_err());
+    api_price.err().unwrap()
+}
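The constants above encode the reciprocal relation the tests exercise: CoinGecko reports ETH per token (0.00269), while `fetch_ratio` must return tokens per ETH, and 1 / 0.00269 ≈ 371.75, which is within the 0.1 tolerance of `TEST_BASE_PRICE = 371.74`. A sketch of the arithmetic, assuming `get_fraction` from `utils.rs` below is in scope:

fn main() -> anyhow::Result<()> {
    // get_fraction(0.00269) is expected to reduce to 269 / 100_000 (ETH per token).
    let (num, denom) = get_fraction(0.00269)?;
    // Swap numerator and denominator to obtain tokens per ETH, as `fetch_ratio` does.
    let tokens_per_eth = denom.get() as f64 / num.get() as f64;
    assert!((tokens_per_eth - 371.74).abs() < 0.1); // 100_000 / 269 ≈ 371.75
    Ok(())
}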
diff --git a/core/lib/external_price_api/src/utils.rs b/core/lib/external_price_api/src/utils.rs
index 879be44e173..4b8abc39dff 100644
--- a/core/lib/external_price_api/src/utils.rs
+++ b/core/lib/external_price_api/src/utils.rs
@@ -3,13 +3,78 @@ use std::num::NonZeroU64;

 use fraction::Fraction;

 /// Using the base token price and eth price, calculate the fraction of the base token to eth.
-pub fn get_fraction(ratio_f64: f64) -> (NonZeroU64, NonZeroU64) {
+pub fn get_fraction(ratio_f64: f64) -> anyhow::Result<(NonZeroU64, NonZeroU64)> {
     let rate_fraction = Fraction::from(ratio_f64);
+    if rate_fraction.sign() == Some(fraction::Sign::Minus) {
+        return Err(anyhow::anyhow!("number is negative"));
+    }

-    let numerator = NonZeroU64::new(*rate_fraction.numer().expect("numerator is empty"))
-        .expect("numerator is zero");
-    let denominator = NonZeroU64::new(*rate_fraction.denom().expect("denominator is empty"))
-        .expect("denominator is zero");
+    let numerator = NonZeroU64::new(
+        *rate_fraction
+            .numer()
+            .ok_or(anyhow::anyhow!("number is not rational"))?,
+    )
+    .ok_or(anyhow::anyhow!("numerator is zero"))?;
+    let denominator = NonZeroU64::new(
+        *rate_fraction
+            .denom()
+            .ok_or(anyhow::anyhow!("number is not rational"))?,
+    )
+    .ok_or(anyhow::anyhow!("denominator is zero"))?;

-    (numerator, denominator)
+    Ok((numerator, denominator))
+}
+
+#[cfg(test)]
+pub(crate) mod tests {
+    use super::*;
+
+    fn assert_get_fraction_value(f: f64, num: u64, denum: u64) {
+        assert_eq!(
+            get_fraction(f).unwrap(),
+            (
+                NonZeroU64::try_from(num).unwrap(),
+                NonZeroU64::try_from(denum).unwrap()
+            )
+        );
+    }
+
+    #[allow(clippy::approx_constant)]
+    #[test]
+    fn test_float_to_fraction_conversion_as_expected() {
+        assert_get_fraction_value(1.0, 1, 1);
+        assert_get_fraction_value(1337.0, 1337, 1);
+        assert_get_fraction_value(0.1, 1, 10);
+        assert_get_fraction_value(3.141, 3141, 1000);
+        assert_get_fraction_value(1_000_000.0, 1_000_000, 1);
+        assert_get_fraction_value(3123.47, 312347, 100);
+        // below tests assume some not necessarily required behaviour of get_fraction
+        assert_get_fraction_value(0.2, 1, 5);
+        assert_get_fraction_value(0.5, 1, 2);
+        assert_get_fraction_value(3.1415, 6283, 2000);
+    }
+
+    #[test]
+    fn test_to_fraction_bad_inputs() {
+        assert_eq!(
+            get_fraction(0.0).expect_err("did not error").to_string(),
+            "numerator is zero"
+        );
+        assert_eq!(
+            get_fraction(-1.0).expect_err("did not error").to_string(),
+            "number is negative"
+        );
+        assert_eq!(
+            get_fraction(f64::NAN)
+                .expect_err("did not error")
+                .to_string(),
+            "number is not rational"
+        );
+        assert_eq!(
+            get_fraction(f64::INFINITY)
+                .expect_err("did not error")
+                .to_string(),
+            "number is not rational"
+        );
+    }
 }
diff --git a/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs b/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs
index 0d1ff91163f..cf2b3a0d089 100644
--- a/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs
+++ b/core/lib/l1_contract_interface/src/i_executor/structures/commit_batch_info.rs
@@ -218,12 +218,12 @@ impl Tokenizable for CommitBatchInfo<'_> {
             (L1BatchCommitmentMode::Validium, PubdataDA::Custom) => {
                 let mut operator_da_input = vec![PUBDATA_SOURCE_CUSTOM];
                 operator_da_input.extend(
-                    &self
+                    &self
                         .l1_batch_with_metadata
                         .metadata
                         .da_blob_id
                         .clone()
-                        .unwrap_or_default()
+                        .unwrap_or_default(),
                 );
                 operator_da_input
             }
diff --git a/core/lib/mempool/src/tests.rs b/core/lib/mempool/src/tests.rs
index 6ea1be3b514..96ef600984f 100644
--- a/core/lib/mempool/src/tests.rs
+++ b/core/lib/mempool/src/tests.rs
@@ -371,7 +371,7 @@ fn gen_l2_tx(address: Address, nonce: Nonce) -> Transaction {

 fn gen_l2_tx_with_timestamp(address: Address, nonce: Nonce, received_at_ms: u64) -> Transaction {
     let mut txn = L2Tx::new(
-        Address::default(),
+        Some(Address::default()),
         Vec::new(),
         nonce,
         Fee::default(),
@@ -386,7 +386,7 @@ fn
gen_l2_tx_with_timestamp(address: Address, nonce: Nonce, received_at_ms: u64) fn gen_l1_tx(priority_id: PriorityOpId) -> Transaction { let execute = Execute { - contract_address: Address::repeat_byte(0x11), + contract_address: Some(Address::repeat_byte(0x11)), calldata: vec![1, 2, 3], factory_deps: vec![], value: U256::zero(), diff --git a/core/lib/multivm/Cargo.toml b/core/lib/multivm/Cargo.toml index 5e76c10f53e..7d604157d1a 100644 --- a/core/lib/multivm/Cargo.toml +++ b/core/lib/multivm/Cargo.toml @@ -34,14 +34,13 @@ anyhow.workspace = true hex.workspace = true itertools.workspace = true once_cell.workspace = true -pretty_assertions.workspace = true thiserror.workspace = true tracing.workspace = true vise.workspace = true [dev-dependencies] assert_matches.workspace = true -tokio = { workspace = true, features = ["time"] } +pretty_assertions.workspace = true zksync_test_account.workspace = true ethabi.workspace = true zksync_eth_signer.workspace = true diff --git a/core/lib/multivm/src/lib.rs b/core/lib/multivm/src/lib.rs index 77851a1df00..e171a78e179 100644 --- a/core/lib/multivm/src/lib.rs +++ b/core/lib/multivm/src/lib.rs @@ -16,11 +16,11 @@ pub use crate::{ vm_1_3_2, vm_1_4_1, vm_1_4_2, vm_boojum_integration, vm_fast, vm_latest, vm_m5, vm_m6, vm_refunds_enhancement, vm_virtual_blocks, }, - vm_instance::VmInstance, + vm_instance::{FastVmInstance, LegacyVmInstance}, }; mod glue; pub mod tracers; pub mod utils; -pub mod versions; +mod versions; mod vm_instance; diff --git a/core/lib/multivm/src/versions/mod.rs b/core/lib/multivm/src/versions/mod.rs index 81358a482f1..bcb246cece4 100644 --- a/core/lib/multivm/src/versions/mod.rs +++ b/core/lib/multivm/src/versions/mod.rs @@ -1,5 +1,8 @@ -pub mod shadow; mod shared; +#[cfg(test)] +mod testonly; +#[cfg(test)] +mod tests; pub mod vm_1_3_2; pub mod vm_1_4_1; pub mod vm_1_4_2; diff --git a/core/lib/multivm/src/versions/shadow.rs b/core/lib/multivm/src/versions/shadow.rs deleted file mode 100644 index 871258f43b8..00000000000 --- a/core/lib/multivm/src/versions/shadow.rs +++ /dev/null @@ -1,305 +0,0 @@ -use std::{ - collections::{BTreeMap, HashSet}, - fmt, -}; - -use anyhow::Context as _; -use zksync_types::{StorageKey, StorageLog, StorageLogWithPreviousValue, Transaction}; - -use crate::{ - interface::{ - storage::{ImmutableStorageView, ReadStorage, StoragePtr, StorageView}, - BytecodeCompressionResult, CurrentExecutionState, FinishedL1Batch, L1BatchEnv, L2BlockEnv, - SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, - VmInterfaceHistoryEnabled, VmMemoryMetrics, - }, - vm_fast, -}; - -#[derive(Debug)] -pub struct ShadowVm { - main: T, - shadow: vm_fast::Vm>, -} - -impl VmFactory> for ShadowVm -where - S: ReadStorage, - T: VmFactory>, -{ - fn new( - batch_env: L1BatchEnv, - system_env: SystemEnv, - storage: StoragePtr>, - ) -> Self { - Self { - main: T::new(batch_env.clone(), system_env.clone(), storage.clone()), - shadow: vm_fast::Vm::new(batch_env, system_env, ImmutableStorageView::new(storage)), - } - } -} - -impl VmInterface for ShadowVm -where - S: ReadStorage, - T: VmInterface, -{ - type TracerDispatcher = T::TracerDispatcher; - - fn push_transaction(&mut self, tx: Transaction) { - self.shadow.push_transaction(tx.clone()); - self.main.push_transaction(tx); - } - - fn inspect( - &mut self, - dispatcher: Self::TracerDispatcher, - execution_mode: VmExecutionMode, - ) -> VmExecutionResultAndLogs { - let shadow_result = self.shadow.inspect((), execution_mode); - let main_result = self.main.inspect(dispatcher, 
execution_mode); - let mut errors = DivergenceErrors::default(); - errors.check_results_match(&main_result, &shadow_result); - errors - .into_result() - .with_context(|| format!("executing VM with mode {execution_mode:?}")) - .unwrap(); - main_result - } - - fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { - self.shadow.start_new_l2_block(l2_block_env); - self.main.start_new_l2_block(l2_block_env); - } - - fn inspect_transaction_with_bytecode_compression( - &mut self, - tracer: Self::TracerDispatcher, - tx: Transaction, - with_compression: bool, - ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { - let tx_hash = tx.hash(); - let main_result = self.main.inspect_transaction_with_bytecode_compression( - tracer, - tx.clone(), - with_compression, - ); - let shadow_result = - self.shadow - .inspect_transaction_with_bytecode_compression((), tx, with_compression); - let mut errors = DivergenceErrors::default(); - errors.check_results_match(&main_result.1, &shadow_result.1); - errors - .into_result() - .with_context(|| { - format!("inspecting transaction {tx_hash:?}, with_compression={with_compression:?}") - }) - .unwrap(); - main_result - } - - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - self.main.record_vm_memory_metrics() - } - - fn finish_batch(&mut self) -> FinishedL1Batch { - let main_batch = self.main.finish_batch(); - let shadow_batch = self.shadow.finish_batch(); - - let mut errors = DivergenceErrors::default(); - errors.check_results_match( - &main_batch.block_tip_execution_result, - &shadow_batch.block_tip_execution_result, - ); - errors.check_final_states_match( - &main_batch.final_execution_state, - &shadow_batch.final_execution_state, - ); - errors.check_match( - "final_bootloader_memory", - &main_batch.final_bootloader_memory, - &shadow_batch.final_bootloader_memory, - ); - errors.check_match( - "pubdata_input", - &main_batch.pubdata_input, - &shadow_batch.pubdata_input, - ); - errors.check_match( - "state_diffs", - &main_batch.state_diffs, - &shadow_batch.state_diffs, - ); - errors.into_result().unwrap(); - main_batch - } -} - -#[must_use = "Should be converted to a `Result`"] -#[derive(Debug, Default)] -pub struct DivergenceErrors(Vec); - -impl DivergenceErrors { - fn check_results_match( - &mut self, - main_result: &VmExecutionResultAndLogs, - shadow_result: &VmExecutionResultAndLogs, - ) { - self.check_match("result", &main_result.result, &shadow_result.result); - self.check_match( - "logs.events", - &main_result.logs.events, - &shadow_result.logs.events, - ); - self.check_match( - "logs.system_l2_to_l1_logs", - &main_result.logs.system_l2_to_l1_logs, - &shadow_result.logs.system_l2_to_l1_logs, - ); - self.check_match( - "logs.user_l2_to_l1_logs", - &main_result.logs.user_l2_to_l1_logs, - &shadow_result.logs.user_l2_to_l1_logs, - ); - let main_logs = UniqueStorageLogs::new(&main_result.logs.storage_logs); - let shadow_logs = UniqueStorageLogs::new(&shadow_result.logs.storage_logs); - self.check_match("logs.storage_logs", &main_logs, &shadow_logs); - self.check_match("refunds", &main_result.refunds, &shadow_result.refunds); - self.check_match( - "statistics.circuit_statistic", - &main_result.statistics.circuit_statistic, - &shadow_result.statistics.circuit_statistic, - ); - self.check_match( - "gas_remaining", - &main_result.statistics.gas_remaining, - &shadow_result.statistics.gas_remaining, - ); - } - - fn check_match(&mut self, context: &str, main: &T, shadow: &T) { - if main != shadow { - let comparison = 
pretty_assertions::Comparison::new(main, shadow); - let err = anyhow::anyhow!("`{context}` mismatch: {comparison}"); - self.0.push(err); - } - } - - fn check_final_states_match( - &mut self, - main: &CurrentExecutionState, - shadow: &CurrentExecutionState, - ) { - self.check_match("final_state.events", &main.events, &shadow.events); - self.check_match( - "final_state.user_l2_to_l1_logs", - &main.user_l2_to_l1_logs, - &shadow.user_l2_to_l1_logs, - ); - self.check_match( - "final_state.system_logs", - &main.system_logs, - &shadow.system_logs, - ); - self.check_match( - "final_state.storage_refunds", - &main.storage_refunds, - &shadow.storage_refunds, - ); - self.check_match( - "final_state.pubdata_costs", - &main.pubdata_costs, - &shadow.pubdata_costs, - ); - self.check_match( - "final_state.used_contract_hashes", - &main.used_contract_hashes.iter().collect::>(), - &shadow.used_contract_hashes.iter().collect::>(), - ); - - let main_deduplicated_logs = Self::gather_logs(&main.deduplicated_storage_logs); - let shadow_deduplicated_logs = Self::gather_logs(&shadow.deduplicated_storage_logs); - self.check_match( - "deduplicated_storage_logs", - &main_deduplicated_logs, - &shadow_deduplicated_logs, - ); - } - - fn gather_logs(logs: &[StorageLog]) -> BTreeMap { - logs.iter() - .filter(|log| log.is_write()) - .map(|log| (log.key, log)) - .collect() - } - - fn into_result(self) -> anyhow::Result<()> { - if self.0.is_empty() { - Ok(()) - } else { - Err(anyhow::anyhow!( - "divergence between old VM and new VM execution: [{:?}]", - self.0 - )) - } - } -} - -// The new VM doesn't support read logs yet, doesn't order logs by access and deduplicates them -// inside the VM, hence this auxiliary struct. -#[derive(PartialEq)] -struct UniqueStorageLogs(BTreeMap); - -impl fmt::Debug for UniqueStorageLogs { - fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { - let mut map = formatter.debug_map(); - for log in self.0.values() { - map.entry( - &format!("{:?}:{:?}", log.log.key.address(), log.log.key.key()), - &format!("{:?} -> {:?}", log.previous_value, log.log.value), - ); - } - map.finish() - } -} - -impl UniqueStorageLogs { - fn new(logs: &[StorageLogWithPreviousValue]) -> Self { - let mut unique_logs = BTreeMap::::new(); - for log in logs { - if !log.log.is_write() { - continue; - } - if let Some(existing_log) = unique_logs.get_mut(&log.log.key) { - existing_log.log.value = log.log.value; - } else { - unique_logs.insert(log.log.key, *log); - } - } - - // Remove no-op write logs (i.e., X -> X writes) produced by the old VM. 
- unique_logs.retain(|_, log| log.previous_value != log.log.value); - Self(unique_logs) - } -} - -impl VmInterfaceHistoryEnabled for ShadowVm -where - S: ReadStorage, - T: VmInterfaceHistoryEnabled, -{ - fn make_snapshot(&mut self) { - self.shadow.make_snapshot(); - self.main.make_snapshot(); - } - - fn rollback_to_the_latest_snapshot(&mut self) { - self.shadow.rollback_to_the_latest_snapshot(); - self.main.rollback_to_the_latest_snapshot(); - } - - fn pop_snapshot_no_rollback(&mut self) { - self.shadow.pop_snapshot_no_rollback(); - self.main.pop_snapshot_no_rollback(); - } -} diff --git a/core/lib/multivm/src/versions/testonly.rs b/core/lib/multivm/src/versions/testonly.rs new file mode 100644 index 00000000000..51a4d0842d9 --- /dev/null +++ b/core/lib/multivm/src/versions/testonly.rs @@ -0,0 +1,93 @@ +use zksync_contracts::BaseSystemContracts; +use zksync_test_account::Account; +use zksync_types::{ + block::L2BlockHasher, fee_model::BatchFeeInput, get_code_key, get_is_account_key, + helpers::unix_timestamp_ms, utils::storage_key_for_eth_balance, Address, L1BatchNumber, + L2BlockNumber, L2ChainId, ProtocolVersionId, U256, +}; +use zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; + +use crate::{ + interface::{storage::InMemoryStorage, L1BatchEnv, L2BlockEnv, SystemEnv, TxExecutionMode}, + vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, +}; + +pub(super) fn default_system_env() -> SystemEnv { + SystemEnv { + zk_porter_available: false, + version: ProtocolVersionId::latest(), + base_system_smart_contracts: BaseSystemContracts::playground(), + bootloader_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, + execution_mode: TxExecutionMode::VerifyExecute, + default_validation_computational_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, + chain_id: L2ChainId::from(270), + } +} + +pub(super) fn default_l1_batch(number: L1BatchNumber) -> L1BatchEnv { + let timestamp = unix_timestamp_ms(); + L1BatchEnv { + previous_batch_hash: None, + number, + timestamp, + fee_input: BatchFeeInput::l1_pegged( + 50_000_000_000, // 50 gwei + 250_000_000, // 0.25 gwei + ), + fee_account: Address::random(), + enforced_base_fee: None, + first_l2_block: L2BlockEnv { + number: 1, + timestamp, + prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), + max_virtual_blocks_to_create: 100, + }, + } +} + +pub(super) fn make_account_rich(storage: &mut InMemoryStorage, account: &Account) { + let key = storage_key_for_eth_balance(&account.address); + storage.set_value(key, u256_to_h256(U256::from(10_u64.pow(19)))); +} + +#[derive(Debug, Clone)] +pub(super) struct ContractToDeploy { + bytecode: Vec, + address: Address, + is_account: bool, +} + +impl ContractToDeploy { + pub fn new(bytecode: Vec, address: Address) -> Self { + Self { + bytecode, + address, + is_account: false, + } + } + + pub fn account(bytecode: Vec, address: Address) -> Self { + Self { + bytecode, + address, + is_account: true, + } + } + + pub fn insert(&self, storage: &mut InMemoryStorage) { + let deployer_code_key = get_code_key(&self.address); + storage.set_value(deployer_code_key, hash_bytecode(&self.bytecode)); + if self.is_account { + let is_account_key = get_is_account_key(&self.address); + storage.set_value(is_account_key, u256_to_h256(1_u32.into())); + } + storage.store_factory_dep(hash_bytecode(&self.bytecode), self.bytecode.clone()); + } + + /// Inserts the contracts into the test environment, bypassing the deployer system contract. 
+ pub fn insert_all(contracts: &[Self], storage: &mut InMemoryStorage) { + for contract in contracts { + contract.insert(storage); + } + } +} diff --git a/core/lib/multivm/src/versions/tests.rs b/core/lib/multivm/src/versions/tests.rs new file mode 100644 index 00000000000..c2a04c155fe --- /dev/null +++ b/core/lib/multivm/src/versions/tests.rs @@ -0,0 +1,276 @@ +//! Shadow VM tests. Since there are no real VM implementations in the `vm_interface` crate where `ShadowVm` is defined, +//! these tests are placed here. + +use assert_matches::assert_matches; +use ethabi::Contract; +use zksync_contracts::{ + get_loadnext_contract, load_contract, read_bytecode, + test_contracts::LoadnextContractExecutionParams, +}; +use zksync_test_account::{Account, TxType}; +use zksync_types::{ + block::L2BlockHasher, fee::Fee, AccountTreeId, Address, Execute, L1BatchNumber, L2BlockNumber, + ProtocolVersionId, StorageKey, H256, U256, +}; +use zksync_utils::bytecode::hash_bytecode; + +use crate::{ + interface::{ + storage::{InMemoryStorage, ReadStorage, StorageView}, + utils::{ShadowVm, VmDump}, + ExecutionResult, L1BatchEnv, L2BlockEnv, VmFactory, VmInterface, VmInterfaceExt, + }, + utils::get_max_gas_per_pubdata_byte, + versions::testonly::{ + default_l1_batch, default_system_env, make_account_rich, ContractToDeploy, + }, + vm_fast, + vm_latest::{self, HistoryEnabled}, +}; + +type ReferenceVm = vm_latest::Vm, HistoryEnabled>; +type ShadowedFastVm = crate::vm_instance::ShadowedFastVm; + +fn hash_block(block_env: L2BlockEnv, tx_hashes: &[H256]) -> H256 { + let mut hasher = L2BlockHasher::new( + L2BlockNumber(block_env.number), + block_env.timestamp, + block_env.prev_block_hash, + ); + for &tx_hash in tx_hashes { + hasher.push_tx_hash(tx_hash); + } + hasher.finalize(ProtocolVersionId::latest()) +} + +fn tx_fee(gas_limit: u32) -> Fee { + Fee { + gas_limit: U256::from(gas_limit), + max_fee_per_gas: U256::from(250_000_000), + max_priority_fee_per_gas: U256::from(0), + gas_per_pubdata_limit: U256::from(get_max_gas_per_pubdata_byte( + ProtocolVersionId::latest().into(), + )), + } +} + +#[derive(Debug)] +struct Harness { + alice: Account, + bob: Account, + storage_contract: ContractToDeploy, + storage_contract_abi: Contract, + current_block: L2BlockEnv, +} + +impl Harness { + const STORAGE_CONTRACT_PATH: &'static str = + "etc/contracts-test-data/artifacts-zk/contracts/storage/storage.sol/StorageTester.json"; + const STORAGE_CONTRACT_ADDRESS: Address = Address::repeat_byte(23); + + fn new(l1_batch_env: &L1BatchEnv) -> Self { + Self { + alice: Account::random(), + bob: Account::random(), + storage_contract: ContractToDeploy::new( + read_bytecode(Self::STORAGE_CONTRACT_PATH), + Self::STORAGE_CONTRACT_ADDRESS, + ), + storage_contract_abi: load_contract(Self::STORAGE_CONTRACT_PATH), + current_block: l1_batch_env.first_l2_block, + } + } + + fn setup_storage(&self, storage: &mut InMemoryStorage) { + make_account_rich(storage, &self.alice); + make_account_rich(storage, &self.bob); + + self.storage_contract.insert(storage); + let storage_contract_key = StorageKey::new( + AccountTreeId::new(Self::STORAGE_CONTRACT_ADDRESS), + H256::zero(), + ); + storage.set_value_hashed_enum( + storage_contract_key.hashed_key(), + 999, + H256::from_low_u64_be(42), + ); + } + + fn assert_dump(dump: &mut VmDump) { + assert_eq!(dump.l1_batch_number(), L1BatchNumber(1)); + let tx_counts_per_block: Vec<_> = + dump.l2_blocks.iter().map(|block| block.txs.len()).collect(); + assert_eq!(tx_counts_per_block, [1, 2, 2, 0]); + + let storage_contract_key = 
StorageKey::new( + AccountTreeId::new(Self::STORAGE_CONTRACT_ADDRESS), + H256::zero(), + ); + let value = dump.storage.read_value(&storage_contract_key); + assert_eq!(value, H256::from_low_u64_be(42)); + let enum_index = dump.storage.get_enumeration_index(&storage_contract_key); + assert_eq!(enum_index, Some(999)); + } + + fn new_block(&mut self, vm: &mut impl VmInterface, tx_hashes: &[H256]) { + self.current_block = L2BlockEnv { + number: self.current_block.number + 1, + timestamp: self.current_block.timestamp + 1, + prev_block_hash: hash_block(self.current_block, tx_hashes), + max_virtual_blocks_to_create: self.current_block.max_virtual_blocks_to_create, + }; + vm.start_new_l2_block(self.current_block); + } + + fn execute_on_vm(&mut self, vm: &mut impl VmInterface) { + let transfer_exec = Execute { + contract_address: Some(self.bob.address()), + calldata: vec![], + value: 1_000_000_000.into(), + factory_deps: vec![], + }; + let transfer_to_bob = self + .alice + .get_l2_tx_for_execute(transfer_exec.clone(), None); + let (compression_result, exec_result) = + vm.execute_transaction_with_bytecode_compression(transfer_to_bob.clone(), true); + compression_result.unwrap(); + assert!(!exec_result.result.is_failed(), "{:#?}", exec_result); + + self.new_block(vm, &[transfer_to_bob.hash()]); + + let out_of_gas_transfer = self.bob.get_l2_tx_for_execute( + transfer_exec.clone(), + Some(tx_fee(200_000)), // high enough to pass validation + ); + let (compression_result, exec_result) = + vm.execute_transaction_with_bytecode_compression(out_of_gas_transfer.clone(), true); + compression_result.unwrap(); + assert_matches!(exec_result.result, ExecutionResult::Revert { .. }); + + let write_fn = self.storage_contract_abi.function("simpleWrite").unwrap(); + let simple_write_tx = self.alice.get_l2_tx_for_execute( + Execute { + contract_address: Some(Self::STORAGE_CONTRACT_ADDRESS), + calldata: write_fn.encode_input(&[]).unwrap(), + value: 0.into(), + factory_deps: vec![], + }, + None, + ); + let (compression_result, exec_result) = + vm.execute_transaction_with_bytecode_compression(simple_write_tx.clone(), true); + compression_result.unwrap(); + assert!(!exec_result.result.is_failed(), "{:#?}", exec_result); + + let storage_contract_key = StorageKey::new( + AccountTreeId::new(Self::STORAGE_CONTRACT_ADDRESS), + H256::zero(), + ); + let storage_logs = &exec_result.logs.storage_logs; + assert!(storage_logs.iter().any(|log| { + log.log.key == storage_contract_key && log.previous_value == H256::from_low_u64_be(42) + })); + + self.new_block(vm, &[out_of_gas_transfer.hash(), simple_write_tx.hash()]); + + let deploy_tx = self.alice.get_deploy_tx( + &get_loadnext_contract().bytecode, + Some(&[ethabi::Token::Uint(100.into())]), + TxType::L2, + ); + let (compression_result, exec_result) = + vm.execute_transaction_with_bytecode_compression(deploy_tx.tx.clone(), true); + compression_result.unwrap(); + assert!(!exec_result.result.is_failed(), "{:#?}", exec_result); + + let load_test_tx = self.bob.get_loadnext_transaction( + deploy_tx.address, + LoadnextContractExecutionParams::default(), + TxType::L2, + ); + let (compression_result, exec_result) = + vm.execute_transaction_with_bytecode_compression(load_test_tx.clone(), true); + compression_result.unwrap(); + assert!(!exec_result.result.is_failed(), "{:#?}", exec_result); + + self.new_block(vm, &[deploy_tx.tx.hash(), load_test_tx.hash()]); + vm.finish_batch(); + } +} + +fn sanity_check_vm() -> (Vm, Harness) +where + Vm: VmFactory>, +{ + let system_env = default_system_env(); + 
let l1_batch_env = default_l1_batch(L1BatchNumber(1));
+    let mut storage = InMemoryStorage::with_system_contracts(hash_bytecode);
+    let mut harness = Harness::new(&l1_batch_env);
+    harness.setup_storage(&mut storage);
+
+    let storage = StorageView::new(storage).to_rc_ptr();
+    let mut vm = Vm::new(l1_batch_env, system_env, storage);
+    harness.execute_on_vm(&mut vm);
+    (vm, harness)
+}
+
+#[test]
+fn sanity_check_harness() {
+    sanity_check_vm::<ReferenceVm>();
+}
+
+#[test]
+fn sanity_check_harness_on_new_vm() {
+    sanity_check_vm::<vm_fast::Vm<_>>();
+}
+
+#[test]
+fn sanity_check_shadow_vm() {
+    let system_env = default_system_env();
+    let l1_batch_env = default_l1_batch(L1BatchNumber(1));
+    let mut storage = InMemoryStorage::with_system_contracts(hash_bytecode);
+    let mut harness = Harness::new(&l1_batch_env);
+    harness.setup_storage(&mut storage);
+
+    // We need separate storage views since they are mutated by the VM execution
+    let main_storage = StorageView::new(&storage).to_rc_ptr();
+    let shadow_storage = StorageView::new(&storage).to_rc_ptr();
+    let mut vm = ShadowVm::<_, ReferenceVm<_>, ReferenceVm<_>>::with_custom_shadow(
+        l1_batch_env,
+        system_env,
+        main_storage,
+        shadow_storage,
+    );
+    harness.execute_on_vm(&mut vm);
+}
+
+#[test]
+fn shadow_vm_basics() {
+    let (vm, harness) = sanity_check_vm::<ShadowedFastVm>();
+    let mut dump = vm.dump_state();
+    Harness::assert_dump(&mut dump);
+
+    // Test standard playback functionality.
+    let replayed_dump = dump.clone().play_back::<ReferenceVm<_>>().dump_state();
+    pretty_assertions::assert_eq!(replayed_dump, dump);
+
+    // Check that the VM executes identically when reading from the original storage and one restored from the dump.
+    let mut storage = InMemoryStorage::with_system_contracts(hash_bytecode);
+    harness.setup_storage(&mut storage);
+    let storage = StorageView::new(storage).to_rc_ptr();
+
+    let vm = dump
+        .clone()
+        .play_back_custom(|l1_batch_env, system_env, dump_storage| {
+            ShadowVm::<_, ReferenceVm, ReferenceVm<_>>::with_custom_shadow(
+                l1_batch_env,
+                system_env,
+                storage,
+                dump_storage,
+            )
+        });
+    let new_dump = vm.dump_state();
+    pretty_assertions::assert_eq!(new_dump, dump);
+}
diff --git a/core/lib/multivm/src/versions/vm_1_3_2/test_utils.rs b/core/lib/multivm/src/versions/vm_1_3_2/test_utils.rs
index a29e1101d52..34c70e0f9c4 100644
--- a/core/lib/multivm/src/versions/vm_1_3_2/test_utils.rs
+++ b/core/lib/multivm/src/versions/vm_1_3_2/test_utils.rs
@@ -153,7 +153,7 @@ pub fn get_create_execute(code: &[u8], calldata: &[u8]) -> Execute {
         .expect("failed to encode parameters");

     Execute {
-        contract_address: CONTRACT_DEPLOYER_ADDRESS,
+        contract_address: Some(CONTRACT_DEPLOYER_ADDRESS),
         calldata,
         factory_deps: vec![code.to_vec()],
         value: U256::zero(),
diff --git a/core/lib/multivm/src/versions/vm_1_3_2/transaction_data.rs b/core/lib/multivm/src/versions/vm_1_3_2/transaction_data.rs
index 788a52206e8..0285320daa3 100644
--- a/core/lib/multivm/src/versions/vm_1_3_2/transaction_data.rs
+++ b/core/lib/multivm/src/versions/vm_1_3_2/transaction_data.rs
@@ -22,7 +22,7 @@ use crate::vm_1_3_2::vm_with_bootloader::{
 pub struct TransactionData {
     pub tx_type: u8,
     pub from: Address,
-    pub to: Address,
+    pub to: Option<Address>,
     pub gas_limit: U256,
     pub pubdata_price_limit: U256,
     pub max_fee_per_gas: U256,
@@ -170,7 +170,7 @@ impl TransactionData {
         encode(&[Token::Tuple(vec![
             Token::Uint(U256::from_big_endian(&self.tx_type.to_be_bytes())),
             Token::Address(self.from),
-            Token::Address(self.to),
+            Token::Address(self.to.unwrap_or_default()),
             Token::Uint(self.gas_limit),
             Token::Uint(self.pubdata_price_limit),
             Token::Uint(self.max_fee_per_gas),
@@ -593,7 +593,7 @@ mod tests {
         let transaction = TransactionData {
             tx_type: 113,
             from: Address::random(),
-            to: Address::random(),
+            to: Some(Address::random()),
             gas_limit: U256::from(1u32),
             pubdata_price_limit: U256::from(1u32),
             max_fee_per_gas: U256::from(1u32),
diff --git a/core/lib/multivm/src/versions/vm_1_3_2/vm.rs b/core/lib/multivm/src/versions/vm_1_3_2/vm.rs
index 8068e4847b8..89196788a76 100644
--- a/core/lib/multivm/src/versions/vm_1_3_2/vm.rs
+++ b/core/lib/multivm/src/versions/vm_1_3_2/vm.rs
@@ -22,6 +22,25 @@ pub struct Vm {
     pub(crate) system_env: SystemEnv,
 }

+impl Vm {
+    pub(crate) fn record_vm_memory_metrics(&self) -> VmMemoryMetrics {
+        VmMemoryMetrics {
+            event_sink_inner: self.vm.state.event_sink.get_size(),
+            event_sink_history: self.vm.state.event_sink.get_history_size(),
+            memory_inner: self.vm.state.memory.get_size(),
+            memory_history: self.vm.state.memory.get_history_size(),
+            decommittment_processor_inner: self.vm.state.decommittment_processor.get_size(),
+            decommittment_processor_history: self
+                .vm
+                .state
+                .decommittment_processor
+                .get_history_size(),
+            storage_inner: self.vm.state.storage.get_size(),
+            storage_history: self.vm.state.storage.get_history_size(),
+        }
+    }
+}
+
 impl VmInterface for Vm {
     type TracerDispatcher = TracerDispatcher;

@@ -36,7 +55,7 @@ impl VmInterface for Vm {
     fn inspect(
         &mut self,
-        tracer: Self::TracerDispatcher,
+        tracer: &mut Self::TracerDispatcher,
         execution_mode: VmExecutionMode,
     ) -> VmExecutionResultAndLogs {
         if let Some(storage_invocations) = tracer.storage_invocations {
@@ -80,7 +99,7 @@ impl VmInterface for Vm {
     fn inspect_transaction_with_bytecode_compression(
         &mut self,
-        tracer: Self::TracerDispatcher,
+        tracer: &mut Self::TracerDispatcher,
         tx: Transaction,
         with_compression: bool,
     ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) {
@@ -160,23 +179,6 @@ impl VmInterface for Vm {
         }
     }

-    fn record_vm_memory_metrics(&self) -> VmMemoryMetrics {
-        VmMemoryMetrics {
-            event_sink_inner: self.vm.state.event_sink.get_size(),
-            event_sink_history: self.vm.state.event_sink.get_history_size(),
-            memory_inner: self.vm.state.memory.get_size(),
-            memory_history: self.vm.state.memory.get_history_size(),
-            decommittment_processor_inner: self.vm.state.decommittment_processor.get_size(),
-            decommittment_processor_history: self
-                .vm
-                .state
-                .decommittment_processor
-                .get_history_size(),
-            storage_inner: self.vm.state.storage.get_size(),
-            storage_history: self.vm.state.storage.get_history_size(),
-        }
-    }
-
     fn finish_batch(&mut self) -> FinishedL1Batch {
         self.vm
             .execute_till_block_end(
diff --git a/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/utils.rs
index 393eb043cb7..1acf75b27e1 100644
--- a/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/utils.rs
+++ b/core/lib/multivm/src/versions/vm_1_4_1/bootloader_state/utils.rs
@@ -167,8 +167,8 @@ pub(super) fn assemble_tx_meta(execution_mode: TxExecutionMode, execute_tx: bool
     // Set 0 byte (execution mode)
     output[0] = match execution_mode {
         TxExecutionMode::VerifyExecute => 0x00,
-
TxExecutionMode::EstimateFee { .. } => 0x00, - TxExecutionMode::EthCall { .. } => 0x02, + TxExecutionMode::EstimateFee => 0x00, + TxExecutionMode::EthCall => 0x02, }; // Set 31 byte (marker for tx execution) diff --git a/core/lib/multivm/src/versions/vm_1_4_1/implementation/execution.rs b/core/lib/multivm/src/versions/vm_1_4_1/implementation/execution.rs index db5aaa783df..2160c4b56a0 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/implementation/execution.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/implementation/execution.rs @@ -1,3 +1,5 @@ +use std::mem; + use zk_evm_1_4_1::aux_structures::Timestamp; use crate::{ @@ -20,7 +22,7 @@ use crate::{ impl Vm { pub(crate) fn inspect_inner( &mut self, - dispatcher: TracerDispatcher, + dispatcher: &mut TracerDispatcher, execution_mode: VmExecutionMode, custom_pubdata_tracer: Option>, ) -> VmExecutionResultAndLogs { @@ -44,7 +46,7 @@ impl Vm { /// Collect the result from the default tracers. fn inspect_and_collect_results( &mut self, - dispatcher: TracerDispatcher, + dispatcher: &mut TracerDispatcher, execution_mode: VmExecutionMode, with_refund_tracer: bool, custom_pubdata_tracer: Option>, @@ -54,7 +56,7 @@ impl Vm { let mut tx_tracer: DefaultExecutionTracer = DefaultExecutionTracer::new( self.system_env.default_validation_computational_gas_limit, execution_mode, - dispatcher, + mem::take(dispatcher), self.storage.clone(), refund_tracers, custom_pubdata_tracer @@ -90,6 +92,7 @@ impl Vm { circuit_statistic_from_cycles(tx_tracer.circuits_tracer.statistics), ); let result = tx_tracer.result_tracer.into_result(); + *dispatcher = tx_tracer.dispatcher; let result = VmExecutionResultAndLogs { result, diff --git a/core/lib/multivm/src/versions/vm_1_4_1/implementation/statistics.rs b/core/lib/multivm/src/versions/vm_1_4_1/implementation/statistics.rs index 71ae20d4406..3a3b22ea246 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/implementation/statistics.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/implementation/statistics.rs @@ -57,7 +57,7 @@ impl Vm { } /// Returns the info about all oracles' sizes. 
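// Note: the `inspect`/`inspect_inner` hunks above switch the tracer dispatcher
// from pass-by-value to `&mut`, so the caller retains ownership across runs;
// internally the dispatcher is moved into the tracer via `mem::take` and
// written back once execution finishes. A minimal sketch of that
// take-and-restore pattern (illustrative types, not the crate's real ones):
//
//     use std::mem;
//
//     #[derive(Default)]
//     struct Dispatcher(Vec<String>);
//
//     struct Tracer {
//         dispatcher: Dispatcher,
//     }
//
//     fn run(dispatcher: &mut Dispatcher) {
//         // Move the dispatcher out, leaving a cheap `Default` in its place...
//         let tracer = Tracer { dispatcher: mem::take(dispatcher) };
//         // ...drive the VM with `tracer`, then hand the dispatcher back.
//         *dispatcher = tracer.dispatcher;
//     }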
- pub(crate) fn record_vm_memory_metrics_inner(&self) -> VmMemoryMetrics { + pub(crate) fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { VmMemoryMetrics { event_sink_inner: self.state.event_sink.get_size(), event_sink_history: self.state.event_sink.get_history_size(), diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/block_tip.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/block_tip.rs deleted file mode 100644 index ba699e7558b..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/block_tip.rs +++ /dev/null @@ -1,284 +0,0 @@ -use std::borrow::BorrowMut; - -use ethabi::Token; -use zk_evm_1_4_1::{ - aux_structures::Timestamp, zkevm_opcode_defs::system_params::MAX_PUBDATA_PER_BLOCK, -}; -use zksync_contracts::load_sys_contract; -use zksync_system_constants::{ - CONTRACT_FORCE_DEPLOYER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS, -}; -use zksync_types::{ - commitment::SerializeCommitment, get_code_key, l2_to_l1_log::L2ToL1Log, - writes::StateDiffRecord, Address, Execute, H256, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use super::utils::{get_complex_upgrade_abi, read_complex_upgrade}; -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_1::{ - constants::BOOTLOADER_BATCH_TIP_OVERHEAD, - tests::tester::{get_empty_storage, InMemoryStorageView, VmTesterBuilder}, - tracers::PubdataTracer, - HistoryEnabled, TracerDispatcher, - }, -}; - -#[derive(Debug, Clone, Default)] -struct L1MessengerTestData { - l2_to_l1_logs: usize, - messages: Vec>, - bytecodes: Vec>, - state_diffs: Vec, -} - -struct MimicCallInfo { - to: Address, - who_to_mimic: Address, - data: Vec, -} - -fn populate_mimic_calls(data: L1MessengerTestData) -> Vec { - let complex_upgrade = get_complex_upgrade_abi(); - let l1_messenger = load_sys_contract("L1Messenger"); - - let logs_mimic_calls = (0..data.l2_to_l1_logs).map(|_| MimicCallInfo { - to: L1_MESSENGER_ADDRESS, - who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, - data: l1_messenger - .function("sendL2ToL1Log") - .unwrap() - .encode_input(&[ - Token::Bool(false), - Token::FixedBytes(H256::random().0.to_vec()), - Token::FixedBytes(H256::random().0.to_vec()), - ]) - .unwrap(), - }); - let messages_mimic_calls = data.messages.iter().map(|message| MimicCallInfo { - to: L1_MESSENGER_ADDRESS, - who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, - data: l1_messenger - .function("sendToL1") - .unwrap() - .encode_input(&[Token::Bytes(message.clone())]) - .unwrap(), - }); - let bytecodes_mimic_calls = data.bytecodes.iter().map(|bytecode| MimicCallInfo { - to: L1_MESSENGER_ADDRESS, - who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, - data: l1_messenger - .function("requestBytecodeL1Publication") - .unwrap() - .encode_input(&[Token::FixedBytes(hash_bytecode(bytecode).0.to_vec())]) - .unwrap(), - }); - - let encoded_calls = logs_mimic_calls - .chain(messages_mimic_calls) - .chain(bytecodes_mimic_calls) - .map(|call| { - Token::Tuple(vec![ - Token::Address(call.to), - Token::Address(call.who_to_mimic), - Token::Bytes(call.data), - ]) - }) - .collect::>(); - - complex_upgrade - .function("mimicCalls") - .unwrap() - .encode_input(&[Token::Array(encoded_calls)]) - .unwrap() -} - -fn execute_test(test_data: L1MessengerTestData) -> u32 { - let mut storage = get_empty_storage(); - let complex_upgrade_code = read_complex_upgrade(); - - // For this test we'll just put the bytecode onto the force deployer address - storage.borrow_mut().set_value( - 
get_code_key(&CONTRACT_FORCE_DEPLOYER_ADDRESS), - hash_bytecode(&complex_upgrade_code), - ); - storage - .borrow_mut() - .store_factory_dep(hash_bytecode(&complex_upgrade_code), complex_upgrade_code); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_storage(storage) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let bytecodes = test_data - .bytecodes - .iter() - .map(|bytecode| { - let hash = hash_bytecode(bytecode); - let words = bytes_to_be_words(bytecode.clone()); - (h256_to_u256(hash), words) - }) - .collect(); - vm.vm - .state - .decommittment_processor - .populate(bytecodes, Timestamp(0)); - - let data = populate_mimic_calls(test_data.clone()); - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: CONTRACT_FORCE_DEPLOYER_ADDRESS, - calldata: data, - value: U256::zero(), - factory_deps: None, - }, - None, - ); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - // Now we count how much ergs were spent at the end of the batch - // It is assumed that the top level frame is the bootloader - - let ergs_before = vm.vm.state.local_state.callstack.current.ergs_remaining; - - // We ensure that indeed the provided state diffs are used - let pubdata_tracer = PubdataTracer::::new_with_forced_state_diffs( - vm.vm.batch_env.clone(), - VmExecutionMode::Batch, - test_data.state_diffs, - ); - - let result = vm.vm.inspect_inner( - TracerDispatcher::default(), - VmExecutionMode::Batch, - Some(pubdata_tracer), - ); - - assert!(!result.result.is_failed(), "Batch wasn't successful"); - - let ergs_after = vm.vm.state.local_state.callstack.current.ergs_remaining; - - ergs_before - ergs_after -} - -fn generate_state_diffs( - repeated_writes: bool, - small_diff: bool, - number_of_state_diffs: usize, -) -> Vec { - (0..number_of_state_diffs) - .map(|i| { - let address = Address::from_low_u64_be(i as u64); - let key = U256::from(i); - let enumeration_index = if repeated_writes { i + 1 } else { 0 }; - - let (initial_value, final_value) = if small_diff { - // As small as it gets, one byte to denote zeroing out the value - (U256::from(1), U256::from(0)) - } else { - // As large as it gets - (U256::from(0), U256::from(2).pow(255.into())) - }; - - StateDiffRecord { - address, - key, - derived_key: u256_to_h256(i.into()).0, - enumeration_index: enumeration_index as u64, - initial_value, - final_value, - } - }) - .collect() -} - -#[test] -fn test_dry_run_upper_bound() { - // We are re-using the `ComplexUpgrade` contract as it already has the `mimicCall` functionality. - // To get the upper bound, we'll try to do the following: - // 1. Max number of logs. - // 2. Lots of small L2->L1 messages / one large L2->L1 message. - // 3. Lots of small bytecodes / one large bytecode. - // 4. Lots of storage slot updates. 
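// Note: each scenario below sizes its payload as "as many items as fit into
// `MAX_PUBDATA_PER_BLOCK`", dividing by the per-item pubdata footprint. A
// sketch of that arithmetic (byte counts taken from the inline comments
// below; the constant itself comes from `zk_evm_1_4_1`):
//
//     const SMALL_REPEATED_WRITE_BYTES: usize = 5; // 1-byte enum index + 4-byte key
//     const BIG_REPEATED_WRITE_BYTES: usize = 32; // ~32 bytes per encoded diff
//     const SMALL_INITIAL_WRITE_BYTES: usize = 37; // 32-byte derived key + ~5-byte value
//     const BIG_INITIAL_WRITE_BYTES: usize = 64; // 32-byte derived key + 32-byte value
//
//     let max_small_initial_writes = MAX_PUBDATA_PER_BLOCK as usize / SMALL_INITIAL_WRITE_BYTES;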
- - let max_logs = execute_test(L1MessengerTestData { - l2_to_l1_logs: L2ToL1Log::MIN_L2_L1_LOGS_TREE_SIZE, - ..Default::default() - }); - - let max_messages = execute_test(L1MessengerTestData { - // Each L2->L1 message is accompanied by a Log, so the max number of pubdata is bound by it - messages: vec![vec![0; 0]; MAX_PUBDATA_PER_BLOCK as usize / L2ToL1Log::SERIALIZED_SIZE], - ..Default::default() - }); - - let long_message = execute_test(L1MessengerTestData { - // Each L2->L1 message is accompanied by a Log, so the max number of pubdata is bound by it - messages: vec![vec![0; MAX_PUBDATA_PER_BLOCK as usize]; 1], - ..Default::default() - }); - - let max_bytecodes = execute_test(L1MessengerTestData { - // Each bytecode must be at least 32 bytes long - bytecodes: vec![vec![0; 32]; MAX_PUBDATA_PER_BLOCK as usize / 32], - ..Default::default() - }); - - let long_bytecode = execute_test(L1MessengerTestData { - // We have to add 48 since a valid bytecode must have an odd number of 32 byte words - bytecodes: vec![vec![0; MAX_PUBDATA_PER_BLOCK as usize + 48]; 1], - ..Default::default() - }); - - let lots_of_small_repeated_writes = execute_test(L1MessengerTestData { - // In theory each state diff can require only 5 bytes to be published (enum index + 4 bytes for the key) - state_diffs: generate_state_diffs(true, true, MAX_PUBDATA_PER_BLOCK as usize / 5), - ..Default::default() - }); - - let lots_of_big_repeated_writes = execute_test(L1MessengerTestData { - // Each big write will approximately require 32 bytes to encode - state_diffs: generate_state_diffs(true, false, MAX_PUBDATA_PER_BLOCK as usize / 32), - ..Default::default() - }); - - let lots_of_small_initial_writes = execute_test(L1MessengerTestData { - // Each initial write will take at least 32 bytes for derived key + 5 bytes for value - state_diffs: generate_state_diffs(false, true, MAX_PUBDATA_PER_BLOCK as usize / 37), - ..Default::default() - }); - - let lots_of_large_initial_writes = execute_test(L1MessengerTestData { - // Each big write will take at least 32 bytes for derived key + 32 bytes for value - state_diffs: generate_state_diffs(false, false, MAX_PUBDATA_PER_BLOCK as usize / 64), - ..Default::default() - }); - - let max_used_gas = vec![ - max_logs, - max_messages, - long_message, - max_bytecodes, - long_bytecode, - lots_of_small_repeated_writes, - lots_of_big_repeated_writes, - lots_of_small_initial_writes, - lots_of_large_initial_writes, - ] - .into_iter() - .max() - .unwrap(); - - // We use 2x overhead for the batch tip compared to the worst estimated scenario. 
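// Note: in other words, the constant must cover twice the worst measured
// scenario; the assertion below is the tripwire that forces a bump of
// `BOOTLOADER_BATCH_TIP_OVERHEAD` whenever `max_used_gas * 2` outgrows it.
// An equivalent way to state the margin check (integer arithmetic):
//
//     debug_assert!(BOOTLOADER_BATCH_TIP_OVERHEAD / max_used_gas >= 2);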
- assert!( - max_used_gas * 2 <= BOOTLOADER_BATCH_TIP_OVERHEAD, - "BOOTLOADER_BATCH_TIP_OVERHEAD is too low" - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/bootloader.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/bootloader.rs deleted file mode 100644 index 47e047ebbf7..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/bootloader.rs +++ /dev/null @@ -1,56 +0,0 @@ -use zksync_types::U256; - -use crate::{ - interface::{ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_1::{ - constants::BOOTLOADER_HEAP_PAGE, - tests::{ - tester::VmTesterBuilder, - utils::{get_bootloader, verify_required_memory, BASE_SYSTEM_CONTRACTS}, - }, - HistoryEnabled, - }, -}; - -#[test] -fn test_dummy_bootloader() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!result.result.is_failed()); - - let correct_first_cell = U256::from_str_radix("123123123", 16).unwrap(); - verify_required_memory( - &vm.vm.state, - vec![(correct_first_cell, BOOTLOADER_HEAP_PAGE, 0)], - ); -} - -#[test] -fn test_bootloader_out_of_gas() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_gas_limit(10) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let res = vm.vm.execute(VmExecutionMode::Batch); - - assert_matches!( - res.result, - ExecutionResult::Halt { - reason: Halt::BootloaderOutOfGas - } - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/bytecode_publishing.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/bytecode_publishing.rs deleted file mode 100644 index 9db5e7326e7..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/bytecode_publishing.rs +++ /dev/null @@ -1,43 +0,0 @@ -use zksync_types::event::extract_long_l2_to_l1_messages; -use zksync_utils::bytecode::compress_bytecode; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_1::{ - tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - HistoryEnabled, - }, -}; - -#[test] -fn test_bytecode_publishing() { - // In this test, we aim to ensure that the contents of the compressed bytecodes - // are included as part of the L2->L1 long messages - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let compressed_bytecode = compress_bytecode(&counter).unwrap(); - - let DeployContractsTx { tx, .. 
} = account.get_deploy_tx(&counter, None, TxType::L2);
-    vm.vm.push_transaction(tx);
-    let result = vm.vm.execute(VmExecutionMode::OneTx);
-    assert!(!result.result.is_failed(), "Transaction wasn't successful");
-
-    vm.vm.execute(VmExecutionMode::Batch);
-
-    let state = vm.vm.get_current_execution_state();
-    let long_messages = extract_long_l2_to_l1_messages(&state.events);
-    assert!(
-        long_messages.contains(&compressed_bytecode),
-        "Bytecode not published"
-    );
-}
diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/call_tracer.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/call_tracer.rs
deleted file mode 100644
index 1a4c026a23f..00000000000
--- a/core/lib/multivm/src/versions/vm_1_4_1/tests/call_tracer.rs
+++ /dev/null
@@ -1,92 +0,0 @@
-use std::sync::Arc;
-
-use once_cell::sync::OnceCell;
-use zksync_types::{Address, Execute};
-
-use crate::{
-    interface::{TxExecutionMode, VmExecutionMode, VmInterface},
-    tracers::CallTracer,
-    vm_1_4_1::{
-        constants::BLOCK_GAS_LIMIT,
-        tests::{
-            tester::VmTesterBuilder,
-            utils::{read_max_depth_contract, read_test_contract},
-        },
-        HistoryEnabled, ToTracerPointer,
-    },
-};
-
-// This test is ultra slow, so it's ignored by default.
-#[test]
-#[ignore]
-fn test_max_depth() {
-    let contract = read_max_depth_contract();
-    let address = Address::random();
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_random_rich_accounts(1)
-        .with_deployer()
-        .with_gas_limit(BLOCK_GAS_LIMIT)
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_custom_contracts(vec![(contract, address, true)])
-        .build();
-
-    let account = &mut vm.rich_accounts[0];
-    let tx = account.get_l2_tx_for_execute(
-        Execute {
-            contract_address: address,
-            calldata: vec![],
-            value: Default::default(),
-            factory_deps: None,
-        },
-        None,
-    );
-
-    let result = Arc::new(OnceCell::new());
-    let call_tracer = CallTracer::new(result.clone()).into_tracer_pointer();
-    vm.vm.push_transaction(tx);
-    let res = vm.vm.inspect(call_tracer.into(), VmExecutionMode::OneTx);
-    assert!(result.get().is_some());
-    assert!(res.result.is_failed());
-}
-
-#[test]
-fn test_basic_behavior() {
-    let contract = read_test_contract();
-    let address = Address::random();
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_random_rich_accounts(1)
-        .with_deployer()
-        .with_gas_limit(BLOCK_GAS_LIMIT)
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_custom_contracts(vec![(contract, address, true)])
-        .build();
-
-    let increment_by_6_calldata =
-        "7cf5dab00000000000000000000000000000000000000000000000000000000000000006";
-
-    let account = &mut vm.rich_accounts[0];
-    let tx = account.get_l2_tx_for_execute(
-        Execute {
-            contract_address: address,
-            calldata: hex::decode(increment_by_6_calldata).unwrap(),
-            value: Default::default(),
-            factory_deps: None,
-        },
-        None,
-    );
-
-    let result = Arc::new(OnceCell::new());
-    let call_tracer = CallTracer::new(result.clone()).into_tracer_pointer();
-    vm.vm.push_transaction(tx);
-    let res = vm.vm.inspect(call_tracer.into(), VmExecutionMode::OneTx);
-
-    let call_tracer_result = result.get().unwrap();
-
-    assert_eq!(call_tracer_result.len(), 1);
-    // Expect plenty of subcalls underneath.
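// Note: both tests above share one idiom: `CallTracer` writes its call tree
// into an `Arc<OnceCell<_>>`, so the test keeps a handle that only becomes
// readable after the VM run completes. A condensed sketch, reusing the
// crate-internal helpers from the deleted file:
//
//     let result = Arc::new(OnceCell::new());
//     let tracer = CallTracer::new(result.clone()).into_tracer_pointer();
//     vm.vm.push_transaction(tx);
//     let res = vm.vm.inspect(tracer.into(), VmExecutionMode::OneTx);
//     let calls = result.get().expect("populated after execution");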
- let subcall = &call_tracer_result[0].calls; - assert!(subcall.len() > 10); - assert!(!res.result.is_failed()); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/circuits.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/circuits.rs deleted file mode 100644 index ecc2fdfe6c0..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/circuits.rs +++ /dev/null @@ -1,69 +0,0 @@ -use zksync_types::{Address, Execute, U256}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_1::{constants::BLOCK_GAS_LIMIT, tests::tester::VmTesterBuilder, HistoryEnabled}, -}; - -// Checks that estimated number of circuits for simple transfer doesn't differ much -// from hardcoded expected value. -#[test] -fn test_circuits() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Address::random(), - calldata: Vec::new(), - value: U256::from(1u8), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let res = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let s = res.statistics.circuit_statistic; - // Check `circuit_statistic`. - const EXPECTED: [f32; 11] = [ - 1.1979, 0.1390, 1.5455, 0.0031, 1.0573, 0.00059, 0.00226, 0.00077, 0.1195, 0.1429, 0.0, - ]; - let actual = [ - (s.main_vm, "main_vm"), - (s.ram_permutation, "ram_permutation"), - (s.storage_application, "storage_application"), - (s.storage_sorter, "storage_sorter"), - (s.code_decommitter, "code_decommitter"), - (s.code_decommitter_sorter, "code_decommitter_sorter"), - (s.log_demuxer, "log_demuxer"), - (s.events_sorter, "events_sorter"), - (s.keccak256, "keccak256"), - (s.ecrecover, "ecrecover"), - (s.sha256, "sha256"), - ]; - for ((actual, name), expected) in actual.iter().zip(EXPECTED) { - if expected == 0.0 { - assert_eq!( - *actual, expected, - "Check failed for {}, expected {}, actual {}", - name, expected, actual - ); - } else { - let diff = (actual - expected) / expected; - assert!( - diff.abs() < 0.1, - "Check failed for {}, expected {}, actual {}", - name, - expected, - actual - ); - } - } -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/default_aa.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/default_aa.rs deleted file mode 100644 index be8e253c6d8..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/default_aa.rs +++ /dev/null @@ -1,78 +0,0 @@ -use zksync_system_constants::L2_ETH_TOKEN_ADDRESS; -use zksync_types::{ - get_code_key, get_known_code_key, get_nonce_key, - system_contracts::{DEPLOYMENT_NONCE_INCREMENT, TX_NONCE_INCREMENT}, - AccountTreeId, U256, -}; -use zksync_utils::u256_to_h256; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_1::{ - tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::{get_balance, read_test_contract, verify_required_storage}, - }, - utils::fee::get_batch_base_fee, - HistoryEnabled, - }, -}; - -#[test] -fn test_default_aa_interaction() { - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. 
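// Note on `test_circuits` above: zero-valued expectations are compared
// exactly, while non-zero ones get a 10% relative tolerance. The check, as a
// standalone sketch:
//
//     fn assert_circuit_close(actual: f32, expected: f32, name: &str) {
//         if expected == 0.0 {
//             assert_eq!(actual, expected, "check failed for {name}");
//         } else {
//             let rel = (actual - expected) / expected;
//             assert!(rel.abs() < 0.1, "check failed for {name}");
//         }
//     }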
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let DeployContractsTx { - tx, - bytecode_hash, - address, - } = account.get_deploy_tx(&counter, None, TxType::L2); - let maximal_fee = tx.gas_limit() * get_batch_base_fee(&vm.vm.batch_env); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - vm.vm.get_current_execution_state(); - - // Both deployment and ordinary nonce should be incremented by one. - let account_nonce_key = get_nonce_key(&account.address); - let expected_nonce = TX_NONCE_INCREMENT + DEPLOYMENT_NONCE_INCREMENT; - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&bytecode_hash); - - // The contract should be deployed successfully. - let account_code_key = get_code_key(&address); - - let expected_slots = vec![ - (u256_to_h256(expected_nonce), account_nonce_key), - (u256_to_h256(U256::from(1u32)), known_codes_key), - (bytecode_hash, account_code_key), - ]; - - verify_required_storage(&vm.vm.state, expected_slots); - - let expected_fee = maximal_fee - - U256::from(result.refunds.gas_refunded) - * U256::from(get_batch_base_fee(&vm.vm.batch_env)); - let operator_balance = get_balance( - AccountTreeId::new(L2_ETH_TOKEN_ADDRESS), - &vm.fee_account, - vm.vm.state.storage.storage.get_ptr(), - ); - - assert_eq!( - operator_balance, expected_fee, - "Operator did not receive his fee" - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/gas_limit.rs deleted file mode 100644 index 0ec921450da..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/gas_limit.rs +++ /dev/null @@ -1,47 +0,0 @@ -use zksync_types::{fee::Fee, Execute}; - -use crate::{ - interface::{TxExecutionMode, VmInterface}, - vm_1_4_1::{ - constants::{BOOTLOADER_HEAP_PAGE, TX_DESCRIPTION_OFFSET, TX_GAS_LIMIT_OFFSET}, - tests::tester::VmTesterBuilder, - HistoryDisabled, - }, -}; - -/// Checks that `TX_GAS_LIMIT_OFFSET` constant is correct. 
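// Note on the fee accounting in `test_default_aa_interaction` above: the
// operator's expected revenue is the maximum the user could pay minus the
// refunded portion, both priced at the batch base fee. As a worked sketch,
// mirroring the deleted assertions:
//
//     let maximal_fee = tx.gas_limit() * get_batch_base_fee(&vm.vm.batch_env);
//     let expected_fee = maximal_fee
//         - U256::from(result.refunds.gas_refunded)
//             * U256::from(get_batch_base_fee(&vm.vm.batch_env));
//
// which is what the test compares against the operator's balance in the L2
// ETH token.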
-#[test] -fn test_tx_gas_limit_offset() { - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let gas_limit = 9999.into(); - let tx = vm.rich_accounts[0].get_l2_tx_for_execute( - Execute { - contract_address: Default::default(), - calldata: vec![], - value: Default::default(), - factory_deps: None, - }, - Some(Fee { - gas_limit, - ..Default::default() - }), - ); - - vm.vm.push_transaction(tx); - - let gas_limit_from_memory = vm - .vm - .state - .memory - .read_slot( - BOOTLOADER_HEAP_PAGE as usize, - TX_DESCRIPTION_OFFSET + TX_GAS_LIMIT_OFFSET, - ) - .value; - assert_eq!(gas_limit_from_memory, gas_limit); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/get_used_contracts.rs deleted file mode 100644 index a7cbcd8e295..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/get_used_contracts.rs +++ /dev/null @@ -1,109 +0,0 @@ -use std::collections::{HashMap, HashSet}; - -use itertools::Itertools; -use crate::interface::storage::WriteStorage; -use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; -use zksync_test_account::Account; -use zksync_types::{Execute, U256}; -use zksync_utils::{bytecode::hash_bytecode, h256_to_u256}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_1::{ - tests::{ - tester::{TxType, VmTesterBuilder}, - utils::{read_test_contract, BASE_SYSTEM_CONTRACTS}, - }, - HistoryDisabled, Vm, - }, - HistoryMode, -}; - -#[test] -fn test_get_used_contracts() { - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - assert!(known_bytecodes_without_aa_code(&vm.vm).is_empty()); - - // create and push and execute some not-empty factory deps transaction with success status - // to check that `get_used_contracts()` updates - let contract_code = read_test_contract(); - let mut account = Account::random(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 0 }); - vm.vm.push_transaction(tx.tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - assert!(vm - .vm - .get_used_contracts() - .contains(&h256_to_u256(tx.bytecode_hash))); - - // Note: `Default_AA` will be in the list of used contracts if L2 tx is used - assert_eq!( - vm.vm - .get_used_contracts() - .into_iter() - .collect::>(), - known_bytecodes_without_aa_code(&vm.vm) - .keys() - .cloned() - .collect::>() - ); - - // create push and execute some non-empty factory deps transaction that fails - // (`known_bytecodes` will be updated but we expect `get_used_contracts()` to not be updated) - - let calldata = [1, 2, 3]; - let big_calldata: Vec = calldata - .iter() - .cycle() - .take(calldata.len() * 1024) - .cloned() - .collect(); - let account2 = Account::random(); - let tx2 = account2.get_l1_tx( - Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, - calldata: big_calldata, - value: Default::default(), - factory_deps: Some(vec![vec![1; 32]]), - }, - 1, - ); - - vm.vm.push_transaction(tx2.clone()); - - let res2 = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(res2.result.is_failed()); - - for factory_dep in tx2.execute.factory_deps.unwrap() { - let hash = hash_bytecode(&factory_dep); - let hash_to_u256 = h256_to_u256(hash); - assert!(known_bytecodes_without_aa_code(&vm.vm) - .keys() 
- .contains(&hash_to_u256)); - assert!(!vm.vm.get_used_contracts().contains(&hash_to_u256)); - } -} - -fn known_bytecodes_without_aa_code( - vm: &Vm, -) -> HashMap> { - let mut known_bytecodes_without_aa_code = vm - .state - .decommittment_processor - .known_bytecodes - .inner() - .clone(); - - known_bytecodes_without_aa_code - .remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash)) - .unwrap(); - - known_bytecodes_without_aa_code -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/invalid_bytecode.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/invalid_bytecode.rs deleted file mode 100644 index 75517138db3..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/invalid_bytecode.rs +++ /dev/null @@ -1,120 +0,0 @@ -use zksync_types::H256; -use zksync_utils::h256_to_u256; - -use crate::vm_1_4_1::tests::tester::VmTesterBuilder; -use crate::vm_1_4_1::types::inputs::system_env::TxExecutionMode; -use crate::vm_1_4_1::{HistoryEnabled, TxRevertReason}; - -// TODO this test requires a lot of hacks for bypassing the bytecode checks in the VM. -// Port it later, it's not significant. for now - -#[test] -fn test_invalid_bytecode() { - let mut vm_builder = VmTesterBuilder::new(HistoryEnabled) - .with_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1); - let mut storage = vm_builder.take_storage(); - let mut vm = vm_builder.build(&mut storage); - - let block_gas_per_pubdata = vm_test_env - .block_context - .context - .block_gas_price_per_pubdata(); - - let mut test_vm_with_custom_bytecode_hash = - |bytecode_hash: H256, expected_revert_reason: Option| { - let mut oracle_tools = - OracleTools::new(vm_test_env.storage_ptr.as_mut(), HistoryEnabled); - - let (encoded_tx, predefined_overhead) = get_l1_tx_with_custom_bytecode_hash( - h256_to_u256(bytecode_hash), - block_gas_per_pubdata as u32, - ); - - run_vm_with_custom_factory_deps( - &mut oracle_tools, - vm_test_env.block_context.context, - &vm_test_env.block_properties, - encoded_tx, - predefined_overhead, - expected_revert_reason, - ); - }; - - let failed_to_mark_factory_deps = |msg: &str, data: Vec| { - TxRevertReason::FailedToMarkFactoryDependencies(VmRevertReason::General { - msg: msg.to_string(), - data, - }) - }; - - // Here we provide the correctly-formatted bytecode hash of - // odd length, so it should work. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - None, - ); - - // Here we provide correctly formatted bytecode of even length, so - // it should fail. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Code length in words must be odd", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 67, 111, 100, 101, 32, 108, 101, 110, - 103, 116, 104, 32, 105, 110, 32, 119, 111, 114, 100, 115, 32, 109, 117, 115, 116, - 32, 98, 101, 32, 111, 100, 100, - ], - )), - ); - - // Here we provide incorrectly formatted bytecode of odd length, so - // it should fail. 
- test_vm_with_custom_bytecode_hash( - H256([ - 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Incorrectly formatted bytecodeHash", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, - 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, - 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - )), - ); - - // Here we provide incorrectly formatted bytecode of odd length, so - // it should fail. - test_vm_with_custom_bytecode_hash( - H256([ - 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Incorrectly formatted bytecodeHash", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, - 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, - 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - )), - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/is_write_initial.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/is_write_initial.rs deleted file mode 100644 index 7644064f4af..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/is_write_initial.rs +++ /dev/null @@ -1,48 +0,0 @@ -use crate::interface::storage::ReadStorage; -use zksync_types::get_nonce_key; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_1::{ - tests::{ - tester::{Account, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - HistoryDisabled, - }, -}; - -#[test] -fn test_is_write_initial_behaviour() { - // In this test, we check result of `is_write_initial` at different stages. - // The main idea is to check that `is_write_initial` storage uses the correct cache for initial_writes and doesn't - // messed up it with the repeated writes during the one batch execution. - - let mut account = Account::random(); - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let nonce_key = get_nonce_key(&account.address); - // Check that the next write to the nonce key will be initial. - assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); - - let contract_code = read_test_contract(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L2).tx; - - vm.vm.push_transaction(tx); - vm.vm.execute(VmExecutionMode::OneTx); - - // Check that `is_write_initial` still returns true for the nonce key. 
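// Note: the invariant pinned down here is that writes performed *within* the
// batch must not flip `is_write_initial`: the initial-writes cache is keyed on
// the storage state as of the start of the batch. In short:
//
//     assert!(storage.is_write_initial(&nonce_key)); // before the tx
//     /* ...execute a tx that writes the nonce slot... */
//     assert!(storage.is_write_initial(&nonce_key)); // still true afterwards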
-    assert!(vm
-        .storage
-        .as_ref()
-        .borrow_mut()
-        .is_write_initial(&nonce_key));
-}
diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/l1_tx_execution.rs
deleted file mode 100644
index e98fc23c6eb..00000000000
--- a/core/lib/multivm/src/versions/vm_1_4_1/tests/l1_tx_execution.rs
+++ /dev/null
@@ -1,189 +0,0 @@
-use ethabi::Token;
-use zksync_contracts::l1_messenger_contract;
-use zksync_system_constants::{BOOTLOADER_ADDRESS, L1_MESSENGER_ADDRESS};
-use zksync_types::{
-    get_code_key, get_known_code_key,
-    l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log},
-    storage_writes_deduplicator::StorageWritesDeduplicator,
-    Execute, ExecuteTransactionCommon, U256,
-};
-use zksync_utils::u256_to_h256;
-
-use crate::{
-    interface::{TxExecutionMode, VmExecutionMode, VmInterface},
-    vm_1_4_1::{
-        tests::{
-            tester::{TxType, VmTesterBuilder},
-            utils::{read_test_contract, verify_required_storage, BASE_SYSTEM_CONTRACTS},
-        },
-        types::internals::TransactionData,
-        HistoryEnabled,
-    },
-};
-
-#[test]
-fn test_l1_tx_execution() {
-    // In this test, we try to execute a contract deployment from L1.
-    // Here, instead of marking the code hash via the bootloader, we use
-    // L1->L2 communication, the same way it would likely be done in priority mode.
-
-    // There are always at least 7 initial writes here, because we pay fees from L1:
-    // - `totalSupply` of ETH token
-    // - balance of the refund recipient
-    // - balance of the bootloader
-    // - `tx_rolling` hash
-    // - rolling hash of L2->L1 logs
-    // - transaction number in block counter
-    // - L2->L1 log counter in `L1Messenger`
-
-    // TODO(PLA-537): right now we are using 4 slots instead of 7 due to the 0 fee for the transaction.
-    let basic_initial_writes = 4;
-
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone())
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_random_rich_accounts(1)
-        .build();
-
-    let contract_code = read_test_contract();
-    let account = &mut vm.rich_accounts[0];
-    let deploy_tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 1 });
-    let tx_data: TransactionData = deploy_tx.tx.clone().into();
-
-    let required_l2_to_l1_logs: Vec<_> = vec![L2ToL1Log {
-        shard_id: 0,
-        is_service: true,
-        tx_number_in_block: 0,
-        sender: BOOTLOADER_ADDRESS,
-        key: tx_data.tx_hash(0.into()),
-        value: u256_to_h256(U256::from(1u32)),
-    }]
-    .into_iter()
-    .map(UserL2ToL1Log)
-    .collect();
-
-    vm.vm.push_transaction(deploy_tx.tx.clone());
-
-    let res = vm.vm.execute(VmExecutionMode::OneTx);
-
-    // The code hash of the deployed contract should be marked as republished.
-    let known_codes_key = get_known_code_key(&deploy_tx.bytecode_hash);
-
-    // The contract should be deployed successfully.
-    let account_code_key = get_code_key(&deploy_tx.address);
-
-    let expected_slots = vec![
-        (u256_to_h256(U256::from(1u32)), known_codes_key),
-        (deploy_tx.bytecode_hash, account_code_key),
-    ];
-    assert!(!res.result.is_failed());
-
-    verify_required_storage(&vm.vm.state, expected_slots);
-
-    assert_eq!(res.logs.user_l2_to_l1_logs, required_l2_to_l1_logs);
-
-    let tx = account.get_test_contract_transaction(
-        deploy_tx.address,
-        true,
-        None,
-        false,
-        TxType::L1 { serial_id: 0 },
-    );
-    vm.vm.push_transaction(tx);
-    let res = vm.vm.execute(VmExecutionMode::OneTx);
-    let storage_logs = res.logs.storage_logs;
-    let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs);
-
-    // The tx panicked, so there are no initial writes beyond the baseline.
-    assert_eq!(res.initial_storage_writes - basic_initial_writes, 0);
-
-    let tx = account.get_test_contract_transaction(
-        deploy_tx.address,
-        false,
-        None,
-        false,
-        TxType::L1 { serial_id: 0 },
-    );
-    vm.vm.push_transaction(tx.clone());
-    let res = vm.vm.execute(VmExecutionMode::OneTx);
-    let storage_logs = res.logs.storage_logs;
-    let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs);
-    // We changed one slot inside the contract.
-    assert_eq!(res.initial_storage_writes - basic_initial_writes, 1);
-
-    // No repeated writes.
-    let repeated_writes = res.repeated_storage_writes;
-    assert_eq!(res.repeated_storage_writes, 0);
-
-    vm.vm.push_transaction(tx);
-    let storage_logs = vm.vm.execute(VmExecutionMode::OneTx).logs.storage_logs;
-    let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs);
-    // We do the same storage write; it gets deduplicated, so there are still 4 initial writes and 0 repeated ones.
-    assert_eq!(res.initial_storage_writes - basic_initial_writes, 1);
-    assert_eq!(res.repeated_storage_writes, repeated_writes);
-
-    let tx = account.get_test_contract_transaction(
-        deploy_tx.address,
-        false,
-        Some(10.into()),
-        false,
-        TxType::L1 { serial_id: 1 },
-    );
-    vm.vm.push_transaction(tx);
-    let result = vm.vm.execute(VmExecutionMode::OneTx);
-    // The method is not payable, so the tx should fail.
-    assert!(result.result.is_failed(), "The transaction should fail");
-
-    let res = StorageWritesDeduplicator::apply_on_empty_state(&result.logs.storage_logs);
-    // There are only basic initial writes
-    assert_eq!(res.initial_storage_writes - basic_initial_writes, 2);
-}
-
-#[test]
-fn test_l1_tx_execution_high_gas_limit() {
-    // In this test, we try to execute an L1->L2 transaction with a high gas limit.
-    // Usually, priority transactions with a dangerously high gas limit should not even pass the
-    // checks on L1; however, they might slip through during the transition period to the new fee
-    // model, so we check that we can safely process those.
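// Note: throughout `test_l1_tx_execution` above, write counts are obtained by
// folding the storage logs through `StorageWritesDeduplicator` and netting out
// the fee-related baseline (`basic_initial_writes`). The recurring idiom:
//
//     let res = vm.vm.execute(VmExecutionMode::OneTx);
//     let writes = StorageWritesDeduplicator::apply_on_empty_state(&res.logs.storage_logs);
//     assert_eq!(writes.initial_storage_writes - basic_initial_writes, 1);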
- - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - - let l1_messenger = l1_messenger_contract(); - - let contract_function = l1_messenger.function("sendToL1").unwrap(); - let params = [ - // Even a message of size 100k should not be able to be sent by a priority transaction - Token::Bytes(vec![0u8; 100_000]), - ]; - let calldata = contract_function.encode_input(¶ms).unwrap(); - - let mut tx = account.get_l1_tx( - Execute { - contract_address: L1_MESSENGER_ADDRESS, - value: 0.into(), - factory_deps: None, - calldata, - }, - 0, - ); - - if let ExecuteTransactionCommon::L1(data) = &mut tx.common_data { - // Using some large gas limit - data.gas_limit = 300_000_000.into(); - } else { - unreachable!() - }; - - vm.vm.push_transaction(tx); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(res.result.is_failed(), "The transaction should've failed"); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/l2_blocks.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/l2_blocks.rs deleted file mode 100644 index 073d9ce5800..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/l2_blocks.rs +++ /dev/null @@ -1,437 +0,0 @@ -//! -//! Tests for the bootloader -//! The description for each of the tests can be found in the corresponding `.yul` file. -//! - -use zk_evm_1_4_1::aux_structures::Timestamp; -use crate::interface::storage::WriteStorage; -use zksync_system_constants::REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE; -use zksync_types::{ - block::{pack_block_info, MiniblockHasher}, - AccountTreeId, Execute, ExecuteTransactionCommon, L1BatchNumber, L1TxCommonData, - MiniblockNumber, ProtocolVersionId, StorageKey, Transaction, H160, H256, - SYSTEM_CONTEXT_ADDRESS, SYSTEM_CONTEXT_BLOCK_INFO_POSITION, - SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, - U256, -}; -use zksync_utils::{h256_to_u256, u256_to_h256}; - -use crate::{ - interface::{ExecutionResult, Halt, L2BlockEnv, TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_1::{ - constants::{ - BOOTLOADER_HEAP_PAGE, TX_OPERATOR_L2_BLOCK_INFO_OFFSET, - TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO, - }, - tests::tester::{default_l1_batch, VmTesterBuilder}, - utils::l2_blocks::get_l2_block_hash_key, - HistoryEnabled, Vm, - }, - HistoryMode, -}; - -fn get_l1_noop() -> Transaction { - Transaction { - common_data: ExecuteTransactionCommon::L1(L1TxCommonData { - sender: H160::random(), - gas_limit: U256::from(2000000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute: Execute { - contract_address: H160::zero(), - calldata: vec![], - value: U256::zero(), - factory_deps: None, - }, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -#[test] -fn test_l2_block_initialization_timestamp() { - // This test checks that the L2 block initialization works correctly. - // Here we check that that the first block must have timestamp that is greater or equal to the timestamp - // of the current batch. - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - // Override the timestamp of the current miniblock to be 0. 
- vm.vm.bootloader_state.push_l2_block(L2BlockEnv { - number: 1, - timestamp: 0, - prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }); - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt {reason: Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())} - ); -} - -#[test] -fn test_l2_block_initialization_number_non_zero() { - // This test checks that the L2 block initialization works correctly. - // Here we check that the first miniblock number can not be zero. - - let l1_batch = default_l1_batch(L1BatchNumber(1)); - let first_l2_block = L2BlockEnv { - number: 0, - timestamp: l1_batch.timestamp, - prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 0, first_l2_block, timestamp); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt { - reason: Halt::FailedToSetL2Block( - "L2 block number is never expected to be zero".to_string() - ) - } - ); -} - -fn test_same_l2_block( - expected_error: Option, - override_timestamp: Option, - override_prev_block_hash: Option, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - vm.vm.push_transaction(l1_tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!res.result.is_failed()); - - let mut current_l2_block = vm.vm.batch_env.first_l2_block; - - if let Some(timestamp) = override_timestamp { - current_l2_block.timestamp = timestamp; - } - if let Some(prev_block_hash) = override_prev_block_hash { - current_l2_block.prev_block_hash = prev_block_hash; - } - - if (None, None) == (override_timestamp, override_prev_block_hash) { - current_l2_block.max_virtual_blocks_to_create = 0; - } - - vm.vm.push_transaction(l1_tx); - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 1, current_l2_block, timestamp); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_same_l2_block() { - // This test aims to test the case when there are multiple transactions inside the same L2 block. 
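// Note: a continuation of the *same* L2 block must keep the timestamp and
// previous-block hash unchanged and may set `max_virtual_blocks_to_create` to
// zero; the three cases below exercise exactly those checks through
// `test_same_l2_block`'s override parameters. Sketch of a valid continuation
// env, derived from the batch's first block as in the helper above:
//
//     let mut same_block = vm.vm.batch_env.first_l2_block;
//     same_block.max_virtual_blocks_to_create = 0; // a continuation creates no virtual blocks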
- - // Case 1: Incorrect timestamp - test_same_l2_block( - Some(Halt::FailedToSetL2Block( - "The timestamp of the same L2 block must be same".to_string(), - )), - Some(0), - None, - ); - - // Case 2: Incorrect previous block hash - test_same_l2_block( - Some(Halt::FailedToSetL2Block( - "The previous hash of the same L2 block must be same".to_string(), - )), - None, - Some(H256::zero()), - ); - - // Case 3: Correct continuation of the same L2 block - test_same_l2_block(None, None, None); -} - -fn test_new_l2_block( - first_l2_block: L2BlockEnv, - overriden_second_block_number: Option, - overriden_second_block_timestamp: Option, - overriden_second_block_prev_block_hash: Option, - expected_error: Option, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - l1_batch.first_l2_block = first_l2_block; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - // Firstly we execute the first transaction - vm.vm.push_transaction(l1_tx.clone()); - vm.vm.execute(VmExecutionMode::OneTx); - - let mut second_l2_block = vm.vm.batch_env.first_l2_block; - second_l2_block.number += 1; - second_l2_block.timestamp += 1; - second_l2_block.prev_block_hash = vm.vm.bootloader_state.last_l2_block().get_hash(); - - if let Some(block_number) = overriden_second_block_number { - second_l2_block.number = block_number; - } - if let Some(timestamp) = overriden_second_block_timestamp { - second_l2_block.timestamp = timestamp; - } - if let Some(prev_block_hash) = overriden_second_block_prev_block_hash { - second_l2_block.prev_block_hash = prev_block_hash; - } - - vm.vm.bootloader_state.push_l2_block(second_l2_block); - - vm.vm.push_transaction(l1_tx); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_new_l2_block() { - // This test is aimed to cover potential issue - - let correct_first_block = L2BlockEnv { - number: 1, - timestamp: 1, - prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }; - - // Case 1: Block number increasing by more than 1 - test_new_l2_block( - correct_first_block, - Some(3), - None, - None, - Some(Halt::FailedToSetL2Block( - "Invalid new L2 block number".to_string(), - )), - ); - - // Case 2: Timestamp not increasing - test_new_l2_block( - correct_first_block, - None, - Some(1), - None, - Some(Halt::FailedToSetL2Block("The timestamp of the new L2 block must be greater than the timestamp of the previous L2 block".to_string())), - ); - - // Case 3: Incorrect previous block hash - test_new_l2_block( - correct_first_block, - None, - None, - Some(H256::zero()), - Some(Halt::FailedToSetL2Block( - "The current L2 block hash is incorrect".to_string(), - )), - ); - - // Case 4: Correct new block - test_new_l2_block(correct_first_block, None, None, None, None); -} - -#[allow(clippy::too_many_arguments)] -fn test_first_in_batch( - miniblock_timestamp: u64, - miniblock_number: u32, - pending_txs_hash: H256, - batch_timestamp: u64, - new_batch_timestamp: u64, - batch_number: u32, - proposed_block: L2BlockEnv, - expected_error: Option, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - 
l1_batch.number += 1; - l1_batch.timestamp = new_batch_timestamp; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_tx = get_l1_noop(); - - // Setting the values provided. - let storage_ptr = vm.vm.state.storage.storage.get_ptr(); - let miniblock_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - ); - let pending_txs_hash_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, - ); - let batch_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_BLOCK_INFO_POSITION, - ); - let prev_block_hash_position = get_l2_block_hash_key(miniblock_number - 1); - - storage_ptr.borrow_mut().set_value( - miniblock_info_slot, - u256_to_h256(pack_block_info( - miniblock_number as u64, - miniblock_timestamp, - )), - ); - storage_ptr - .borrow_mut() - .set_value(pending_txs_hash_slot, pending_txs_hash); - storage_ptr.borrow_mut().set_value( - batch_info_slot, - u256_to_h256(pack_block_info(batch_number as u64, batch_timestamp)), - ); - storage_ptr.borrow_mut().set_value( - prev_block_hash_position, - MiniblockHasher::legacy_hash(MiniblockNumber(miniblock_number - 1)), - ); - - // In order to skip checks from the Rust side of the VM, we firstly use some definitely correct L2 block info. - // And then override it with the user-provided value - - let last_l2_block = vm.vm.bootloader_state.last_l2_block(); - let new_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: last_l2_block.timestamp + 1, - prev_block_hash: last_l2_block.get_hash(), - max_virtual_blocks_to_create: last_l2_block.max_virtual_blocks_to_create, - }; - - vm.vm.bootloader_state.push_l2_block(new_l2_block); - vm.vm.push_transaction(l1_tx); - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 0, proposed_block, timestamp); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_first_in_batch() { - let prev_block_hash = MiniblockHasher::legacy_hash(MiniblockNumber(0)); - let prev_block_hash = MiniblockHasher::new(MiniblockNumber(1), 1, prev_block_hash) - .finalize(ProtocolVersionId::latest()); - test_first_in_batch( - 1, - 1, - H256::zero(), - 1, - 2, - 1, - L2BlockEnv { - number: 2, - timestamp: 2, - prev_block_hash, - max_virtual_blocks_to_create: 1, - }, - None, - ); - - let prev_block_hash = MiniblockHasher::legacy_hash(MiniblockNumber(0)); - let prev_block_hash = MiniblockHasher::new(MiniblockNumber(1), 8, prev_block_hash) - .finalize(ProtocolVersionId::latest()); - test_first_in_batch( - 8, - 1, - H256::zero(), - 5, - 12, - 1, - L2BlockEnv { - number: 2, - timestamp: 9, - prev_block_hash, - max_virtual_blocks_to_create: 1, - }, - Some(Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())), - ); -} - -fn set_manual_l2_block_info( - vm: &mut Vm, - tx_number: usize, - block_info: L2BlockEnv, - timestamp: Timestamp, -) { - let fictive_miniblock_position = - TX_OPERATOR_L2_BLOCK_INFO_OFFSET + 
TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO * tx_number;
-
-    vm.state.memory.populate_page(
-        BOOTLOADER_HEAP_PAGE as usize,
-        vec![
-            (fictive_miniblock_position, block_info.number.into()),
-            (fictive_miniblock_position + 1, block_info.timestamp.into()),
-            (
-                fictive_miniblock_position + 2,
-                h256_to_u256(block_info.prev_block_hash),
-            ),
-            (
-                fictive_miniblock_position + 3,
-                block_info.max_virtual_blocks_to_create.into(),
-            ),
-        ],
-        timestamp,
-    )
-}
diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/mod.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/mod.rs
deleted file mode 100644
index a07608121bc..00000000000
--- a/core/lib/multivm/src/versions/vm_1_4_1/tests/mod.rs
+++ /dev/null
@@ -1,23 +0,0 @@
-mod bootloader;
-mod default_aa;
-// TODO - fix this test
-// `mod invalid_bytecode;`
-mod block_tip;
-mod bytecode_publishing;
-mod call_tracer;
-mod circuits;
-mod gas_limit;
-mod get_used_contracts;
-mod is_write_initial;
-mod l1_tx_execution;
-mod l2_blocks;
-mod nonce_holder;
-mod precompiles;
-mod refunds;
-mod require_eip712;
-mod rollbacks;
-mod simple_execution;
-mod tester;
-mod tracing_execution_error;
-mod upgrade;
-mod utils;
diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/nonce_holder.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/nonce_holder.rs
deleted file mode 100644
index 915a802b1e8..00000000000
--- a/core/lib/multivm/src/versions/vm_1_4_1/tests/nonce_holder.rs
+++ /dev/null
@@ -1,188 +0,0 @@
-use zksync_types::{Execute, Nonce};
-
-use crate::{
-    interface::{
-        ExecutionResult, Halt, TxExecutionMode, TxRevertReason, VmExecutionMode, VmInterface,
-        VmRevertReason,
-    },
-    vm_1_4_1::{
-        tests::{
-            tester::{Account, VmTesterBuilder},
-            utils::read_nonce_holder_tester,
-        },
-        types::internals::TransactionData,
-        HistoryEnabled,
-    },
-};
-
-pub enum NonceHolderTestMode {
-    SetValueUnderNonce,
-    IncreaseMinNonceBy5,
-    IncreaseMinNonceTooMuch,
-    LeaveNonceUnused,
-    IncreaseMinNonceBy1,
-    SwitchToArbitraryOrdering,
-}
-
-impl From<NonceHolderTestMode> for u8 {
-    fn from(mode: NonceHolderTestMode) -> u8 {
-        match mode {
-            NonceHolderTestMode::SetValueUnderNonce => 0,
-            NonceHolderTestMode::IncreaseMinNonceBy5 => 1,
-            NonceHolderTestMode::IncreaseMinNonceTooMuch => 2,
-            NonceHolderTestMode::LeaveNonceUnused => 3,
-            NonceHolderTestMode::IncreaseMinNonceBy1 => 4,
-            NonceHolderTestMode::SwitchToArbitraryOrdering => 5,
-        }
-    }
-}
-
-#[test]
-fn test_nonce_holder() {
-    let mut account = Account::random();
-
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_deployer()
-        .with_custom_contracts(vec![(
-            read_nonce_holder_tester().to_vec(),
-            account.address,
-            true,
-        )])
-        .with_rich_accounts(vec![account.clone()])
-        .build();
-
-    let mut run_nonce_test = |nonce: u32,
-                              test_mode: NonceHolderTestMode,
-                              error_message: Option<String>,
-                              comment: &'static str| {
-        // In this test we have to reset the VM state after each case, because once the bootloader
-        // fails during transaction validation, it will keep failing again and again. At the same
-        // time we have to keep the same storage, because we want to preserve the nonce holder
-        // contract state. The easiest way in terms of lifetimes is to reuse `vm_builder` to achieve it.
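// Note: `run_nonce_test` below smuggles the selected `NonceHolderTestMode`
// into the custom account by placing a single mode byte where the signature
// would normally live (see the `From<NonceHolderTestMode> for u8` impl above):
//
//     fn encode_mode(mode: NonceHolderTestMode) -> Vec<u8> {
//         vec![mode.into()] // one byte; the tester contract reads it back
//     }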
- vm.reset_state(true); - let mut transaction_data: TransactionData = account - .get_l2_tx_for_execute_with_nonce( - Execute { - contract_address: account.address, - calldata: vec![12], - value: Default::default(), - factory_deps: None, - }, - None, - Nonce(nonce), - ) - .into(); - - transaction_data.signature = vec![test_mode.into()]; - vm.vm.push_raw_transaction(transaction_data, 0, 0, true); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(msg) = error_message { - let expected_error = - TxRevertReason::Halt(Halt::ValidationFailed(VmRevertReason::General { - msg, - data: vec![], - })); - let ExecutionResult::Halt { reason } = result.result else { - panic!("Expected revert, got {:?}", result.result); - }; - assert_eq!( - reason.to_string(), - expected_error.to_string(), - "{}", - comment - ); - } else { - assert!(!result.result.is_failed(), "{}", comment); - } - }; - // Test 1: trying to set value under non sequential nonce value. - run_nonce_test( - 1u32, - NonceHolderTestMode::SetValueUnderNonce, - Some("Previous nonce has not been used".to_string()), - "Allowed to set value under non sequential value", - ); - - // Test 2: increase min nonce by 1 with sequential nonce ordering: - run_nonce_test( - 0u32, - NonceHolderTestMode::IncreaseMinNonceBy1, - None, - "Failed to increment nonce by 1 for sequential account", - ); - - // Test 3: correctly set value under nonce with sequential nonce ordering: - run_nonce_test( - 1u32, - NonceHolderTestMode::SetValueUnderNonce, - None, - "Failed to set value under nonce sequential value", - ); - - // Test 5: migrate to the arbitrary nonce ordering: - run_nonce_test( - 2u32, - NonceHolderTestMode::SwitchToArbitraryOrdering, - None, - "Failed to switch to arbitrary ordering", - ); - - // Test 6: increase min nonce by 5 - run_nonce_test( - 6u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - None, - "Failed to increase min nonce by 5", - ); - - // Test 7: since the nonces in range [6,10] are no longer allowed, the - // tx with nonce 10 should not be allowed - run_nonce_test( - 10u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - Some("Reusing the same nonce twice".to_string()), - "Allowed to reuse nonce below the minimal one", - ); - - // Test 8: we should be able to use nonce 13 - run_nonce_test( - 13u32, - NonceHolderTestMode::SetValueUnderNonce, - None, - "Did not allow to use unused nonce 10", - ); - - // Test 9: we should not be able to reuse nonce 13 - run_nonce_test( - 13u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - Some("Reusing the same nonce twice".to_string()), - "Allowed to reuse the same nonce twice", - ); - - // Test 10: we should be able to simply use nonce 14, while bumping the minimal nonce by 5 - run_nonce_test( - 14u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - None, - "Did not allow to use a bumped nonce", - ); - - // Test 11: Do not allow bumping nonce by too much - run_nonce_test( - 16u32, - NonceHolderTestMode::IncreaseMinNonceTooMuch, - Some("The value for incrementing the nonce is too high".to_string()), - "Allowed for incrementing min nonce too much", - ); - - // Test 12: Do not allow not setting a nonce as used - run_nonce_test( - 16u32, - NonceHolderTestMode::LeaveNonceUnused, - Some("The nonce was not set as used".to_string()), - "Allowed to leave nonce as unused", - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/precompiles.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/precompiles.rs deleted file mode 100644 index 37e871fbc70..00000000000 --- 
a/core/lib/multivm/src/versions/vm_1_4_1/tests/precompiles.rs +++ /dev/null @@ -1,136 +0,0 @@ -use zk_evm_1_4_1::zk_evm_abstractions::precompiles::PrecompileAddress; -use zksync_types::{Address, Execute}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_1::{ - constants::BLOCK_GAS_LIMIT, - tests::{tester::VmTesterBuilder, utils::read_precompiles_contract}, - HistoryEnabled, - }, -}; - -#[test] -fn test_keccak() { - // Execute special transaction and check that at least 1000 keccak calls were made. - let contract = read_precompiles_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contract, address, true)]) - .build(); - - // calldata for `doKeccak(1000)`. - let keccak1000_calldata = - "370f20ac00000000000000000000000000000000000000000000000000000000000003e8"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: hex::decode(keccak1000_calldata).unwrap(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let keccak_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::Keccak256) - .count(); - - assert!(keccak_count >= 1000); -} - -#[test] -fn test_sha256() { - // Execute special transaction and check that at least 1000 `sha256` calls were made. - let contract = read_precompiles_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contract, address, true)]) - .build(); - - // calldata for `doSha256(1000)`. - let sha1000_calldata = - "5d0b4fb500000000000000000000000000000000000000000000000000000000000003e8"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: hex::decode(sha1000_calldata).unwrap(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let sha_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::SHA256) - .count(); - - assert!(sha_count >= 1000); -} - -#[test] -fn test_ecrecover() { - // Execute simple transfer and check that exactly 1 `ecrecover` call was made (it's done during tx validation). 
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: account.address, - calldata: Vec::new(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let ecrecover_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::Ecrecover) - .count(); - - assert_eq!(ecrecover_count, 1); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/refunds.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/refunds.rs deleted file mode 100644 index 8700eb14b53..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/refunds.rs +++ /dev/null @@ -1,166 +0,0 @@ -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_1::{ - tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - types::internals::TransactionData, - HistoryEnabled, - }, -}; - -#[test] -fn test_predetermined_refunded_gas() { - // In this test, we compare the execution of the bootloader with the predefined - // refunded gas and without them - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_batch = vm.vm.batch_env.clone(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let DeployContractsTx { - tx, - bytecode_hash: _, - address: _, - } = account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(!result.result.is_failed()); - - // If the refund provided by the operator or the final refund are the 0 - // there is no impact of the operator's refund at all and so this test does not - // make much sense. - assert!( - result.refunds.operator_suggested_refund > 0, - "The operator's refund is 0" - ); - assert!(result.refunds.gas_refunded > 0, "The final refund is 0"); - - let result_without_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_without_predefined_refunds = vm.vm.get_current_execution_state(); - assert!(!result_without_predefined_refunds.result.is_failed(),); - - // Here we want to provide the same refund from the operator and check that it's the correct one. - // We execute the whole block without refund tracer, because refund tracer will eventually override the provided refund. 
- // But the overall result should be the same - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let tx: TransactionData = tx.into(); - // Overhead - let overhead = tx.overhead_gas(); - vm.vm - .push_raw_transaction(tx.clone(), overhead, result.refunds.gas_refunded, true); - - let result_with_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_with_predefined_refunds = vm.vm.get_current_execution_state(); - - assert!(!result_with_predefined_refunds.result.is_failed()); - - // We need to sort these lists as those are flattened from HashMaps - current_state_with_predefined_refunds - .used_contract_hashes - .sort(); - current_state_without_predefined_refunds - .used_contract_hashes - .sort(); - - assert_eq!( - current_state_with_predefined_refunds.events, - current_state_without_predefined_refunds.events - ); - - assert_eq!( - current_state_with_predefined_refunds.user_l2_to_l1_logs, - current_state_without_predefined_refunds.user_l2_to_l1_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.system_logs, - current_state_without_predefined_refunds.system_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.storage_log_queries, - current_state_without_predefined_refunds.storage_log_queries - ); - assert_eq!( - current_state_with_predefined_refunds.used_contract_hashes, - current_state_without_predefined_refunds.used_contract_hashes - ); - - // In this test we put the different refund from the operator. - // We still can't use the refund tracer, because it will override the refund. - // But we can check that the logs and events have changed. 
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let changed_operator_suggested_refund = result.refunds.gas_refunded + 1000; - vm.vm - .push_raw_transaction(tx, overhead, changed_operator_suggested_refund, true); - let result = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_with_changed_predefined_refunds = vm.vm.get_current_execution_state(); - - assert!(!result.result.is_failed()); - current_state_with_changed_predefined_refunds - .used_contract_hashes - .sort(); - current_state_without_predefined_refunds - .used_contract_hashes - .sort(); - - assert_eq!( - current_state_with_changed_predefined_refunds.events.len(), - current_state_without_predefined_refunds.events.len() - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.events, - current_state_without_predefined_refunds.events - ); - - assert_eq!( - current_state_with_changed_predefined_refunds.user_l2_to_l1_logs, - current_state_without_predefined_refunds.user_l2_to_l1_logs - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.system_logs, - current_state_without_predefined_refunds.system_logs - ); - - assert_eq!( - current_state_with_changed_predefined_refunds - .storage_log_queries - .len(), - current_state_without_predefined_refunds - .storage_log_queries - .len() - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.storage_log_queries, - current_state_without_predefined_refunds.storage_log_queries - ); - assert_eq!( - current_state_with_changed_predefined_refunds.used_contract_hashes, - current_state_without_predefined_refunds.used_contract_hashes - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/require_eip712.rs deleted file mode 100644 index aebc956e673..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/require_eip712.rs +++ /dev/null @@ -1,165 +0,0 @@ -use std::convert::TryInto; - -use ethabi::Token; -use zksync_eth_signer::{raw_ethereum_tx::TransactionParameters, EthereumSigner}; -use zksync_system_constants::L2_ETH_TOKEN_ADDRESS; -use zksync_types::{ - fee::Fee, l2::L2Tx, transaction_request::TransactionRequest, - utils::storage_key_for_standard_token_balance, AccountTreeId, Address, Eip712Domain, Execute, - L2ChainId, Nonce, Transaction, U256, -}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_1::{ - tests::{ - tester::{Account, VmTester, VmTesterBuilder}, - utils::read_many_owners_custom_account_contract, - }, - HistoryDisabled, - }, -}; - -impl VmTester<HistoryDisabled> { - pub(crate) fn get_eth_balance(&mut self, address: Address) -> U256 { - let key = storage_key_for_standard_token_balance( - AccountTreeId::new(L2_ETH_TOKEN_ADDRESS), - &address, - ); - self.vm.state.storage.storage.read_from_storage(&key) - } -} - -// TODO: refactor this test; it uses too many internal details of the VM -#[tokio::test] -/// This test deploys 'buggy' account abstraction code, and then tries accessing it both with legacy -/// and EIP712 transactions. -/// Currently we support both, but in the future, we should allow only EIP712 transactions to access the AA accounts.
-async fn test_require_eip712() { - // Use 3 accounts: - // - `private_address` - EOA account, where we have the key - // - `account_address` - AA account, where the contract is deployed - // - beneficiary - an EOA account, where we'll try to transfer the tokens. - let account_abstraction = Account::random(); - let mut private_account = Account::random(); - let beneficiary = Account::random(); - - let (bytecode, contract) = read_many_owners_custom_account_contract(); - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_custom_contracts(vec![(bytecode, account_abstraction.address, true)]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account_abstraction.clone(), private_account.clone()]) - .build(); - - assert_eq!(vm.get_eth_balance(beneficiary.address), U256::from(0)); - - let chain_id: u32 = 270; - - // First, let's set the owners of the AA account to the `private_address`. - // (so that messages signed by `private_address`, are authorized to act on behalf of the AA account). - let set_owners_function = contract.function("setOwners").unwrap(); - let encoded_input = set_owners_function - .encode_input(&[Token::Array(vec![Token::Address(private_account.address)])]) - .unwrap(); - - let tx = private_account.get_l2_tx_for_execute( - Execute { - contract_address: account_abstraction.address, - calldata: encoded_input, - value: Default::default(), - factory_deps: None, - }, - None, - ); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - let private_account_balance = vm.get_eth_balance(private_account.address); - - // And now let's do the transfer from the 'account abstraction' to 'beneficiary' (using 'legacy' transaction). - // Normally this would not work - unless the operator is malicious. - let aa_raw_tx = TransactionParameters { - nonce: U256::from(0), - to: Some(beneficiary.address), - gas: U256::from(100000000), - gas_price: Some(U256::from(10000000)), - value: U256::from(888000088), - data: vec![], - chain_id: 270, - transaction_type: None, - access_list: None, - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - }; - - let aa_tx = private_account.sign_legacy_tx(aa_raw_tx).await; - let (tx_request, hash) = TransactionRequest::from_bytes(&aa_tx, L2ChainId::from(270)).unwrap(); - - let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000).unwrap(); - l2_tx.set_input(aa_tx, hash); - // Pretend that operator is malicious and sets the initiator to the AA account. - l2_tx.common_data.initiator_address = account_abstraction.address; - let transaction: Transaction = l2_tx.try_into().unwrap(); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(888000088) - ); - // Make sure that the tokens were transferred from the AA account. 
- assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); - - // Now send the 'classic' EIP712 transaction - let tx_712 = L2Tx::new( - beneficiary.address, - vec![], - Nonce(1), - Fee { - gas_limit: U256::from(1000000000), - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - gas_per_pubdata_limit: U256::from(1000000000), - }, - account_abstraction.address, - U256::from(28374938), - None, - Default::default(), - ); - - let transaction_request: TransactionRequest = tx_712.into(); - - let domain = Eip712Domain::new(L2ChainId::from(chain_id)); - let signature = private_account - .get_pk_signer() - .sign_typed_data(&domain, &transaction_request) - .await - .unwrap(); - let encoded_tx = transaction_request.get_signed_bytes(&signature, L2ChainId::from(chain_id)); - - let (aa_txn_request, aa_hash) = - TransactionRequest::from_bytes(&encoded_tx, L2ChainId::from(chain_id)).unwrap(); - - let mut l2_tx = L2Tx::from_request(aa_txn_request, 100000).unwrap(); - l2_tx.set_input(encoded_tx, aa_hash); - - let transaction: Transaction = l2_tx.try_into().unwrap(); - vm.vm.push_transaction(transaction); - vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(916375026) - ); - assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/rollbacks.rs deleted file mode 100644 index 2ae942c2652..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/rollbacks.rs +++ /dev/null @@ -1,263 +0,0 @@ -use ethabi::Token; -use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams}; -use crate::interface::storage::WriteStorage; -use zksync_types::{get_nonce_key, Execute, U256}; - -use crate::{ - interface::{ - dyn_tracers::vm_1_4_1::DynTracer, - tracer::{TracerExecutionStatus, TracerExecutionStopReason}, - TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceHistoryEnabled, - }, - vm_1_4_1::{ - tests::{ - tester::{DeployContractsTx, TransactionTestInfo, TxModifier, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - types::internals::ZkSyncVmState, - BootloaderState, HistoryEnabled, HistoryMode, SimpleMemory, ToTracerPointer, VmTracer, - }, -}; - -#[test] -fn test_vm_rollbacks() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let mut account = vm.rich_accounts[0].clone(); - let counter = read_test_contract(); - let tx_0 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_1 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_2 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(tx_0.clone(), false), - TransactionTestInfo::new_processed(tx_1.clone(), false), - TransactionTestInfo::new_processed(tx_2.clone(), false), - ]); - - // reset vm - vm.reset_with_empty_storage(); - - let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignatureLength.into()), - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongMagicValue.into()), - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignature.into()), - // The correct 
nonce is 0, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_0.clone(), false), - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_1, false), - // The correct nonce is 2, this tx will fail - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_2.clone(), false), - // This tx will fail - TransactionTestInfo::new_rejected(tx_2, TxModifier::NonceReused.into()), - TransactionTestInfo::new_rejected(tx_0, TxModifier::NonceReused.into()), - ]); - - assert_eq!(result_without_rollbacks, result_with_rollbacks); -} - -#[test] -fn test_vm_loadnext_rollbacks() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let mut account = vm.rich_accounts[0].clone(); - - let loadnext_contract = get_loadnext_contract(); - let loadnext_constructor_data = &[Token::Uint(U256::from(100))]; - let DeployContractsTx { - tx: loadnext_deploy_tx, - address, - .. - } = account.get_deploy_tx_with_factory_deps( - &loadnext_contract.bytecode, - Some(loadnext_constructor_data), - loadnext_contract.factory_deps.clone(), - TxType::L2, - ); - - let loadnext_tx_1 = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let loadnext_tx_2 = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_1.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), false), - ]); - - // reset vm - vm.reset_with_empty_storage(); - - let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_1.clone(), true), - TransactionTestInfo::new_rejected( - loadnext_deploy_tx.clone(), - TxModifier::NonceReused.into(), - ), - TransactionTestInfo::new_processed(loadnext_tx_1, false), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_rejected(loadnext_deploy_tx, TxModifier::NonceReused.into()), - TransactionTestInfo::new_processed(loadnext_tx_2, false), - ]); - - assert_eq!(result_without_rollbacks, result_with_rollbacks); -} - -// Testing tracer that does not allow the recursion to go deeper than a certain limit -struct MaxRecursionTracer { - max_recursion_depth: usize, -} - -/// Tracer responsible for calculating the number 
of call stack frames and -/// stopping the VM execution if the recursion depth limit is exceeded. -impl<S, H: HistoryMode> DynTracer<S, SimpleMemory<H>> for MaxRecursionTracer {} - -impl<S: WriteStorage, H: HistoryMode> VmTracer<S, H> for MaxRecursionTracer { - fn finish_cycle( - &mut self, - state: &mut ZkSyncVmState<S, H>, - _bootloader_state: &mut BootloaderState, - ) -> TracerExecutionStatus { - let current_depth = state.local_state.callstack.depth(); - - if current_depth > self.max_recursion_depth { - TracerExecutionStatus::Stop(TracerExecutionStopReason::Finish) - } else { - TracerExecutionStatus::Continue - } - } -} - -#[test] -fn test_layered_rollback() { - // This test checks that the layered rollbacks work correctly, i.e. - // the rollback by the operator will always revert all the changes - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - let loadnext_contract = get_loadnext_contract().bytecode; - - let DeployContractsTx { - tx: deploy_tx, - address, - .. - } = account.get_deploy_tx( - &loadnext_contract, - Some(&[Token::Uint(0.into())]), - TxType::L2, - ); - vm.vm.push_transaction(deploy_tx); - let deployment_res = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!deployment_res.result.is_failed(), "transaction failed"); - - let loadnext_transaction = account.get_loadnext_transaction( - address, - LoadnextContractExecutionParams { - writes: 1, - recursive_calls: 20, - ..LoadnextContractExecutionParams::empty() - }, - TxType::L2, - ); - - let nonce_val = vm - .vm - .state - .storage - .storage - .read_from_storage(&get_nonce_key(&account.address)); - - vm.vm.make_snapshot(); - - vm.vm.push_transaction(loadnext_transaction.clone()); - vm.vm.inspect( - MaxRecursionTracer { - max_recursion_depth: 15, - } - .into_tracer_pointer() - .into(), - VmExecutionMode::OneTx, - ); - - let nonce_val2 = vm - .vm - .state - .storage - .storage - .read_from_storage(&get_nonce_key(&account.address)); - - // The tracer stopped after the validation has passed, so the nonce has already been increased - assert_eq!(nonce_val + U256::one(), nonce_val2, "nonce did not change"); - - vm.vm.rollback_to_the_latest_snapshot(); - - let nonce_val_after_rollback = vm - .vm - .state - .storage - .storage - .read_from_storage(&get_nonce_key(&account.address)); - - assert_eq!( - nonce_val, nonce_val_after_rollback, - "nonce changed after rollback" - ); - - vm.vm.push_transaction(loadnext_transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "transaction must not fail"); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/simple_execution.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/simple_execution.rs deleted file mode 100644 index 384bc4cf325..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/simple_execution.rs +++ /dev/null @@ -1,81 +0,0 @@ -use crate::{ - interface::{ExecutionResult, VmExecutionMode, VmInterface}, - vm_1_4_1::{ - tests::tester::{TxType, VmTesterBuilder}, - HistoryDisabled, - }, -}; - -#[test] -fn estimate_fee() { - let mut vm_tester = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - vm_tester.deploy_test_contract(); - let account = &mut vm_tester.rich_accounts[0]; - - let tx = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L2, - ); - - vm_tester.vm.push_transaction(tx); - -
let result = vm_tester.vm.execute(VmExecutionMode::OneTx); - assert_matches!(result.result, ExecutionResult::Success { .. }); -} - -#[test] -fn simple_execute() { - let mut vm_tester = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - vm_tester.deploy_test_contract(); - - let account = &mut vm_tester.rich_accounts[0]; - - let tx1 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - - let tx2 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - true, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - - let tx3 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - let vm = &mut vm_tester.vm; - vm.push_transaction(tx1); - vm.push_transaction(tx2); - vm.push_transaction(tx3); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Success { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Revert { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Success { .. }); - let block_tip = vm.execute(VmExecutionMode::Batch); - assert_matches!(block_tip.result, ExecutionResult::Success { .. }); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/inner_state.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/inner_state.rs deleted file mode 100644 index 11e9d7fd6df..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/inner_state.rs +++ /dev/null @@ -1,131 +0,0 @@ -use std::collections::HashMap; - -use zk_evm_1_4_1::{aux_structures::Timestamp, vm_state::VmLocalState}; -use crate::interface::storage::WriteStorage; -use zksync_types::{StorageKey, StorageValue, U256}; - -use crate::{ - vm_1_4_1::{ - old_vm::{ - event_sink::InMemoryEventSink, - history_recorder::{AppDataFrameManagerWithHistory, HistoryRecorder}, - }, - utils::logs::StorageLogQuery, - HistoryEnabled, HistoryMode, SimpleMemory, Vm, - }, - HistoryMode as CommonHistoryMode, -}; - -#[derive(Clone, Debug)] -pub(crate) struct ModifiedKeysMap(HashMap<StorageKey, StorageValue>); - -// We consider hashmaps to be equal even if there is a key -// that is not present in one but has zero value in another. -impl PartialEq for ModifiedKeysMap { - fn eq(&self, other: &Self) -> bool { - for (key, value) in self.0.iter() { - if *value != other.0.get(key).cloned().unwrap_or_default() { - return false; - } - } - for (key, value) in other.0.iter() { - if *value != self.0.get(key).cloned().unwrap_or_default() { - return false; - } - } - true - } -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct DecommitterTestInnerState<H: HistoryMode> { - /// There is no way to "truly" compare the storage pointer, - /// so we just compare the modified keys. This is reasonable enough. - pub(crate) modified_storage_keys: ModifiedKeysMap, - pub(crate) known_bytecodes: HistoryRecorder<HashMap<U256, Vec<U256>>, H>, - pub(crate) decommitted_code_hashes: HistoryRecorder<HashMap<U256, u32>, HistoryEnabled>, -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct StorageOracleInnerState<H: HistoryMode> { - /// There is no way to "truly" compare the storage pointer, - /// so we just compare the modified keys. This is reasonable enough. 
- pub(crate) modified_storage_keys: ModifiedKeysMap, - - pub(crate) frames_stack: AppDataFrameManagerWithHistory<Box<StorageLogQuery>, H>, - - pub(crate) pre_paid_changes: HistoryRecorder<HashMap<StorageKey, u32>, H>, - pub(crate) paid_changes: HistoryRecorder<HashMap<StorageKey, u32>, H>, - pub(crate) initial_values: HistoryRecorder<HashMap<StorageKey, U256>, H>, - pub(crate) returned_refunds: HistoryRecorder<Vec<u32>, H>, -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct PrecompileProcessorTestInnerState<H: HistoryMode> { - pub(crate) timestamp_history: HistoryRecorder<Vec<Timestamp>, H>, -} - -/// A struct that encapsulates the state of the VM's oracles -/// The state is to be used in tests. -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct VmInstanceInnerState<H: HistoryMode> { - event_sink: InMemoryEventSink<H>, - precompile_processor_state: PrecompileProcessorTestInnerState<H>, - memory: SimpleMemory<H>, - decommitter_state: DecommitterTestInnerState<H>, - storage_oracle_state: StorageOracleInnerState<H>, - local_state: VmLocalState, -} - -impl<S: WriteStorage, H: CommonHistoryMode> Vm<S, H> { - // Dump inner state of the VM. - pub(crate) fn dump_inner_state(&self) -> VmInstanceInnerState<H::Vm1_4_1> { - let event_sink = self.state.event_sink.clone(); - let precompile_processor_state = PrecompileProcessorTestInnerState { - timestamp_history: self.state.precompiles_processor.timestamp_history.clone(), - }; - let memory = self.state.memory.clone(); - let decommitter_state = DecommitterTestInnerState { - modified_storage_keys: ModifiedKeysMap( - self.state - .decommittment_processor - .get_storage() - .borrow() - .modified_storage_keys() - .clone(), - ), - known_bytecodes: self.state.decommittment_processor.known_bytecodes.clone(), - decommitted_code_hashes: self - .state - .decommittment_processor - .get_decommitted_code_hashes_with_history() - .clone(), - }; - let storage_oracle_state = StorageOracleInnerState { - modified_storage_keys: ModifiedKeysMap( - self.state - .storage - .storage - .get_ptr() - .borrow() - .modified_storage_keys() - .clone(), - ), - frames_stack: self.state.storage.frames_stack.clone(), - pre_paid_changes: self.state.storage.pre_paid_changes.clone(), - paid_changes: self.state.storage.paid_changes.clone(), - initial_values: self.state.storage.initial_values.clone(), - returned_refunds: self.state.storage.returned_refunds.clone(), - }; - let local_state = self.state.local_state.clone(); - - VmInstanceInnerState { - event_sink, - precompile_processor_state, - memory, - decommitter_state, - storage_oracle_state, - local_state, - } - } -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/mod.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/mod.rs deleted file mode 100644 index c3cc5d8d980..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/mod.rs +++ /dev/null @@ -1,9 +0,0 @@ -pub(crate) use transaction_test_info::{ExpectedError, TransactionTestInfo, TxModifier}; -pub(crate) use vm_tester::{ - default_l1_batch, get_empty_storage, InMemoryStorageView, VmTester, VmTesterBuilder, -}; -pub(crate) use zksync_test_account::{Account, DeployContractsTx, TxType}; - -mod inner_state; -mod transaction_test_info; -mod vm_tester; diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/transaction_test_info.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/transaction_test_info.rs deleted file mode 100644 index 443acf71676..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/transaction_test_info.rs +++ /dev/null @@ -1,217 +0,0 @@ -use zksync_types::{ExecuteTransactionCommon, Transaction}; - -use crate::{ - interface::{ - CurrentExecutionState, ExecutionResult, Halt, TxRevertReason, VmExecutionMode, - 
VmExecutionResultAndLogs, VmInterface, VmInterfaceHistoryEnabled, VmRevertReason, - }, - vm_1_4_1::{tests::tester::vm_tester::VmTester, HistoryEnabled}, -}; - -#[derive(Debug, Clone)] -pub(crate) enum TxModifier { - WrongSignatureLength, - WrongSignature, - WrongMagicValue, - WrongNonce, - NonceReused, -} - -#[derive(Debug, Clone)] -pub(crate) enum TxExpectedResult { - Rejected { error: ExpectedError }, - Processed { rollback: bool }, -} - -#[derive(Debug, Clone)] -pub(crate) struct TransactionTestInfo { - tx: Transaction, - result: TxExpectedResult, -} - -#[derive(Debug, Clone)] -pub(crate) struct ExpectedError { - pub(crate) revert_reason: TxRevertReason, - pub(crate) modifier: Option<TxModifier>, -} - -impl From<TxModifier> for ExpectedError { - fn from(value: TxModifier) -> Self { - let revert_reason = match value { - TxModifier::WrongSignatureLength => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Signature length is incorrect".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 29, 83, 105, 103, 110, 97, 116, 117, 114, 101, 32, - 108, 101, 110, 103, 116, 104, 32, 105, 115, 32, 105, 110, 99, 111, 114, 114, 101, 99, - 116, 0, 0, 0, - ], - }) - } - TxModifier::WrongSignature => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Account validation returned invalid magic value. Most often this means that the signature is incorrect".to_string(), - data: vec![], - }) - } - TxModifier::WrongMagicValue => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "v is neither 27 nor 28".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 22, 118, 32, 105, 115, 32, 110, 101, 105, 116, 104, - 101, 114, 32, 50, 55, 32, 110, 111, 114, 32, 50, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - }) - - } - TxModifier::WrongNonce => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Incorrect nonce".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 73, 110, 99, 111, 114, 114, 101, 99, 116, 32, 110, - 111, 110, 99, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - }) - } - TxModifier::NonceReused => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Reusing the same nonce twice".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 82, 101, 117, 115, 105, 110, 103, 32, 116, 104, - 101, 32, 115, 97, 109, 101, 32, 110, 111, 110, 99, 101, 32, 116, 119, 105, 99, 101, 0, - 0, 0, 0, - ], - }) - } - }; - - ExpectedError { - revert_reason: TxRevertReason::Halt(revert_reason), - modifier: Some(value), - } - } -} - -impl TransactionTestInfo { - pub(crate) fn new_rejected( - mut transaction: Transaction, - expected_error: ExpectedError, - ) -> Self { - transaction.common_data = match transaction.common_data { - ExecuteTransactionCommon::L2(mut data) => { - if let Some(modifier) = &expected_error.modifier { - match modifier { - 
TxModifier::WrongSignatureLength => { - data.signature = data.signature[..data.signature.len() - 20].to_vec() - } - TxModifier::WrongSignature => data.signature = vec![27u8; 65], - TxModifier::WrongMagicValue => data.signature = vec![1u8; 65], - TxModifier::WrongNonce => { - // No need to modify the signature for a nonce error - } - TxModifier::NonceReused => { - // No need to modify the signature for a nonce error - } - } - } - ExecuteTransactionCommon::L2(data) - } - _ => panic!("L1 transactions are not supported"), - }; - - Self { - tx: transaction, - result: TxExpectedResult::Rejected { - error: expected_error, - }, - } - } - - pub(crate) fn new_processed(transaction: Transaction, should_be_rollbacked: bool) -> Self { - Self { - tx: transaction, - result: TxExpectedResult::Processed { - rollback: should_be_rollbacked, - }, - } - } - - fn verify_result(&self, result: &VmExecutionResultAndLogs) { - match &self.result { - TxExpectedResult::Rejected { error } => match &result.result { - ExecutionResult::Success { .. } => { - panic!("Transaction should be reverted {:?}", self.tx.nonce()) - } - ExecutionResult::Revert { output } => match &error.revert_reason { - TxRevertReason::TxReverted(expected) => { - assert_eq!(output, expected) - } - _ => { - panic!("Error types mismatch"); - } - }, - ExecutionResult::Halt { reason } => match &error.revert_reason { - TxRevertReason::Halt(expected) => { - assert_eq!(reason, expected) - } - _ => { - panic!("Error types mismatch"); - } - }, - }, - TxExpectedResult::Processed { .. } => { - assert!(!result.result.is_failed()); - } - } - } - - fn should_rollback(&self) -> bool { - match &self.result { - TxExpectedResult::Rejected { .. } => true, - TxExpectedResult::Processed { rollback } => *rollback, - } - } -} - -impl VmTester<HistoryEnabled> { - pub(crate) fn execute_and_verify_txs( - &mut self, - txs: &[TransactionTestInfo], - ) -> CurrentExecutionState { - for tx_test_info in txs { - self.execute_tx_and_verify(tx_test_info.clone()); - } - self.vm.execute(VmExecutionMode::Batch); - let mut state = self.vm.get_current_execution_state(); - state.used_contract_hashes.sort(); - state - } - - pub(crate) fn execute_tx_and_verify( - &mut self, - tx_test_info: TransactionTestInfo, - ) -> VmExecutionResultAndLogs { - let inner_state_before = self.vm.dump_inner_state(); - self.vm.make_snapshot(); - self.vm.push_transaction(tx_test_info.tx.clone()); - let result = self.vm.execute(VmExecutionMode::OneTx); - tx_test_info.verify_result(&result); - if tx_test_info.should_rollback() { - self.vm.rollback_to_the_latest_snapshot(); - let inner_state_after = self.vm.dump_inner_state(); - assert_eq!( - inner_state_before, inner_state_after, - "Inner state before and after rollback should be equal" - ); - } - result - } -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/vm_tester.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/vm_tester.rs deleted file mode 100644 index 24bd0b4d0bc..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/tester/vm_tester.rs +++ /dev/null @@ -1,298 +0,0 @@ -use std::marker::PhantomData; - -use zksync_contracts::BaseSystemContracts; - use crate::interface::storage::{InMemoryStorage, StoragePtr, StorageView, WriteStorage}; -use zksync_types::{ - block::MiniblockHasher, - fee_model::BatchFeeInput, - get_code_key, get_is_account_key, - helpers::unix_timestamp_ms, - utils::{deployed_address_create, storage_key_for_eth_balance}, - Address, L1BatchNumber, L2ChainId, MiniblockNumber, Nonce, ProtocolVersionId, U256, -}; -use 
zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; - -use crate::{ - interface::{ - L1BatchEnv, L2Block, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmInterface, - }, - vm_1_4_1::{ - constants::BLOCK_GAS_LIMIT, - tests::{ - tester::{Account, TxType}, - utils::read_test_contract, - }, - utils::l2_blocks::load_last_l2_block, - Vm, - }, - HistoryMode, -}; - -pub(crate) type InMemoryStorageView = StorageView<InMemoryStorage>; - -pub(crate) struct VmTester<H: HistoryMode> { - pub(crate) vm: Vm<InMemoryStorageView, H>, - pub(crate) storage: StoragePtr<InMemoryStorageView>, - pub(crate) fee_account: Address, - pub(crate) deployer: Option<Account>, - pub(crate) test_contract: Option<Address>,
- pub(crate) rich_accounts: Vec<Account>, - pub(crate) custom_contracts: Vec<ContractsToDeploy>, - _phantom: std::marker::PhantomData<H>, -} - -impl<H: HistoryMode> VmTester<H> { - pub(crate) fn deploy_test_contract(&mut self) { - let contract = read_test_contract(); - let tx = self - .deployer - .as_mut() - .expect("You have to initialize builder with deployer") - .get_deploy_tx(&contract, None, TxType::L2) - .tx; - let nonce = tx.nonce().unwrap().0.into(); - self.vm.push_transaction(tx); - self.vm.execute(VmExecutionMode::OneTx); - let deployed_address = - deployed_address_create(self.deployer.as_ref().unwrap().address, nonce); - self.test_contract = Some(deployed_address); - } - - pub(crate) fn reset_with_empty_storage(&mut self) { - self.storage = StorageView::new(get_empty_storage()).to_rc_ptr(); - self.reset_state(false); - } - - /// Reset the state of the VM to the initial state. - /// If `use_latest_l2_block` is true, then the VM will use the latest L2 block from storage, - /// otherwise it will use the first L2 block of l1 batch env - pub(crate) fn reset_state(&mut self, use_latest_l2_block: bool) { - for account in self.rich_accounts.iter_mut() { - account.nonce = Nonce(0); - make_account_rich(self.storage.clone(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(self.storage.clone(), deployer); - } - - if !self.custom_contracts.is_empty() { - println!("Inserting custom contracts is not yet supported") - // `insert_contracts(&mut self.storage, &self.custom_contracts);` - } - - let mut l1_batch = self.vm.batch_env.clone(); - if use_latest_l2_block { - let last_l2_block = load_last_l2_block(self.storage.clone()).unwrap_or(L2Block { - number: 0, - timestamp: 0, - hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - }); - l1_batch.first_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: std::cmp::max(last_l2_block.timestamp + 1, l1_batch.timestamp), - prev_block_hash: last_l2_block.hash, - max_virtual_blocks_to_create: 1, - }; - } - - let vm = Vm::new(l1_batch, self.vm.system_env.clone(), self.storage.clone()); - - if self.test_contract.is_some() { - self.deploy_test_contract(); - } - - self.vm = vm; - } -} - -pub(crate) type ContractsToDeploy = (Vec<u8>, Address, bool); - -pub(crate) struct VmTesterBuilder<H: HistoryMode> { - storage: Option<InMemoryStorage>, - l1_batch_env: Option<L1BatchEnv>, - system_env: SystemEnv, - deployer: Option<Account>, - rich_accounts: Vec<Account>, - custom_contracts: Vec<ContractsToDeploy>, - _phantom: PhantomData<H>, -} - -impl<H: HistoryMode> Clone for VmTesterBuilder<H> { - fn clone(&self) -> Self { - Self { - storage: None, - l1_batch_env: self.l1_batch_env.clone(), - system_env: self.system_env.clone(), - deployer: self.deployer.clone(), - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - _phantom: PhantomData, - } - } -} - -#[allow(dead_code)] -impl<H: HistoryMode> VmTesterBuilder<H> { - pub(crate) fn new(_: H) -> Self { - Self { - storage: None, - l1_batch_env: None, - system_env: SystemEnv { - zk_porter_available: false, - version: ProtocolVersionId::latest(), - base_system_smart_contracts: BaseSystemContracts::playground(), - gas_limit: BLOCK_GAS_LIMIT, - execution_mode: TxExecutionMode::VerifyExecute, - default_validation_computational_gas_limit: BLOCK_GAS_LIMIT, - chain_id: L2ChainId::from(270), - }, - deployer: None, - rich_accounts: vec![], - custom_contracts: vec![], - _phantom: PhantomData, - } - } - - pub(crate) fn with_l1_batch_env(mut self, l1_batch_env: L1BatchEnv) -> Self { - self.l1_batch_env = Some(l1_batch_env); - self - } - - pub(crate) fn with_system_env(mut self, system_env: SystemEnv) -> Self { - 
self.system_env = system_env; - self - } - - pub(crate) fn with_storage(mut self, storage: InMemoryStorage) -> Self { - self.storage = Some(storage); - self - } - - pub(crate) fn with_base_system_smart_contracts( - mut self, - base_system_smart_contracts: BaseSystemContracts, - ) -> Self { - self.system_env.base_system_smart_contracts = base_system_smart_contracts; - self - } - - pub(crate) fn with_gas_limit(mut self, gas_limit: u32) -> Self { - self.system_env.gas_limit = gas_limit; - self - } - - pub(crate) fn with_execution_mode(mut self, execution_mode: TxExecutionMode) -> Self { - self.system_env.execution_mode = execution_mode; - self - } - - pub(crate) fn with_empty_in_memory_storage(mut self) -> Self { - self.storage = Some(get_empty_storage()); - self - } - - pub(crate) fn with_random_rich_accounts(mut self, number: u32) -> Self { - for _ in 0..number { - let account = Account::random(); - self.rich_accounts.push(account); - } - self - } - - pub(crate) fn with_rich_accounts(mut self, accounts: Vec<Account>) -> Self { - self.rich_accounts.extend(accounts); - self - } - - pub(crate) fn with_deployer(mut self) -> Self { - let deployer = Account::random(); - self.deployer = Some(deployer); - self - } - - pub(crate) fn with_custom_contracts(mut self, contracts: Vec<ContractsToDeploy>) -> Self { - self.custom_contracts = contracts; - self - } - - pub(crate) fn build(self) -> VmTester<H> { - let l1_batch_env = self - .l1_batch_env - .unwrap_or_else(|| default_l1_batch(L1BatchNumber(1))); - - let mut raw_storage = self.storage.unwrap_or_else(get_empty_storage); - insert_contracts(&mut raw_storage, &self.custom_contracts); - let storage_ptr = StorageView::new(raw_storage).to_rc_ptr(); - for account in self.rich_accounts.iter() { - make_account_rich(storage_ptr.clone(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(storage_ptr.clone(), deployer); - } - let fee_account = l1_batch_env.fee_account; - - let vm = Vm::new(l1_batch_env, self.system_env, storage_ptr.clone()); - - VmTester { - vm, - storage: storage_ptr, - fee_account, - deployer: self.deployer, - test_contract: None, - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - _phantom: PhantomData, - } - } -} - -pub(crate) fn default_l1_batch(number: L1BatchNumber) -> L1BatchEnv { - let timestamp = unix_timestamp_ms(); - L1BatchEnv { - previous_batch_hash: None, - number, - timestamp, - fee_input: BatchFeeInput::l1_pegged( - 50_000_000_000, // 50 gwei - 250_000_000, // 0.25 gwei - ), - fee_account: Address::random(), - enforced_base_fee: None, - first_l2_block: L2BlockEnv { - number: 1, - timestamp, - prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 100, - }, - } -} - -pub(crate) fn make_account_rich(storage: StoragePtr<StorageView<InMemoryStorage>>, account: &Account) { - let key = storage_key_for_eth_balance(&account.address); - storage - .as_ref() - .borrow_mut() - .set_value(key, u256_to_h256(U256::from(10u64.pow(19)))); - } - -pub(crate) fn get_empty_storage() -> InMemoryStorage { - InMemoryStorage::with_system_contracts(hash_bytecode) -} - -// Inserts the contracts into the test environment, bypassing the - deployer system contract. Besides the reference to storage - it accepts a `contracts` tuple of information about the contract - and whether or not it is an account. 
-fn insert_contracts(raw_storage: &mut InMemoryStorage, contracts: &[ContractsToDeploy]) { - for (contract, address, is_account) in contracts { - let deployer_code_key = get_code_key(address); - raw_storage.set_value(deployer_code_key, hash_bytecode(contract)); - - if *is_account { - let is_account_key = get_is_account_key(address); - raw_storage.set_value(is_account_key, u256_to_h256(1_u32.into())); - } - - raw_storage.store_factory_dep(hash_bytecode(contract), contract.clone()); - } -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/tracing_execution_error.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/tracing_execution_error.rs deleted file mode 100644 index 02c7590c1be..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/tracing_execution_error.rs +++ /dev/null @@ -1,54 +0,0 @@ -use zksync_types::{Execute, H160}; - -use crate::{ - interface::{TxExecutionMode, TxRevertReason, VmRevertReason}, - vm_1_4_1::{ - tests::{ - tester::{ExpectedError, TransactionTestInfo, VmTesterBuilder}, - utils::{get_execute_error_calldata, read_error_contract, BASE_SYSTEM_CONTRACTS}, - }, - HistoryEnabled, - }, -}; - -#[test] -fn test_tracing_of_execution_errors() { - let contract_address = H160::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_custom_contracts(vec![(read_error_contract(), contract_address, false)]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address, - calldata: get_execute_error_calldata(), - value: Default::default(), - factory_deps: Some(vec![]), - }, - None, - ); - - vm.execute_tx_and_verify(TransactionTestInfo::new_rejected( - tx, - ExpectedError { - revert_reason: TxRevertReason::TxReverted(VmRevertReason::General { - msg: "short".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 115, 104, 111, 114, 116, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, - ], - }), - modifier: None, - }, - )); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/upgrade.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/upgrade.rs deleted file mode 100644 index af3701d919f..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_1/tests/upgrade.rs +++ /dev/null @@ -1,355 +0,0 @@ -use zk_evm_1_4_1::aux_structures::Timestamp; -use zksync_contracts::{deployer_contract, load_sys_contract, read_bytecode}; -use crate::interface::storage::WriteStorage; -use zksync_test_account::TxType; -use zksync_types::{ - ethabi::{Contract, Token}, - get_code_key, get_known_code_key, - protocol_version::ProtocolUpgradeTxCommonData, - Address, Execute, ExecuteTransactionCommon, Transaction, COMPLEX_UPGRADER_ADDRESS, - CONTRACT_DEPLOYER_ADDRESS, CONTRACT_FORCE_DEPLOYER_ADDRESS, H160, H256, - REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use super::utils::{get_complex_upgrade_abi, read_test_contract}; -use crate::{ - interface::{ - ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterface, - VmInterfaceHistoryEnabled, - }, - vm_1_4_1::{ - tests::{ - 
tester::VmTesterBuilder, - utils::{read_complex_upgrade, verify_required_storage}, - }, - HistoryEnabled, - }, -}; - -/// In this test we ensure that the requirements for protocol upgrade transactions are enforced by the bootloader: -/// - This transaction must be the only one in block -/// - If present, this transaction must be the first one in block -#[test] -fn test_protocol_upgrade_is_first() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let bytecode_hash = hash_bytecode(&read_test_contract()); - vm.vm - .storage - .borrow_mut() - .set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); - - // Here we just use some random transaction of protocol upgrade type: - let protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecode hash to - address: H160::random(), - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - // Another random upgrade transaction - let another_protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecode hash to - address: H160::random(), - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - let normal_l1_transaction = vm.rich_accounts[0] - .get_deploy_tx(&read_test_contract(), None, TxType::L1 { serial_id: 0 }) - .tx; - - let expected_error = - Halt::UnexpectedVMBehavior("Assertion error: Protocol upgrade tx not first".to_string()); - - vm.vm.make_snapshot(); - // Test 1: there must be only one system transaction in block - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(another_protocol_upgrade_transaction); - - vm.vm.execute(VmExecutionMode::OneTx); - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error.clone() - } - ); - - // Test 2: the protocol upgrade tx must be the first one in block - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error - } - ); - - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(protocol_upgrade_transaction); - vm.vm.push_transaction(normal_l1_transaction); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); -} - -/// In this test we try to test how force deployments could be done via protocol upgrade transactions. 
-#[test] -fn test_force_deploy_upgrade() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - let bytecode_hash = hash_bytecode(&read_test_contract()); - - let known_code_key = get_known_code_key(&bytecode_hash); - // It is generally expected that all the keys will be set as known prior to the protocol upgrade. - storage_view - .borrow_mut() - .set_value(known_code_key, u256_to_h256(1.into())); - drop(storage_view); - - let address_to_deploy = H160::random(); - // Here we just use some random transaction of protocol upgrade type: - let transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecode hash to - address: address_to_deploy, - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - vm.vm.push_transaction(transaction); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = vec![(bytecode_hash, get_code_key(&address_to_deploy))]; - - // Verify that the bytecode has been set correctly - verify_required_storage(&vm.vm.state, expected_slots); -} - -/// Here we show how the work with the complex upgrader could be done -#[test] -fn test_complex_upgrader() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - - let bytecode_hash = hash_bytecode(&read_complex_upgrade()); - let msg_sender_test_hash = hash_bytecode(&read_msg_sender_test()); - - // Let's assume that the bytecode for the implementation of the complex upgrade - // is already deployed in some address in user space - let upgrade_impl = H160::random(); - let account_code_key = get_code_key(&upgrade_impl); - - storage_view - .borrow_mut() - .set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); - storage_view.borrow_mut().set_value( - get_known_code_key(&msg_sender_test_hash), - u256_to_h256(1.into()), - ); - storage_view - .borrow_mut() - .set_value(account_code_key, bytecode_hash); - drop(storage_view); - - vm.vm.state.decommittment_processor.populate( - vec![ - ( - h256_to_u256(bytecode_hash), - bytes_to_be_words(read_complex_upgrade()), - ), - ( - h256_to_u256(msg_sender_test_hash), - bytes_to_be_words(read_msg_sender_test()), - ), - ], - Timestamp(0), - ); - - let address_to_deploy1 = H160::random(); - let address_to_deploy2 = H160::random(); - - let transaction = get_complex_upgrade_tx( - upgrade_impl, - address_to_deploy1, - address_to_deploy2, - bytecode_hash, - ); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = vec![ - (bytecode_hash, get_code_key(&address_to_deploy1)), - (bytecode_hash, get_code_key(&address_to_deploy2)), - ]; - - // Verify that the bytecode has been set correctly - verify_required_storage(&vm.vm.state, expected_slots); -} - -#[derive(Debug, Clone)] -struct ForceDeployment { - // The bytecode hash to put on an 
address - bytecode_hash: H256, - // The address on which to deploy the bytecode hash to - address: Address, - // Whether to run the constructor on the force deployment - call_constructor: bool, - // The value with which to initialize a contract - value: U256, - // The constructor calldata - input: Vec, -} - -fn get_forced_deploy_tx(deployment: &[ForceDeployment]) -> Transaction { - let deployer = deployer_contract(); - let contract_function = deployer.function("forceDeployOnAddresses").unwrap(); - - let encoded_deployments: Vec<_> = deployment - .iter() - .map(|deployment| { - Token::Tuple(vec![ - Token::FixedBytes(deployment.bytecode_hash.as_bytes().to_vec()), - Token::Address(deployment.address), - Token::Bool(deployment.call_constructor), - Token::Uint(deployment.value), - Token::Bytes(deployment.input.clone()), - ]) - }) - .collect(); - - let params = [Token::Array(encoded_deployments)]; - - let calldata = contract_function - .encode_input(¶ms) - .expect("failed to encode parameters"); - - let execute = Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, - calldata, - factory_deps: None, - value: U256::zero(), - }; - - Transaction { - common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, - gas_limit: U256::from(200_000_000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -// Returns the transaction that performs a complex protocol upgrade. -// The first param is the address of the implementation of the complex upgrade -// in user-space, while the next 3 params are params of the implementation itself -// For the explanation for the parameters, please refer to: -// etc/contracts-test-data/complex-upgrade/complex-upgrade.sol -fn get_complex_upgrade_tx( - implementation_address: Address, - address1: Address, - address2: Address, - bytecode_hash: H256, -) -> Transaction { - let impl_contract = get_complex_upgrade_abi(); - let impl_function = impl_contract.function("someComplexUpgrade").unwrap(); - let impl_calldata = impl_function - .encode_input(&[ - Token::Address(address1), - Token::Address(address2), - Token::FixedBytes(bytecode_hash.as_bytes().to_vec()), - ]) - .unwrap(); - - let complex_upgrader = get_complex_upgrader_abi(); - let upgrade_function = complex_upgrader.function("upgrade").unwrap(); - let complex_upgrader_calldata = upgrade_function - .encode_input(&[ - Token::Address(implementation_address), - Token::Bytes(impl_calldata), - ]) - .unwrap(); - - let execute = Execute { - contract_address: COMPLEX_UPGRADER_ADDRESS, - calldata: complex_upgrader_calldata, - factory_deps: None, - value: U256::zero(), - }; - - Transaction { - common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, - gas_limit: U256::from(200_000_000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -fn read_msg_sender_test() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/msg-sender.sol/MsgSenderTest.json") -} - -fn get_complex_upgrader_abi() -> Contract { - load_sys_contract("ComplexUpgrader") -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/tests/utils.rs b/core/lib/multivm/src/versions/vm_1_4_1/tests/utils.rs deleted file mode 100644 index da69c107a20..00000000000 --- 
a/core/lib/multivm/src/versions/vm_1_4_1/tests/utils.rs +++ /dev/null @@ -1,121 +0,0 @@ -use ethabi::Contract; -use once_cell::sync::Lazy; -use zksync_contracts::{ - load_contract, read_bytecode, read_zbin_bytecode, BaseSystemContracts, SystemContractCode, -}; -use crate::interface::storage::{StoragePtr, WriteStorage}; -use zksync_types::{ - utils::storage_key_for_standard_token_balance, AccountTreeId, Address, StorageKey, H256, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use crate::vm_1_4_1::{ - tests::tester::InMemoryStorageView, types::internals::ZkSyncVmState, HistoryMode, -}; - -pub(crate) static BASE_SYSTEM_CONTRACTS: Lazy<BaseSystemContracts> = - Lazy::new(BaseSystemContracts::load_from_disk); - -// Probably make it a part of vm tester -pub(crate) fn verify_required_storage<H: HistoryMode>( - state: &ZkSyncVmState<InMemoryStorageView, H>, - required_values: Vec<(H256, StorageKey)>, -) { - for (required_value, key) in required_values { - let current_value = state.storage.storage.read_from_storage(&key); - - assert_eq!( - u256_to_h256(current_value), - required_value, - "Invalid value at key {key:?}" - ); - } -} - -pub(crate) fn verify_required_memory<H: HistoryMode>( - state: &ZkSyncVmState<InMemoryStorageView, H>, - required_values: Vec<(U256, u32, u32)>, -) { - for (required_value, memory_page, cell) in required_values { - let current_value = state - .memory - .read_slot(memory_page as usize, cell as usize) - .value; - assert_eq!(current_value, required_value); - } -} - -pub(crate) fn get_balance<S: WriteStorage>( - token_id: AccountTreeId, - account: &Address, - main_storage: StoragePtr<S>, -) -> U256 { - let key = storage_key_for_standard_token_balance(token_id, account); - h256_to_u256(main_storage.borrow_mut().read_value(&key)) -} - -pub(crate) fn read_test_contract() -> Vec<u8> { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json") -} - -pub(crate) fn get_bootloader(test: &str) -> SystemContractCode { - let bootloader_code = read_zbin_bytecode(format!( - "contracts/system-contracts/bootloader/tests/artifacts/{}.yul.zbin", - test - )); - - let bootloader_hash = hash_bytecode(&bootloader_code); - SystemContractCode { - code: bytes_to_be_words(bootloader_code), - hash: bootloader_hash, - } -} - -pub(crate) fn read_nonce_holder_tester() -> Vec<u8> { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/custom-account/nonce-holder-test.sol/NonceHolderTest.json") -} - -pub(crate) fn read_error_contract() -> Vec<u8> { - read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ) -} - -pub(crate) fn get_execute_error_calldata() -> Vec<u8> { - let test_contract = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ); - - let function = test_contract.function("require_short").unwrap(); - - function - .encode_input(&[]) - .expect("failed to encode parameters") -} - -pub(crate) fn read_many_owners_custom_account_contract() -> (Vec<u8>, Contract) { - let path = "etc/contracts-test-data/artifacts-zk/contracts/custom-account/many-owners-custom-account.sol/ManyOwnersCustomAccount.json"; - (read_bytecode(path), load_contract(path)) -} - -pub(crate) fn read_max_depth_contract() -> Vec<u8> { - read_zbin_bytecode( - "core/tests/ts-integration/contracts/zkasm/artifacts/deep_stak.zkasm/deep_stak.zkasm.zbin", - ) -} - -pub(crate) fn read_precompiles_contract() -> Vec<u8> { - read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/precompiles/precompiles.sol/Precompiles.json", - ) -} - -pub(crate) fn read_complex_upgrade() -> Vec<u8> { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json") -} - -pub(crate) fn get_complex_upgrade_abi() -> Contract { - load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json" - ) -} diff --git a/core/lib/multivm/src/versions/vm_1_4_1/types/internals/transaction_data.rs b/core/lib/multivm/src/versions/vm_1_4_1/types/internals/transaction_data.rs index 1379b853a54..f7384da76d0 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/types/internals/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/types/internals/transaction_data.rs @@ -22,7 +22,7 @@ use crate::vm_1_4_1::{ pub(crate) struct TransactionData { pub(crate) tx_type: u8, pub(crate) from: Address, - pub(crate) to: Address, + pub(crate) to: Option<Address>
, pub(crate) gas_limit: U256, pub(crate) pubdata_price_limit: U256, pub(crate) max_fee_per_gas: U256, @@ -169,7 +169,7 @@ impl TransactionData { encode(&[Token::Tuple(vec![ Token::Uint(U256::from_big_endian(&self.tx_type.to_be_bytes())), Token::Address(self.from), - Token::Address(self.to), + Token::Address(self.to.unwrap_or_default()), Token::Uint(self.gas_limit), Token::Uint(self.pubdata_price_limit), Token::Uint(self.max_fee_per_gas), @@ -311,7 +311,7 @@ mod tests { let transaction = TransactionData { tx_type: 113, from: Address::random(), - to: Address::random(), + to: Some(Address::random()), gas_limit: U256::from(1u32), pubdata_price_limit: U256::from(1u32), max_fee_per_gas: U256::from(1u32), diff --git a/core/lib/multivm/src/versions/vm_1_4_1/vm.rs b/core/lib/multivm/src/versions/vm_1_4_1/vm.rs index 2c1a4ba5e36..4122ee94e66 100644 --- a/core/lib/multivm/src/versions/vm_1_4_1/vm.rs +++ b/core/lib/multivm/src/versions/vm_1_4_1/vm.rs @@ -11,7 +11,6 @@ use crate::{ BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, - VmMemoryMetrics, }, utils::events::extract_l2tol1logs_from_l1_messenger, vm_1_4_1::{ @@ -90,7 +89,7 @@ impl VmInterface for Vm { /// Execute VM with custom tracers. fn inspect( &mut self, - tracer: Self::TracerDispatcher, + tracer: &mut Self::TracerDispatcher, execution_mode: VmExecutionMode, ) -> VmExecutionResultAndLogs { self.inspect_inner(tracer, execution_mode, None) @@ -102,7 +101,7 @@ impl VmInterface for Vm { fn inspect_transaction_with_bytecode_compression( &mut self, - tracer: Self::TracerDispatcher, + tracer: &mut Self::TracerDispatcher, tx: Transaction, with_compression: bool, ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { @@ -124,12 +123,8 @@ impl VmInterface for Vm { } } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - self.record_vm_memory_metrics_inner() - } - fn finish_batch(&mut self) -> FinishedL1Batch { - let result = self.inspect(TracerDispatcher::default(), VmExecutionMode::Batch); + let result = self.inspect(&mut TracerDispatcher::default(), VmExecutionMode::Batch); let execution_state = self.get_current_execution_state(); let bootloader_memory = self.bootloader_state.bootloader_memory(); FinishedL1Batch { diff --git a/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/utils.rs index 600ab83bf48..182f6eff441 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/bootloader_state/utils.rs @@ -167,8 +167,8 @@ pub(super) fn assemble_tx_meta(execution_mode: TxExecutionMode, execute_tx: bool // Set 0 byte (execution mode) output[0] = match execution_mode { TxExecutionMode::VerifyExecute => 0x00, - TxExecutionMode::EstimateFee { .. } => 0x00, - TxExecutionMode::EthCall { .. } => 0x02, + TxExecutionMode::EstimateFee => 0x00, + TxExecutionMode::EthCall => 0x02, }; // Set 31 byte (marker for tx execution) diff --git a/core/lib/multivm/src/versions/vm_1_4_2/implementation/statistics.rs b/core/lib/multivm/src/versions/vm_1_4_2/implementation/statistics.rs index 92a2eaa650c..754b8476182 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/implementation/statistics.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/implementation/statistics.rs @@ -57,7 +57,7 @@ impl Vm { } /// Returns the info about all oracles' sizes. 
- pub(crate) fn record_vm_memory_metrics_inner(&self) -> VmMemoryMetrics { + pub(crate) fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { VmMemoryMetrics { event_sink_inner: self.state.event_sink.get_size(), event_sink_history: self.state.event_sink.get_history_size(), diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/block_tip.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/block_tip.rs deleted file mode 100644 index 8578b73ccfa..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/block_tip.rs +++ /dev/null @@ -1,399 +0,0 @@ -use std::borrow::BorrowMut; - -use ethabi::Token; -use zk_evm_1_4_1::aux_structures::Timestamp; -use zksync_contracts::load_sys_contract; -use zksync_system_constants::{ - CONTRACT_FORCE_DEPLOYER_ADDRESS, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS, -}; -use zksync_types::{ - commitment::SerializeCommitment, get_code_key, l2_to_l1_log::L2ToL1Log, - writes::StateDiffRecord, Address, Execute, H256, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use super::utils::{get_complex_upgrade_abi, read_complex_upgrade}; -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_2::{ - constants::{ - BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD, - BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD, BOOTLOADER_BATCH_TIP_OVERHEAD, - MAX_VM_PUBDATA_PER_BATCH, - }, - tests::tester::{get_empty_storage, InMemoryStorageView, VmTesterBuilder}, - tracers::PubdataTracer, - TracerDispatcher, - }, -}; - -#[derive(Debug, Clone, Default)] -struct L1MessengerTestData { - l2_to_l1_logs: usize, - messages: Vec<Vec<u8>>, - bytecodes: Vec<Vec<u8>>, - state_diffs: Vec<StateDiffRecord>, -} - -struct MimicCallInfo { - to: Address, - who_to_mimic: Address, - data: Vec<u8>, -} - -fn populate_mimic_calls(data: L1MessengerTestData) -> Vec<u8> { - let complex_upgrade = get_complex_upgrade_abi(); - let l1_messenger = load_sys_contract("L1Messenger"); - - let logs_mimic_calls = (0..data.l2_to_l1_logs).map(|_| MimicCallInfo { - to: L1_MESSENGER_ADDRESS, - who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, - data: l1_messenger - .function("sendL2ToL1Log") - .unwrap() - .encode_input(&[ - Token::Bool(false), - Token::FixedBytes(H256::random().0.to_vec()), - Token::FixedBytes(H256::random().0.to_vec()), - ]) - .unwrap(), - }); - let messages_mimic_calls = data.messages.iter().map(|message| MimicCallInfo { - to: L1_MESSENGER_ADDRESS, - who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, - data: l1_messenger - .function("sendToL1") - .unwrap() - .encode_input(&[Token::Bytes(message.clone())]) - .unwrap(), - }); - let bytecodes_mimic_calls = data.bytecodes.iter().map(|bytecode| MimicCallInfo { - to: L1_MESSENGER_ADDRESS, - who_to_mimic: KNOWN_CODES_STORAGE_ADDRESS, - data: l1_messenger - .function("requestBytecodeL1Publication") - .unwrap() - .encode_input(&[Token::FixedBytes(hash_bytecode(bytecode).0.to_vec())]) - .unwrap(), - }); - - let encoded_calls = logs_mimic_calls - .chain(messages_mimic_calls) - .chain(bytecodes_mimic_calls) - .map(|call| { - Token::Tuple(vec![ - Token::Address(call.to), - Token::Address(call.who_to_mimic), - Token::Bytes(call.data), - ]) - }) - .collect::<Vec<_>>(); - - complex_upgrade - .function("mimicCalls") - .unwrap() - .encode_input(&[Token::Array(encoded_calls)]) - .unwrap() -} - -struct TestStatistics { - pub max_used_gas: u32, - pub circuit_statistics: u64, - pub execution_metrics_size: u64, -} - -struct StatisticsTagged { - pub statistics: TestStatistics, - pub tag: String, -} - -fn execute_test(test_data: L1MessengerTestData) ->
TestStatistics { - let mut storage = get_empty_storage(); - let complex_upgrade_code = read_complex_upgrade(); - - // For this test we'll just put the bytecode onto the force deployer address - storage.borrow_mut().set_value( - get_code_key(&CONTRACT_FORCE_DEPLOYER_ADDRESS), - hash_bytecode(&complex_upgrade_code), - ); - storage - .borrow_mut() - .store_factory_dep(hash_bytecode(&complex_upgrade_code), complex_upgrade_code); - - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_storage(storage) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let bytecodes = test_data - .bytecodes - .iter() - .map(|bytecode| { - let hash = hash_bytecode(bytecode); - let words = bytes_to_be_words(bytecode.clone()); - (h256_to_u256(hash), words) - }) - .collect(); - vm.vm - .state - .decommittment_processor - .populate(bytecodes, Timestamp(0)); - - let data = populate_mimic_calls(test_data.clone()); - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: CONTRACT_FORCE_DEPLOYER_ADDRESS, - calldata: data, - value: U256::zero(), - factory_deps: None, - }, - None, - ); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "Transaction wasn't successful for input: {:?}", - test_data - ); - - // Now we count how many ergs were spent at the end of the batch - // It is assumed that the top level frame is the bootloader - - let ergs_before = vm.vm.state.local_state.callstack.current.ergs_remaining; - - // We ensure that indeed the provided state diffs are used - let pubdata_tracer = PubdataTracer::<_>::new_with_forced_state_diffs( - vm.vm.batch_env.clone(), - VmExecutionMode::Batch, - test_data.state_diffs.clone(), - ); - - let result = vm.vm.inspect_inner( - TracerDispatcher::default(), - VmExecutionMode::Batch, - Some(pubdata_tracer), - ); - - assert!( - !result.result.is_failed(), - "Batch wasn't successful for input: {:?}", - test_data - ); - - let ergs_after = vm.vm.state.local_state.callstack.current.ergs_remaining; - - assert_eq!( - (ergs_before - ergs_after) as u64, - result.statistics.gas_used - ); - - TestStatistics { - max_used_gas: ergs_before - ergs_after, - circuit_statistics: result.statistics.circuit_statistic.total() as u64, - execution_metrics_size: result.get_execution_metrics(None).size() as u64, - } -} - -fn generate_state_diffs( - repeated_writes: bool, - small_diff: bool, - number_of_state_diffs: usize, -) -> Vec<StateDiffRecord> { - (0..number_of_state_diffs) - .map(|i| { - let address = Address::from_low_u64_be(i as u64); - let key = U256::from(i); - let enumeration_index = if repeated_writes { i + 1 } else { 0 }; - - let (initial_value, final_value) = if small_diff { - // As small as it gets, one byte to denote zeroing out the value - (U256::from(1), U256::from(0)) - } else { - // As large as it gets - (U256::from(0), U256::from(2).pow(255.into())) - }; - - StateDiffRecord { - address, - key, - derived_key: u256_to_h256(i.into()).0, - enumeration_index: enumeration_index as u64, - initial_value, - final_value, - } - }) - .collect() -} - -// A valid zkEVM bytecode has an odd number of 32-byte words -fn get_valid_bytecode_length(length: usize) -> usize { - // Firstly ensure that the length is divisible by 32 - let length_padded_to_32 = if length % 32 == 0 { - length - } else { - length + 32 - (length % 32) - }; - - // Then we ensure that the number returned by division by 32 is odd - if
length_padded_to_32 % 64 == 0 { - length_padded_to_32 + 32 - } else { - length_padded_to_32 - } -} - -#[test] -#[allow(clippy::vec_init_then_push)] -fn test_dry_run_upper_bound() { - // Some of the pubdata is consumed by constant fields (such as length of messages, number of logs, etc.). - // While this leaves some room for error, at the end of the test we require that the `BOOTLOADER_BATCH_TIP_OVERHEAD` - // is sufficient with a very large margin, so it is okay to ignore 1% of possible pubdata. - const MAX_EFFECTIVE_PUBDATA_PER_BATCH: usize = - (MAX_VM_PUBDATA_PER_BATCH as f64 * 0.99) as usize; - - // We are re-using the `ComplexUpgrade` contract as it already has the `mimicCall` functionality. - // To get the upper bound, we'll try to do the following: - // 1. Max number of logs. - // 2. Lots of small L2->L1 messages / one large L2->L1 message. - // 3. Lots of small bytecodes / one large bytecode. - // 4. Lots of storage slot updates. - - let mut statistics = Vec::new(); - - // max logs - statistics.push(StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - l2_to_l1_logs: MAX_EFFECTIVE_PUBDATA_PER_BATCH / L2ToL1Log::SERIALIZED_SIZE, - ..Default::default() - }), - tag: "max_logs".to_string(), - }); - - // max messages - statistics.push(StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each L2->L1 message is accompanied by a Log + its length, which is a 4 byte number, - // so the max number of pubdata is bound by it - messages: vec![ - vec![0; 0]; - MAX_EFFECTIVE_PUBDATA_PER_BATCH / (L2ToL1Log::SERIALIZED_SIZE + 4) - ], - ..Default::default() - }), - tag: "max_messages".to_string(), - }); - - // long message - statistics.push(StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each L2->L1 message is accompanied by a Log, so the max number of pubdata is bound by it - messages: vec![vec![0; MAX_EFFECTIVE_PUBDATA_PER_BATCH]; 1], - ..Default::default() - }), - tag: "long_message".to_string(), - }); - - // max bytecodes - statistics.push(StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each bytecode must be at least 32 bytes long. 
- // Each uncompressed bytecode is accompanied by its length, which is a 4 byte number - bytecodes: vec![vec![0; 32]; MAX_EFFECTIVE_PUBDATA_PER_BATCH / (32 + 4)], - ..Default::default() - }), - tag: "max_bytecodes".to_string(), - }); - - // long bytecode - statistics.push(StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - bytecodes: vec![vec![0; get_valid_bytecode_length(MAX_EFFECTIVE_PUBDATA_PER_BATCH)]; 1], - ..Default::default() - }), - tag: "long_bytecode".to_string(), - }); - - // lots of small repeated writes - statistics.push(StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // In theory each state diff can require only 5 bytes to be published (enum index + 4 bytes for the key) - state_diffs: generate_state_diffs(true, true, MAX_EFFECTIVE_PUBDATA_PER_BATCH / 5), - ..Default::default() - }), - tag: "small_repeated_writes".to_string(), - }); - - // lots of big repeated writes - statistics.push(StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each big repeated write will approximately require 4 bytes for key + 1 byte for encoding type + 32 bytes for value - state_diffs: generate_state_diffs(true, false, MAX_EFFECTIVE_PUBDATA_PER_BATCH / 37), - ..Default::default() - }), - tag: "big_repeated_writes".to_string(), - }); - - // lots of small initial writes - statistics.push(StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each small initial write will take at least 32 bytes for derived key + 1 bytes encoding zeroing out - state_diffs: generate_state_diffs(false, true, MAX_EFFECTIVE_PUBDATA_PER_BATCH / 33), - ..Default::default() - }), - tag: "small_initial_writes".to_string(), - }); - - // lots of large initial writes - statistics.push(StatisticsTagged { - statistics: execute_test(L1MessengerTestData { - // Each big write will take at least 32 bytes for derived key + 1 byte for encoding type + 32 bytes for value - state_diffs: generate_state_diffs(false, false, MAX_EFFECTIVE_PUBDATA_PER_BATCH / 65), - ..Default::default() - }), - tag: "big_initial_writes".to_string(), - }); - - // We use 2x overhead for the batch tip compared to the worst estimated scenario. 
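// Before the assertions that follow, a minimal sketch (illustrative only) of the
// per-state-diff pubdata cost model encoded by the divisors used above
// (5, 37, 33, 65); the byte counts mirror the comments in this test:
fn worst_case_state_diff_bytes(repeated_write: bool, small_diff: bool) -> usize {
    // Repeated writes publish a 4-byte enumeration index; initial writes
    // publish the full 32-byte derived key.
    let key_bytes = if repeated_write { 4 } else { 32 };
    // A zeroing diff needs only a 1-byte marker; otherwise a 1-byte encoding
    // type plus the 32-byte value is published.
    let value_bytes = if small_diff { 1 } else { 1 + 32 };
    key_bytes + value_bytes
}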
- let max_used_gas = statistics - .iter() - .map(|s| (s.statistics.max_used_gas, s.tag.clone())) - .max() - .unwrap(); - assert!( - max_used_gas.0 * 2 <= BOOTLOADER_BATCH_TIP_OVERHEAD, - "BOOTLOADER_BATCH_TIP_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_OVERHEAD = {}", - max_used_gas.1, - max_used_gas.0, - BOOTLOADER_BATCH_TIP_OVERHEAD - ); - - let circuit_statistics = statistics - .iter() - .map(|s| (s.statistics.circuit_statistics, s.tag.clone())) - .max() - .unwrap(); - assert!( - circuit_statistics.0 * 2 <= BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD as u64, - "BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD = {}", - circuit_statistics.1, - circuit_statistics.0, - BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD - ); - - let execution_metrics_size = statistics - .iter() - .map(|s| (s.statistics.execution_metrics_size, s.tag.clone())) - .max() - .unwrap(); - assert!( - execution_metrics_size.0 * 2 <= BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD as u64, - "BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD is too low for {} with result {}, BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD = {}", - execution_metrics_size.1, - execution_metrics_size.0, - BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/bootloader.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/bootloader.rs deleted file mode 100644 index 8d69d05c444..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/bootloader.rs +++ /dev/null @@ -1,55 +0,0 @@ -use zksync_types::U256; - -use crate::{ - interface::{ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_2::{ - constants::BOOTLOADER_HEAP_PAGE, - tests::{ - tester::VmTesterBuilder, - utils::{get_bootloader, verify_required_memory, BASE_SYSTEM_CONTRACTS}, - }, - }, -}; - -#[test] -fn test_dummy_bootloader() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!result.result.is_failed()); - - let correct_first_cell = U256::from_str_radix("123123123", 16).unwrap(); - verify_required_memory( - &vm.vm.state, - vec![(correct_first_cell, BOOTLOADER_HEAP_PAGE, 0)], - ); -} - -#[test] -fn test_bootloader_out_of_gas() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_bootloader_gas_limit(10) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let res = vm.vm.execute(VmExecutionMode::Batch); - - assert_matches!( - res.result, - ExecutionResult::Halt { - reason: Halt::BootloaderOutOfGas - } - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/bytecode_publishing.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/bytecode_publishing.rs deleted file mode 100644 index dd91d6d94a9..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/bytecode_publishing.rs +++ /dev/null @@ -1,40 +0,0 @@ -use zksync_types::event::extract_long_l2_to_l1_messages; -use 
zksync_utils::bytecode::compress_bytecode; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_2::tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, -}; - -#[test] -fn test_bytecode_publishing() { - // In this test, we aim to ensure that the contents of the compressed bytecodes - // are included as part of the L2->L1 long messages - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let compressed_bytecode = compress_bytecode(&counter).unwrap(); - - let DeployContractsTx { tx, .. } = account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - - let state = vm.vm.get_current_execution_state(); - let long_messages = extract_long_l2_to_l1_messages(&state.events); - assert!( - long_messages.contains(&compressed_bytecode), - "Bytecode not published" - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/call_tracer.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/call_tracer.rs deleted file mode 100644 index 2fafb7e51aa..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/call_tracer.rs +++ /dev/null @@ -1,92 +0,0 @@ -use std::sync::Arc; - -use once_cell::sync::OnceCell; -use zksync_types::{Address, Execute}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - tracers::CallTracer, - vm_1_4_2::{ - constants::BLOCK_GAS_LIMIT, - tests::{ - tester::VmTesterBuilder, - utils::{read_max_depth_contract, read_test_contract}, - }, - ToTracerPointer, - }, -}; - -// This test is ultra slow, so it's ignored by default. 
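// A usage note on the `#[ignore]` attribute below: ignored tests stay out of the
// default `cargo test` run but can still be executed on demand, e.g.
//
//     cargo test test_max_depth -- --ignored
//
// (invoked from the crate that hosts these tests; the exact package flag is left
// out here since the crate name is not shown in this diff).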
-#[test] -#[ignore] -fn test_max_depth() { - let contract = read_max_depth_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contract, address, true)]) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: vec![], - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result = Arc::new(OnceCell::new()); - let call_tracer = CallTracer::new(result.clone()).into_tracer_pointer(); - vm.vm.push_transaction(tx); - let res = vm.vm.inspect(call_tracer.into(), VmExecutionMode::OneTx); - assert!(result.get().is_some()); - assert!(res.result.is_failed()); -} - -#[test] -fn test_basic_behavior() { - let contract = read_test_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contract, address, true)]) - .build(); - - let increment_by_6_calldata = - "7cf5dab00000000000000000000000000000000000000000000000000000000000000006"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: hex::decode(increment_by_6_calldata).unwrap(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result = Arc::new(OnceCell::new()); - let call_tracer = CallTracer::new(result.clone()).into_tracer_pointer(); - vm.vm.push_transaction(tx); - let res = vm.vm.inspect(call_tracer.into(), VmExecutionMode::OneTx); - - let call_tracer_result = result.get().unwrap(); - - assert_eq!(call_tracer_result.len(), 1); - // Expect that there are plenty of subcalls underneath. - let subcall = &call_tracer_result[0].calls; - assert!(subcall.len() > 10); - assert!(!res.result.is_failed()); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/circuits.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/circuits.rs deleted file mode 100644 index 7d0dfd1ed0e..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/circuits.rs +++ /dev/null @@ -1,69 +0,0 @@ -use zksync_types::{Address, Execute, U256}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_2::{constants::BLOCK_GAS_LIMIT, tests::tester::VmTesterBuilder}, -}; - -// Checks that the estimated number of circuits for a simple transfer doesn't differ much -// from the hardcoded expected value.
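// The 10% tolerance rule applied inside the test below, restated as a
// self-contained sketch (the zero case must match exactly, since relative
// error is undefined at zero):
fn within_tolerance(actual: f32, expected: f32) -> bool {
    if expected == 0.0 {
        actual == expected
    } else {
        ((actual - expected) / expected).abs() < 0.1
    }
}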
-#[test] -fn test_circuits() { - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Address::random(), - calldata: Vec::new(), - value: U256::from(1u8), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let res = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let s = res.statistics.circuit_statistic; - // Check `circuit_statistic`. - const EXPECTED: [f32; 11] = [ - 1.1979, 0.1390, 1.5455, 0.0031, 1.0573, 0.00059, 0.003438, 0.00077, 0.1195, 0.1429, 0.0, - ]; - let actual = [ - (s.main_vm, "main_vm"), - (s.ram_permutation, "ram_permutation"), - (s.storage_application, "storage_application"), - (s.storage_sorter, "storage_sorter"), - (s.code_decommitter, "code_decommitter"), - (s.code_decommitter_sorter, "code_decommitter_sorter"), - (s.log_demuxer, "log_demuxer"), - (s.events_sorter, "events_sorter"), - (s.keccak256, "keccak256"), - (s.ecrecover, "ecrecover"), - (s.sha256, "sha256"), - ]; - for ((actual, name), expected) in actual.iter().zip(EXPECTED) { - if expected == 0.0 { - assert_eq!( - *actual, expected, - "Check failed for {}, expected {}, actual {}", - name, expected, actual - ); - } else { - let diff = (actual - expected) / expected; - assert!( - diff.abs() < 0.1, - "Check failed for {}, expected {}, actual {}", - name, - expected, - actual - ); - } - } -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/default_aa.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/default_aa.rs deleted file mode 100644 index b0717a57c56..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/default_aa.rs +++ /dev/null @@ -1,77 +0,0 @@ -use zksync_system_constants::L2_ETH_TOKEN_ADDRESS; -use zksync_types::{ - get_code_key, get_known_code_key, get_nonce_key, - system_contracts::{DEPLOYMENT_NONCE_INCREMENT, TX_NONCE_INCREMENT}, - AccountTreeId, U256, -}; -use zksync_utils::u256_to_h256; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_2::{ - tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::{get_balance, read_test_contract, verify_required_storage}, - }, - utils::fee::get_batch_base_fee, - }, -}; - -#[test] -fn test_default_aa_interaction() { - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let DeployContractsTx { - tx, - bytecode_hash, - address, - } = account.get_deploy_tx(&counter, None, TxType::L2); - let maximal_fee = tx.gas_limit() * get_batch_base_fee(&vm.vm.batch_env); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - vm.vm.get_current_execution_state(); - - // Both deployment and ordinary nonce should be incremented by one. 
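// A hedged aside on the two constants used just below: the NonceHolder system
// contract packs both nonces into a single storage slot, so one executed deploy
// transaction is expected to bump the raw slot value by TX_NONCE_INCREMENT (the
// account/tx nonce) plus DEPLOYMENT_NONCE_INCREMENT (the deployment nonce,
// stored in the upper half of the slot under the assumed packed layout).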
- let account_nonce_key = get_nonce_key(&account.address); - let expected_nonce = TX_NONCE_INCREMENT + DEPLOYMENT_NONCE_INCREMENT; - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&bytecode_hash); - - // The contract should be deployed successfully. - let account_code_key = get_code_key(&address); - - let expected_slots = vec![ - (u256_to_h256(expected_nonce), account_nonce_key), - (u256_to_h256(U256::from(1u32)), known_codes_key), - (bytecode_hash, account_code_key), - ]; - - verify_required_storage(&vm.vm.state, expected_slots); - - let expected_fee = maximal_fee - - U256::from(result.refunds.gas_refunded) - * U256::from(get_batch_base_fee(&vm.vm.batch_env)); - let operator_balance = get_balance( - AccountTreeId::new(L2_ETH_TOKEN_ADDRESS), - &vm.fee_account, - vm.vm.state.storage.storage.get_ptr(), - ); - - assert_eq!( - operator_balance, expected_fee, - "Operator did not receive his fee" - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/gas_limit.rs deleted file mode 100644 index 6a57fd07ae7..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/gas_limit.rs +++ /dev/null @@ -1,46 +0,0 @@ -use zksync_types::{fee::Fee, Execute}; - -use crate::{ - interface::{TxExecutionMode, VmInterface}, - vm_1_4_2::{ - constants::{BOOTLOADER_HEAP_PAGE, TX_DESCRIPTION_OFFSET, TX_GAS_LIMIT_OFFSET}, - tests::tester::VmTesterBuilder, - }, -}; - -/// Checks that `TX_GAS_LIMIT_OFFSET` constant is correct. -#[test] -fn test_tx_gas_limit_offset() { - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let gas_limit = 9999.into(); - let tx = vm.rich_accounts[0].get_l2_tx_for_execute( - Execute { - contract_address: Default::default(), - calldata: vec![], - value: Default::default(), - factory_deps: None, - }, - Some(Fee { - gas_limit, - ..Default::default() - }), - ); - - vm.vm.push_transaction(tx); - - let gas_limit_from_memory = vm - .vm - .state - .memory - .read_slot( - BOOTLOADER_HEAP_PAGE as usize, - TX_DESCRIPTION_OFFSET + TX_GAS_LIMIT_OFFSET, - ) - .value; - assert_eq!(gas_limit_from_memory, gas_limit); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/get_used_contracts.rs deleted file mode 100644 index cfe3e1bfc23..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/get_used_contracts.rs +++ /dev/null @@ -1,109 +0,0 @@ -use std::collections::{HashMap, HashSet}; - -use itertools::Itertools; -use crate::interface::storage::WriteStorage; -use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; -use zksync_test_account::Account; -use zksync_types::{Execute, U256}; -use zksync_utils::{bytecode::hash_bytecode, h256_to_u256}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_2::{ - tests::{ - tester::{TxType, VmTesterBuilder}, - utils::{read_test_contract, BASE_SYSTEM_CONTRACTS}, - }, - Vm, - }, - HistoryMode, -}; - -#[test] -fn test_get_used_contracts() { - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - assert!(known_bytecodes_without_aa_code(&vm.vm).is_empty()); - - // create and push and execute some not-empty factory deps transaction with success 
status - // to check that `get_used_contracts()` updates - let contract_code = read_test_contract(); - let mut account = Account::random(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 0 }); - vm.vm.push_transaction(tx.tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - assert!(vm - .vm - .get_used_contracts() - .contains(&h256_to_u256(tx.bytecode_hash))); - - // Note: `Default_AA` will be in the list of used contracts if L2 tx is used - assert_eq!( - vm.vm - .get_used_contracts() - .into_iter() - .collect::<HashSet<U256>>(), - known_bytecodes_without_aa_code(&vm.vm) - .keys() - .cloned() - .collect::<HashSet<U256>>() - ); - - // create, push and execute some non-empty factory deps transaction that fails - // (`known_bytecodes` will be updated but we expect `get_used_contracts()` to not be updated) - - let calldata = [1, 2, 3]; - let big_calldata: Vec<u8> = calldata - .iter() - .cycle() - .take(calldata.len() * 1024) - .cloned() - .collect(); - let account2 = Account::random(); - let tx2 = account2.get_l1_tx( - Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, - calldata: big_calldata, - value: Default::default(), - factory_deps: Some(vec![vec![1; 32]]), - }, - 1, - ); - - vm.vm.push_transaction(tx2.clone()); - - let res2 = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(res2.result.is_failed()); - - for factory_dep in tx2.execute.factory_deps.unwrap() { - let hash = hash_bytecode(&factory_dep); - let hash_to_u256 = h256_to_u256(hash); - assert!(known_bytecodes_without_aa_code(&vm.vm) - .keys() - .contains(&hash_to_u256)); - assert!(!vm.vm.get_used_contracts().contains(&hash_to_u256)); - } -} - -fn known_bytecodes_without_aa_code<S: WriteStorage, H: HistoryMode>( - vm: &Vm<S, H>, -) -> HashMap<U256, Vec<U256>> { - let mut known_bytecodes_without_aa_code = vm - .state - .decommittment_processor - .known_bytecodes - .inner() - .clone(); - - known_bytecodes_without_aa_code - .remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash)) - .unwrap(); - - known_bytecodes_without_aa_code -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/invalid_bytecode.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/invalid_bytecode.rs deleted file mode 100644 index c79fcd8ba8e..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/invalid_bytecode.rs +++ /dev/null @@ -1,120 +0,0 @@ -use zksync_types::H256; -use zksync_utils::h256_to_u256; - -use crate::vm_1_4_2::tests::tester::VmTesterBuilder; -use crate::vm_1_4_2::types::inputs::system_env::TxExecutionMode; -use crate::vm_1_4_2::{HistoryEnabled, TxRevertReason}; - -// TODO this test requires a lot of hacks for bypassing the bytecode checks in the VM. -// Port it later, it's not significant.
for now - -#[test] -fn test_invalid_bytecode() { - let mut vm_builder = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1); - let mut storage = vm_builder.take_storage(); - let mut vm = vm_builder.build(&mut storage); - - let block_gas_per_pubdata = vm_test_env - .block_context - .context - .block_gas_price_per_pubdata(); - - let mut test_vm_with_custom_bytecode_hash = - |bytecode_hash: H256, expected_revert_reason: Option<TxRevertReason>| { - let mut oracle_tools = - OracleTools::new(vm_test_env.storage_ptr.as_mut(), HistoryEnabled); - - let (encoded_tx, predefined_overhead) = get_l1_tx_with_custom_bytecode_hash( - h256_to_u256(bytecode_hash), - block_gas_per_pubdata as u32, - ); - - run_vm_with_custom_factory_deps( - &mut oracle_tools, - vm_test_env.block_context.context, - &vm_test_env.block_properties, - encoded_tx, - predefined_overhead, - expected_revert_reason, - ); - }; - - let failed_to_mark_factory_deps = |msg: &str, data: Vec<u8>| { - TxRevertReason::FailedToMarkFactoryDependencies(VmRevertReason::General { - msg: msg.to_string(), - data, - }) - }; - - // Here we provide the correctly-formatted bytecode hash of - // odd length, so it should work. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - None, - ); - - // Here we provide correctly formatted bytecode of even length, so - // it should fail. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Code length in words must be odd", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 67, 111, 100, 101, 32, 108, 101, 110, - 103, 116, 104, 32, 105, 110, 32, 119, 111, 114, 100, 115, 32, 109, 117, 115, 116, - 32, 98, 101, 32, 111, 100, 100, - ], - )), - ); - - // Here we provide incorrectly formatted bytecode of odd length, so - // it should fail. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Incorrectly formatted bytecodeHash", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, - 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, - 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - )), - ); - - // Here we provide incorrectly formatted bytecode of odd length, so - // it should fail.
- test_vm_with_custom_bytecode_hash( - H256([ - 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Incorrectly formatted bytecodeHash", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, - 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, - 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - )), - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/is_write_initial.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/is_write_initial.rs deleted file mode 100644 index 7da250ef7a9..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/is_write_initial.rs +++ /dev/null @@ -1,45 +0,0 @@ -use crate::interface::storage::ReadStorage; -use zksync_types::get_nonce_key; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_2::tests::{ - tester::{Account, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, -}; - -#[test] -fn test_is_write_initial_behaviour() { - // In this test, we check result of `is_write_initial` at different stages. - // The main idea is to check that `is_write_initial` storage uses the correct cache for initial_writes and doesn't - // messed up it with the repeated writes during the one batch execution. - - let mut account = Account::random(); - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let nonce_key = get_nonce_key(&account.address); - // Check that the next write to the nonce key will be initial. - assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); - - let contract_code = read_test_contract(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L2).tx; - - vm.vm.push_transaction(tx); - vm.vm.execute(VmExecutionMode::OneTx); - - // Check that `is_write_initial` still returns true for the nonce key. 
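// An aside on why the assertion below still holds after the deploy tx: the
// initial/repeated distinction is answered from the storage backing the batch,
// not from writes buffered inside the batch, i.e. via the `ReadStorage` trait
// used above (`fn is_write_initial(&mut self, key: &StorageKey) -> bool`), so a
// slot first written within the current batch keeps reporting as an initial
// write until the batch is sealed.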
- assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/l1_tx_execution.rs deleted file mode 100644 index 021f5554873..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/l1_tx_execution.rs +++ /dev/null @@ -1,188 +0,0 @@ -use ethabi::Token; -use zksync_contracts::l1_messenger_contract; -use zksync_system_constants::{BOOTLOADER_ADDRESS, L1_MESSENGER_ADDRESS}; -use zksync_types::{ - get_code_key, get_known_code_key, - l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, - storage_writes_deduplicator::StorageWritesDeduplicator, - Execute, ExecuteTransactionCommon, U256, -}; -use zksync_utils::u256_to_h256; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_2::{ - tests::{ - tester::{TxType, VmTesterBuilder}, - utils::{read_test_contract, verify_required_storage, BASE_SYSTEM_CONTRACTS}, - }, - types::internals::TransactionData, - }, -}; - -#[test] -fn test_l1_tx_execution() { - // In this test, we try to execute a contract deployment from L1 - // Here instead of marking code hash via the bootloader means, we will be - // using L1->L2 communication, the same it would likely be done during the priority mode. - - // There are always at least 7 initial writes here, because we pay fees from l1: - // - `totalSupply` of ETH token - // - balance of the refund recipient - // - balance of the bootloader - // - `tx_rolling` hash - // - rolling hash of L2->L1 logs - // - transaction number in block counter - // - L2->L1 log counter in `L1Messenger` - - // TODO(PLA-537): right now we are using 4 slots instead of 7 due to 0 fee for transaction. - let basic_initial_writes = 4; - - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let contract_code = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let deploy_tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 1 }); - let tx_data: TransactionData = deploy_tx.tx.clone().into(); - - let required_l2_to_l1_logs: Vec<_> = vec![L2ToL1Log { - shard_id: 0, - is_service: true, - tx_number_in_block: 0, - sender: BOOTLOADER_ADDRESS, - key: tx_data.tx_hash(0.into()), - value: u256_to_h256(U256::from(1u32)), - }] - .into_iter() - .map(UserL2ToL1Log) - .collect(); - - vm.vm.push_transaction(deploy_tx.tx.clone()); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&deploy_tx.bytecode_hash); - - // The contract should be deployed successfully. 
- let account_code_key = get_code_key(&deploy_tx.address); - - let expected_slots = vec![ - (u256_to_h256(U256::from(1u32)), known_codes_key), - (deploy_tx.bytecode_hash, account_code_key), - ]; - assert!(!res.result.is_failed()); - - verify_required_storage(&vm.vm.state, expected_slots); - - assert_eq!(res.logs.user_l2_to_l1_logs, required_l2_to_l1_logs); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - true, - None, - false, - TxType::L1 { serial_id: 0 }, - ); - vm.vm.push_transaction(tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - let storage_logs = res.logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - - // Tx panicked - assert_eq!(res.initial_storage_writes - basic_initial_writes, 0); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - false, - None, - false, - TxType::L1 { serial_id: 0 }, - ); - vm.vm.push_transaction(tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); - let storage_logs = res.logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We changed one slot inside the contract - assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); - - // No repeated writes - let repeated_writes = res.repeated_storage_writes; - assert_eq!(res.repeated_storage_writes, 0); - - vm.vm.push_transaction(tx); - let storage_logs = vm.vm.execute(VmExecutionMode::OneTx).logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We do the same storage write; it will be deduplicated, so still 4 initial writes and 0 repeated - assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); - assert_eq!(res.repeated_storage_writes, repeated_writes); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - false, - Some(10.into()), - false, - TxType::L1 { serial_id: 1 }, - ); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - // The method is not payable, so the tx should fail - assert!(result.result.is_failed(), "The transaction should fail"); - - let res = StorageWritesDeduplicator::apply_on_empty_state(&result.logs.storage_logs); - // There are only basic initial writes - assert_eq!(res.initial_storage_writes - basic_initial_writes, 2); -} - -#[test] -fn test_l1_tx_execution_high_gas_limit() { - // In this test, we try to execute an L1->L2 transaction with a high gas limit. - // Priority transactions with a dangerously high gas limit wouldn't normally even pass the checks on L1; - // however, they might pass during the transition period to the new fee model, so we check that we can safely process them.
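// A hedged note on the expectation in this test: the bootloader enforces an
// upper bound on any single transaction's gas limit, and 300M gas is assumed
// to sit far above that cap (the cap itself lives in the bootloader/system
// configuration, not in this file), so the transaction must fail rather than
// execute.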
- - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - - let l1_messenger = l1_messenger_contract(); - - let contract_function = l1_messenger.function("sendToL1").unwrap(); - let params = [ - // Even a message of size 100k should not be able to be sent by a priority transaction - Token::Bytes(vec![0u8; 100_000]), - ]; - let calldata = contract_function.encode_input(&params).unwrap(); - - let mut tx = account.get_l1_tx( - Execute { - contract_address: L1_MESSENGER_ADDRESS, - value: 0.into(), - factory_deps: None, - calldata, - }, - 0, - ); - - if let ExecuteTransactionCommon::L1(data) = &mut tx.common_data { - // Using some large gas limit - data.gas_limit = 300_000_000.into(); - } else { - unreachable!() - }; - - vm.vm.push_transaction(tx); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(res.result.is_failed(), "The transaction should've failed"); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/l2_blocks.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/l2_blocks.rs deleted file mode 100644 index f722890f474..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/l2_blocks.rs +++ /dev/null @@ -1,437 +0,0 @@ -//! -//! Tests for the bootloader -//! The description for each of the tests can be found in the corresponding `.yul` file. -//! - -use zk_evm_1_4_1::aux_structures::Timestamp; -use crate::interface::storage::WriteStorage; -use zksync_system_constants::REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE; -use zksync_types::{ - block::{pack_block_info, MiniblockHasher}, - AccountTreeId, Execute, ExecuteTransactionCommon, L1BatchNumber, L1TxCommonData, - MiniblockNumber, ProtocolVersionId, StorageKey, Transaction, H160, H256, - SYSTEM_CONTEXT_ADDRESS, SYSTEM_CONTEXT_BLOCK_INFO_POSITION, - SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, - U256, -}; -use zksync_utils::{h256_to_u256, u256_to_h256}; - -use crate::{ - interface::{ExecutionResult, Halt, L2BlockEnv, TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_2::{ - constants::{ - BOOTLOADER_HEAP_PAGE, TX_OPERATOR_L2_BLOCK_INFO_OFFSET, - TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO, - }, - tests::tester::{default_l1_batch, VmTesterBuilder}, - utils::l2_blocks::get_l2_block_hash_key, - Vm, - }, - HistoryMode, -}; - -fn get_l1_noop() -> Transaction { - Transaction { - common_data: ExecuteTransactionCommon::L1(L1TxCommonData { - sender: H160::random(), - gas_limit: U256::from(2000000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute: Execute { - contract_address: H160::zero(), - calldata: vec![], - value: U256::zero(), - factory_deps: None, - }, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -#[test] -fn test_l2_block_initialization_timestamp() { - // This test checks that the L2 block initialization works correctly. - // Here we check that the first block must have a timestamp that is greater than or equal to the timestamp - // of the current batch. - - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - // Override the timestamp of the current miniblock to be 0.
- vm.vm.bootloader_state.push_l2_block(L2BlockEnv { - number: 1, - timestamp: 0, - prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }); - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt {reason: Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())} - ); -} - -#[test] -fn test_l2_block_initialization_number_non_zero() { - // This test checks that the L2 block initialization works correctly. - // Here we check that the first miniblock number can not be zero. - - let l1_batch = default_l1_batch(L1BatchNumber(1)); - let first_l2_block = L2BlockEnv { - number: 0, - timestamp: l1_batch.timestamp, - prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }; - - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 0, first_l2_block, timestamp); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt { - reason: Halt::FailedToSetL2Block( - "L2 block number is never expected to be zero".to_string() - ) - } - ); -} - -fn test_same_l2_block( - expected_error: Option, - override_timestamp: Option, - override_prev_block_hash: Option, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - vm.vm.push_transaction(l1_tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!res.result.is_failed()); - - let mut current_l2_block = vm.vm.batch_env.first_l2_block; - - if let Some(timestamp) = override_timestamp { - current_l2_block.timestamp = timestamp; - } - if let Some(prev_block_hash) = override_prev_block_hash { - current_l2_block.prev_block_hash = prev_block_hash; - } - - if (None, None) == (override_timestamp, override_prev_block_hash) { - current_l2_block.max_virtual_blocks_to_create = 0; - } - - vm.vm.push_transaction(l1_tx); - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 1, current_l2_block, timestamp); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_same_l2_block() { - // This test aims to test the case when there are multiple transactions inside the same L2 block. 
- - // Case 1: Incorrect timestamp - test_same_l2_block( - Some(Halt::FailedToSetL2Block( - "The timestamp of the same L2 block must be same".to_string(), - )), - Some(0), - None, - ); - - // Case 2: Incorrect previous block hash - test_same_l2_block( - Some(Halt::FailedToSetL2Block( - "The previous hash of the same L2 block must be same".to_string(), - )), - None, - Some(H256::zero()), - ); - - // Case 3: Correct continuation of the same L2 block - test_same_l2_block(None, None, None); -} - -fn test_new_l2_block( - first_l2_block: L2BlockEnv, - overriden_second_block_number: Option, - overriden_second_block_timestamp: Option, - overriden_second_block_prev_block_hash: Option, - expected_error: Option, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - l1_batch.first_l2_block = first_l2_block; - - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - // Firstly we execute the first transaction - vm.vm.push_transaction(l1_tx.clone()); - vm.vm.execute(VmExecutionMode::OneTx); - - let mut second_l2_block = vm.vm.batch_env.first_l2_block; - second_l2_block.number += 1; - second_l2_block.timestamp += 1; - second_l2_block.prev_block_hash = vm.vm.bootloader_state.last_l2_block().get_hash(); - - if let Some(block_number) = overriden_second_block_number { - second_l2_block.number = block_number; - } - if let Some(timestamp) = overriden_second_block_timestamp { - second_l2_block.timestamp = timestamp; - } - if let Some(prev_block_hash) = overriden_second_block_prev_block_hash { - second_l2_block.prev_block_hash = prev_block_hash; - } - - vm.vm.bootloader_state.push_l2_block(second_l2_block); - - vm.vm.push_transaction(l1_tx); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_new_l2_block() { - // This test is aimed to cover potential issue - - let correct_first_block = L2BlockEnv { - number: 1, - timestamp: 1, - prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }; - - // Case 1: Block number increasing by more than 1 - test_new_l2_block( - correct_first_block, - Some(3), - None, - None, - Some(Halt::FailedToSetL2Block( - "Invalid new L2 block number".to_string(), - )), - ); - - // Case 2: Timestamp not increasing - test_new_l2_block( - correct_first_block, - None, - Some(1), - None, - Some(Halt::FailedToSetL2Block("The timestamp of the new L2 block must be greater than the timestamp of the previous L2 block".to_string())), - ); - - // Case 3: Incorrect previous block hash - test_new_l2_block( - correct_first_block, - None, - None, - Some(H256::zero()), - Some(Halt::FailedToSetL2Block( - "The current L2 block hash is incorrect".to_string(), - )), - ); - - // Case 4: Correct new block - test_new_l2_block(correct_first_block, None, None, None, None); -} - -#[allow(clippy::too_many_arguments)] -fn test_first_in_batch( - miniblock_timestamp: u64, - miniblock_number: u32, - pending_txs_hash: H256, - batch_timestamp: u64, - new_batch_timestamp: u64, - batch_number: u32, - proposed_block: L2BlockEnv, - expected_error: Option, -) { - let mut l1_batch = 
default_l1_batch(L1BatchNumber(1)); - l1_batch.number += 1; - l1_batch.timestamp = new_batch_timestamp; - - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_tx = get_l1_noop(); - - // Setting the values provided. - let storage_ptr = vm.vm.state.storage.storage.get_ptr(); - let miniblock_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - ); - let pending_txs_hash_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, - ); - let batch_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_BLOCK_INFO_POSITION, - ); - let prev_block_hash_position = get_l2_block_hash_key(miniblock_number - 1); - - storage_ptr.borrow_mut().set_value( - miniblock_info_slot, - u256_to_h256(pack_block_info( - miniblock_number as u64, - miniblock_timestamp, - )), - ); - storage_ptr - .borrow_mut() - .set_value(pending_txs_hash_slot, pending_txs_hash); - storage_ptr.borrow_mut().set_value( - batch_info_slot, - u256_to_h256(pack_block_info(batch_number as u64, batch_timestamp)), - ); - storage_ptr.borrow_mut().set_value( - prev_block_hash_position, - MiniblockHasher::legacy_hash(MiniblockNumber(miniblock_number - 1)), - ); - - // In order to skip checks from the Rust side of the VM, we firstly use some definitely correct L2 block info. - // And then override it with the user-provided value - - let last_l2_block = vm.vm.bootloader_state.last_l2_block(); - let new_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: last_l2_block.timestamp + 1, - prev_block_hash: last_l2_block.get_hash(), - max_virtual_blocks_to_create: last_l2_block.max_virtual_blocks_to_create, - }; - - vm.vm.bootloader_state.push_l2_block(new_l2_block); - vm.vm.push_transaction(l1_tx); - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 0, proposed_block, timestamp); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_first_in_batch() { - let prev_block_hash = MiniblockHasher::legacy_hash(MiniblockNumber(0)); - let prev_block_hash = MiniblockHasher::new(MiniblockNumber(1), 1, prev_block_hash) - .finalize(ProtocolVersionId::latest()); - test_first_in_batch( - 1, - 1, - H256::zero(), - 1, - 2, - 1, - L2BlockEnv { - number: 2, - timestamp: 2, - prev_block_hash, - max_virtual_blocks_to_create: 1, - }, - None, - ); - - let prev_block_hash = MiniblockHasher::legacy_hash(MiniblockNumber(0)); - let prev_block_hash = MiniblockHasher::new(MiniblockNumber(1), 8, prev_block_hash) - .finalize(ProtocolVersionId::latest()); - test_first_in_batch( - 8, - 1, - H256::zero(), - 5, - 12, - 1, - L2BlockEnv { - number: 2, - timestamp: 9, - prev_block_hash, - max_virtual_blocks_to_create: 1, - }, - Some(Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())), - ); -} - -fn set_manual_l2_block_info( - vm: &mut Vm, - tx_number: usize, - block_info: L2BlockEnv, - timestamp: Timestamp, -) { - let fictive_miniblock_position = - 
TX_OPERATOR_L2_BLOCK_INFO_OFFSET + TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO * tx_number; - - vm.state.memory.populate_page( - BOOTLOADER_HEAP_PAGE as usize, - vec![ - (fictive_miniblock_position, block_info.number.into()), - (fictive_miniblock_position + 1, block_info.timestamp.into()), - ( - fictive_miniblock_position + 2, - h256_to_u256(block_info.prev_block_hash), - ), - ( - fictive_miniblock_position + 3, - block_info.max_virtual_blocks_to_create.into(), - ), - ], - timestamp, - ) -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/mod.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/mod.rs deleted file mode 100644 index a07608121bc..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/mod.rs +++ /dev/null @@ -1,23 +0,0 @@ -mod bootloader; -mod default_aa; -// TODO - fix this test -// `mod invalid_bytecode;` -mod block_tip; -mod bytecode_publishing; -mod call_tracer; -mod circuits; -mod gas_limit; -mod get_used_contracts; -mod is_write_initial; -mod l1_tx_execution; -mod l2_blocks; -mod nonce_holder; -mod precompiles; -mod refunds; -mod require_eip712; -mod rollbacks; -mod simple_execution; -mod tester; -mod tracing_execution_error; -mod upgrade; -mod utils; diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/nonce_holder.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/nonce_holder.rs deleted file mode 100644 index 9f1be4ec947..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/nonce_holder.rs +++ /dev/null @@ -1,187 +0,0 @@ -use zksync_types::{Execute, Nonce}; - -use crate::{ - interface::{ - ExecutionResult, Halt, TxExecutionMode, TxRevertReason, VmExecutionMode, VmInterface, - VmRevertReason, - }, - vm_1_4_2::{ - tests::{ - tester::{Account, VmTesterBuilder}, - utils::read_nonce_holder_tester, - }, - types::internals::TransactionData, - }, -}; - -pub enum NonceHolderTestMode { - SetValueUnderNonce, - IncreaseMinNonceBy5, - IncreaseMinNonceTooMuch, - LeaveNonceUnused, - IncreaseMinNonceBy1, - SwitchToArbitraryOrdering, -} - -impl From for u8 { - fn from(mode: NonceHolderTestMode) -> u8 { - match mode { - NonceHolderTestMode::SetValueUnderNonce => 0, - NonceHolderTestMode::IncreaseMinNonceBy5 => 1, - NonceHolderTestMode::IncreaseMinNonceTooMuch => 2, - NonceHolderTestMode::LeaveNonceUnused => 3, - NonceHolderTestMode::IncreaseMinNonceBy1 => 4, - NonceHolderTestMode::SwitchToArbitraryOrdering => 5, - } - } -} - -#[test] -fn test_nonce_holder() { - let mut account = Account::random(); - - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_custom_contracts(vec![( - read_nonce_holder_tester().to_vec(), - account.address, - true, - )]) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let mut run_nonce_test = |nonce: u32, - test_mode: NonceHolderTestMode, - error_message: Option, - comment: &'static str| { - // In this test we have to reset VM state after each test case. Because once bootloader failed during the validation of the transaction, - // it will fail again and again. At the same time we have to keep the same storage, because we want to keep the nonce holder contract state. - // The easiest way in terms of lifetimes is to reuse `vm_builder` to achieve it. 
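// A sketch (not part of the original file) of the signalling mechanism used by
// each case below: the `NonceHolderTestMode` discriminant is packed into a
// one-byte signature, which the nonce-holder test contract presumably decodes
// to choose the nonce operation to perform:
//
//     let mode = NonceHolderTestMode::IncreaseMinNonceBy5;
//     let signature: Vec<u8> = vec![mode.into()]; // one-byte "signature" = mode tag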
-        vm.reset_state(true);
-        let mut transaction_data: TransactionData = account
-            .get_l2_tx_for_execute_with_nonce(
-                Execute {
-                    contract_address: account.address,
-                    calldata: vec![12],
-                    value: Default::default(),
-                    factory_deps: None,
-                },
-                None,
-                Nonce(nonce),
-            )
-            .into();
-
-        transaction_data.signature = vec![test_mode.into()];
-        vm.vm.push_raw_transaction(transaction_data, 0, 0, true);
-        let result = vm.vm.execute(VmExecutionMode::OneTx);
-
-        if let Some(msg) = error_message {
-            let expected_error =
-                TxRevertReason::Halt(Halt::ValidationFailed(VmRevertReason::General {
-                    msg,
-                    data: vec![],
-                }));
-            let ExecutionResult::Halt { reason } = result.result else {
-                panic!("Expected revert, got {:?}", result.result);
-            };
-            assert_eq!(
-                reason.to_string(),
-                expected_error.to_string(),
-                "{}",
-                comment
-            );
-        } else {
-            assert!(!result.result.is_failed(), "{}", comment);
-        }
-    };
-
-    // Test 1: trying to set a value under a non-sequential nonce.
-    run_nonce_test(
-        1u32,
-        NonceHolderTestMode::SetValueUnderNonce,
-        Some("Previous nonce has not been used".to_string()),
-        "Allowed to set a value under a non-sequential nonce",
-    );
-
-    // Test 2: increase the min nonce by 1 with sequential nonce ordering:
-    run_nonce_test(
-        0u32,
-        NonceHolderTestMode::IncreaseMinNonceBy1,
-        None,
-        "Failed to increment nonce by 1 for a sequential account",
-    );
-
-    // Test 3: correctly set a value under the nonce with sequential nonce ordering:
-    run_nonce_test(
-        1u32,
-        NonceHolderTestMode::SetValueUnderNonce,
-        None,
-        "Failed to set a value under a sequential nonce",
-    );
-
-    // Test 4: migrate to arbitrary nonce ordering:
-    run_nonce_test(
-        2u32,
-        NonceHolderTestMode::SwitchToArbitraryOrdering,
-        None,
-        "Failed to switch to arbitrary ordering",
-    );
-
-    // Test 5: increase the min nonce by 5:
-    run_nonce_test(
-        6u32,
-        NonceHolderTestMode::IncreaseMinNonceBy5,
-        None,
-        "Failed to increase min nonce by 5",
-    );
-
-    // Test 6: since the nonces in range [6,10] are no longer allowed, a
-    // tx with nonce 10 must be rejected:
-    run_nonce_test(
-        10u32,
-        NonceHolderTestMode::IncreaseMinNonceBy5,
-        Some("Reusing the same nonce twice".to_string()),
-        "Allowed to reuse a nonce below the minimal one",
-    );
-
-    // Test 7: we should be able to use the unused nonce 13:
-    run_nonce_test(
-        13u32,
-        NonceHolderTestMode::SetValueUnderNonce,
-        None,
-        "Did not allow to use the unused nonce 13",
-    );
-
-    // Test 8: we should not be able to reuse nonce 13:
-    run_nonce_test(
-        13u32,
-        NonceHolderTestMode::IncreaseMinNonceBy5,
-        Some("Reusing the same nonce twice".to_string()),
-        "Allowed to reuse the same nonce twice",
-    );
-
-    // Test 9: we should be able to simply use nonce 14, while bumping the minimal nonce by 5:
-    run_nonce_test(
-        14u32,
-        NonceHolderTestMode::IncreaseMinNonceBy5,
-        None,
-        "Did not allow to use a bumped nonce",
-    );
-
-    // Test 10: do not allow bumping the nonce by too much:
-    run_nonce_test(
-        16u32,
-        NonceHolderTestMode::IncreaseMinNonceTooMuch,
-        Some("The value for incrementing the nonce is too high".to_string()),
-        "Allowed incrementing the min nonce by too much",
-    );
-
-    // Test 11: do not allow leaving the nonce unmarked as used:
-    run_nonce_test(
-        16u32,
-        NonceHolderTestMode::LeaveNonceUnused,
-        Some("The nonce was not set as used".to_string()),
-        "Allowed to leave the nonce unused",
-    );
-}
diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/precompiles.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/precompiles.rs
deleted file mode 100644
index 0a799288204..00000000000
---
a/core/lib/multivm/src/versions/vm_1_4_2/tests/precompiles.rs +++ /dev/null @@ -1,135 +0,0 @@ -use zk_evm_1_4_1::zk_evm_abstractions::precompiles::PrecompileAddress; -use zksync_types::{Address, Execute}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_2::{ - constants::BLOCK_GAS_LIMIT, - tests::{tester::VmTesterBuilder, utils::read_precompiles_contract}, - }, -}; - -#[test] -fn test_keccak() { - // Execute special transaction and check that at least 1000 keccak calls were made. - let contract = read_precompiles_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contract, address, true)]) - .build(); - - // calldata for `doKeccak(1000)`. - let keccak1000_calldata = - "370f20ac00000000000000000000000000000000000000000000000000000000000003e8"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: hex::decode(keccak1000_calldata).unwrap(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let keccak_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::Keccak256) - .count(); - - assert!(keccak_count >= 1000); -} - -#[test] -fn test_sha256() { - // Execute special transaction and check that at least 1000 `sha256` calls were made. - let contract = read_precompiles_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contract, address, true)]) - .build(); - - // calldata for `doSha256(1000)`. - let sha1000_calldata = - "5d0b4fb500000000000000000000000000000000000000000000000000000000000003e8"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: hex::decode(sha1000_calldata).unwrap(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let sha_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::SHA256) - .count(); - - assert!(sha_count >= 1000); -} - -#[test] -fn test_ecrecover() { - // Execute simple transfer and check that exactly 1 `ecrecover` call was made (it's done during tx validation). 
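// A sketch (hypothetical helper, not part of the original file) of the counting
// pattern shared by the keccak, sha256, and ecrecover tests: the VM records one
// entry per precompile call in `precompile_cycles_history`, and each test counts
// the entries matching the precompile under test.
fn count_precompile_calls<T>(
    history: &[(PrecompileAddress, T)],
    target: PrecompileAddress,
) -> usize {
    history
        .iter()
        .filter(|(precompile, _)| *precompile == target)
        .count()
}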
- let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_bootloader_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: account.address, - calldata: Vec::new(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let ecrecover_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::Ecrecover) - .count(); - - assert_eq!(ecrecover_count, 1); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/prestate_tracer.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/prestate_tracer.rs deleted file mode 100644 index 5586450f34b..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/prestate_tracer.rs +++ /dev/null @@ -1,143 +0,0 @@ -use std::sync::Arc; - -use once_cell::sync::OnceCell; -use zksync_test_account::TxType; -use zksync_types::{utils::deployed_address_create, Execute, U256}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - tracers::PrestateTracer, - vm_1_4_2::{ - constants::BLOCK_GAS_LIMIT, - tests::{tester::VmTesterBuilder, utils::read_simple_transfer_contract}, - HistoryEnabled, ToTracerPointer, - }, -}; - -#[test] -fn test_prestate_tracer() { - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - vm.deploy_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let tx1 = account.get_test_contract_transaction( - vm.test_contract.unwrap(), - false, - Default::default(), - true, - TxType::L2, - ); - vm.vm.push_transaction(tx1); - - let contract_address = vm.test_contract.unwrap(); - let prestate_tracer_result = Arc::new(OnceCell::default()); - let prestate_tracer = PrestateTracer::new(false, prestate_tracer_result.clone()); - let tracer_ptr = prestate_tracer.into_tracer_pointer(); - vm.vm.inspect(tracer_ptr.into(), VmExecutionMode::Batch); - - let prestate_result = Arc::try_unwrap(prestate_tracer_result) - .unwrap() - .take() - .unwrap_or_default(); - - assert!(prestate_result.1.contains_key(&contract_address)); -} - -#[test] -fn test_prestate_tracer_diff_mode() { - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - let contract = read_simple_transfer_contract(); - let tx = vm - .deployer - .as_mut() - .expect("You have to initialize builder with deployer") - .get_deploy_tx(&contract, None, TxType::L2) - .tx; - let nonce = tx.nonce().unwrap().0.into(); - vm.vm.push_transaction(tx); - vm.vm.execute(VmExecutionMode::OneTx); - let deployed_address = deployed_address_create(vm.deployer.as_ref().unwrap().address, nonce); - vm.test_contract = Some(deployed_address); - - // Deploy a second copy of the contract to see its appearance in the pre-state - let tx2 = vm - .deployer - .as_mut() - .expect("You have to initialize builder with deployer") - 
.get_deploy_tx(&contract, None, TxType::L2) - .tx; - let nonce2 = tx2.nonce().unwrap().0.into(); - vm.vm.push_transaction(tx2); - vm.vm.execute(VmExecutionMode::OneTx); - let deployed_address2 = deployed_address_create(vm.deployer.as_ref().unwrap().address, nonce2); - - let account = &mut vm.rich_accounts[0]; - - //enter ether to contract to see difference in the balance post execution - let tx0 = Execute { - contract_address: vm.test_contract.unwrap(), - calldata: Default::default(), - value: U256::from(100000), - factory_deps: None, - }; - - vm.vm - .push_transaction(account.get_l2_tx_for_execute(tx0.clone(), None)); - - let tx1 = Execute { - contract_address: deployed_address2, - calldata: Default::default(), - value: U256::from(200000), - factory_deps: None, - }; - - vm.vm - .push_transaction(account.get_l2_tx_for_execute(tx1, None)); - let prestate_tracer_result = Arc::new(OnceCell::default()); - let prestate_tracer = PrestateTracer::new(true, prestate_tracer_result.clone()); - let tracer_ptr = prestate_tracer.into_tracer_pointer(); - vm.vm - .inspect(tracer_ptr.into(), VmExecutionMode::Bootloader); - - let prestate_result = Arc::try_unwrap(prestate_tracer_result) - .unwrap() - .take() - .unwrap_or_default(); - - //assert that the pre-state contains both deployed contracts with balance zero - assert!(prestate_result.0.contains_key(&deployed_address)); - assert!(prestate_result.0.contains_key(&deployed_address2)); - assert_eq!( - prestate_result.0[&deployed_address].balance, - Some(U256::zero()) - ); - assert_eq!( - prestate_result.0[&deployed_address2].balance, - Some(U256::zero()) - ); - - //assert that the post-state contains both deployed contracts with the correct balance - assert!(prestate_result.1.contains_key(&deployed_address)); - assert!(prestate_result.1.contains_key(&deployed_address2)); - assert_eq!( - prestate_result.1[&deployed_address].balance, - Some(U256::from(100000)) - ); - assert_eq!( - prestate_result.1[&deployed_address2].balance, - Some(U256::from(200000)) - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/refunds.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/refunds.rs deleted file mode 100644 index 401c2c12a43..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/refunds.rs +++ /dev/null @@ -1,169 +0,0 @@ -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_2::{ - tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - types::internals::TransactionData, - }, -}; - -#[test] -fn test_predetermined_refunded_gas() { - // In this test, we compare the execution of the bootloader with the predefined - // refunded gas and without them - - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_batch = vm.vm.batch_env.clone(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let DeployContractsTx { - tx, - bytecode_hash: _, - address: _, - } = account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(!result.result.is_failed()); - - // If the refund provided by the operator or the final refund are the 0 - // there is no impact of the operator's refund at all and so this test does not - // make much sense. 
- assert!( - result.refunds.operator_suggested_refund > 0, - "The operator's refund is 0" - ); - assert!(result.refunds.gas_refunded > 0, "The final refund is 0"); - - let result_without_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_without_predefined_refunds = vm.vm.get_current_execution_state(); - assert!(!result_without_predefined_refunds.result.is_failed(),); - - // Here we want to provide the same refund from the operator and check that it's the correct one. - // We execute the whole block without refund tracer, because refund tracer will eventually override the provided refund. - // But the overall result should be the same - - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let tx: TransactionData = tx.into(); - // Overhead - let overhead = tx.overhead_gas(); - vm.vm.push_raw_transaction( - tx.clone(), - overhead, - result.refunds.gas_refunded as u32, - true, - ); - - let result_with_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_with_predefined_refunds = vm.vm.get_current_execution_state(); - - assert!(!result_with_predefined_refunds.result.is_failed()); - - // We need to sort these lists as those are flattened from HashMaps - current_state_with_predefined_refunds - .used_contract_hashes - .sort(); - current_state_without_predefined_refunds - .used_contract_hashes - .sort(); - - assert_eq!( - current_state_with_predefined_refunds.events, - current_state_without_predefined_refunds.events - ); - - assert_eq!( - current_state_with_predefined_refunds.user_l2_to_l1_logs, - current_state_without_predefined_refunds.user_l2_to_l1_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.system_logs, - current_state_without_predefined_refunds.system_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.storage_log_queries, - current_state_without_predefined_refunds.storage_log_queries - ); - assert_eq!( - current_state_with_predefined_refunds.used_contract_hashes, - current_state_without_predefined_refunds.used_contract_hashes - ); - - // In this test we put the different refund from the operator. - // We still can't use the refund tracer, because it will override the refund. - // But we can check that the logs and events have changed. 
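// A sketch (hypothetical helper, not part of the original file; assumes
// `CurrentExecutionState` from `crate::interface`) of the normalization applied
// before the state comparisons in this test: `used_contract_hashes` is flattened
// from a `HashMap`, so it must be sorted before two execution states can be
// compared field by field.
fn normalized(mut state: CurrentExecutionState) -> CurrentExecutionState {
    state.used_contract_hashes.sort();
    state
}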
- let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let changed_operator_suggested_refund = result.refunds.gas_refunded + 1000; - vm.vm - .push_raw_transaction(tx, overhead, changed_operator_suggested_refund as u32, true); - let result = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_with_changed_predefined_refunds = vm.vm.get_current_execution_state(); - - assert!(!result.result.is_failed()); - current_state_with_changed_predefined_refunds - .used_contract_hashes - .sort(); - current_state_without_predefined_refunds - .used_contract_hashes - .sort(); - - assert_eq!( - current_state_with_changed_predefined_refunds.events.len(), - current_state_without_predefined_refunds.events.len() - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.events, - current_state_without_predefined_refunds.events - ); - - assert_eq!( - current_state_with_changed_predefined_refunds.user_l2_to_l1_logs, - current_state_without_predefined_refunds.user_l2_to_l1_logs - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.system_logs, - current_state_without_predefined_refunds.system_logs - ); - - assert_eq!( - current_state_with_changed_predefined_refunds - .storage_log_queries - .len(), - current_state_without_predefined_refunds - .storage_log_queries - .len() - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.storage_log_queries, - current_state_without_predefined_refunds.storage_log_queries - ); - assert_eq!( - current_state_with_changed_predefined_refunds.used_contract_hashes, - current_state_without_predefined_refunds.used_contract_hashes - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/require_eip712.rs deleted file mode 100644 index 15f4504d6e1..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/require_eip712.rs +++ /dev/null @@ -1,164 +0,0 @@ -use std::convert::TryInto; - -use ethabi::Token; -use zksync_eth_signer::{raw_ethereum_tx::TransactionParameters, EthereumSigner}; -use zksync_system_constants::L2_ETH_TOKEN_ADDRESS; -use zksync_types::{ - fee::Fee, l2::L2Tx, transaction_request::TransactionRequest, - utils::storage_key_for_standard_token_balance, AccountTreeId, Address, Eip712Domain, Execute, - L2ChainId, Nonce, Transaction, U256, -}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_1_4_2::tests::{ - tester::{Account, VmTester, VmTesterBuilder}, - utils::read_many_owners_custom_account_contract, - }, -}; - -impl VmTester { - pub(crate) fn get_eth_balance(&mut self, address: Address) -> U256 { - let key = storage_key_for_standard_token_balance( - AccountTreeId::new(L2_ETH_TOKEN_ADDRESS), - &address, - ); - self.vm.state.storage.storage.read_from_storage(&key) - } -} - -// TODO refactor this test it use too much internal details of the VM -#[tokio::test] -/// This test deploys 'buggy' account abstraction code, and then tries accessing it both with legacy -/// and EIP712 transactions. -/// Currently we support both, but in the future, we should allow only EIP712 transactions to access the AA accounts. 
-async fn test_require_eip712() { - // Use 3 accounts: - // - `private_address` - EOA account, where we have the key - // - `account_address` - AA account, where the contract is deployed - // - beneficiary - an EOA account, where we'll try to transfer the tokens. - let account_abstraction = Account::random(); - let mut private_account = Account::random(); - let beneficiary = Account::random(); - - let (bytecode, contract) = read_many_owners_custom_account_contract(); - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryDisabled) - .with_empty_in_memory_storage() - .with_custom_contracts(vec![(bytecode, account_abstraction.address, true)]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account_abstraction.clone(), private_account.clone()]) - .build(); - - assert_eq!(vm.get_eth_balance(beneficiary.address), U256::from(0)); - - let chain_id: u32 = 270; - - // First, let's set the owners of the AA account to the `private_address`. - // (so that messages signed by `private_address`, are authorized to act on behalf of the AA account). - let set_owners_function = contract.function("setOwners").unwrap(); - let encoded_input = set_owners_function - .encode_input(&[Token::Array(vec![Token::Address(private_account.address)])]) - .unwrap(); - - let tx = private_account.get_l2_tx_for_execute( - Execute { - contract_address: account_abstraction.address, - calldata: encoded_input, - value: Default::default(), - factory_deps: None, - }, - None, - ); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - let private_account_balance = vm.get_eth_balance(private_account.address); - - // And now let's do the transfer from the 'account abstraction' to 'beneficiary' (using 'legacy' transaction). - // Normally this would not work - unless the operator is malicious. - let aa_raw_tx = TransactionParameters { - nonce: U256::from(0), - to: Some(beneficiary.address), - gas: U256::from(100000000), - gas_price: Some(U256::from(10000000)), - value: U256::from(888000088), - data: vec![], - chain_id: 270, - transaction_type: None, - access_list: None, - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - max_fee_per_blob_gas: None, - blob_versioned_hashes: None, - }; - - let aa_tx = private_account.sign_legacy_tx(aa_raw_tx).await; - let (tx_request, hash) = TransactionRequest::from_bytes(&aa_tx, L2ChainId::from(270)).unwrap(); - - let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000).unwrap(); - l2_tx.set_input(aa_tx, hash); - // Pretend that operator is malicious and sets the initiator to the AA account. - l2_tx.common_data.initiator_address = account_abstraction.address; - let transaction: Transaction = l2_tx.try_into().unwrap(); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(888000088) - ); - // Make sure that the tokens were transferred from the AA account. 
- assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); - - // // Now send the 'classic' EIP712 transaction - let tx_712 = L2Tx::new( - beneficiary.address, - vec![], - Nonce(1), - Fee { - gas_limit: U256::from(1000000000), - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - gas_per_pubdata_limit: U256::from(1000000000), - }, - account_abstraction.address, - U256::from(28374938), - None, - Default::default(), - ); - - let transaction_request: TransactionRequest = tx_712.into(); - - let domain = Eip712Domain::new(L2ChainId::from(chain_id)); - let signature = private_account - .get_pk_signer() - .sign_typed_data(&domain, &transaction_request) - .await - .unwrap(); - let encoded_tx = transaction_request.get_signed_bytes(&signature, L2ChainId::from(chain_id)); - - let (aa_txn_request, aa_hash) = - TransactionRequest::from_bytes(&encoded_tx, L2ChainId::from(chain_id)).unwrap(); - - let mut l2_tx = L2Tx::from_request(aa_txn_request, 100000).unwrap(); - l2_tx.set_input(encoded_tx, aa_hash); - - let transaction: Transaction = l2_tx.try_into().unwrap(); - vm.vm.push_transaction(transaction); - vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(916375026) - ); - assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/rollbacks.rs deleted file mode 100644 index 2ce18cc0136..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/rollbacks.rs +++ /dev/null @@ -1,263 +0,0 @@ -use ethabi::Token; -use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams}; -use crate::interface::storage::WriteStorage; -use zksync_types::{get_nonce_key, Execute, U256}; - -use crate::{ - interface::{ - dyn_tracers::vm_1_4_1::DynTracer, - tracer::{TracerExecutionStatus, TracerExecutionStopReason}, - TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceHistoryEnabled, - }, - vm_1_4_2::{ - tests::{ - tester::{DeployContractsTx, TransactionTestInfo, TxModifier, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - types::internals::ZkSyncVmState, - BootloaderState, HistoryMode, SimpleMemory, ToTracerPointer, VmTracer, - }, -}; - -#[test] -fn test_vm_rollbacks() { - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let mut account = vm.rich_accounts[0].clone(); - let counter = read_test_contract(); - let tx_0 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_1 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_2 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(tx_0.clone(), false), - TransactionTestInfo::new_processed(tx_1.clone(), false), - TransactionTestInfo::new_processed(tx_2.clone(), false), - ]); - - // reset vm - vm.reset_with_empty_storage(); - - let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignatureLength.into()), - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongMagicValue.into()), - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignature.into()), - // The correct 
nonce is 0, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_0.clone(), false), - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_1, false), - // The correct nonce is 2, this tx will fail - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_2.clone(), false), - // This tx will fail - TransactionTestInfo::new_rejected(tx_2, TxModifier::NonceReused.into()), - TransactionTestInfo::new_rejected(tx_0, TxModifier::NonceReused.into()), - ]); - - assert_eq!(result_without_rollbacks, result_with_rollbacks); -} - -#[test] -fn test_vm_loadnext_rollbacks() { - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let mut account = vm.rich_accounts[0].clone(); - - let loadnext_contract = get_loadnext_contract(); - let loadnext_constructor_data = &[Token::Uint(U256::from(100))]; - let DeployContractsTx { - tx: loadnext_deploy_tx, - address, - .. - } = account.get_deploy_tx_with_factory_deps( - &loadnext_contract.bytecode, - Some(loadnext_constructor_data), - loadnext_contract.factory_deps.clone(), - TxType::L2, - ); - - let loadnext_tx_1 = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let loadnext_tx_2 = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_1.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), false), - ]); - - // reset vm - vm.reset_with_empty_storage(); - - let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_1.clone(), true), - TransactionTestInfo::new_rejected( - loadnext_deploy_tx.clone(), - TxModifier::NonceReused.into(), - ), - TransactionTestInfo::new_processed(loadnext_tx_1, false), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_rejected(loadnext_deploy_tx, TxModifier::NonceReused.into()), - TransactionTestInfo::new_processed(loadnext_tx_2, false), - ]); - - assert_eq!(result_without_rollbacks, result_with_rollbacks); -} - -// Testing tracer that does not allow the recursion to go deeper than a certain limit -struct MaxRecursionTracer { - max_recursion_depth: usize, -} - -/// Tracer responsible for 
stopping the VM execution once the call-stack depth
-/// exceeds the configured limit (it bounds recursion depth, not storage invocations).
-impl<S: WriteStorage, H: HistoryMode> DynTracer<S, SimpleMemory<H>> for MaxRecursionTracer {}
-
-impl<S: WriteStorage, H: HistoryMode> VmTracer<S, H> for MaxRecursionTracer {
-    fn finish_cycle(
-        &mut self,
-        state: &mut ZkSyncVmState<S, H>,
-        _bootloader_state: &mut BootloaderState,
-    ) -> TracerExecutionStatus {
-        let current_depth = state.local_state.callstack.depth();
-
-        if current_depth > self.max_recursion_depth {
-            TracerExecutionStatus::Stop(TracerExecutionStopReason::Finish)
-        } else {
-            TracerExecutionStatus::Continue
-        }
-    }
-}
-
-#[test]
-fn test_layered_rollback() {
-    // This test checks that layered rollbacks work correctly, i.e. that a
-    // rollback by the operator always reverts all the changes.
-
-    let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_random_rich_accounts(1)
-        .build();
-
-    let account = &mut vm.rich_accounts[0];
-    let loadnext_contract = get_loadnext_contract().bytecode;
-
-    let DeployContractsTx {
-        tx: deploy_tx,
-        address,
-        ..
-    } = account.get_deploy_tx(
-        &loadnext_contract,
-        Some(&[Token::Uint(0.into())]),
-        TxType::L2,
-    );
-    vm.vm.push_transaction(deploy_tx);
-    let deployment_res = vm.vm.execute(VmExecutionMode::OneTx);
-    assert!(!deployment_res.result.is_failed(), "transaction failed");
-
-    let loadnext_transaction = account.get_loadnext_transaction(
-        address,
-        LoadnextContractExecutionParams {
-            writes: 1,
-            recursive_calls: 20,
-            ..LoadnextContractExecutionParams::empty()
-        },
-        TxType::L2,
-    );
-
-    let nonce_val = vm
-        .vm
-        .state
-        .storage
-        .storage
-        .read_from_storage(&get_nonce_key(&account.address));
-
-    vm.vm.make_snapshot();
-
-    vm.vm.push_transaction(loadnext_transaction.clone());
-    vm.vm.inspect(
-        MaxRecursionTracer {
-            max_recursion_depth: 15,
-        }
-        .into_tracer_pointer()
-        .into(),
-        VmExecutionMode::OneTx,
-    );
-
-    let nonce_val2 = vm
-        .vm
-        .state
-        .storage
-        .storage
-        .read_from_storage(&get_nonce_key(&account.address));
-
-    // The tracer stopped execution after validation had passed, so the nonce has already been increased.
-    assert_eq!(nonce_val + U256::one(), nonce_val2, "nonce did not change");
-
-    vm.vm.rollback_to_the_latest_snapshot();
-
-    let nonce_val_after_rollback = vm
-        .vm
-        .state
-        .storage
-        .storage
-        .read_from_storage(&get_nonce_key(&account.address));
-
-    assert_eq!(
-        nonce_val, nonce_val_after_rollback,
-        "nonce changed after rollback"
-    );
-
-    vm.vm.push_transaction(loadnext_transaction);
-    let result = vm.vm.execute(VmExecutionMode::OneTx);
-    assert!(!result.result.is_failed(), "transaction must not fail");
-}
diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/simple_execution.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/simple_execution.rs
deleted file mode 100644
index 57b37e67b76..00000000000
--- a/core/lib/multivm/src/versions/vm_1_4_2/tests/simple_execution.rs
+++ /dev/null
@@ -1,78 +0,0 @@
-use crate::{
-    interface::{ExecutionResult, VmExecutionMode, VmInterface},
-    vm_1_4_2::tests::tester::{TxType, VmTesterBuilder},
-};
-
-#[test]
-fn estimate_fee() {
-    let mut vm_tester = VmTesterBuilder::new(crate::vm_latest::HistoryDisabled)
-        .with_empty_in_memory_storage()
-        .with_deployer()
-        .with_random_rich_accounts(1)
-        .build();
-
-    vm_tester.deploy_test_contract();
-    let account = &mut vm_tester.rich_accounts[0];
-
-    let tx = account.get_test_contract_transaction(
-        vm_tester.test_contract.unwrap(),
-        false,
-        Default::default(),
-        false,
-        TxType::L2,
-    );
-
vm_tester.vm.push_transaction(tx); - - let result = vm_tester.vm.execute(VmExecutionMode::OneTx); - assert_matches!(result.result, ExecutionResult::Success { .. }); -} - -#[test] -fn simple_execute() { - let mut vm_tester = VmTesterBuilder::new(crate::vm_latest::HistoryDisabled) - .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - vm_tester.deploy_test_contract(); - - let account = &mut vm_tester.rich_accounts[0]; - - let tx1 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - - let tx2 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - true, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - - let tx3 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - let vm = &mut vm_tester.vm; - vm.push_transaction(tx1); - vm.push_transaction(tx2); - vm.push_transaction(tx3); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Success { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Revert { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Success { .. }); - let block_tip = vm.execute(VmExecutionMode::Batch); - assert_matches!(block_tip.result, ExecutionResult::Success { .. }); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/inner_state.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/inner_state.rs deleted file mode 100644 index d6c072d1b1e..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/inner_state.rs +++ /dev/null @@ -1,131 +0,0 @@ -use std::collections::HashMap; - -use zk_evm_1_4_1::{aux_structures::Timestamp, vm_state::VmLocalState}; -use crate::interface::storage::WriteStorage; -use zksync_types::{StorageKey, StorageValue, U256}; - -use crate::{ - vm_1_4_2::{ - old_vm::{ - event_sink::InMemoryEventSink, - history_recorder::{AppDataFrameManagerWithHistory, HistoryRecorder}, - }, - utils::logs::StorageLogQuery, - HistoryEnabled, HistoryMode, SimpleMemory, Vm, - }, - HistoryMode as CommonHistoryMode, -}; - -#[derive(Clone, Debug)] -pub(crate) struct ModifiedKeysMap(HashMap); - -// We consider hashmaps to be equal even if there is a key -// that is not present in one but has zero value in another. -impl PartialEq for ModifiedKeysMap { - fn eq(&self, other: &Self) -> bool { - for (key, value) in self.0.iter() { - if *value != other.0.get(key).cloned().unwrap_or_default() { - return false; - } - } - for (key, value) in other.0.iter() { - if *value != self.0.get(key).cloned().unwrap_or_default() { - return false; - } - } - true - } -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct DecommitterTestInnerState { - /// There is no way to "truly" compare the storage pointer, - /// so we just compare the modified keys. This is reasonable enough. - pub(crate) modified_storage_keys: ModifiedKeysMap, - pub(crate) known_bytecodes: HistoryRecorder>, H>, - pub(crate) decommitted_code_hashes: HistoryRecorder, HistoryEnabled>, -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct StorageOracleInnerState { - /// There is no way to "truly" compare the storage pointer, - /// so we just compare the modified keys. This is reasonable enough. 
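// A quick illustration (sketch, not from the original file) of the zero-default
// equality defined for `ModifiedKeysMap` above: a key that is absent from one map
// compares equal to an explicit zero value in the other, so these two maps are
// considered equal despite having different key sets:
//
//     let key = StorageKey::new(AccountTreeId::new(Address::zero()), H256::zero());
//     let mut a = HashMap::new();
//     a.insert(key, StorageValue::zero());
//     let b: HashMap<StorageKey, StorageValue> = HashMap::new();
//     assert_eq!(ModifiedKeysMap(a), ModifiedKeysMap(b));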
- pub(crate) modified_storage_keys: ModifiedKeysMap, - - pub(crate) frames_stack: AppDataFrameManagerWithHistory, H>, - - pub(crate) pre_paid_changes: HistoryRecorder, H>, - pub(crate) paid_changes: HistoryRecorder, H>, - pub(crate) initial_values: HistoryRecorder, H>, - pub(crate) returned_refunds: HistoryRecorder, H>, -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct PrecompileProcessorTestInnerState { - pub(crate) timestamp_history: HistoryRecorder, H>, -} - -/// A struct that encapsulates the state of the VM's oracles -/// The state is to be used in tests. -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct VmInstanceInnerState { - event_sink: InMemoryEventSink, - precompile_processor_state: PrecompileProcessorTestInnerState, - memory: SimpleMemory, - decommitter_state: DecommitterTestInnerState, - storage_oracle_state: StorageOracleInnerState, - local_state: VmLocalState, -} - -impl Vm { - // Dump inner state of the VM. - pub(crate) fn dump_inner_state(&self) -> VmInstanceInnerState { - let event_sink = self.state.event_sink.clone(); - let precompile_processor_state = PrecompileProcessorTestInnerState { - timestamp_history: self.state.precompiles_processor.timestamp_history.clone(), - }; - let memory = self.state.memory.clone(); - let decommitter_state = DecommitterTestInnerState { - modified_storage_keys: ModifiedKeysMap( - self.state - .decommittment_processor - .get_storage() - .borrow() - .modified_storage_keys() - .clone(), - ), - known_bytecodes: self.state.decommittment_processor.known_bytecodes.clone(), - decommitted_code_hashes: self - .state - .decommittment_processor - .get_decommitted_code_hashes_with_history() - .clone(), - }; - let storage_oracle_state = StorageOracleInnerState { - modified_storage_keys: ModifiedKeysMap( - self.state - .storage - .storage - .get_ptr() - .borrow() - .modified_storage_keys() - .clone(), - ), - frames_stack: self.state.storage.frames_stack.clone(), - pre_paid_changes: self.state.storage.pre_paid_changes.clone(), - paid_changes: self.state.storage.paid_changes.clone(), - initial_values: self.state.storage.initial_values.clone(), - returned_refunds: self.state.storage.returned_refunds.clone(), - }; - let local_state = self.state.local_state.clone(); - - VmInstanceInnerState { - event_sink, - precompile_processor_state, - memory, - decommitter_state, - storage_oracle_state, - local_state, - } - } -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/mod.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/mod.rs deleted file mode 100644 index c3cc5d8d980..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/mod.rs +++ /dev/null @@ -1,9 +0,0 @@ -pub(crate) use transaction_test_info::{ExpectedError, TransactionTestInfo, TxModifier}; -pub(crate) use vm_tester::{ - default_l1_batch, get_empty_storage, InMemoryStorageView, VmTester, VmTesterBuilder, -}; -pub(crate) use zksync_test_account::{Account, DeployContractsTx, TxType}; - -mod inner_state; -mod transaction_test_info; -mod vm_tester; diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/transaction_test_info.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/transaction_test_info.rs deleted file mode 100644 index cb81c4c5ed7..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/transaction_test_info.rs +++ /dev/null @@ -1,217 +0,0 @@ -use zksync_types::{ExecuteTransactionCommon, Transaction}; - -use crate::{ - interface::{ - CurrentExecutionState, ExecutionResult, Halt, TxRevertReason, VmExecutionMode, - 
VmExecutionResultAndLogs, VmInterface, VmInterfaceHistoryEnabled, VmRevertReason, - }, - vm_1_4_2::tests::tester::vm_tester::VmTester, -}; - -#[derive(Debug, Clone)] -pub(crate) enum TxModifier { - WrongSignatureLength, - WrongSignature, - WrongMagicValue, - WrongNonce, - NonceReused, -} - -#[derive(Debug, Clone)] -pub(crate) enum TxExpectedResult { - Rejected { error: ExpectedError }, - Processed { rollback: bool }, -} - -#[derive(Debug, Clone)] -pub(crate) struct TransactionTestInfo { - tx: Transaction, - result: TxExpectedResult, -} - -#[derive(Debug, Clone)] -pub(crate) struct ExpectedError { - pub(crate) revert_reason: TxRevertReason, - pub(crate) modifier: Option, -} - -impl From for ExpectedError { - fn from(value: TxModifier) -> Self { - let revert_reason = match value { - TxModifier::WrongSignatureLength => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Signature length is incorrect".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 29, 83, 105, 103, 110, 97, 116, 117, 114, 101, 32, - 108, 101, 110, 103, 116, 104, 32, 105, 115, 32, 105, 110, 99, 111, 114, 114, 101, 99, - 116, 0, 0, 0, - ], - }) - } - TxModifier::WrongSignature => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Account validation returned invalid magic value. Most often this means that the signature is incorrect".to_string(), - data: vec![], - }) - } - TxModifier::WrongMagicValue => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "v is neither 27 nor 28".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 22, 118, 32, 105, 115, 32, 110, 101, 105, 116, 104, - 101, 114, 32, 50, 55, 32, 110, 111, 114, 32, 50, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - }) - - } - TxModifier::WrongNonce => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Incorrect nonce".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 73, 110, 99, 111, 114, 114, 101, 99, 116, 32, 110, - 111, 110, 99, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - }) - } - TxModifier::NonceReused => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Reusing the same nonce twice".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 82, 101, 117, 115, 105, 110, 103, 32, 116, 104, - 101, 32, 115, 97, 109, 101, 32, 110, 111, 110, 99, 101, 32, 116, 119, 105, 99, 101, 0, - 0, 0, 0, - ], - }) - } - }; - - ExpectedError { - revert_reason: TxRevertReason::Halt(revert_reason), - modifier: Some(value), - } - } -} - -impl TransactionTestInfo { - pub(crate) fn new_rejected( - mut transaction: Transaction, - expected_error: ExpectedError, - ) -> Self { - transaction.common_data = match transaction.common_data { - ExecuteTransactionCommon::L2(mut data) => { - if let Some(modifier) = &expected_error.modifier { - match modifier { - 
TxModifier::WrongSignatureLength => { - data.signature = data.signature[..data.signature.len() - 20].to_vec() - } - TxModifier::WrongSignature => data.signature = vec![27u8; 65], - TxModifier::WrongMagicValue => data.signature = vec![1u8; 65], - TxModifier::WrongNonce => { - // Do not need to modify signature for nonce error - } - TxModifier::NonceReused => { - // Do not need to modify signature for nonce error - } - } - } - ExecuteTransactionCommon::L2(data) - } - _ => panic!("L1 transactions are not supported"), - }; - - Self { - tx: transaction, - result: TxExpectedResult::Rejected { - error: expected_error, - }, - } - } - - pub(crate) fn new_processed(transaction: Transaction, should_be_rollbacked: bool) -> Self { - Self { - tx: transaction, - result: TxExpectedResult::Processed { - rollback: should_be_rollbacked, - }, - } - } - - fn verify_result(&self, result: &VmExecutionResultAndLogs) { - match &self.result { - TxExpectedResult::Rejected { error } => match &result.result { - ExecutionResult::Success { .. } => { - panic!("Transaction should be reverted {:?}", self.tx.nonce()) - } - ExecutionResult::Revert { output } => match &error.revert_reason { - TxRevertReason::TxReverted(expected) => { - assert_eq!(output, expected) - } - _ => { - panic!("Error types mismatch"); - } - }, - ExecutionResult::Halt { reason } => match &error.revert_reason { - TxRevertReason::Halt(expected) => { - assert_eq!(reason, expected) - } - _ => { - panic!("Error types mismatch"); - } - }, - }, - TxExpectedResult::Processed { .. } => { - assert!(!result.result.is_failed()); - } - } - } - - fn should_rollback(&self) -> bool { - match &self.result { - TxExpectedResult::Rejected { .. } => true, - TxExpectedResult::Processed { rollback } => *rollback, - } - } -} - -impl VmTester { - pub(crate) fn execute_and_verify_txs( - &mut self, - txs: &[TransactionTestInfo], - ) -> CurrentExecutionState { - for tx_test_info in txs { - self.execute_tx_and_verify(tx_test_info.clone()); - } - self.vm.execute(VmExecutionMode::Batch); - let mut state = self.vm.get_current_execution_state(); - state.used_contract_hashes.sort(); - state - } - - pub(crate) fn execute_tx_and_verify( - &mut self, - tx_test_info: TransactionTestInfo, - ) -> VmExecutionResultAndLogs { - let inner_state_before = self.vm.dump_inner_state(); - self.vm.make_snapshot(); - self.vm.push_transaction(tx_test_info.tx.clone()); - let result = self.vm.execute(VmExecutionMode::OneTx); - tx_test_info.verify_result(&result); - if tx_test_info.should_rollback() { - self.vm.rollback_to_the_latest_snapshot(); - let inner_state_after = self.vm.dump_inner_state(); - assert_eq!( - inner_state_before, inner_state_after, - "Inner state before and after rollback should be equal" - ); - } - result - } -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/vm_tester.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/vm_tester.rs deleted file mode 100644 index 44f861f8d33..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/tester/vm_tester.rs +++ /dev/null @@ -1,298 +0,0 @@ -use std::marker::PhantomData; - -use zksync_contracts::BaseSystemContracts; -use crate::interface::storage::{InMemoryStorage, StoragePtr, StorageView, WriteStorage}; -use zksync_types::{ - block::MiniblockHasher, - fee_model::BatchFeeInput, - get_code_key, get_is_account_key, - helpers::unix_timestamp_ms, - utils::{deployed_address_create, storage_key_for_eth_balance}, - Address, L1BatchNumber, L2ChainId, MiniblockNumber, Nonce, ProtocolVersionId, U256, -}; -use 
zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; - -use crate::{ - interface::{ - L1BatchEnv, L2Block, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmInterface, - }, - vm_1_4_2::{ - constants::BLOCK_GAS_LIMIT, - tests::{ - tester::{Account, TxType}, - utils::read_test_contract, - }, - utils::l2_blocks::load_last_l2_block, - Vm, - }, - HistoryMode, -}; - -pub(crate) type InMemoryStorageView = StorageView; - -pub(crate) struct VmTester { - pub(crate) vm: Vm, - pub(crate) storage: StoragePtr, - pub(crate) fee_account: Address, - pub(crate) deployer: Option, - pub(crate) test_contract: Option
, - pub(crate) rich_accounts: Vec, - pub(crate) custom_contracts: Vec, - _phantom: std::marker::PhantomData, -} - -impl VmTester { - pub(crate) fn deploy_test_contract(&mut self) { - let contract = read_test_contract(); - let tx = self - .deployer - .as_mut() - .expect("You have to initialize builder with deployer") - .get_deploy_tx(&contract, None, TxType::L2) - .tx; - let nonce = tx.nonce().unwrap().0.into(); - self.vm.push_transaction(tx); - self.vm.execute(VmExecutionMode::OneTx); - let deployed_address = - deployed_address_create(self.deployer.as_ref().unwrap().address, nonce); - self.test_contract = Some(deployed_address); - } - - pub(crate) fn reset_with_empty_storage(&mut self) { - self.storage = StorageView::new(get_empty_storage()).to_rc_ptr(); - self.reset_state(false); - } - - /// Reset the state of the VM to the initial state. - /// If `use_latest_l2_block` is true, then the VM will use the latest L2 block from storage, - /// otherwise it will use the first L2 block of l1 batch env - pub(crate) fn reset_state(&mut self, use_latest_l2_block: bool) { - for account in self.rich_accounts.iter_mut() { - account.nonce = Nonce(0); - make_account_rich(self.storage.clone(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(self.storage.clone(), deployer); - } - - if !self.custom_contracts.is_empty() { - println!("Inserting custom contracts is not yet supported") - // `insert_contracts(&mut self.storage, &self.custom_contracts);` - } - - let mut l1_batch = self.vm.batch_env.clone(); - if use_latest_l2_block { - let last_l2_block = load_last_l2_block(self.storage.clone()).unwrap_or(L2Block { - number: 0, - timestamp: 0, - hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - }); - l1_batch.first_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: std::cmp::max(last_l2_block.timestamp + 1, l1_batch.timestamp), - prev_block_hash: last_l2_block.hash, - max_virtual_blocks_to_create: 1, - }; - } - - let vm = Vm::new(l1_batch, self.vm.system_env.clone(), self.storage.clone()); - - if self.test_contract.is_some() { - self.deploy_test_contract(); - } - - self.vm = vm; - } -} - -pub(crate) type ContractsToDeploy = (Vec, Address, bool); - -pub(crate) struct VmTesterBuilder { - storage: Option, - l1_batch_env: Option, - system_env: SystemEnv, - deployer: Option, - rich_accounts: Vec, - custom_contracts: Vec, - _phantom: PhantomData, -} - -impl Clone for VmTesterBuilder { - fn clone(&self) -> Self { - Self { - storage: None, - l1_batch_env: self.l1_batch_env.clone(), - system_env: self.system_env.clone(), - deployer: self.deployer.clone(), - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - _phantom: PhantomData, - } - } -} - -#[allow(dead_code)] -impl VmTesterBuilder { - pub(crate) fn new(_: H) -> Self { - Self { - storage: None, - l1_batch_env: None, - system_env: SystemEnv { - zk_porter_available: false, - version: ProtocolVersionId::latest(), - base_system_smart_contracts: BaseSystemContracts::playground(), - bootloader_gas_limit: BLOCK_GAS_LIMIT, - execution_mode: TxExecutionMode::VerifyExecute, - default_validation_computational_gas_limit: BLOCK_GAS_LIMIT, - chain_id: L2ChainId::from(270), - }, - deployer: None, - rich_accounts: vec![], - custom_contracts: vec![], - _phantom: PhantomData, - } - } - - pub(crate) fn with_l1_batch_env(mut self, l1_batch_env: L1BatchEnv) -> Self { - self.l1_batch_env = Some(l1_batch_env); - self - } - - pub(crate) fn with_system_env(mut self, system_env: SystemEnv) -> 
-        self.system_env = system_env;
-        self
-    }
-
-    pub(crate) fn with_storage(mut self, storage: InMemoryStorage) -> Self {
-        self.storage = Some(storage);
-        self
-    }
-
-    pub(crate) fn with_base_system_smart_contracts(
-        mut self,
-        base_system_smart_contracts: BaseSystemContracts,
-    ) -> Self {
-        self.system_env.base_system_smart_contracts = base_system_smart_contracts;
-        self
-    }
-
-    pub(crate) fn with_bootloader_gas_limit(mut self, gas_limit: u32) -> Self {
-        self.system_env.bootloader_gas_limit = gas_limit;
-        self
-    }
-
-    pub(crate) fn with_execution_mode(mut self, execution_mode: TxExecutionMode) -> Self {
-        self.system_env.execution_mode = execution_mode;
-        self
-    }
-
-    pub(crate) fn with_empty_in_memory_storage(mut self) -> Self {
-        self.storage = Some(get_empty_storage());
-        self
-    }
-
-    pub(crate) fn with_random_rich_accounts(mut self, number: u32) -> Self {
-        for _ in 0..number {
-            let account = Account::random();
-            self.rich_accounts.push(account);
-        }
-        self
-    }
-
-    pub(crate) fn with_rich_accounts(mut self, accounts: Vec<Account>) -> Self {
-        self.rich_accounts.extend(accounts);
-        self
-    }
-
-    pub(crate) fn with_deployer(mut self) -> Self {
-        let deployer = Account::random();
-        self.deployer = Some(deployer);
-        self
-    }
-
-    pub(crate) fn with_custom_contracts(mut self, contracts: Vec<ContractsToDeploy>) -> Self {
-        self.custom_contracts = contracts;
-        self
-    }
-
-    pub(crate) fn build(self) -> VmTester<H> {
-        let l1_batch_env = self
-            .l1_batch_env
-            .unwrap_or_else(|| default_l1_batch(L1BatchNumber(1)));
-
-        let mut raw_storage = self.storage.unwrap_or_else(get_empty_storage);
-        insert_contracts(&mut raw_storage, &self.custom_contracts);
-        let storage_ptr = StorageView::new(raw_storage).to_rc_ptr();
-        for account in self.rich_accounts.iter() {
-            make_account_rich(storage_ptr.clone(), account);
-        }
-        if let Some(deployer) = &self.deployer {
-            make_account_rich(storage_ptr.clone(), deployer);
-        }
-        let fee_account = l1_batch_env.fee_account;
-
-        let vm = Vm::new(l1_batch_env, self.system_env, storage_ptr.clone());
-
-        VmTester {
-            vm,
-            storage: storage_ptr,
-            fee_account,
-            deployer: self.deployer,
-            test_contract: None,
-            rich_accounts: self.rich_accounts.clone(),
-            custom_contracts: self.custom_contracts.clone(),
-            _phantom: PhantomData,
-        }
-    }
-}
-
-pub(crate) fn default_l1_batch(number: L1BatchNumber) -> L1BatchEnv {
-    let timestamp = unix_timestamp_ms();
-    L1BatchEnv {
-        previous_batch_hash: None,
-        number,
-        timestamp,
-        fee_input: BatchFeeInput::l1_pegged(
-            50_000_000_000, // 50 gwei
-            250_000_000,    // 0.25 gwei
-        ),
-        fee_account: Address::random(),
-        enforced_base_fee: None,
-        first_l2_block: L2BlockEnv {
-            number: 1,
-            timestamp,
-            prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)),
-            max_virtual_blocks_to_create: 100,
-        },
-    }
-}
-
-pub(crate) fn make_account_rich(
-    storage: StoragePtr<StorageView<InMemoryStorage>>,
-    account: &Account,
-) {
-    let key = storage_key_for_eth_balance(&account.address);
-    storage
-        .as_ref()
-        .borrow_mut()
-        .set_value(key, u256_to_h256(U256::from(10u64.pow(19))));
-}
-
-pub(crate) fn get_empty_storage() -> InMemoryStorage {
-    InMemoryStorage::with_system_contracts(hash_bytecode)
-}
-
-// Inserts the contracts into the test environment, bypassing the
-// deployer system contract. Besides the reference to storage
-// it accepts a `contracts` tuple of information about the contract
-// and whether or not it is an account.
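// For orientation, since every deleted test below leans on it: this is how the builder
// API above is typically assembled. A minimal sketch only, assuming the usual test
// imports (`VmTesterBuilder`, `TxExecutionMode`, `VmExecutionMode`) are in scope; the
// `insert_contracts` helper below is what `build()` uses to seed custom contracts.

fn example_builder_usage() {
    // Assemble a VM with empty storage, one funded account, and the default batch env.
    let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled)
        .with_empty_in_memory_storage()
        .with_execution_mode(TxExecutionMode::VerifyExecute)
        .with_random_rich_accounts(1)
        .build();

    // Tests then take a funded account, push transactions, and run them one at a time:
    let _account = &mut vm.rich_accounts[0];
    // vm.vm.push_transaction(tx);
    // vm.vm.execute(VmExecutionMode::OneTx);
}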
-fn insert_contracts(raw_storage: &mut InMemoryStorage, contracts: &[ContractsToDeploy]) { - for (contract, address, is_account) in contracts { - let deployer_code_key = get_code_key(address); - raw_storage.set_value(deployer_code_key, hash_bytecode(contract)); - - if *is_account { - let is_account_key = get_is_account_key(address); - raw_storage.set_value(is_account_key, u256_to_h256(1_u32.into())); - } - - raw_storage.store_factory_dep(hash_bytecode(contract), contract.clone()); - } -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/tracing_execution_error.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/tracing_execution_error.rs deleted file mode 100644 index 138e8041e6a..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/tracing_execution_error.rs +++ /dev/null @@ -1,51 +0,0 @@ -use zksync_types::{Execute, H160}; - -use crate::{ - interface::{TxExecutionMode, TxRevertReason, VmRevertReason}, - vm_1_4_2::tests::{ - tester::{ExpectedError, TransactionTestInfo, VmTesterBuilder}, - utils::{get_execute_error_calldata, read_error_contract, BASE_SYSTEM_CONTRACTS}, - }, -}; - -#[test] -fn test_tracing_of_execution_errors() { - let contract_address = H160::random(); - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_custom_contracts(vec![(read_error_contract(), contract_address, false)]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address, - calldata: get_execute_error_calldata(), - value: Default::default(), - factory_deps: Some(vec![]), - }, - None, - ); - - vm.execute_tx_and_verify(TransactionTestInfo::new_rejected( - tx, - ExpectedError { - revert_reason: TxRevertReason::TxReverted(VmRevertReason::General { - msg: "short".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 115, 104, 111, 114, 116, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, - ], - }), - modifier: None, - }, - )); -} diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/upgrade.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/upgrade.rs deleted file mode 100644 index 2af2928b1c4..00000000000 --- a/core/lib/multivm/src/versions/vm_1_4_2/tests/upgrade.rs +++ /dev/null @@ -1,352 +0,0 @@ -use zk_evm_1_4_1::aux_structures::Timestamp; -use zksync_contracts::{deployer_contract, load_sys_contract, read_bytecode}; -use crate::interface::storage::WriteStorage; -use zksync_test_account::TxType; -use zksync_types::{ - ethabi::{Contract, Token}, - get_code_key, get_known_code_key, - protocol_upgrade::ProtocolUpgradeTxCommonData, - Address, Execute, ExecuteTransactionCommon, Transaction, COMPLEX_UPGRADER_ADDRESS, - CONTRACT_DEPLOYER_ADDRESS, CONTRACT_FORCE_DEPLOYER_ADDRESS, H160, H256, - REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use super::utils::{get_complex_upgrade_abi, read_test_contract}; -use crate::{ - interface::{ - ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterface, - VmInterfaceHistoryEnabled, - }, - vm_1_4_2::tests::{ - tester::VmTesterBuilder, 
- utils::{read_complex_upgrade, verify_required_storage}, - }, -}; - -/// In this test we ensure that the requirements for protocol upgrade transactions are enforced by the bootloader: -/// - This transaction must be the only one in block -/// - If present, this transaction must be the first one in block -#[test] -fn test_protocol_upgrade_is_first() { - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let bytecode_hash = hash_bytecode(&read_test_contract()); - vm.vm - .storage - .borrow_mut() - .set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); - - // Here we just use some random transaction of protocol upgrade type: - let protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecode hash to - address: H160::random(), - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - // Another random upgrade transaction - let another_protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecode hash to - address: H160::random(), - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - let normal_l1_transaction = vm.rich_accounts[0] - .get_deploy_tx(&read_test_contract(), None, TxType::L1 { serial_id: 0 }) - .tx; - - let expected_error = - Halt::UnexpectedVMBehavior("Assertion error: Protocol upgrade tx not first".to_string()); - - vm.vm.make_snapshot(); - // Test 1: there must be only one system transaction in block - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(another_protocol_upgrade_transaction); - - vm.vm.execute(VmExecutionMode::OneTx); - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error.clone() - } - ); - - // Test 2: the protocol upgrade tx must be the first one in block - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error - } - ); - - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(protocol_upgrade_transaction); - vm.vm.push_transaction(normal_l1_transaction); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); -} - -/// In this test we try to test how force deployments could be done via protocol upgrade transactions. 
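// A note on the pattern the ordering test above relies on: each case runs against a
// fresh snapshot of the same VM rather than a newly built VmTester. A sketch of the
// idiom, using the same calls as the deleted test (`VmInterfaceHistoryEnabled`
// provides the snapshot methods):
//
//     vm.vm.make_snapshot();
//     vm.vm.push_transaction(protocol_upgrade_transaction.clone());
//     vm.vm.push_transaction(normal_l1_transaction.clone());
//     vm.vm.execute(VmExecutionMode::OneTx);
//     let result = vm.vm.execute(VmExecutionMode::OneTx);
//     vm.vm.rollback_to_the_latest_snapshot(); // restore the pre-case state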
-#[test] -fn test_force_deploy_upgrade() { - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - let bytecode_hash = hash_bytecode(&read_test_contract()); - - let known_code_key = get_known_code_key(&bytecode_hash); - // It is generally expected that all the keys will be set as known prior to the protocol upgrade. - storage_view - .borrow_mut() - .set_value(known_code_key, u256_to_h256(1.into())); - drop(storage_view); - - let address_to_deploy = H160::random(); - // Here we just use some random transaction of protocol upgrade type: - let transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecode hash to - address: address_to_deploy, - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - vm.vm.push_transaction(transaction); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = vec![(bytecode_hash, get_code_key(&address_to_deploy))]; - - // Verify that the bytecode has been set correctly - verify_required_storage(&vm.vm.state, expected_slots); -} - -/// Here we show how the work with the complex upgrader could be done -#[test] -fn test_complex_upgrader() { - let mut vm = VmTesterBuilder::new(crate::vm_latest::HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - - let bytecode_hash = hash_bytecode(&read_complex_upgrade()); - let msg_sender_test_hash = hash_bytecode(&read_msg_sender_test()); - - // Let's assume that the bytecode for the implementation of the complex upgrade - // is already deployed in some address in user space - let upgrade_impl = H160::random(); - let account_code_key = get_code_key(&upgrade_impl); - - storage_view - .borrow_mut() - .set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); - storage_view.borrow_mut().set_value( - get_known_code_key(&msg_sender_test_hash), - u256_to_h256(1.into()), - ); - storage_view - .borrow_mut() - .set_value(account_code_key, bytecode_hash); - drop(storage_view); - - vm.vm.state.decommittment_processor.populate( - vec![ - ( - h256_to_u256(bytecode_hash), - bytes_to_be_words(read_complex_upgrade()), - ), - ( - h256_to_u256(msg_sender_test_hash), - bytes_to_be_words(read_msg_sender_test()), - ), - ], - Timestamp(0), - ); - - let address_to_deploy1 = H160::random(); - let address_to_deploy2 = H160::random(); - - let transaction = get_complex_upgrade_tx( - upgrade_impl, - address_to_deploy1, - address_to_deploy2, - bytecode_hash, - ); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = vec![ - (bytecode_hash, get_code_key(&address_to_deploy1)), - (bytecode_hash, get_code_key(&address_to_deploy2)), - ]; - - // Verify that the bytecode has been set correctly - verify_required_storage(&vm.vm.state, expected_slots); -} - -#[derive(Debug, Clone)] -struct ForceDeployment { - 
// The bytecode hash to put on an address
-    bytecode_hash: H256,
-    // The address on which to deploy the bytecode hash to
-    address: Address,
-    // Whether to run the constructor on the force deployment
-    call_constructor: bool,
-    // The value with which to initialize a contract
-    value: U256,
-    // The constructor calldata
-    input: Vec<u8>,
-}
-
-fn get_forced_deploy_tx(deployment: &[ForceDeployment]) -> Transaction {
-    let deployer = deployer_contract();
-    let contract_function = deployer.function("forceDeployOnAddresses").unwrap();
-
-    let encoded_deployments: Vec<_> = deployment
-        .iter()
-        .map(|deployment| {
-            Token::Tuple(vec![
-                Token::FixedBytes(deployment.bytecode_hash.as_bytes().to_vec()),
-                Token::Address(deployment.address),
-                Token::Bool(deployment.call_constructor),
-                Token::Uint(deployment.value),
-                Token::Bytes(deployment.input.clone()),
-            ])
-        })
-        .collect();
-
-    let params = [Token::Array(encoded_deployments)];
-
-    let calldata = contract_function
-        .encode_input(&params)
-        .expect("failed to encode parameters");
-
-    let execute = Execute {
-        contract_address: CONTRACT_DEPLOYER_ADDRESS,
-        calldata,
-        factory_deps: None,
-        value: U256::zero(),
-    };
-
-    Transaction {
-        common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData {
-            sender: CONTRACT_FORCE_DEPLOYER_ADDRESS,
-            gas_limit: U256::from(200_000_000u32),
-            gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(),
-            ..Default::default()
-        }),
-        execute,
-        received_timestamp_ms: 0,
-        raw_bytes: None,
-    }
-}
-
-// Returns the transaction that performs a complex protocol upgrade.
-// The first param is the address of the implementation of the complex upgrade
-// in user-space, while the next 3 params are params of the implementation itself.
-// For the explanation for the parameters, please refer to:
-// etc/contracts-test-data/complex-upgrade/complex-upgrade.sol
-fn get_complex_upgrade_tx(
-    implementation_address: Address,
-    address1: Address,
-    address2: Address,
-    bytecode_hash: H256,
-) -> Transaction {
-    let impl_contract = get_complex_upgrade_abi();
-    let impl_function = impl_contract.function("someComplexUpgrade").unwrap();
-    let impl_calldata = impl_function
-        .encode_input(&[
-            Token::Address(address1),
-            Token::Address(address2),
-            Token::FixedBytes(bytecode_hash.as_bytes().to_vec()),
-        ])
-        .unwrap();
-
-    let complex_upgrader = get_complex_upgrader_abi();
-    let upgrade_function = complex_upgrader.function("upgrade").unwrap();
-    let complex_upgrader_calldata = upgrade_function
-        .encode_input(&[
-            Token::Address(implementation_address),
-            Token::Bytes(impl_calldata),
-        ])
-        .unwrap();
-
-    let execute = Execute {
-        contract_address: COMPLEX_UPGRADER_ADDRESS,
-        calldata: complex_upgrader_calldata,
-        factory_deps: None,
-        value: U256::zero(),
-    };
-
-    Transaction {
-        common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData {
-            sender: CONTRACT_FORCE_DEPLOYER_ADDRESS,
-            gas_limit: U256::from(200_000_000u32),
-            gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(),
-            ..Default::default()
-        }),
-        execute,
-        received_timestamp_ms: 0,
-        raw_bytes: None,
-    }
-}
-
-fn read_msg_sender_test() -> Vec<u8> {
-    read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/msg-sender.sol/MsgSenderTest.json")
-}
-
-fn get_complex_upgrader_abi() -> Contract {
-    load_sys_contract("ComplexUpgrader")
-}
diff --git a/core/lib/multivm/src/versions/vm_1_4_2/tests/utils.rs b/core/lib/multivm/src/versions/vm_1_4_2/tests/utils.rs
deleted file mode 100644
index 5655e90fb4e..00000000000
--- a/core/lib/multivm/src/versions/vm_1_4_2/tests/utils.rs
+++ /dev/null
@@ -1,121 +0,0 @@
-use ethabi::Contract;
-use once_cell::sync::Lazy;
-use zksync_contracts::{
-    load_contract, read_bytecode, read_zbin_bytecode, BaseSystemContracts, SystemContractCode,
-};
-use crate::interface::storage::{StoragePtr, WriteStorage};
-use zksync_types::{
-    utils::storage_key_for_standard_token_balance, AccountTreeId, Address, StorageKey, H256, U256,
-};
-use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256};
-
-use crate::vm_1_4_2::{
-    tests::tester::InMemoryStorageView, types::internals::ZkSyncVmState, HistoryMode,
-};
-
-pub(crate) static BASE_SYSTEM_CONTRACTS: Lazy<BaseSystemContracts> =
-    Lazy::new(BaseSystemContracts::load_from_disk);
-
-// Probably make it a part of vm tester
-pub(crate) fn verify_required_storage<H: HistoryMode>(
-    state: &ZkSyncVmState<InMemoryStorageView, H>,
-    required_values: Vec<(H256, StorageKey)>,
-) {
-    for (required_value, key) in required_values {
-        let current_value = state.storage.storage.read_from_storage(&key);
-
-        assert_eq!(
-            u256_to_h256(current_value),
-            required_value,
-            "Invalid value at key {key:?}"
-        );
-    }
-}
-
-pub(crate) fn verify_required_memory<H: HistoryMode>(
-    state: &ZkSyncVmState<InMemoryStorageView, H>,
-    required_values: Vec<(U256, u32, u32)>,
-) {
-    for (required_value, memory_page, cell) in required_values {
-        let current_value = state
-            .memory
-            .read_slot(memory_page as usize, cell as usize)
-            .value;
-        assert_eq!(current_value, required_value);
-    }
-}
-
-pub(crate) fn get_balance<S: WriteStorage>(
-    token_id: AccountTreeId,
-    account: &Address,
-    main_storage: StoragePtr<S>,
-) -> U256 {
-    let key = storage_key_for_standard_token_balance(token_id, account);
-    h256_to_u256(main_storage.borrow_mut().read_value(&key))
-}
-
-pub(crate) fn read_test_contract() -> Vec<u8> {
-    read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json")
-}
-
-pub(crate) fn get_bootloader(test: &str) -> SystemContractCode {
-    let bootloader_code = read_zbin_bytecode(format!(
-        "contracts/system-contracts/bootloader/tests/artifacts/{}.yul.zbin",
-        test
-    ));
-
-    let bootloader_hash = hash_bytecode(&bootloader_code);
-    SystemContractCode {
-        code: bytes_to_be_words(bootloader_code),
-        hash: bootloader_hash,
-    }
-}
-
-pub(crate) fn read_nonce_holder_tester() -> Vec<u8> {
-    read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/custom-account/nonce-holder-test.sol/NonceHolderTest.json")
-}
-
-pub(crate) fn read_error_contract() -> Vec<u8> {
-    read_bytecode(
-        "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json",
-    )
-}
-
-pub(crate) fn get_execute_error_calldata() -> Vec<u8> {
-    let test_contract = load_contract(
-        "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json",
-    );
-
-    let function = test_contract.function("require_short").unwrap();
-
-    function
-        .encode_input(&[])
-        .expect("failed to encode parameters")
-}
-
-pub(crate) fn read_many_owners_custom_account_contract() -> (Vec<u8>, Contract) {
-    let path = "etc/contracts-test-data/artifacts-zk/contracts/custom-account/many-owners-custom-account.sol/ManyOwnersCustomAccount.json";
-    (read_bytecode(path), load_contract(path))
-}
-
-pub(crate) fn read_max_depth_contract() -> Vec<u8> {
-    read_zbin_bytecode(
-        "core/tests/ts-integration/contracts/zkasm/artifacts/deep_stak.zkasm/deep_stak.zkasm.zbin",
-    )
-}
-
-pub(crate) fn read_precompiles_contract() -> Vec<u8> {
-    read_bytecode(
-        "etc/contracts-test-data/artifacts-zk/contracts/precompiles/precompiles.sol/Precompiles.json",
-    )
-}
-
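// These helpers share one convention: `read_bytecode` pulls the deployable bytecode out
// of a hardhat `artifacts-zk` JSON artifact, while `load_contract` yields the ethabi
// `Contract` for encoding calls against the same artifact. A hedged sketch of pairing
// them, assuming `ethabi::Token` is in scope; the `increment(uint256)` selector matches
// the hardcoded calldata used in the call-tracer test further below:
//
//     let path =
//         "etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json";
//     let bytecode = read_bytecode(path); // what gets deployed
//     let abi = load_contract(path);      // what encodes calldata
//     let calldata = abi
//         .function("increment")
//         .unwrap()
//         .encode_input(&[Token::Uint(6.into())])
//         .unwrap();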
-pub(crate) fn read_complex_upgrade() -> Vec<u8> {
-    read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json")
-}
-
-pub(crate) fn get_complex_upgrade_abi() -> Contract {
-    load_contract(
-        "etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json"
-    )
-}
diff --git a/core/lib/multivm/src/versions/vm_1_4_2/types/internals/transaction_data.rs b/core/lib/multivm/src/versions/vm_1_4_2/types/internals/transaction_data.rs
index 3498e51ec30..38280aa8051 100644
--- a/core/lib/multivm/src/versions/vm_1_4_2/types/internals/transaction_data.rs
+++ b/core/lib/multivm/src/versions/vm_1_4_2/types/internals/transaction_data.rs
@@ -22,7 +22,7 @@ use crate::vm_1_4_2::{
 pub(crate) struct TransactionData {
     pub(crate) tx_type: u8,
     pub(crate) from: Address,
-    pub(crate) to: Address,
+    pub(crate) to: Option<Address>
, pub(crate) gas_limit: U256, pub(crate) pubdata_price_limit: U256, pub(crate) max_fee_per_gas: U256, @@ -169,7 +169,7 @@ impl TransactionData { encode(&[Token::Tuple(vec![ Token::Uint(U256::from_big_endian(&self.tx_type.to_be_bytes())), Token::Address(self.from), - Token::Address(self.to), + Token::Address(self.to.unwrap_or_default()), Token::Uint(self.gas_limit), Token::Uint(self.pubdata_price_limit), Token::Uint(self.max_fee_per_gas), @@ -311,7 +311,7 @@ mod tests { let transaction = TransactionData { tx_type: 113, from: Address::random(), - to: Address::random(), + to: Some(Address::random()), gas_limit: U256::from(1u32), pubdata_price_limit: U256::from(1u32), max_fee_per_gas: U256::from(1u32), diff --git a/core/lib/multivm/src/versions/vm_1_4_2/vm.rs b/core/lib/multivm/src/versions/vm_1_4_2/vm.rs index 71633dd3fca..fe2015debd2 100644 --- a/core/lib/multivm/src/versions/vm_1_4_2/vm.rs +++ b/core/lib/multivm/src/versions/vm_1_4_2/vm.rs @@ -1,3 +1,5 @@ +use std::mem; + use circuit_sequencer_api_1_4_2::sort_storage_access::sort_storage_access_queries; use zksync_types::{ l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, @@ -11,7 +13,6 @@ use crate::{ BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, - VmMemoryMetrics, }, utils::events::extract_l2tol1logs_from_l1_messenger, vm_1_4_2::{ @@ -90,10 +91,10 @@ impl VmInterface for Vm { /// Execute VM with custom tracers. fn inspect( &mut self, - tracer: Self::TracerDispatcher, + tracer: &mut Self::TracerDispatcher, execution_mode: VmExecutionMode, ) -> VmExecutionResultAndLogs { - self.inspect_inner(tracer, execution_mode, None) + self.inspect_inner(mem::take(tracer), execution_mode, None) } fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { @@ -102,12 +103,12 @@ impl VmInterface for Vm { fn inspect_transaction_with_bytecode_compression( &mut self, - tracer: Self::TracerDispatcher, + tracer: &mut Self::TracerDispatcher, tx: Transaction, with_compression: bool, ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { self.push_transaction_with_compression(tx, with_compression); - let result = self.inspect_inner(tracer, VmExecutionMode::OneTx, None); + let result = self.inspect_inner(mem::take(tracer), VmExecutionMode::OneTx, None); if self.has_unpublished_bytecodes() { ( Err(BytecodeCompressionError::BytecodeCompressionFailed), @@ -124,12 +125,8 @@ impl VmInterface for Vm { } } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - self.record_vm_memory_metrics_inner() - } - fn finish_batch(&mut self) -> FinishedL1Batch { - let result = self.inspect(TracerDispatcher::default(), VmExecutionMode::Batch); + let result = self.inspect(&mut TracerDispatcher::default(), VmExecutionMode::Batch); let execution_state = self.get_current_execution_state(); let bootloader_memory = self.bootloader_state.bootloader_memory(); FinishedL1Batch { diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/utils.rs index 1a1c620c2b2..c97d3ff30e4 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/bootloader_state/utils.rs @@ -167,8 +167,8 @@ pub(super) fn assemble_tx_meta(execution_mode: TxExecutionMode, execute_tx: bool // Set 0 byte (execution mode) output[0] = match 
execution_mode { TxExecutionMode::VerifyExecute => 0x00, - TxExecutionMode::EstimateFee { .. } => 0x00, - TxExecutionMode::EthCall { .. } => 0x02, + TxExecutionMode::EstimateFee => 0x00, + TxExecutionMode::EthCall => 0x02, }; // Set 31 byte (marker for tx execution) diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/implementation/execution.rs b/core/lib/multivm/src/versions/vm_boojum_integration/implementation/execution.rs index a7c790a4bc3..79669eddd56 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/implementation/execution.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/implementation/execution.rs @@ -1,3 +1,5 @@ +use std::mem; + use zk_evm_1_4_0::aux_structures::Timestamp; use crate::{ @@ -20,7 +22,7 @@ use crate::{ impl Vm { pub(crate) fn inspect_inner( &mut self, - dispatcher: TracerDispatcher, + dispatcher: &mut TracerDispatcher, execution_mode: VmExecutionMode, ) -> VmExecutionResultAndLogs { let mut enable_refund_tracer = false; @@ -39,7 +41,7 @@ impl Vm { /// Collect the result from the default tracers. fn inspect_and_collect_results( &mut self, - dispatcher: TracerDispatcher, + dispatcher: &mut TracerDispatcher, execution_mode: VmExecutionMode, with_refund_tracer: bool, ) -> (VmExecutionStopReason, VmExecutionResultAndLogs) { @@ -49,7 +51,7 @@ impl Vm { DefaultExecutionTracer::new( self.system_env.default_validation_computational_gas_limit, execution_mode, - dispatcher, + mem::take(dispatcher), self.storage.clone(), refund_tracers, Some(PubdataTracer::new(self.batch_env.clone(), execution_mode)), @@ -84,6 +86,7 @@ impl Vm { circuit_statistic_from_cycles(tx_tracer.circuits_tracer.statistics), ); let result = tx_tracer.result_tracer.into_result(); + *dispatcher = tx_tracer.dispatcher; // return the dispatcher back let result = VmExecutionResultAndLogs { result, diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/implementation/statistics.rs b/core/lib/multivm/src/versions/vm_boojum_integration/implementation/statistics.rs index 46f8bc2f400..015d5acd340 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/implementation/statistics.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/implementation/statistics.rs @@ -57,7 +57,7 @@ impl Vm { } /// Returns the info about all oracles' sizes. 
- pub(crate) fn record_vm_memory_metrics_inner(&self) -> VmMemoryMetrics { + pub(crate) fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { VmMemoryMetrics { event_sink_inner: self.state.event_sink.get_size(), event_sink_history: self.state.event_sink.get_history_size(), diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/bootloader.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/bootloader.rs deleted file mode 100644 index 57229abb097..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/bootloader.rs +++ /dev/null @@ -1,56 +0,0 @@ -use zksync_types::U256; - -use crate::{ - interface::{ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - constants::BOOTLOADER_HEAP_PAGE, - tests::{ - tester::VmTesterBuilder, - utils::{get_bootloader, verify_required_memory, BASE_SYSTEM_CONTRACTS}, - }, - HistoryEnabled, - }, -}; - -#[test] -fn test_dummy_bootloader() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!result.result.is_failed()); - - let correct_first_cell = U256::from_str_radix("123123123", 16).unwrap(); - verify_required_memory( - &vm.vm.state, - vec![(correct_first_cell, BOOTLOADER_HEAP_PAGE, 0)], - ); -} - -#[test] -fn test_bootloader_out_of_gas() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_gas_limit(10) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let res = vm.vm.execute(VmExecutionMode::Batch); - - assert_matches!( - res.result, - ExecutionResult::Halt { - reason: Halt::BootloaderOutOfGas - } - ); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/bytecode_publishing.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/bytecode_publishing.rs deleted file mode 100644 index ad1b0f26036..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/bytecode_publishing.rs +++ /dev/null @@ -1,43 +0,0 @@ -use zksync_types::event::extract_long_l2_to_l1_messages; -use zksync_utils::bytecode::compress_bytecode; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - HistoryEnabled, - }, -}; - -#[test] -fn test_bytecode_publishing() { - // In this test, we aim to ensure that the contents of the compressed bytecodes - // are included as part of the L2->L1 long messages - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let compressed_bytecode = compress_bytecode(&counter).unwrap(); - - let DeployContractsTx { tx, .. 
} = account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - - let state = vm.vm.get_current_execution_state(); - let long_messages = extract_long_l2_to_l1_messages(&state.events); - assert!( - long_messages.contains(&compressed_bytecode), - "Bytecode not published" - ); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/call_tracer.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/call_tracer.rs deleted file mode 100644 index e9df4fa80ff..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/call_tracer.rs +++ /dev/null @@ -1,92 +0,0 @@ -use std::sync::Arc; - -use once_cell::sync::OnceCell; -use zksync_types::{Address, Execute}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - tracers::CallTracer, - vm_boojum_integration::{ - constants::BLOCK_GAS_LIMIT, - tests::{ - tester::VmTesterBuilder, - utils::{read_max_depth_contract, read_test_contract}, - }, - HistoryEnabled, ToTracerPointer, - }, -}; - -// This test is ultra slow, so it's ignored by default. -#[test] -#[ignore] -fn test_max_depth() { - let contarct = read_max_depth_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contarct, address, true)]) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: vec![], - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result = Arc::new(OnceCell::new()); - let call_tracer = CallTracer::new(result.clone()).into_tracer_pointer(); - vm.vm.push_transaction(tx); - let res = vm.vm.inspect(call_tracer.into(), VmExecutionMode::OneTx); - assert!(result.get().is_some()); - assert!(res.result.is_failed()); -} - -#[test] -fn test_basic_behavior() { - let contarct = read_test_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contarct, address, true)]) - .build(); - - let increment_by_6_calldata = - "7cf5dab00000000000000000000000000000000000000000000000000000000000000006"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: hex::decode(increment_by_6_calldata).unwrap(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result = Arc::new(OnceCell::new()); - let call_tracer = CallTracer::new(result.clone()).into_tracer_pointer(); - vm.vm.push_transaction(tx); - let res = vm.vm.inspect(call_tracer.into(), VmExecutionMode::OneTx); - - let call_tracer_result = result.get().unwrap(); - - assert_eq!(call_tracer_result.len(), 1); - // Expect that there are a plenty of subcalls underneath. 
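// The tracer result is a tree: each node carries its child invocations in a `calls`
// field, which is what the assertion below indexes into. A sketch of walking the whole
// tree, assuming the `Call` type exposed by the call tracer (illustrative only):

fn count_calls(call: &Call) -> usize {
    // One for this frame, plus everything reachable beneath it.
    1 + call.calls.iter().map(count_calls).sum::<usize>()
}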
- let subcall = &call_tracer_result[0].calls; - assert!(subcall.len() > 10); - assert!(!res.result.is_failed()); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/circuits.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/circuits.rs deleted file mode 100644 index b0cffa7d3c8..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/circuits.rs +++ /dev/null @@ -1,66 +0,0 @@ -use circuit_sequencer_api_1_4_0::geometry_config::get_geometry_config; -use zksync_types::{Address, Execute, U256}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - constants::BLOCK_GAS_LIMIT, tests::tester::VmTesterBuilder, HistoryEnabled, - }, -}; - -// Checks that estimated number of circuits for simple transfer doesn't differ much -// from hardcoded expected value. -#[test] -fn test_circuits() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: Address::random(), - calldata: Vec::new(), - value: U256::from(1u8), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let res = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let statistic = res.statistics.circuit_statistic; - // Check `circuit_statistic`. - assert!(statistic.main_vm > f32::EPSILON); - assert!(statistic.ram_permutation > f32::EPSILON); - assert!(statistic.storage_application > f32::EPSILON); - assert!(statistic.storage_sorter > f32::EPSILON); - assert!(statistic.code_decommitter > f32::EPSILON); - assert!(statistic.code_decommitter_sorter > f32::EPSILON); - assert!(statistic.log_demuxer > f32::EPSILON); - assert!(statistic.events_sorter > f32::EPSILON); - assert!(statistic.keccak256 > f32::EPSILON); - // Single `ecrecover` should be used to validate tx signature. - assert_eq!( - statistic.ecrecover, - 1.0 / get_geometry_config().cycles_per_ecrecover_circuit as f32 - ); - // `sha256` shouldn't be used. 
- assert_eq!(statistic.sha256, 0.0); - - const EXPECTED_CIRCUITS_USED: f32 = 4.6363; - let delta = (statistic.total_f32() - EXPECTED_CIRCUITS_USED) / EXPECTED_CIRCUITS_USED; - - if delta.abs() > 0.1 { - panic!( - "Estimation differs from expected result by too much: {}%, expected value: {}, got {}", - delta * 100.0, - EXPECTED_CIRCUITS_USED, - statistic.total_f32(), - ); - } -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/default_aa.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/default_aa.rs deleted file mode 100644 index a8c20cfebc1..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/default_aa.rs +++ /dev/null @@ -1,76 +0,0 @@ -use zksync_system_constants::L2_ETH_TOKEN_ADDRESS; -use zksync_types::{ - get_code_key, get_known_code_key, get_nonce_key, - system_contracts::{DEPLOYMENT_NONCE_INCREMENT, TX_NONCE_INCREMENT}, - AccountTreeId, U256, -}; -use zksync_utils::u256_to_h256; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::{get_balance, read_test_contract, verify_required_storage}, - }, - HistoryEnabled, - }, -}; - -#[test] -fn test_default_aa_interaction() { - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let DeployContractsTx { - tx, - bytecode_hash, - address, - } = account.get_deploy_tx(&counter, None, TxType::L2); - let maximal_fee = tx.gas_limit() * vm.vm.batch_env.base_fee(); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - vm.vm.get_current_execution_state(); - - // Both deployment and ordinary nonce should be incremented by one. - let account_nonce_key = get_nonce_key(&account.address); - let expected_nonce = TX_NONCE_INCREMENT + DEPLOYMENT_NONCE_INCREMENT; - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&bytecode_hash); - - // The contract should be deployed successfully. 
- let account_code_key = get_code_key(&address); - - let expected_slots = vec![ - (u256_to_h256(expected_nonce), account_nonce_key), - (u256_to_h256(U256::from(1u32)), known_codes_key), - (bytecode_hash, account_code_key), - ]; - - verify_required_storage(&vm.vm.state, expected_slots); - - let expected_fee = maximal_fee - - U256::from(result.refunds.gas_refunded) * U256::from(vm.vm.batch_env.base_fee()); - let operator_balance = get_balance( - AccountTreeId::new(L2_ETH_TOKEN_ADDRESS), - &vm.fee_account, - vm.vm.state.storage.storage.get_ptr(), - ); - - assert_eq!( - operator_balance, expected_fee, - "Operator did not receive his fee" - ); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/gas_limit.rs deleted file mode 100644 index 30a65097111..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/gas_limit.rs +++ /dev/null @@ -1,47 +0,0 @@ -use zksync_types::{fee::Fee, Execute}; - -use crate::{ - interface::{TxExecutionMode, VmInterface}, - vm_boojum_integration::{ - constants::{BOOTLOADER_HEAP_PAGE, TX_DESCRIPTION_OFFSET, TX_GAS_LIMIT_OFFSET}, - tests::tester::VmTesterBuilder, - HistoryDisabled, - }, -}; - -/// Checks that `TX_GAS_LIMIT_OFFSET` constant is correct. -#[test] -fn test_tx_gas_limit_offset() { - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let gas_limit = 9999.into(); - let tx = vm.rich_accounts[0].get_l2_tx_for_execute( - Execute { - contract_address: Default::default(), - calldata: vec![], - value: Default::default(), - factory_deps: None, - }, - Some(Fee { - gas_limit, - ..Default::default() - }), - ); - - vm.vm.push_transaction(tx); - - let gas_limit_from_memory = vm - .vm - .state - .memory - .read_slot( - BOOTLOADER_HEAP_PAGE as usize, - TX_DESCRIPTION_OFFSET + TX_GAS_LIMIT_OFFSET, - ) - .value; - assert_eq!(gas_limit_from_memory, gas_limit); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/get_used_contracts.rs deleted file mode 100644 index 658bcd75b05..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/get_used_contracts.rs +++ /dev/null @@ -1,109 +0,0 @@ -use std::collections::{HashMap, HashSet}; - -use itertools::Itertools; -use crate::interface::storage::WriteStorage; -use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; -use zksync_test_account::Account; -use zksync_types::{Execute, U256}; -use zksync_utils::{bytecode::hash_bytecode, h256_to_u256}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - tests::{ - tester::{TxType, VmTesterBuilder}, - utils::{read_test_contract, BASE_SYSTEM_CONTRACTS}, - }, - HistoryDisabled, Vm, - }, - HistoryMode, -}; - -#[test] -fn test_get_used_contracts() { - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - assert!(known_bytecodes_without_aa_code(&vm.vm).is_empty()); - - // create and push and execute some not-empty factory deps transaction with success status - // to check that get_used_contracts() updates - let contract_code = read_test_contract(); - let mut account = Account::random(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 0 }); - 
vm.vm.push_transaction(tx.tx.clone());
-    let result = vm.vm.execute(VmExecutionMode::OneTx);
-    assert!(!result.result.is_failed());
-
-    assert!(vm
-        .vm
-        .get_used_contracts()
-        .contains(&h256_to_u256(tx.bytecode_hash)));
-
-    // Note: Default_AA will be in the list of used contracts if l2 tx is used
-    assert_eq!(
-        vm.vm
-            .get_used_contracts()
-            .into_iter()
-            .collect::<HashSet<U256>>(),
-        known_bytecodes_without_aa_code(&vm.vm)
-            .keys()
-            .cloned()
-            .collect::<HashSet<U256>>()
-    );
-
-    // create push and execute some non-empty factory deps transaction that fails
-    // (known_bytecodes will be updated but we expect get_used_contracts() to not be updated)
-
-    let calldata = [1, 2, 3];
-    let big_calldata: Vec<u8> = calldata
-        .iter()
-        .cycle()
-        .take(calldata.len() * 1024)
-        .cloned()
-        .collect();
-    let account2 = Account::random();
-    let tx2 = account2.get_l1_tx(
-        Execute {
-            contract_address: CONTRACT_DEPLOYER_ADDRESS,
-            calldata: big_calldata,
-            value: Default::default(),
-            factory_deps: Some(vec![vec![1; 32]]),
-        },
-        1,
-    );
-
-    vm.vm.push_transaction(tx2.clone());
-
-    let res2 = vm.vm.execute(VmExecutionMode::OneTx);
-
-    assert!(res2.result.is_failed());
-
-    for factory_dep in tx2.execute.factory_deps.unwrap() {
-        let hash = hash_bytecode(&factory_dep);
-        let hash_to_u256 = h256_to_u256(hash);
-        assert!(known_bytecodes_without_aa_code(&vm.vm)
-            .keys()
-            .contains(&hash_to_u256));
-        assert!(!vm.vm.get_used_contracts().contains(&hash_to_u256));
-    }
-}
-
-fn known_bytecodes_without_aa_code<S: WriteStorage, H: HistoryMode>(
-    vm: &Vm<S, H>,
-) -> HashMap<U256, Vec<U256>> {
-    let mut known_bytecodes_without_aa_code = vm
-        .state
-        .decommittment_processor
-        .known_bytecodes
-        .inner()
-        .clone();
-
-    known_bytecodes_without_aa_code
-        .remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash))
-        .unwrap();
-
-    known_bytecodes_without_aa_code
-}
diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/invalid_bytecode.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/invalid_bytecode.rs
deleted file mode 100644
index 079e6d61b6c..00000000000
--- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/invalid_bytecode.rs
+++ /dev/null
@@ -1,120 +0,0 @@
-use zksync_types::H256;
-use zksync_utils::h256_to_u256;
-
-use crate::vm_boojum_integration::tests::tester::VmTesterBuilder;
-use crate::vm_boojum_integration::types::inputs::system_env::TxExecutionMode;
-use crate::vm_boojum_integration::{HistoryEnabled, TxRevertReason};
-
-// TODO this test requires a lot of hacks for bypassing the bytecode checks in the VM.
-// Port it later, it's not significant for now.
-
-#[test]
-fn test_invalid_bytecode() {
-    let mut vm_builder = VmTesterBuilder::new(HistoryEnabled)
-        .with_in_memory_storage()
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_random_rich_accounts(1);
-    let mut storage = vm_builder.take_storage();
-    let mut vm = vm_builder.build(&mut storage);
-
-    let block_gas_per_pubdata = vm_test_env
-        .block_context
-        .context
-        .block_gas_price_per_pubdata();
-
-    let mut test_vm_with_custom_bytecode_hash =
-        |bytecode_hash: H256, expected_revert_reason: Option<TxRevertReason>| {
-            let mut oracle_tools =
-                OracleTools::new(vm_test_env.storage_ptr.as_mut(), HistoryEnabled);
-
-            let (encoded_tx, predefined_overhead) = get_l1_tx_with_custom_bytecode_hash(
-                h256_to_u256(bytecode_hash),
-                block_gas_per_pubdata as u32,
-            );
-
-            run_vm_with_custom_factory_deps(
-                &mut oracle_tools,
-                vm_test_env.block_context.context,
-                &vm_test_env.block_properties,
-                encoded_tx,
-                predefined_overhead,
-                expected_revert_reason,
-            );
-        };
-
-    let failed_to_mark_factory_deps = |msg: &str, data: Vec<u8>| {
-        TxRevertReason::FailedToMarkFactoryDependencies(VmRevertReason::General {
-            msg: msg.to_string(),
-            data,
-        })
-    };
-
-    // Here we provide the correctly-formatted bytecode hash of
-    // odd length, so it should work.
-    test_vm_with_custom_bytecode_hash(
-        H256([
-            1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-            0, 0, 0,
-        ]),
-        None,
-    );
-
-    // Here we provide correctly formatted bytecode of even length, so
-    // it should fail.
-    test_vm_with_custom_bytecode_hash(
-        H256([
-            1, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-            0, 0, 0,
-        ]),
-        Some(failed_to_mark_factory_deps(
-            "Code length in words must be odd",
-            vec![
-                8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 67, 111, 100, 101, 32, 108, 101, 110,
-                103, 116, 104, 32, 105, 110, 32, 119, 111, 114, 100, 115, 32, 109, 117, 115, 116,
-                32, 98, 101, 32, 111, 100, 100,
-            ],
-        )),
-    );
-
-    // Here we provide incorrectly formatted bytecode of odd length, so
-    // it should fail.
-    test_vm_with_custom_bytecode_hash(
-        H256([
-            1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-            0, 0, 0,
-        ]),
-        Some(failed_to_mark_factory_deps(
-            "Incorrectly formatted bytecodeHash",
-            vec![
-                8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99,
-                116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116,
-                101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-            ],
-        )),
-    );
-
-    // Here we provide incorrectly formatted bytecode of odd length, so
-    // it should fail.
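// For context on the 32-byte arrays in these cases: the bytecode hash here is versioned
// rather than a raw digest. Under the format these cases exercise, byte 0 is a version
// marker (1), byte 1 must be zero, bytes 2..4 encode the bytecode length in 32-byte
// words big-endian (and that word count must be odd), and the tail is derived from the
// code's sha256. A sketch of a well-formed header, on those assumptions:
//
//     let mut hash = [0u8; 32];
//     hash[0] = 1; // version marker
//     hash[1] = 0; // reserved, must be zero
//     hash[2..4].copy_from_slice(&1u16.to_be_bytes()); // 1 word -> odd, so valid
//     // bytes 4..32 would carry the truncated sha256 of the bytecode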
- test_vm_with_custom_bytecode_hash( - H256([ - 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Incorrectly formatted bytecodeHash", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, - 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, - 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - )), - ); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/is_write_initial.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/is_write_initial.rs deleted file mode 100644 index 67901490edf..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/is_write_initial.rs +++ /dev/null @@ -1,48 +0,0 @@ -use crate::interface::storage::ReadStorage; -use zksync_types::get_nonce_key; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - tests::{ - tester::{Account, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - HistoryDisabled, - }, -}; - -#[test] -fn test_is_write_initial_behaviour() { - // In this test, we check result of `is_write_initial` at different stages. - // The main idea is to check that `is_write_initial` storage uses the correct cache for initial_writes and doesn't - // messed up it with the repeated writes during the one batch execution. - - let mut account = Account::random(); - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let nonce_key = get_nonce_key(&account.address); - // Check that the next write to the nonce key will be initial. - assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); - - let contract_code = read_test_contract(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L2).tx; - - vm.vm.push_transaction(tx); - vm.vm.execute(VmExecutionMode::OneTx); - - // Check that `is_write_initial` still returns true for the nonce key. 
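// Why the assertion below still holds after the deploy tx: the nonce write above landed
// in the batch-local StorageView cache, while `is_write_initial` consults the committed
// storage underneath it, which has never seen this key. Roughly, using the same calls as
// the test itself:
//
//     let still_initial = vm.storage.as_ref().borrow_mut().is_write_initial(&nonce_key);
//     assert!(still_initial); // stays "initial" until the batch state is committed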
- assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/l1_tx_execution.rs deleted file mode 100644 index b547f346d28..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/l1_tx_execution.rs +++ /dev/null @@ -1,139 +0,0 @@ -use zksync_system_constants::BOOTLOADER_ADDRESS; -use zksync_types::{ - get_code_key, get_known_code_key, - l2_to_l1_log::{L2ToL1Log, UserL2ToL1Log}, - storage_writes_deduplicator::StorageWritesDeduplicator, - U256, -}; -use zksync_utils::u256_to_h256; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - tests::{ - tester::{TxType, VmTesterBuilder}, - utils::{read_test_contract, verify_required_storage, BASE_SYSTEM_CONTRACTS}, - }, - types::internals::TransactionData, - HistoryEnabled, - }, -}; - -#[test] -fn test_l1_tx_execution() { - // In this test, we try to execute a contract deployment from L1 - // Here instead of marking code hash via the bootloader means, we will be - // using L1->L2 communication, the same it would likely be done during the priority mode. - - // There are always at least 7 initial writes here, because we pay fees from l1: - // - totalSupply of ETH token - // - balance of the refund recipient - // - balance of the bootloader - // - tx_rolling hash - // - rolling hash of L2->L1 logs - // - transaction number in block counter - // - L2->L1 log counter in L1Messenger - - // TODO(PLA-537): right now we are using 4 slots instead of 7 due to 0 fee for transaction. - let basic_initial_writes = 4; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let contract_code = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let deploy_tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 1 }); - let tx_data: TransactionData = deploy_tx.tx.clone().into(); - - let required_l2_to_l1_logs: Vec<_> = vec![L2ToL1Log { - shard_id: 0, - is_service: true, - tx_number_in_block: 0, - sender: BOOTLOADER_ADDRESS, - key: tx_data.tx_hash(0.into()), - value: u256_to_h256(U256::from(1u32)), - }] - .into_iter() - .map(UserL2ToL1Log) - .collect(); - - vm.vm.push_transaction(deploy_tx.tx.clone()); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&deploy_tx.bytecode_hash); - - // The contract should be deployed successfully. 
- let account_code_key = get_code_key(&deploy_tx.address); - - let expected_slots = vec![ - (u256_to_h256(U256::from(1u32)), known_codes_key), - (deploy_tx.bytecode_hash, account_code_key), - ]; - assert!(!res.result.is_failed()); - - verify_required_storage(&vm.vm.state, expected_slots); - - assert_eq!(res.logs.user_l2_to_l1_logs, required_l2_to_l1_logs); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - true, - None, - false, - TxType::L1 { serial_id: 0 }, - ); - vm.vm.push_transaction(tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - let storage_logs = res.logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - - // Tx panicked - assert_eq!(res.initial_storage_writes - basic_initial_writes, 0); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - false, - None, - false, - TxType::L1 { serial_id: 0 }, - ); - vm.vm.push_transaction(tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); - let storage_logs = res.logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We changed one slot inside contract - assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); - - // No repeated writes - let repeated_writes = res.repeated_storage_writes; - assert_eq!(res.repeated_storage_writes, 0); - - vm.vm.push_transaction(tx); - let storage_logs = vm.vm.execute(VmExecutionMode::OneTx).logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We do the same storage write, it will be deduplicated, so still 4 initial write and 0 repeated - assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); - assert_eq!(res.repeated_storage_writes, repeated_writes); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - false, - Some(10.into()), - false, - TxType::L1 { serial_id: 1 }, - ); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - // Method is not payable tx should fail - assert!(result.result.is_failed(), "The transaction should fail"); - - let res = StorageWritesDeduplicator::apply_on_empty_state(&result.logs.storage_logs); - // There are only basic initial writes - assert_eq!(res.initial_storage_writes - basic_initial_writes, 2); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/l2_blocks.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/l2_blocks.rs deleted file mode 100644 index d637d583c0e..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/l2_blocks.rs +++ /dev/null @@ -1,437 +0,0 @@ -//! -//! Tests for the bootloader -//! The description for each of the tests can be found in the corresponding `.yul` file. -//! 
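// Taken together, the cases in this file pin down the bootloader's L2-block rules: a
// successor block must bump `number` by exactly one, carry a strictly larger
// `timestamp`, and chain `prev_block_hash` to the previous block's hash. A sketch of a
// well-formed successor, mirroring how the tests below build one:
//
//     let next_block = L2BlockEnv {
//         number: prev_block.number + 1,
//         timestamp: prev_block.timestamp + 1,
//         prev_block_hash: vm.vm.bootloader_state.last_l2_block().get_hash(),
//         max_virtual_blocks_to_create: 1,
//     };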
- -use zk_evm_1_4_0::aux_structures::Timestamp; -use crate::interface::storage::WriteStorage; -use zksync_system_constants::REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE; -use zksync_types::{ - block::{pack_block_info, MiniblockHasher}, - AccountTreeId, Execute, ExecuteTransactionCommon, L1BatchNumber, L1TxCommonData, - MiniblockNumber, ProtocolVersionId, StorageKey, Transaction, H160, H256, - SYSTEM_CONTEXT_ADDRESS, SYSTEM_CONTEXT_BLOCK_INFO_POSITION, - SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, - U256, -}; -use zksync_utils::{h256_to_u256, u256_to_h256}; - -use crate::{ - interface::{ExecutionResult, Halt, L2BlockEnv, TxExecutionMode, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - constants::{ - BOOTLOADER_HEAP_PAGE, TX_OPERATOR_L2_BLOCK_INFO_OFFSET, - TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO, - }, - tests::tester::{default_l1_batch, VmTesterBuilder}, - utils::l2_blocks::get_l2_block_hash_key, - HistoryEnabled, Vm, - }, - HistoryMode, -}; - -fn get_l1_noop() -> Transaction { - Transaction { - common_data: ExecuteTransactionCommon::L1(L1TxCommonData { - sender: H160::random(), - gas_limit: U256::from(2000000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute: Execute { - contract_address: H160::zero(), - calldata: vec![], - value: U256::zero(), - factory_deps: None, - }, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -#[test] -fn test_l2_block_initialization_timestamp() { - // This test checks that the L2 block initialization works correctly. - // Here we check that that the first block must have timestamp that is greater or equal to the timestamp - // of the current batch. - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - // Override the timestamp of the current miniblock to be 0. - vm.vm.bootloader_state.push_l2_block(L2BlockEnv { - number: 1, - timestamp: 0, - prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }); - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt {reason: Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())} - ); -} - -#[test] -fn test_l2_block_initialization_number_non_zero() { - // This test checks that the L2 block initialization works correctly. - // Here we check that the first miniblock number can not be zero. 
- - let l1_batch = default_l1_batch(L1BatchNumber(1)); - let first_l2_block = L2BlockEnv { - number: 0, - timestamp: l1_batch.timestamp, - prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 0, first_l2_block, timestamp); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt { - reason: Halt::FailedToSetL2Block( - "L2 block number is never expected to be zero".to_string() - ) - } - ); -} - -fn test_same_l2_block( - expected_error: Option<Halt>, - override_timestamp: Option<u64>, - override_prev_block_hash: Option<H256>, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - vm.vm.push_transaction(l1_tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!res.result.is_failed()); - - let mut current_l2_block = vm.vm.batch_env.first_l2_block; - - if let Some(timestamp) = override_timestamp { - current_l2_block.timestamp = timestamp; - } - if let Some(prev_block_hash) = override_prev_block_hash { - current_l2_block.prev_block_hash = prev_block_hash; - } - - if (None, None) == (override_timestamp, override_prev_block_hash) { - current_l2_block.max_virtual_blocks_to_create = 0; - } - - vm.vm.push_transaction(l1_tx); - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 1, current_l2_block, timestamp); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_same_l2_block() { - // This test covers the case when there are multiple transactions inside the same L2 block.
- - // Case 1: Incorrect timestamp - test_same_l2_block( - Some(Halt::FailedToSetL2Block( - "The timestamp of the same L2 block must be same".to_string(), - )), - Some(0), - None, - ); - - // Case 2: Incorrect previous block hash - test_same_l2_block( - Some(Halt::FailedToSetL2Block( - "The previous hash of the same L2 block must be same".to_string(), - )), - None, - Some(H256::zero()), - ); - - // Case 3: Correct continuation of the same L2 block - test_same_l2_block(None, None, None); -} - -fn test_new_l2_block( - first_l2_block: L2BlockEnv, - overridden_second_block_number: Option<u32>, - overridden_second_block_timestamp: Option<u64>, - overridden_second_block_prev_block_hash: Option<H256>, - expected_error: Option<Halt>, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - l1_batch.first_l2_block = first_l2_block; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - // First, we execute the first transaction - vm.vm.push_transaction(l1_tx.clone()); - vm.vm.execute(VmExecutionMode::OneTx); - - let mut second_l2_block = vm.vm.batch_env.first_l2_block; - second_l2_block.number += 1; - second_l2_block.timestamp += 1; - second_l2_block.prev_block_hash = vm.vm.bootloader_state.last_l2_block().get_hash(); - - if let Some(block_number) = overridden_second_block_number { - second_l2_block.number = block_number; - } - if let Some(timestamp) = overridden_second_block_timestamp { - second_l2_block.timestamp = timestamp; - } - if let Some(prev_block_hash) = overridden_second_block_prev_block_hash { - second_l2_block.prev_block_hash = prev_block_hash; - } - - vm.vm.bootloader_state.push_l2_block(second_l2_block); - - vm.vm.push_transaction(l1_tx); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_new_l2_block() { - // This test covers the validation of a new L2 block created within the same batch - - let correct_first_block = L2BlockEnv { - number: 1, - timestamp: 1, - prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }; - - // Case 1: Block number increasing by more than 1 - test_new_l2_block( - correct_first_block, - Some(3), - None, - None, - Some(Halt::FailedToSetL2Block( - "Invalid new L2 block number".to_string(), - )), - ); - - // Case 2: Timestamp not increasing - test_new_l2_block( - correct_first_block, - None, - Some(1), - None, - Some(Halt::FailedToSetL2Block("The timestamp of the new L2 block must be greater than the timestamp of the previous L2 block".to_string())), - ); - - // Case 3: Incorrect previous block hash - test_new_l2_block( - correct_first_block, - None, - None, - Some(H256::zero()), - Some(Halt::FailedToSetL2Block( - "The current L2 block hash is incorrect".to_string(), - )), - ); - - // Case 4: Correct new block - test_new_l2_block(correct_first_block, None, None, None, None); -} - -#[allow(clippy::too_many_arguments)] -fn test_first_in_batch( - miniblock_timestamp: u64, - miniblock_number: u32, - pending_txs_hash: H256, - batch_timestamp: u64, - new_batch_timestamp: u64, - batch_number: u32, - proposed_block: L2BlockEnv, - expected_error: Option<Halt>, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); -
l1_batch.number += 1; - l1_batch.timestamp = new_batch_timestamp; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_tx = get_l1_noop(); - - // Setting the values provided. - let storage_ptr = vm.vm.state.storage.storage.get_ptr(); - let miniblock_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - ); - let pending_txs_hash_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, - ); - let batch_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_BLOCK_INFO_POSITION, - ); - let prev_block_hash_position = get_l2_block_hash_key(miniblock_number - 1); - - storage_ptr.borrow_mut().set_value( - miniblock_info_slot, - u256_to_h256(pack_block_info( - miniblock_number as u64, - miniblock_timestamp, - )), - ); - storage_ptr - .borrow_mut() - .set_value(pending_txs_hash_slot, pending_txs_hash); - storage_ptr.borrow_mut().set_value( - batch_info_slot, - u256_to_h256(pack_block_info(batch_number as u64, batch_timestamp)), - ); - storage_ptr.borrow_mut().set_value( - prev_block_hash_position, - MiniblockHasher::legacy_hash(MiniblockNumber(miniblock_number - 1)), - ); - - // In order to skip checks from the Rust side of the VM, we firstly use some definitely correct L2 block info. - // And then override it with the user-provided value - - let last_l2_block = vm.vm.bootloader_state.last_l2_block(); - let new_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: last_l2_block.timestamp + 1, - prev_block_hash: last_l2_block.get_hash(), - max_virtual_blocks_to_create: last_l2_block.max_virtual_blocks_to_create, - }; - - vm.vm.bootloader_state.push_l2_block(new_l2_block); - vm.vm.push_transaction(l1_tx); - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 0, proposed_block, timestamp); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_first_in_batch() { - let prev_block_hash = MiniblockHasher::legacy_hash(MiniblockNumber(0)); - let prev_block_hash = MiniblockHasher::new(MiniblockNumber(1), 1, prev_block_hash) - .finalize(ProtocolVersionId::latest()); - test_first_in_batch( - 1, - 1, - H256::zero(), - 1, - 2, - 1, - L2BlockEnv { - number: 2, - timestamp: 2, - prev_block_hash, - max_virtual_blocks_to_create: 1, - }, - None, - ); - - let prev_block_hash = MiniblockHasher::legacy_hash(MiniblockNumber(0)); - let prev_block_hash = MiniblockHasher::new(MiniblockNumber(1), 8, prev_block_hash) - .finalize(ProtocolVersionId::latest()); - test_first_in_batch( - 8, - 1, - H256::zero(), - 5, - 12, - 1, - L2BlockEnv { - number: 2, - timestamp: 9, - prev_block_hash, - max_virtual_blocks_to_create: 1, - }, - Some(Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())), - ); -} - -fn set_manual_l2_block_info( - vm: &mut Vm, - tx_number: usize, - block_info: L2BlockEnv, - timestamp: Timestamp, -) { - let fictive_miniblock_position = - TX_OPERATOR_L2_BLOCK_INFO_OFFSET + 
TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO * tx_number; - - vm.state.memory.populate_page( - BOOTLOADER_HEAP_PAGE as usize, - vec![ - (fictive_miniblock_position, block_info.number.into()), - (fictive_miniblock_position + 1, block_info.timestamp.into()), - ( - fictive_miniblock_position + 2, - h256_to_u256(block_info.prev_block_hash), - ), - ( - fictive_miniblock_position + 3, - block_info.max_virtual_blocks_to_create.into(), - ), - ], - timestamp, - ) -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/mod.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/mod.rs deleted file mode 100644 index 95377232b3e..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/mod.rs +++ /dev/null @@ -1,22 +0,0 @@ -mod bootloader; -mod default_aa; -// TODO - fix this test -// mod invalid_bytecode; -mod bytecode_publishing; -mod call_tracer; -mod circuits; -mod gas_limit; -mod get_used_contracts; -mod is_write_initial; -mod l1_tx_execution; -mod l2_blocks; -mod nonce_holder; -mod precompiles; -mod refunds; -mod require_eip712; -mod rollbacks; -mod simple_execution; -mod tester; -mod tracing_execution_error; -mod upgrade; -mod utils; diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/nonce_holder.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/nonce_holder.rs deleted file mode 100644 index 44ba3e4e323..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/nonce_holder.rs +++ /dev/null @@ -1,188 +0,0 @@ -use zksync_types::{Execute, Nonce}; - -use crate::{ - interface::{ - ExecutionResult, Halt, TxExecutionMode, TxRevertReason, VmExecutionMode, VmInterface, - VmRevertReason, - }, - vm_boojum_integration::{ - tests::{ - tester::{Account, VmTesterBuilder}, - utils::read_nonce_holder_tester, - }, - types::internals::TransactionData, - HistoryEnabled, - }, -}; - -pub enum NonceHolderTestMode { - SetValueUnderNonce, - IncreaseMinNonceBy5, - IncreaseMinNonceTooMuch, - LeaveNonceUnused, - IncreaseMinNonceBy1, - SwitchToArbitraryOrdering, -} - -impl From<NonceHolderTestMode> for u8 { - fn from(mode: NonceHolderTestMode) -> u8 { - match mode { - NonceHolderTestMode::SetValueUnderNonce => 0, - NonceHolderTestMode::IncreaseMinNonceBy5 => 1, - NonceHolderTestMode::IncreaseMinNonceTooMuch => 2, - NonceHolderTestMode::LeaveNonceUnused => 3, - NonceHolderTestMode::IncreaseMinNonceBy1 => 4, - NonceHolderTestMode::SwitchToArbitraryOrdering => 5, - } - } -} - -#[test] -fn test_nonce_holder() { - let mut account = Account::random(); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_custom_contracts(vec![( - read_nonce_holder_tester().to_vec(), - account.address, - true, - )]) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let mut run_nonce_test = |nonce: u32, - test_mode: NonceHolderTestMode, - error_message: Option<String>, - comment: &'static str| { - // In this test we have to reset the VM state after each test case, because once the bootloader fails during transaction validation, - // it will fail again and again. At the same time we have to keep the same storage, because we want to keep the nonce holder contract state. - // The easiest way in terms of lifetimes is to reuse vm_builder to achieve it.
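// (How the mode byte reaches the contract: the closure below serializes the chosen
// `NonceHolderTestMode` into the transaction's signature field via the
// `From<NonceHolderTestMode> for u8` impl above, e.g.
// `transaction_data.signature = vec![NonceHolderTestMode::IncreaseMinNonceBy5.into()];`
// the nonce holder tester contract then reads that byte to pick the behavior to exercise.)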
- vm.reset_state(true); - let mut transaction_data: TransactionData = account - .get_l2_tx_for_execute_with_nonce( - Execute { - contract_address: account.address, - calldata: vec![12], - value: Default::default(), - factory_deps: None, - }, - None, - Nonce(nonce), - ) - .into(); - - transaction_data.signature = vec![test_mode.into()]; - vm.vm.push_raw_transaction(transaction_data, 0, 0, true); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(msg) = error_message { - let expected_error = - TxRevertReason::Halt(Halt::ValidationFailed(VmRevertReason::General { - msg, - data: vec![], - })); - let ExecutionResult::Halt { reason } = result.result else { - panic!("Expected revert, got {:?}", result.result); - }; - assert_eq!( - reason.to_string(), - expected_error.to_string(), - "{}", - comment - ); - } else { - assert!(!result.result.is_failed(), "{}", comment); - } - }; - // Test 1: trying to set a value under a non-sequential nonce. - run_nonce_test( - 1u32, - NonceHolderTestMode::SetValueUnderNonce, - Some("Previous nonce has not been used".to_string()), - "Allowed to set a value under a non-sequential nonce", - ); - - // Test 2: increase min nonce by 1 with sequential nonce ordering: - run_nonce_test( - 0u32, - NonceHolderTestMode::IncreaseMinNonceBy1, - None, - "Failed to increment nonce by 1 for sequential account", - ); - - // Test 3: correctly set a value under a nonce with sequential nonce ordering: - run_nonce_test( - 1u32, - NonceHolderTestMode::SetValueUnderNonce, - None, - "Failed to set a value under a sequential nonce", - ); - - // Test 4: migrate to arbitrary nonce ordering: - run_nonce_test( - 2u32, - NonceHolderTestMode::SwitchToArbitraryOrdering, - None, - "Failed to switch to arbitrary ordering", - ); - - // Test 5: increase min nonce by 5 - run_nonce_test( - 6u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - None, - "Failed to increase min nonce by 5", - ); - - // Test 6: since the nonces in range [6,10] are no longer allowed, the - // tx with nonce 10 should not be allowed - run_nonce_test( - 10u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - Some("Reusing the same nonce twice".to_string()), - "Allowed to reuse nonce below the minimal one", - ); - - // Test 7: we should be able to use nonce 13 - run_nonce_test( - 13u32, - NonceHolderTestMode::SetValueUnderNonce, - None, - "Did not allow to use unused nonce 13", - ); - - // Test 8: we should not be able to reuse nonce 13 - run_nonce_test( - 13u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - Some("Reusing the same nonce twice".to_string()), - "Allowed to reuse the same nonce twice", - ); - - // Test 9: we should be able to simply use nonce 14, while bumping the minimal nonce by 5 - run_nonce_test( - 14u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - None, - "Did not allow to use a bumped nonce", - ); - - // Test 10: do not allow bumping the nonce by too much - run_nonce_test( - 16u32, - NonceHolderTestMode::IncreaseMinNonceTooMuch, - Some("The value for incrementing the nonce is too high".to_string()), - "Allowed incrementing min nonce by too much", - ); - - // Test 11: do not allow leaving a nonce unset as used - run_nonce_test( - 16u32, - NonceHolderTestMode::LeaveNonceUnused, - Some("The nonce was not set as used".to_string()), - "Allowed to leave nonce as unused", - ); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/precompiles.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/precompiles.rs deleted file mode 100644 index 516331d574f..00000000000 ---
a/core/lib/multivm/src/versions/vm_boojum_integration/tests/precompiles.rs +++ /dev/null @@ -1,136 +0,0 @@ -use zk_evm_1_4_0::zk_evm_abstractions::precompiles::PrecompileAddress; -use zksync_types::{Address, Execute}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - constants::BLOCK_GAS_LIMIT, - tests::{tester::VmTesterBuilder, utils::read_precompiles_contract}, - HistoryEnabled, - }, -}; - -#[test] -fn test_keccak() { - // Execute special transaction and check that at least 1000 keccak calls were made. - let contract = read_precompiles_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contract, address, true)]) - .build(); - - // calldata for `doKeccak(1000)`. - let keccak1000_calldata = - "370f20ac00000000000000000000000000000000000000000000000000000000000003e8"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: hex::decode(keccak1000_calldata).unwrap(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let keccak_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::Keccak256) - .count(); - - assert!(keccak_count >= 1000); -} - -#[test] -fn test_sha256() { - // Execute special transaction and check that at least 1000 sha256 calls were made. - let contract = read_precompiles_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contract, address, true)]) - .build(); - - // calldata for `doSha256(1000)`. - let sha1000_calldata = - "5d0b4fb500000000000000000000000000000000000000000000000000000000000003e8"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: hex::decode(sha1000_calldata).unwrap(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let sha_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::SHA256) - .count(); - - assert!(sha_count >= 1000); -} - -#[test] -fn test_ecrecover() { - // Execute simple transfer and check that exactly 1 ecrecover call was made (it's done during tx validation). 
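// (A note on the hard-coded calldata strings in the two tests above: both are plain
// Solidity ABI encodings — a 4-byte selector followed by the uint256 argument,
// left-padded to 32 bytes. A minimal sketch of deriving them with the `ethabi` crate
// these tests already use elsewhere; the helper name is illustrative:)
fn precompile_loop_calldata(fn_name: &str, iterations: u64) -> Vec<u8> {
    let mut calldata =
        ethabi::short_signature(fn_name, &[ethabi::ParamType::Uint(256)]).to_vec();
    calldata.extend_from_slice(&ethabi::encode(&[ethabi::Token::Uint(iterations.into())]));
    calldata
}
// e.g. `precompile_loop_calldata("doKeccak", 1000)` should reproduce `keccak1000_calldata`.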
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: account.address, - calldata: Vec::new(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); - - let ecrecover_count = vm - .vm - .state - .precompiles_processor - .precompile_cycles_history - .inner() - .iter() - .filter(|(precompile, _)| precompile == &PrecompileAddress::Ecrecover) - .count(); - - assert_eq!(ecrecover_count, 1); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/refunds.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/refunds.rs deleted file mode 100644 index 521bd81f2ef..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/refunds.rs +++ /dev/null @@ -1,167 +0,0 @@ -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - tests::{ - tester::{DeployContractsTx, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - types::internals::TransactionData, - HistoryEnabled, - }, -}; - -#[test] -fn test_predetermined_refunded_gas() { - // In this test, we compare the execution of the bootloader with and without - // predefined refunded gas - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_batch = vm.vm.batch_env.clone(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let DeployContractsTx { - tx, - bytecode_hash: _, - address: _, - } = account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(!result.result.is_failed()); - - // If the refund provided by the operator or the final refund is 0, - // the operator's refund has no impact at all, and this test does not - // make much sense. - assert!( - result.refunds.operator_suggested_refund > 0, - "The operator's refund is 0" - ); - assert!(result.refunds.gas_refunded > 0, "The final refund is 0"); - - let result_without_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_without_predefined_refunds = vm.vm.get_current_execution_state(); - assert!(!result_without_predefined_refunds.result.is_failed()); - - // Here we want to provide the same refund from the operator and check that it's the correct one. - // We execute the whole block without the refund tracer, because the refund tracer would eventually override the provided refund.
- // But the overall result should be the same - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let tx: TransactionData = tx.into(); - let block_gas_per_pubdata_byte = vm.vm.batch_env.block_gas_price_per_pubdata(); - // Overhead - let overhead = tx.overhead_gas(block_gas_per_pubdata_byte as u32); - vm.vm - .push_raw_transaction(tx.clone(), overhead, result.refunds.gas_refunded, true); - - let result_with_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_with_predefined_refunds = vm.vm.get_current_execution_state(); - - assert!(!result_with_predefined_refunds.result.is_failed()); - - // We need to sort these lists as those are flattened from HashMaps - current_state_with_predefined_refunds - .used_contract_hashes - .sort(); - current_state_without_predefined_refunds - .used_contract_hashes - .sort(); - - assert_eq!( - current_state_with_predefined_refunds.events, - current_state_without_predefined_refunds.events - ); - - assert_eq!( - current_state_with_predefined_refunds.user_l2_to_l1_logs, - current_state_without_predefined_refunds.user_l2_to_l1_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.system_logs, - current_state_without_predefined_refunds.system_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.storage_log_queries, - current_state_without_predefined_refunds.storage_log_queries - ); - assert_eq!( - current_state_with_predefined_refunds.used_contract_hashes, - current_state_without_predefined_refunds.used_contract_hashes - ); - - // In this test we put the different refund from the operator. - // We still can't use the refund tracer, because it will override the refund. - // But we can check that the logs and events have changed. 
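// (Restated compactly: the assertions below expect a changed operator refund to alter
// the *contents* of events, system logs, and storage log queries — refund values leak
// into them — while user L2->L1 logs, used contract hashes (after sorting), and the
// *counts* of events and storage log queries stay the same. An illustrative helper,
// not part of the tester API, capturing that invariant:)
fn assert_only_refund_divergence(a: &CurrentExecutionState, b: &CurrentExecutionState) {
    assert_eq!(a.events.len(), b.events.len());
    assert_ne!(a.events, b.events);
    assert_eq!(a.user_l2_to_l1_logs, b.user_l2_to_l1_logs);
    assert_ne!(a.system_logs, b.system_logs);
    assert_eq!(a.storage_log_queries.len(), b.storage_log_queries.len());
    assert_ne!(a.storage_log_queries, b.storage_log_queries);
    assert_eq!(a.used_contract_hashes, b.used_contract_hashes);
}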
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let changed_operator_suggested_refund = result.refunds.gas_refunded + 1000; - vm.vm - .push_raw_transaction(tx, overhead, changed_operator_suggested_refund, true); - let result = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_with_changed_predefined_refunds = vm.vm.get_current_execution_state(); - - assert!(!result.result.is_failed()); - current_state_with_changed_predefined_refunds - .used_contract_hashes - .sort(); - current_state_without_predefined_refunds - .used_contract_hashes - .sort(); - - assert_eq!( - current_state_with_changed_predefined_refunds.events.len(), - current_state_without_predefined_refunds.events.len() - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.events, - current_state_without_predefined_refunds.events - ); - - assert_eq!( - current_state_with_changed_predefined_refunds.user_l2_to_l1_logs, - current_state_without_predefined_refunds.user_l2_to_l1_logs - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.system_logs, - current_state_without_predefined_refunds.system_logs - ); - - assert_eq!( - current_state_with_changed_predefined_refunds - .storage_log_queries - .len(), - current_state_without_predefined_refunds - .storage_log_queries - .len() - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.storage_log_queries, - current_state_without_predefined_refunds.storage_log_queries - ); - assert_eq!( - current_state_with_changed_predefined_refunds.used_contract_hashes, - current_state_without_predefined_refunds.used_contract_hashes - ); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/require_eip712.rs deleted file mode 100644 index 90c3206b24b..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/require_eip712.rs +++ /dev/null @@ -1,165 +0,0 @@ -use std::convert::TryInto; - -use ethabi::Token; -use zksync_eth_signer::{raw_ethereum_tx::TransactionParameters, EthereumSigner}; -use zksync_system_constants::L2_ETH_TOKEN_ADDRESS; -use zksync_types::{ - fee::Fee, l2::L2Tx, transaction_request::TransactionRequest, - utils::storage_key_for_standard_token_balance, AccountTreeId, Address, Eip712Domain, Execute, - L2ChainId, Nonce, Transaction, U256, -}; - -use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - tests::{ - tester::{Account, VmTester, VmTesterBuilder}, - utils::read_many_owners_custom_account_contract, - }, - HistoryDisabled, - }, -}; - -impl VmTester { - pub(crate) fn get_eth_balance(&mut self, address: Address) -> U256 { - let key = storage_key_for_standard_token_balance( - AccountTreeId::new(L2_ETH_TOKEN_ADDRESS), - &address, - ); - self.vm.state.storage.storage.read_from_storage(&key) - } -} - -// TODO refactor this test it use too much internal details of the VM -#[tokio::test] -/// This test deploys 'buggy' account abstraction code, and then tries accessing it both with legacy -/// and EIP712 transactions. -/// Currently we support both, but in the future, we should allow only EIP712 transactions to access the AA accounts. 
-async fn test_require_eip712() { - // Use 3 accounts: - // - private_address - EOA account, where we have the key - // - account_address - AA account, where the contract is deployed - // - beneficiary - an EOA account, where we'll try to transfer the tokens. - let account_abstraction = Account::random(); - let mut private_account = Account::random(); - let beneficiary = Account::random(); - - let (bytecode, contract) = read_many_owners_custom_account_contract(); - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_custom_contracts(vec![(bytecode, account_abstraction.address, true)]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account_abstraction.clone(), private_account.clone()]) - .build(); - - assert_eq!(vm.get_eth_balance(beneficiary.address), U256::from(0)); - - let chain_id: u32 = 270; - - // First, let's set the owners of the AA account to the private_address. - // (so that messages signed by private_address, are authorized to act on behalf of the AA account). - let set_owners_function = contract.function("setOwners").unwrap(); - let encoded_input = set_owners_function - .encode_input(&[Token::Array(vec![Token::Address(private_account.address)])]) - .unwrap(); - - let tx = private_account.get_l2_tx_for_execute( - Execute { - contract_address: account_abstraction.address, - calldata: encoded_input, - value: Default::default(), - factory_deps: None, - }, - None, - ); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - let private_account_balance = vm.get_eth_balance(private_account.address); - - // And now let's do the transfer from the 'account abstraction' to 'beneficiary' (using 'legacy' transaction). - // Normally this would not work - unless the operator is malicious. - let aa_raw_tx = TransactionParameters { - nonce: U256::from(0), - to: Some(beneficiary.address), - gas: U256::from(100000000), - gas_price: Some(U256::from(10000000)), - value: U256::from(888000088), - data: vec![], - chain_id: 270, - transaction_type: None, - access_list: None, - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - }; - - let aa_tx = private_account.sign_legacy_tx(aa_raw_tx).await; - let (tx_request, hash) = TransactionRequest::from_bytes(&aa_tx, L2ChainId::from(270)).unwrap(); - - let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000).unwrap(); - l2_tx.set_input(aa_tx, hash); - // Pretend that operator is malicious and sets the initiator to the AA account. - l2_tx.common_data.initiator_address = account_abstraction.address; - let transaction: Transaction = l2_tx.try_into().unwrap(); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(888000088) - ); - // Make sure that the tokens were transferred from the AA account. 
- assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); - - // // Now send the 'classic' EIP712 transaction - let tx_712 = L2Tx::new( - beneficiary.address, - vec![], - Nonce(1), - Fee { - gas_limit: U256::from(1000000000), - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - gas_per_pubdata_limit: U256::from(1000000000), - }, - account_abstraction.address, - U256::from(28374938), - None, - Default::default(), - ); - - let transaction_request: TransactionRequest = tx_712.into(); - - let domain = Eip712Domain::new(L2ChainId::from(chain_id)); - let signature = private_account - .get_pk_signer() - .sign_typed_data(&domain, &transaction_request) - .await - .unwrap(); - let encoded_tx = transaction_request.get_signed_bytes(&signature, L2ChainId::from(chain_id)); - - let (aa_txn_request, aa_hash) = - TransactionRequest::from_bytes(&encoded_tx, L2ChainId::from(chain_id)).unwrap(); - - let mut l2_tx = L2Tx::from_request(aa_txn_request, 100000).unwrap(); - l2_tx.set_input(encoded_tx, aa_hash); - - let transaction: Transaction = l2_tx.try_into().unwrap(); - vm.vm.push_transaction(transaction); - vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(916375026) - ); - assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/rollbacks.rs deleted file mode 100644 index cfaf1952c70..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/rollbacks.rs +++ /dev/null @@ -1,263 +0,0 @@ -use ethabi::Token; -use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams}; -use crate::interface::storage::WriteStorage; -use zksync_types::{get_nonce_key, Execute, U256}; - -use crate::{ - interface::{ - dyn_tracers::vm_1_4_0::DynTracer, - tracer::{TracerExecutionStatus, TracerExecutionStopReason}, - TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceHistoryEnabled, - }, - vm_boojum_integration::{ - tests::{ - tester::{DeployContractsTx, TransactionTestInfo, TxModifier, TxType, VmTesterBuilder}, - utils::read_test_contract, - }, - types::internals::ZkSyncVmState, - BootloaderState, HistoryEnabled, HistoryMode, SimpleMemory, ToTracerPointer, VmTracer, - }, -}; - -#[test] -fn test_vm_rollbacks() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let mut account = vm.rich_accounts[0].clone(); - let counter = read_test_contract(); - let tx_0 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_1 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_2 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(tx_0.clone(), false), - TransactionTestInfo::new_processed(tx_1.clone(), false), - TransactionTestInfo::new_processed(tx_2.clone(), false), - ]); - - // reset vm - vm.reset_with_empty_storage(); - - let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignatureLength.into()), - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongMagicValue.into()), - TransactionTestInfo::new_rejected(tx_0.clone(), 
TxModifier::WrongSignature.into()), - // The correct nonce is 0, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_0.clone(), false), - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_1, false), - // The correct nonce is 2, this tx will fail - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_2.clone(), false), - // This tx will fail - TransactionTestInfo::new_rejected(tx_2, TxModifier::NonceReused.into()), - TransactionTestInfo::new_rejected(tx_0, TxModifier::NonceReused.into()), - ]); - - assert_eq!(result_without_rollbacks, result_with_rollbacks); -} - -#[test] -fn test_vm_loadnext_rollbacks() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let mut account = vm.rich_accounts[0].clone(); - - let loadnext_contract = get_loadnext_contract(); - let loadnext_constructor_data = &[Token::Uint(U256::from(100))]; - let DeployContractsTx { - tx: loadnext_deploy_tx, - address, - .. - } = account.get_deploy_tx_with_factory_deps( - &loadnext_contract.bytecode, - Some(loadnext_constructor_data), - loadnext_contract.factory_deps.clone(), - TxType::L2, - ); - - let loadnext_tx_1 = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let loadnext_tx_2 = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_1.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), false), - ]); - - // reset vm - vm.reset_with_empty_storage(); - - let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_1.clone(), true), - TransactionTestInfo::new_rejected( - loadnext_deploy_tx.clone(), - TxModifier::NonceReused.into(), - ), - TransactionTestInfo::new_processed(loadnext_tx_1, false), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_rejected(loadnext_deploy_tx, TxModifier::NonceReused.into()), - TransactionTestInfo::new_processed(loadnext_tx_2, false), - ]); - - assert_eq!(result_without_rollbacks, result_with_rollbacks); -} - -// Testing tracer that does not allow the recursion to go deeper than a certain limit -struct MaxRecursionTracer { - max_recursion_depth: usize, -} 
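// (The struct above plugs into the tracer extension point exercised by `vm.inspect`
// further below: leave the `DynTracer` hooks at their defaults and drive execution
// control from `VmTracer::finish_cycle`. A minimal sibling sketch with the same trait
// bounds — purely illustrative, it counts cycles instead of bounding recursion:)
struct CycleCountingTracer {
    cycles: usize,
}

impl<S: WriteStorage, H: HistoryMode> DynTracer<S, SimpleMemory<H>> for CycleCountingTracer {}

impl<S: WriteStorage, H: HistoryMode> VmTracer<S, H> for CycleCountingTracer {
    fn finish_cycle(
        &mut self,
        _state: &mut ZkSyncVmState<S, H>,
        _bootloader_state: &mut BootloaderState,
    ) -> TracerExecutionStatus {
        self.cycles += 1;
        TracerExecutionStatus::Continue
    }
}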
- -/// Tracer that stops VM execution once the call stack goes deeper than -/// `max_recursion_depth`. -impl<S: WriteStorage, H: HistoryMode> DynTracer<S, SimpleMemory<H>> for MaxRecursionTracer {} - -impl<S: WriteStorage, H: HistoryMode> VmTracer<S, H> for MaxRecursionTracer { - fn finish_cycle( - &mut self, - state: &mut ZkSyncVmState<S, H>, - _bootloader_state: &mut BootloaderState, - ) -> TracerExecutionStatus { - let current_depth = state.local_state.callstack.depth(); - - if current_depth > self.max_recursion_depth { - TracerExecutionStatus::Stop(TracerExecutionStopReason::Finish) - } else { - TracerExecutionStatus::Continue - } - } -} - -#[test] -fn test_layered_rollback() { - // This test checks that the layered rollbacks work correctly, i.e. - // the rollback by the operator will always revert all the changes - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - let loadnext_contract = get_loadnext_contract().bytecode; - - let DeployContractsTx { - tx: deploy_tx, - address, - .. - } = account.get_deploy_tx( - &loadnext_contract, - Some(&[Token::Uint(0.into())]), - TxType::L2, - ); - vm.vm.push_transaction(deploy_tx); - let deployment_res = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!deployment_res.result.is_failed(), "transaction failed"); - - let loadnext_transaction = account.get_loadnext_transaction( - address, - LoadnextContractExecutionParams { - writes: 1, - recursive_calls: 20, - ..LoadnextContractExecutionParams::empty() - }, - TxType::L2, - ); - - let nonce_val = vm - .vm - .state - .storage - .storage - .read_from_storage(&get_nonce_key(&account.address)); - - vm.vm.make_snapshot(); - - vm.vm.push_transaction(loadnext_transaction.clone()); - vm.vm.inspect( - MaxRecursionTracer { - max_recursion_depth: 15, - } - .into_tracer_pointer() - .into(), - VmExecutionMode::OneTx, - ); - - let nonce_val2 = vm - .vm - .state - .storage - .storage - .read_from_storage(&get_nonce_key(&account.address)); - - // The tracer stopped after the validation has passed, so the nonce has already been increased - assert_eq!(nonce_val + U256::one(), nonce_val2, "nonce did not change"); - - vm.vm.rollback_to_the_latest_snapshot(); - - let nonce_val_after_rollback = vm - .vm - .state - .storage - .storage - .read_from_storage(&get_nonce_key(&account.address)); - - assert_eq!( - nonce_val, nonce_val_after_rollback, - "nonce changed after rollback" - ); - - vm.vm.push_transaction(loadnext_transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "transaction must not fail"); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/simple_execution.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/simple_execution.rs deleted file mode 100644 index f6b1d83e02a..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/simple_execution.rs +++ /dev/null @@ -1,81 +0,0 @@ -use crate::{ - interface::{ExecutionResult, VmExecutionMode, VmInterface}, - vm_boojum_integration::{ - tests::tester::{TxType, VmTesterBuilder}, - HistoryDisabled, - }, -}; - -#[test] -fn estimate_fee() { - let mut vm_tester = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - vm_tester.deploy_test_contract(); - let account = &mut vm_tester.rich_accounts[0]; - - let tx = account.get_test_contract_transaction( -
vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L2, - ); - - vm_tester.vm.push_transaction(tx); - - let result = vm_tester.vm.execute(VmExecutionMode::OneTx); - assert_matches!(result.result, ExecutionResult::Success { .. }); -} - -#[test] -fn simple_execute() { - let mut vm_tester = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - vm_tester.deploy_test_contract(); - - let account = &mut vm_tester.rich_accounts[0]; - - let tx1 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - - let tx2 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - true, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - - let tx3 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - let vm = &mut vm_tester.vm; - vm.push_transaction(tx1); - vm.push_transaction(tx2); - vm.push_transaction(tx3); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Success { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Revert { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Success { .. }); - let block_tip = vm.execute(VmExecutionMode::Batch); - assert_matches!(block_tip.result, ExecutionResult::Success { .. }); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/inner_state.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/inner_state.rs deleted file mode 100644 index 078a971e4bf..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/inner_state.rs +++ /dev/null @@ -1,130 +0,0 @@ -use std::collections::HashMap; - -use zk_evm_1_4_0::{aux_structures::Timestamp, vm_state::VmLocalState}; -use crate::interface::storage::WriteStorage; -use zksync_types::{StorageKey, StorageLogQuery, StorageValue, U256}; - -use crate::{ - vm_boojum_integration::{ - old_vm::{ - event_sink::InMemoryEventSink, - history_recorder::{AppDataFrameManagerWithHistory, HistoryRecorder}, - }, - HistoryEnabled, HistoryMode, SimpleMemory, Vm, - }, - HistoryMode as CommonHistoryMode, -}; - -#[derive(Clone, Debug)] -pub(crate) struct ModifiedKeysMap(HashMap); - -// We consider hashmaps to be equal even if there is a key -// that is not present in one but has zero value in another. -impl PartialEq for ModifiedKeysMap { - fn eq(&self, other: &Self) -> bool { - for (key, value) in self.0.iter() { - if *value != other.0.get(key).cloned().unwrap_or_default() { - return false; - } - } - for (key, value) in other.0.iter() { - if *value != self.0.get(key).cloned().unwrap_or_default() { - return false; - } - } - true - } -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct DecommitterTestInnerState { - /// There is no way to "truly" compare the storage pointer, - /// so we just compare the modified keys. This is reasonable enough. 
- pub(crate) modified_storage_keys: ModifiedKeysMap, - pub(crate) known_bytecodes: HistoryRecorder>, H>, - pub(crate) decommitted_code_hashes: HistoryRecorder, HistoryEnabled>, -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct StorageOracleInnerState { - /// There is no way to "truly" compare the storage pointer, - /// so we just compare the modified keys. This is reasonable enough. - pub(crate) modified_storage_keys: ModifiedKeysMap, - - pub(crate) frames_stack: AppDataFrameManagerWithHistory, H>, - - pub(crate) pre_paid_changes: HistoryRecorder, H>, - pub(crate) paid_changes: HistoryRecorder, H>, - pub(crate) initial_values: HistoryRecorder, H>, - pub(crate) returned_refunds: HistoryRecorder, H>, -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct PrecompileProcessorTestInnerState { - pub(crate) timestamp_history: HistoryRecorder, H>, -} - -/// A struct that encapsulates the state of the VM's oracles -/// The state is to be used in tests. -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct VmInstanceInnerState { - event_sink: InMemoryEventSink, - precompile_processor_state: PrecompileProcessorTestInnerState, - memory: SimpleMemory, - decommitter_state: DecommitterTestInnerState, - storage_oracle_state: StorageOracleInnerState, - local_state: VmLocalState, -} - -impl Vm { - // Dump inner state of the VM. - pub(crate) fn dump_inner_state(&self) -> VmInstanceInnerState { - let event_sink = self.state.event_sink.clone(); - let precompile_processor_state = PrecompileProcessorTestInnerState { - timestamp_history: self.state.precompiles_processor.timestamp_history.clone(), - }; - let memory = self.state.memory.clone(); - let decommitter_state = DecommitterTestInnerState { - modified_storage_keys: ModifiedKeysMap( - self.state - .decommittment_processor - .get_storage() - .borrow() - .modified_storage_keys() - .clone(), - ), - known_bytecodes: self.state.decommittment_processor.known_bytecodes.clone(), - decommitted_code_hashes: self - .state - .decommittment_processor - .get_decommitted_code_hashes_with_history() - .clone(), - }; - let storage_oracle_state = StorageOracleInnerState { - modified_storage_keys: ModifiedKeysMap( - self.state - .storage - .storage - .get_ptr() - .borrow() - .modified_storage_keys() - .clone(), - ), - frames_stack: self.state.storage.frames_stack.clone(), - pre_paid_changes: self.state.storage.pre_paid_changes.clone(), - paid_changes: self.state.storage.paid_changes.clone(), - initial_values: self.state.storage.initial_values.clone(), - returned_refunds: self.state.storage.returned_refunds.clone(), - }; - let local_state = self.state.local_state.clone(); - - VmInstanceInnerState { - event_sink, - precompile_processor_state, - memory, - decommitter_state, - storage_oracle_state, - local_state, - } - } -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/mod.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/mod.rs deleted file mode 100644 index dfe8905a7e0..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/mod.rs +++ /dev/null @@ -1,7 +0,0 @@ -pub(crate) use transaction_test_info::{ExpectedError, TransactionTestInfo, TxModifier}; -pub(crate) use vm_tester::{default_l1_batch, InMemoryStorageView, VmTester, VmTesterBuilder}; -pub(crate) use zksync_test_account::{Account, DeployContractsTx, TxType}; - -mod inner_state; -mod transaction_test_info; -mod vm_tester; diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/transaction_test_info.rs 
b/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/transaction_test_info.rs deleted file mode 100644 index 4d6572fe78a..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/transaction_test_info.rs +++ /dev/null @@ -1,217 +0,0 @@ -use zksync_types::{ExecuteTransactionCommon, Transaction}; - -use crate::{ - interface::{ - CurrentExecutionState, ExecutionResult, Halt, TxRevertReason, VmExecutionMode, - VmExecutionResultAndLogs, VmInterface, VmInterfaceHistoryEnabled, VmRevertReason, - }, - vm_boojum_integration::{tests::tester::vm_tester::VmTester, HistoryEnabled}, -}; - -#[derive(Debug, Clone)] -pub(crate) enum TxModifier { - WrongSignatureLength, - WrongSignature, - WrongMagicValue, - WrongNonce, - NonceReused, -} - -#[derive(Debug, Clone)] -pub(crate) enum TxExpectedResult { - Rejected { error: ExpectedError }, - Processed { rollback: bool }, -} - -#[derive(Debug, Clone)] -pub(crate) struct TransactionTestInfo { - tx: Transaction, - result: TxExpectedResult, -} - -#[derive(Debug, Clone)] -pub(crate) struct ExpectedError { - pub(crate) revert_reason: TxRevertReason, - pub(crate) modifier: Option, -} - -impl From for ExpectedError { - fn from(value: TxModifier) -> Self { - let revert_reason = match value { - TxModifier::WrongSignatureLength => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Signature length is incorrect".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 29, 83, 105, 103, 110, 97, 116, 117, 114, 101, 32, - 108, 101, 110, 103, 116, 104, 32, 105, 115, 32, 105, 110, 99, 111, 114, 114, 101, 99, - 116, 0, 0, 0, - ], - }) - } - TxModifier::WrongSignature => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Account validation returned invalid magic value. 
Most often this means that the signature is incorrect".to_string(), - data: vec![], - }) - } - TxModifier::WrongMagicValue => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "v is neither 27 nor 28".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 22, 118, 32, 105, 115, 32, 110, 101, 105, 116, 104, - 101, 114, 32, 50, 55, 32, 110, 111, 114, 32, 50, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - }) - - } - TxModifier::WrongNonce => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Incorrect nonce".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 73, 110, 99, 111, 114, 114, 101, 99, 116, 32, 110, - 111, 110, 99, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - }) - } - TxModifier::NonceReused => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Reusing the same nonce twice".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 82, 101, 117, 115, 105, 110, 103, 32, 116, 104, - 101, 32, 115, 97, 109, 101, 32, 110, 111, 110, 99, 101, 32, 116, 119, 105, 99, 101, 0, - 0, 0, 0, - ], - }) - } - }; - - ExpectedError { - revert_reason: TxRevertReason::Halt(revert_reason), - modifier: Some(value), - } - } -} - -impl TransactionTestInfo { - pub(crate) fn new_rejected( - mut transaction: Transaction, - expected_error: ExpectedError, - ) -> Self { - transaction.common_data = match transaction.common_data { - ExecuteTransactionCommon::L2(mut data) => { - if let Some(modifier) = &expected_error.modifier { - match modifier { - TxModifier::WrongSignatureLength => { - data.signature = data.signature[..data.signature.len() - 20].to_vec() - } - TxModifier::WrongSignature => data.signature = vec![27u8; 65], - TxModifier::WrongMagicValue => data.signature = vec![1u8; 65], - TxModifier::WrongNonce => { - // Do not need to modify signature for nonce error - } - TxModifier::NonceReused => { - // Do not need to modify signature for nonce error - } - } - } - ExecuteTransactionCommon::L2(data) - } - _ => panic!("L1 transactions are not supported"), - }; - - Self { - tx: transaction, - result: TxExpectedResult::Rejected { - error: expected_error, - }, - } - } - - pub(crate) fn new_processed(transaction: Transaction, should_be_rollbacked: bool) -> Self { - Self { - tx: transaction, - result: TxExpectedResult::Processed { - rollback: should_be_rollbacked, - }, - } - } - - fn verify_result(&self, result: &VmExecutionResultAndLogs) { - match &self.result { - TxExpectedResult::Rejected { error } => match &result.result { - ExecutionResult::Success { .. 
} => { - panic!("Transaction should be reverted {:?}", self.tx.nonce()) - } - ExecutionResult::Revert { output } => match &error.revert_reason { - TxRevertReason::TxReverted(expected) => { - assert_eq!(output, expected) - } - _ => { - panic!("Error types mismatch"); - } - }, - ExecutionResult::Halt { reason } => match &error.revert_reason { - TxRevertReason::Halt(expected) => { - assert_eq!(reason, expected) - } - _ => { - panic!("Error types mismatch"); - } - }, - }, - TxExpectedResult::Processed { .. } => { - assert!(!result.result.is_failed()); - } - } - } - - fn should_rollback(&self) -> bool { - match &self.result { - TxExpectedResult::Rejected { .. } => true, - TxExpectedResult::Processed { rollback } => *rollback, - } - } -} - -impl VmTester { - pub(crate) fn execute_and_verify_txs( - &mut self, - txs: &[TransactionTestInfo], - ) -> CurrentExecutionState { - for tx_test_info in txs { - self.execute_tx_and_verify(tx_test_info.clone()); - } - self.vm.execute(VmExecutionMode::Batch); - let mut state = self.vm.get_current_execution_state(); - state.used_contract_hashes.sort(); - state - } - - pub(crate) fn execute_tx_and_verify( - &mut self, - tx_test_info: TransactionTestInfo, - ) -> VmExecutionResultAndLogs { - let inner_state_before = self.vm.dump_inner_state(); - self.vm.make_snapshot(); - self.vm.push_transaction(tx_test_info.tx.clone()); - let result = self.vm.execute(VmExecutionMode::OneTx); - tx_test_info.verify_result(&result); - if tx_test_info.should_rollback() { - self.vm.rollback_to_the_latest_snapshot(); - let inner_state_after = self.vm.dump_inner_state(); - assert_eq!( - inner_state_before, inner_state_after, - "Inner state before and after rollback should be equal" - ); - } - result - } -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/vm_tester.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/vm_tester.rs deleted file mode 100644 index fcea03e12cc..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/tester/vm_tester.rs +++ /dev/null @@ -1,295 +0,0 @@ -use std::marker::PhantomData; - -use zksync_contracts::BaseSystemContracts; -use crate::interface::storage::{InMemoryStorage, StoragePtr, StorageView, WriteStorage}; -use zksync_types::{ - block::MiniblockHasher, - get_code_key, get_is_account_key, - helpers::unix_timestamp_ms, - utils::{deployed_address_create, storage_key_for_eth_balance}, - Address, L1BatchNumber, L2ChainId, MiniblockNumber, Nonce, ProtocolVersionId, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; - -use crate::{ - interface::{ - L1BatchEnv, L2Block, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmInterface, - }, - vm_boojum_integration::{ - constants::BLOCK_GAS_LIMIT, - tests::{ - tester::{Account, TxType}, - utils::read_test_contract, - }, - utils::l2_blocks::load_last_l2_block, - Vm, - }, - HistoryMode, -}; - -pub(crate) type InMemoryStorageView = StorageView; - -pub(crate) struct VmTester { - pub(crate) vm: Vm, - pub(crate) storage: StoragePtr, - pub(crate) fee_account: Address, - pub(crate) deployer: Option, - pub(crate) test_contract: Option
, - pub(crate) rich_accounts: Vec, - pub(crate) custom_contracts: Vec, - _phantom: std::marker::PhantomData, -} - -impl VmTester { - pub(crate) fn deploy_test_contract(&mut self) { - let contract = read_test_contract(); - let tx = self - .deployer - .as_mut() - .expect("You have to initialize builder with deployer") - .get_deploy_tx(&contract, None, TxType::L2) - .tx; - let nonce = tx.nonce().unwrap().0.into(); - self.vm.push_transaction(tx); - self.vm.execute(VmExecutionMode::OneTx); - let deployed_address = - deployed_address_create(self.deployer.as_ref().unwrap().address, nonce); - self.test_contract = Some(deployed_address); - } - - pub(crate) fn reset_with_empty_storage(&mut self) { - self.storage = StorageView::new(get_empty_storage()).to_rc_ptr(); - self.reset_state(false); - } - - /// Reset the state of the VM to the initial state. - /// If `use_latest_l2_block` is true, then the VM will use the latest L2 block from storage, - /// otherwise it will use the first L2 block of l1 batch env - pub(crate) fn reset_state(&mut self, use_latest_l2_block: bool) { - for account in self.rich_accounts.iter_mut() { - account.nonce = Nonce(0); - make_account_rich(self.storage.clone(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(self.storage.clone(), deployer); - } - - if !self.custom_contracts.is_empty() { - println!("Inserting custom contracts is not yet supported") - // insert_contracts(&mut self.storage, &self.custom_contracts); - } - - let mut l1_batch = self.vm.batch_env.clone(); - if use_latest_l2_block { - let last_l2_block = load_last_l2_block(self.storage.clone()).unwrap_or(L2Block { - number: 0, - timestamp: 0, - hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - }); - l1_batch.first_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: std::cmp::max(last_l2_block.timestamp + 1, l1_batch.timestamp), - prev_block_hash: last_l2_block.hash, - max_virtual_blocks_to_create: 1, - }; - } - - let vm = Vm::new(l1_batch, self.vm.system_env.clone(), self.storage.clone()); - - if self.test_contract.is_some() { - self.deploy_test_contract(); - } - - self.vm = vm; - } -} - -pub(crate) type ContractsToDeploy = (Vec, Address, bool); - -pub(crate) struct VmTesterBuilder { - storage: Option, - l1_batch_env: Option, - system_env: SystemEnv, - deployer: Option, - rich_accounts: Vec, - custom_contracts: Vec, - _phantom: PhantomData, -} - -impl Clone for VmTesterBuilder { - fn clone(&self) -> Self { - Self { - storage: None, - l1_batch_env: self.l1_batch_env.clone(), - system_env: self.system_env.clone(), - deployer: self.deployer.clone(), - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - _phantom: PhantomData, - } - } -} - -#[allow(dead_code)] -impl VmTesterBuilder { - pub(crate) fn new(_: H) -> Self { - Self { - storage: None, - l1_batch_env: None, - system_env: SystemEnv { - zk_porter_available: false, - version: ProtocolVersionId::latest(), - base_system_smart_contracts: BaseSystemContracts::playground(), - gas_limit: BLOCK_GAS_LIMIT, - execution_mode: TxExecutionMode::VerifyExecute, - default_validation_computational_gas_limit: BLOCK_GAS_LIMIT, - chain_id: L2ChainId::from(270), - }, - deployer: None, - rich_accounts: vec![], - custom_contracts: vec![], - _phantom: PhantomData, - } - } - - pub(crate) fn with_l1_batch_env(mut self, l1_batch_env: L1BatchEnv) -> Self { - self.l1_batch_env = Some(l1_batch_env); - self - } - - pub(crate) fn with_system_env(mut self, system_env: SystemEnv) -> Self { - 
self.system_env = system_env; - self - } - - pub(crate) fn with_storage(mut self, storage: InMemoryStorage) -> Self { - self.storage = Some(storage); - self - } - - pub(crate) fn with_base_system_smart_contracts( - mut self, - base_system_smart_contracts: BaseSystemContracts, - ) -> Self { - self.system_env.base_system_smart_contracts = base_system_smart_contracts; - self - } - - pub(crate) fn with_gas_limit(mut self, gas_limit: u32) -> Self { - self.system_env.gas_limit = gas_limit; - self - } - - pub(crate) fn with_execution_mode(mut self, execution_mode: TxExecutionMode) -> Self { - self.system_env.execution_mode = execution_mode; - self - } - - pub(crate) fn with_empty_in_memory_storage(mut self) -> Self { - self.storage = Some(get_empty_storage()); - self - } - - pub(crate) fn with_random_rich_accounts(mut self, number: u32) -> Self { - for _ in 0..number { - let account = Account::random(); - self.rich_accounts.push(account); - } - self - } - - pub(crate) fn with_rich_accounts(mut self, accounts: Vec) -> Self { - self.rich_accounts.extend(accounts); - self - } - - pub(crate) fn with_deployer(mut self) -> Self { - let deployer = Account::random(); - self.deployer = Some(deployer); - self - } - - pub(crate) fn with_custom_contracts(mut self, contracts: Vec) -> Self { - self.custom_contracts = contracts; - self - } - - pub(crate) fn build(self) -> VmTester { - let l1_batch_env = self - .l1_batch_env - .unwrap_or_else(|| default_l1_batch(L1BatchNumber(1))); - - let mut raw_storage = self.storage.unwrap_or_else(get_empty_storage); - insert_contracts(&mut raw_storage, &self.custom_contracts); - let storage_ptr = StorageView::new(raw_storage).to_rc_ptr(); - for account in self.rich_accounts.iter() { - make_account_rich(storage_ptr.clone(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(storage_ptr.clone(), deployer); - } - let fee_account = l1_batch_env.fee_account; - - let vm = Vm::new(l1_batch_env, self.system_env, storage_ptr.clone()); - - VmTester { - vm, - storage: storage_ptr, - fee_account, - deployer: self.deployer, - test_contract: None, - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - _phantom: PhantomData, - } - } -} - -pub(crate) fn default_l1_batch(number: L1BatchNumber) -> L1BatchEnv { - let timestamp = unix_timestamp_ms(); - L1BatchEnv { - previous_batch_hash: None, - number, - timestamp, - l1_gas_price: 50_000_000_000, // 50 gwei - fair_l2_gas_price: 250_000_000, // 0.25 gwei - fee_account: Address::random(), - enforced_base_fee: None, - first_l2_block: L2BlockEnv { - number: 1, - timestamp, - prev_block_hash: MiniblockHasher::legacy_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 100, - }, - } -} - -pub(crate) fn make_account_rich(storage: StoragePtr, account: &Account) { - let key = storage_key_for_eth_balance(&account.address); - storage - .as_ref() - .borrow_mut() - .set_value(key, u256_to_h256(U256::from(10u64.pow(19)))); -} - -pub(crate) fn get_empty_storage() -> InMemoryStorage { - InMemoryStorage::with_system_contracts(hash_bytecode) -} - -// Inserts the contracts into the test environment, bypassing the -// deployer system contract. Besides the reference to storage -// it accepts a `contracts` tuple of information about the contract -// and whether or not it is an account. 
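// Illustrative sketch (editorial, not part of this diff): `ContractsToDeploy`
// above is the `(bytecode, address, is_account)` tuple consumed by
// `insert_contracts` below, so test fixtures are declared along these lines:
let plain_contract: ContractsToDeploy = (read_test_contract(), Address::random(), false);
let aa_contract: ContractsToDeploy = (read_test_contract(), Address::random(), true);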
-fn insert_contracts(raw_storage: &mut InMemoryStorage, contracts: &[ContractsToDeploy]) { - for (contract, address, is_account) in contracts { - let deployer_code_key = get_code_key(address); - raw_storage.set_value(deployer_code_key, hash_bytecode(contract)); - - if *is_account { - let is_account_key = get_is_account_key(address); - raw_storage.set_value(is_account_key, u256_to_h256(1_u32.into())); - } - - raw_storage.store_factory_dep(hash_bytecode(contract), contract.clone()); - } -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/tracing_execution_error.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/tracing_execution_error.rs deleted file mode 100644 index 8c538dcf9bf..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/tracing_execution_error.rs +++ /dev/null @@ -1,54 +0,0 @@ -use zksync_types::{Execute, H160}; - -use crate::{ - interface::{TxExecutionMode, TxRevertReason, VmRevertReason}, - vm_boojum_integration::{ - tests::{ - tester::{ExpectedError, TransactionTestInfo, VmTesterBuilder}, - utils::{get_execute_error_calldata, read_error_contract, BASE_SYSTEM_CONTRACTS}, - }, - HistoryEnabled, - }, -}; - -#[test] -fn test_tracing_of_execution_errors() { - let contract_address = H160::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_custom_contracts(vec![(read_error_contract(), contract_address, false)]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address, - calldata: get_execute_error_calldata(), - value: Default::default(), - factory_deps: Some(vec![]), - }, - None, - ); - - vm.execute_tx_and_verify(TransactionTestInfo::new_rejected( - tx, - ExpectedError { - revert_reason: TxRevertReason::TxReverted(VmRevertReason::General { - msg: "short".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 115, 104, 111, 114, 116, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, - ], - }), - modifier: None, - }, - )); -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/upgrade.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/upgrade.rs deleted file mode 100644 index bc3d62f62a1..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/upgrade.rs +++ /dev/null @@ -1,362 +0,0 @@ -use zk_evm_1_4_0::aux_structures::Timestamp; -use zksync_contracts::{deployer_contract, load_contract, load_sys_contract, read_bytecode}; -use crate::interface::storage::WriteStorage; -use zksync_test_account::TxType; -use zksync_types::{ - ethabi::{Contract, Token}, - get_code_key, get_known_code_key, - protocol_version::ProtocolUpgradeTxCommonData, - Address, Execute, ExecuteTransactionCommon, Transaction, COMPLEX_UPGRADER_ADDRESS, - CONTRACT_DEPLOYER_ADDRESS, CONTRACT_FORCE_DEPLOYER_ADDRESS, H160, H256, - REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use super::utils::read_test_contract; -use crate::{ - interface::{ - ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, 
VmInterface, - VmInterfaceHistoryEnabled, - }, - vm_boojum_integration::{ - tests::{tester::VmTesterBuilder, utils::verify_required_storage}, - HistoryEnabled, - }, -}; - -/// In this test we ensure that the requirements for protocol upgrade transactions are enforced by the bootloader: -/// - This transaction must be the only one in block -/// - If present, this transaction must be the first one in block -#[test] -fn test_protocol_upgrade_is_first() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let bytecode_hash = hash_bytecode(&read_test_contract()); - vm.vm - .storage - .borrow_mut() - .set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); - - // Here we just use some random transaction of protocol upgrade type: - let protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecodehash to - address: H160::random(), - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - // Another random upgrade transaction - let another_protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecodehash to - address: H160::random(), - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - let normal_l1_transaction = vm.rich_accounts[0] - .get_deploy_tx(&read_test_contract(), None, TxType::L1 { serial_id: 0 }) - .tx; - - let expected_error = - Halt::UnexpectedVMBehavior("Assertion error: Protocol upgrade tx not first".to_string()); - - vm.vm.make_snapshot(); - // Test 1: there must be only one system transaction in block - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(another_protocol_upgrade_transaction); - - vm.vm.execute(VmExecutionMode::OneTx); - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error.clone() - } - ); - - // Test 2: the protocol upgrade tx must be the first one in block - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error - } - ); - - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(protocol_upgrade_transaction); - vm.vm.push_transaction(normal_l1_transaction); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); -} - -/// In this test we try to test how force deployments could be done via protocol upgrade transactions. 
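// A minimal sketch (editorial) of the upgrade transaction exercised by the test
// that follows, using the `ForceDeployment` helper and `get_forced_deploy_tx`
// defined later in this file:
let upgrade_tx = get_forced_deploy_tx(&[ForceDeployment {
    bytecode_hash: hash_bytecode(&read_test_contract()),
    address: H160::random(),
    call_constructor: false,
    value: U256::zero(),
    input: vec![],
}]);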
-#[test] -fn test_force_deploy_upgrade() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - let bytecode_hash = hash_bytecode(&read_test_contract()); - - let known_code_key = get_known_code_key(&bytecode_hash); - // It is generally expected that all the keys will be set as known prior to the protocol upgrade. - storage_view - .borrow_mut() - .set_value(known_code_key, u256_to_h256(1.into())); - drop(storage_view); - - let address_to_deploy = H160::random(); - // Here we just use some random transaction of protocol upgrade type: - let transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecodehash to - address: address_to_deploy, - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - vm.vm.push_transaction(transaction); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = vec![(bytecode_hash, get_code_key(&address_to_deploy))]; - - // Verify that the bytecode has been set correctly - verify_required_storage(&vm.vm.state, expected_slots); -} - -/// Here we show how the work with the complex upgrader could be done -#[test] -fn test_complex_upgrader() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - - let bytecode_hash = hash_bytecode(&read_complex_upgrade()); - let msg_sender_test_hash = hash_bytecode(&read_msg_sender_test()); - - // Let's assume that the bytecode for the implementation of the complex upgrade - // is already deployed in some address in userspace - let upgrade_impl = H160::random(); - let account_code_key = get_code_key(&upgrade_impl); - - storage_view - .borrow_mut() - .set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); - storage_view.borrow_mut().set_value( - get_known_code_key(&msg_sender_test_hash), - u256_to_h256(1.into()), - ); - storage_view - .borrow_mut() - .set_value(account_code_key, bytecode_hash); - drop(storage_view); - - vm.vm.state.decommittment_processor.populate( - vec![ - ( - h256_to_u256(bytecode_hash), - bytes_to_be_words(read_complex_upgrade()), - ), - ( - h256_to_u256(msg_sender_test_hash), - bytes_to_be_words(read_msg_sender_test()), - ), - ], - Timestamp(0), - ); - - let address_to_deploy1 = H160::random(); - let address_to_deploy2 = H160::random(); - - let transaction = get_complex_upgrade_tx( - upgrade_impl, - address_to_deploy1, - address_to_deploy2, - bytecode_hash, - ); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = vec![ - (bytecode_hash, get_code_key(&address_to_deploy1)), - (bytecode_hash, get_code_key(&address_to_deploy2)), - ]; - - // Verify that the bytecode has been set correctly - verify_required_storage(&vm.vm.state, expected_slots); -} - -#[derive(Debug, Clone)] -struct ForceDeployment { - // The bytecode hash to put on an 
address - bytecode_hash: H256, - // The address on which to deploy the bytecode hash - address: Address, - // Whether to run the constructor on the force deployment - call_constructor: bool, - // The value with which to initialize a contract - value: U256, - // The constructor calldata - input: Vec<u8>, -} - -fn get_forced_deploy_tx(deployment: &[ForceDeployment]) -> Transaction { - let deployer = deployer_contract(); - let contract_function = deployer.function("forceDeployOnAddresses").unwrap(); - - let encoded_deployments: Vec<_> = deployment - .iter() - .map(|deployment| { - Token::Tuple(vec![ - Token::FixedBytes(deployment.bytecode_hash.as_bytes().to_vec()), - Token::Address(deployment.address), - Token::Bool(deployment.call_constructor), - Token::Uint(deployment.value), - Token::Bytes(deployment.input.clone()), - ]) - }) - .collect(); - - let params = [Token::Array(encoded_deployments)]; - - let calldata = contract_function - .encode_input(&params) - .expect("failed to encode parameters"); - - let execute = Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, - calldata, - factory_deps: None, - value: U256::zero(), - }; - - Transaction { - common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, - gas_limit: U256::from(200_000_000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -// Returns the transaction that performs a complex protocol upgrade. -// The first param is the address of the implementation of the complex upgrade -// in user-space, while the next 3 params are parameters of the implementation itself. -// For an explanation of the parameters, please refer to: -// etc/contracts-test-data/complex-upgrade/complex-upgrade.sol -fn get_complex_upgrade_tx( - implementation_address: Address, - address1: Address, - address2: Address, - bytecode_hash: H256, -) -> Transaction { - let impl_contract = get_complex_upgrade_abi(); - let impl_function = impl_contract.function("someComplexUpgrade").unwrap(); - let impl_calldata = impl_function - .encode_input(&[ - Token::Address(address1), - Token::Address(address2), - Token::FixedBytes(bytecode_hash.as_bytes().to_vec()), - ]) - .unwrap(); - - let complex_upgrader = get_complex_upgrader_abi(); - let upgrade_function = complex_upgrader.function("upgrade").unwrap(); - let complex_upgrader_calldata = upgrade_function - .encode_input(&[ - Token::Address(implementation_address), - Token::Bytes(impl_calldata), - ]) - .unwrap(); - - let execute = Execute { - contract_address: COMPLEX_UPGRADER_ADDRESS, - calldata: complex_upgrader_calldata, - factory_deps: None, - value: U256::zero(), - }; - - Transaction { - common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, - gas_limit: U256::from(200_000_000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -fn read_complex_upgrade() -> Vec<u8> { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json") -} - -fn read_msg_sender_test() -> Vec<u8> { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/msg-sender.sol/MsgSenderTest.json") -} - -fn get_complex_upgrade_abi() -> Contract { - load_contract( -
"etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json" - ) -} - -fn get_complex_upgrader_abi() -> Contract { - load_sys_contract("ComplexUpgrader") -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/tests/utils.rs b/core/lib/multivm/src/versions/vm_boojum_integration/tests/utils.rs deleted file mode 100644 index 4fba188ac5b..00000000000 --- a/core/lib/multivm/src/versions/vm_boojum_integration/tests/utils.rs +++ /dev/null @@ -1,111 +0,0 @@ -use ethabi::Contract; -use once_cell::sync::Lazy; -use zksync_contracts::{ - load_contract, read_bytecode, read_zbin_bytecode, BaseSystemContracts, SystemContractCode, -}; -use crate::interface::storage::{StoragePtr, WriteStorage}; -use zksync_types::{ - utils::storage_key_for_standard_token_balance, AccountTreeId, Address, StorageKey, H256, U256, -}; -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use crate::vm_boojum_integration::{ - tests::tester::InMemoryStorageView, types::internals::ZkSyncVmState, HistoryMode, -}; - -pub(crate) static BASE_SYSTEM_CONTRACTS: Lazy = - Lazy::new(BaseSystemContracts::load_from_disk); - -// Probably make it a part of vm tester -pub(crate) fn verify_required_storage( - state: &ZkSyncVmState, - required_values: Vec<(H256, StorageKey)>, -) { - for (required_value, key) in required_values { - let current_value = state.storage.storage.read_from_storage(&key); - - assert_eq!( - u256_to_h256(current_value), - required_value, - "Invalid value at key {key:?}" - ); - } -} - -pub(crate) fn verify_required_memory( - state: &ZkSyncVmState, - required_values: Vec<(U256, u32, u32)>, -) { - for (required_value, memory_page, cell) in required_values { - let current_value = state - .memory - .read_slot(memory_page as usize, cell as usize) - .value; - assert_eq!(current_value, required_value); - } -} - -pub(crate) fn get_balance( - token_id: AccountTreeId, - account: &Address, - main_storage: StoragePtr, -) -> U256 { - let key = storage_key_for_standard_token_balance(token_id, account); - h256_to_u256(main_storage.borrow_mut().read_value(&key)) -} - -pub(crate) fn read_test_contract() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json") -} - -pub(crate) fn get_bootloader(test: &str) -> SystemContractCode { - let bootloader_code = read_zbin_bytecode(format!( - "contracts/system-contracts/bootloader/tests/artifacts/{}.yul.zbin", - test - )); - - let bootloader_hash = hash_bytecode(&bootloader_code); - SystemContractCode { - code: bytes_to_be_words(bootloader_code), - hash: bootloader_hash, - } -} - -pub(crate) fn read_nonce_holder_tester() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/custom-account/nonce-holder-test.sol/NonceHolderTest.json") -} - -pub(crate) fn read_error_contract() -> Vec { - read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ) -} - -pub(crate) fn get_execute_error_calldata() -> Vec { - let test_contract = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ); - - let function = test_contract.function("require_short").unwrap(); - - function - .encode_input(&[]) - .expect("failed to encode parameters") -} - -pub(crate) fn read_many_owners_custom_account_contract() -> (Vec, Contract) { - let path = "etc/contracts-test-data/artifacts-zk/contracts/custom-account/many-owners-custom-account.sol/ManyOwnersCustomAccount.json"; - 
(read_bytecode(path), load_contract(path)) -} - -pub(crate) fn read_max_depth_contract() -> Vec { - read_zbin_bytecode( - "core/tests/ts-integration/contracts/zkasm/artifacts/deep_stak.zkasm/deep_stak.zkasm.zbin", - ) -} - -pub(crate) fn read_precompiles_contract() -> Vec { - read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/precompiles/precompiles.sol/Precompiles.json", - ) -} diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/types/internals/transaction_data.rs b/core/lib/multivm/src/versions/vm_boojum_integration/types/internals/transaction_data.rs index ad740a279dc..8bf575effe0 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/types/internals/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/types/internals/transaction_data.rs @@ -22,7 +22,7 @@ use crate::vm_boojum_integration::{ pub(crate) struct TransactionData { pub(crate) tx_type: u8, pub(crate) from: Address, - pub(crate) to: Address, + pub(crate) to: Option
<Address>, pub(crate) gas_limit: U256, pub(crate) pubdata_price_limit: U256, pub(crate) max_fee_per_gas: U256, @@ -169,7 +169,7 @@ impl TransactionData { encode(&[Token::Tuple(vec![ Token::Uint(U256::from_big_endian(&self.tx_type.to_be_bytes())), Token::Address(self.from), - Token::Address(self.to), + Token::Address(self.to.unwrap_or_default()), Token::Uint(self.gas_limit), Token::Uint(self.pubdata_price_limit), Token::Uint(self.max_fee_per_gas), @@ -325,7 +325,7 @@ mod tests { let transaction = TransactionData { tx_type: 113, from: Address::random(), - to: Address::random(), + to: Some(Address::random()), gas_limit: U256::from(1u32), pubdata_price_limit: U256::from(1u32), max_fee_per_gas: U256::from(1u32), diff --git a/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs b/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs index c7b4a5537ac..ebc0a511d20 100644 --- a/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs +++ b/core/lib/multivm/src/versions/vm_boojum_integration/vm.rs @@ -11,7 +11,6 @@ use crate::{ BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, - VmMemoryMetrics, }, utils::events::extract_l2tol1logs_from_l1_messenger, vm_boojum_integration::{ @@ -90,7 +89,7 @@ impl VmInterface for Vm { /// Execute VM with custom tracers. fn inspect( &mut self, - tracer: Self::TracerDispatcher, + tracer: &mut Self::TracerDispatcher, execution_mode: VmExecutionMode, ) -> VmExecutionResultAndLogs { self.inspect_inner(tracer, execution_mode) @@ -103,7 +102,7 @@ impl VmInterface for Vm { /// Inspect transaction with optional bytecode compression. fn inspect_transaction_with_bytecode_compression( &mut self, - tracer: Self::TracerDispatcher, + tracer: &mut Self::TracerDispatcher, tx: Transaction, with_compression: bool, ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { @@ -125,12 +124,8 @@ impl VmInterface for Vm { } } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - self.record_vm_memory_metrics_inner() - } - fn finish_batch(&mut self) -> FinishedL1Batch { - let result = self.inspect(TracerDispatcher::default(), VmExecutionMode::Batch); + let result = self.inspect(&mut TracerDispatcher::default(), VmExecutionMode::Batch); let execution_state = self.get_current_execution_state(); let bootloader_memory = self.bootloader_state.bootloader_memory(); FinishedL1Batch { diff --git a/core/lib/multivm/src/versions/vm_fast/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_fast/bootloader_state/utils.rs index f280f56a828..770f232019b 100644 --- a/core/lib/multivm/src/versions/vm_fast/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_fast/bootloader_state/utils.rs @@ -171,8 +171,8 @@ pub(super) fn assemble_tx_meta(execution_mode: TxExecutionMode, execute_tx: bool // Set 0 byte (execution mode) output[0] = match execution_mode { TxExecutionMode::VerifyExecute => 0x00, - TxExecutionMode::EstimateFee { .. } => 0x00, - TxExecutionMode::EthCall { ..
} => 0x02, + TxExecutionMode::EstimateFee => 0x00, + TxExecutionMode::EthCall => 0x02, }; // Set 31 byte (marker for tx execution) diff --git a/core/lib/multivm/src/versions/vm_fast/bytecode.rs b/core/lib/multivm/src/versions/vm_fast/bytecode.rs index 02122e5f29c..b75e33a21b0 100644 --- a/core/lib/multivm/src/versions/vm_fast/bytecode.rs +++ b/core/lib/multivm/src/versions/vm_fast/bytecode.rs @@ -8,7 +8,7 @@ use crate::{ utils::bytecode, }; -impl Vm { +impl Vm { /// Checks whether the last transaction has successfully published compressed bytecodes and returns `true` if at least one of them is still unknown. pub(crate) fn has_unpublished_bytecodes(&mut self) -> bool { self.bootloader_state diff --git a/core/lib/multivm/src/versions/vm_fast/circuits_tracer.rs b/core/lib/multivm/src/versions/vm_fast/circuits_tracer.rs index de6ead71e65..b48ec7eacb0 100644 --- a/core/lib/multivm/src/versions/vm_fast/circuits_tracer.rs +++ b/core/lib/multivm/src/versions/vm_fast/circuits_tracer.rs @@ -1,11 +1,13 @@ use circuit_sequencer_api_1_5_0::{geometry_config::get_geometry_config, toolset::GeometryConfig}; -use zksync_vm2::{CycleStats, Opcode, OpcodeType, StateInterface, Tracer}; +use zksync_vm2::interface::{CycleStats, Opcode, OpcodeType, StateInterface, Tracer}; use zksync_vm_interface::CircuitStatistic; use crate::vm_latest::tracers::circuits_capacity::*; +/// VM tracer tracking [`CircuitStatistic`]s. Statistics generally depend on the number of times some opcodes were invoked, +/// and, for precompiles, invocation complexity (e.g., how many hashing cycles `keccak256` required). #[derive(Debug, Default, Clone, PartialEq)] -pub(crate) struct CircuitsTracer { +pub struct CircuitsTracer { main_vm_cycles: u32, ram_permutation_cycles: u32, storage_application_cycles: u32, @@ -17,7 +19,7 @@ pub(crate) struct CircuitsTracer { keccak256_cycles: u32, ecrecover_cycles: u32, sha256_cycles: u32, - secp256k1_verify_cycles: u32, + secp256r1_verify_cycles: u32, transient_storage_checker_cycles: u32, } @@ -115,7 +117,7 @@ impl Tracer for CircuitsTracer { CycleStats::Keccak256(cycles) => self.keccak256_cycles += cycles, CycleStats::Sha256(cycles) => self.sha256_cycles += cycles, CycleStats::EcRecover(cycles) => self.ecrecover_cycles += cycles, - CycleStats::Secp256k1Verify(cycles) => self.secp256k1_verify_cycles += cycles, + CycleStats::Secp256r1Verify(cycles) => self.secp256r1_verify_cycles += cycles, CycleStats::Decommit(cycles) => self.code_decommitter_cycles += cycles, CycleStats::StorageRead => self.storage_application_cycles += 1, CycleStats::StorageWrite => self.storage_application_cycles += 2, @@ -124,7 +126,8 @@ impl Tracer for CircuitsTracer { } impl CircuitsTracer { - pub(crate) fn circuit_statistic(&self) -> CircuitStatistic { + /// Obtains the current circuit stats from this tracer.
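// Sketch of the now-public API (editorial; the tracer-tuple form
// `((), CircuitsTracer::default())` appears later in this diff): a caller can
// instantiate the tracer, run the VM with it attached, and read the stats out.
let mut tracer = CircuitsTracer::default();
// ... execute the VM with `tracer` wired into the tracer tuple ...
let stats: CircuitStatistic = tracer.circuit_statistic();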
+ pub fn circuit_statistic(&self) -> CircuitStatistic { CircuitStatistic { main_vm: self.main_vm_cycles as f32 / GEOMETRY_CONFIG.cycles_per_vm_snapshot as f32, ram_permutation: self.ram_permutation_cycles as f32 @@ -146,7 +149,7 @@ impl CircuitsTracer { ecrecover: self.ecrecover_cycles as f32 / GEOMETRY_CONFIG.cycles_per_ecrecover_circuit as f32, sha256: self.sha256_cycles as f32 / GEOMETRY_CONFIG.cycles_per_sha256_circuit as f32, - secp256k1_verify: self.secp256k1_verify_cycles as f32 + secp256k1_verify: self.secp256r1_verify_cycles as f32 / GEOMETRY_CONFIG.cycles_per_secp256r1_verify_circuit as f32, transient_storage_checker: self.transient_storage_checker_cycles as f32 / GEOMETRY_CONFIG.cycles_per_transient_storage_sorter as f32, diff --git a/core/lib/multivm/src/versions/vm_fast/events.rs b/core/lib/multivm/src/versions/vm_fast/events.rs index 2312c3d97b4..294e8adce32 100644 --- a/core/lib/multivm/src/versions/vm_fast/events.rs +++ b/core/lib/multivm/src/versions/vm_fast/events.rs @@ -1,6 +1,6 @@ use zksync_types::{L1BatchNumber, H256}; use zksync_utils::h256_to_account_address; -use zksync_vm2::Event; +use zksync_vm2::interface::Event; use crate::interface::VmEvent; @@ -23,18 +23,21 @@ impl EventAccumulator { } } -pub(crate) fn merge_events(events: &[Event], block_number: L1BatchNumber) -> Vec { +pub(crate) fn merge_events( + events: impl Iterator, + block_number: L1BatchNumber, +) -> Vec { let mut result = vec![]; let mut current: Option<(usize, u32, EventAccumulator)> = None; - for message in events.iter() { + for event in events { let Event { shard_id, is_first, tx_number, key, value, - } = message.clone(); + } = event; if !is_first { if let Some((mut remaining_data_length, mut remaining_topics, mut event)) = diff --git a/core/lib/multivm/src/versions/vm_fast/glue.rs b/core/lib/multivm/src/versions/vm_fast/glue.rs index f24c82af11e..c2d38f351c0 100644 --- a/core/lib/multivm/src/versions/vm_fast/glue.rs +++ b/core/lib/multivm/src/versions/vm_fast/glue.rs @@ -1,18 +1,19 @@ use zksync_types::l2_to_l1_log::{L2ToL1Log, SystemL2ToL1Log}; use zksync_utils::u256_to_h256; +use zksync_vm2::interface; use crate::glue::GlueFrom; -impl GlueFrom<&zksync_vm2::L2ToL1Log> for SystemL2ToL1Log { - fn glue_from(value: &zksync_vm2::L2ToL1Log) -> Self { - let zksync_vm2::L2ToL1Log { +impl GlueFrom for SystemL2ToL1Log { + fn glue_from(value: interface::L2ToL1Log) -> Self { + let interface::L2ToL1Log { key, value, is_service, address, shard_id, tx_number, - } = *value; + } = value; Self(L2ToL1Log { shard_id, diff --git a/core/lib/multivm/src/versions/vm_fast/mod.rs b/core/lib/multivm/src/versions/vm_fast/mod.rs index f0d8bafe69e..bb5a342bff2 100644 --- a/core/lib/multivm/src/versions/vm_fast/mod.rs +++ b/core/lib/multivm/src/versions/vm_fast/mod.rs @@ -1,4 +1,6 @@ -pub use self::vm::Vm; +pub use zksync_vm2::interface::Tracer; + +pub use self::{circuits_tracer::CircuitsTracer, vm::Vm}; mod bootloader_state; mod bytecode; diff --git a/core/lib/multivm/src/versions/vm_fast/tests/block_tip.rs b/core/lib/multivm/src/versions/vm_fast/tests/block_tip.rs index 15af9d868ad..dd407c61668 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/block_tip.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/block_tip.rs @@ -13,11 +13,12 @@ use zksync_types::{ use zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; use super::{ - tester::{default_l1_batch, get_empty_storage, VmTesterBuilder}, + tester::{get_empty_storage, VmTesterBuilder}, utils::{get_complex_upgrade_abi, read_complex_upgrade}, }; use crate::{ 
interface::{L1BatchEnv, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, + versions::testonly::default_l1_batch, vm_latest::constants::{ BOOTLOADER_BATCH_TIP_CIRCUIT_STATISTICS_OVERHEAD, BOOTLOADER_BATCH_TIP_METRICS_SIZE_OVERHEAD, BOOTLOADER_BATCH_TIP_OVERHEAD, @@ -147,7 +148,7 @@ fn execute_test(test_data: L1MessengerTestData) -> TestStatistics { for (i, data) in txs_data.into_iter().enumerate() { let tx = account.get_l2_tx_for_execute( Execute { - contract_address: CONTRACT_FORCE_DEPLOYER_ADDRESS, + contract_address: Some(CONTRACT_FORCE_DEPLOYER_ADDRESS), calldata: data, value: U256::zero(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_fast/tests/bootloader.rs b/core/lib/multivm/src/versions/vm_fast/tests/bootloader.rs index 5c1158a5909..48e1b10de44 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/bootloader.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/bootloader.rs @@ -1,6 +1,6 @@ use assert_matches::assert_matches; use zksync_types::U256; -use zksync_vm2::HeapId; +use zksync_vm2::interface::HeapId; use crate::{ interface::{ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterfaceExt}, diff --git a/core/lib/multivm/src/versions/vm_fast/tests/circuits.rs b/core/lib/multivm/src/versions/vm_fast/tests/circuits.rs index 0270ac35475..f40e5336eb3 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/circuits.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/circuits.rs @@ -2,7 +2,7 @@ use zksync_types::{Address, Execute, U256}; use super::tester::VmTesterBuilder; use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, }; @@ -21,7 +21,7 @@ fn test_circuits() { let account = &mut vm.rich_accounts[0]; let tx = account.get_l2_tx_for_execute( Execute { - contract_address: Address::random(), + contract_address: Some(Address::random()), calldata: Vec::new(), value: U256::from(1u8), factory_deps: vec![], @@ -29,7 +29,8 @@ fn test_circuits() { None, ); vm.vm.push_transaction(tx); - let res = vm.vm.inspect((), VmExecutionMode::OneTx); + let res = vm.vm.execute(VmExecutionMode::OneTx); + assert!(!res.result.is_failed(), "{res:#?}"); let s = res.statistics.circuit_statistic; // Check `circuit_statistic`. diff --git a/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs b/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs index caea07617dd..34342d7f3b8 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/code_oracle.rs @@ -6,12 +6,13 @@ use zksync_utils::{bytecode::hash_bytecode, h256_to_u256, u256_to_h256}; use crate::{ interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, + versions::testonly::ContractToDeploy, vm_fast::{ - circuits_tracer::CircuitsTracer, tests::{ tester::{get_empty_storage, VmTesterBuilder}, utils::{load_precompiles_contract, read_precompiles_contract, read_test_contract}, }, + CircuitsTracer, }, }; @@ -41,10 +42,9 @@ fn test_code_oracle() { .with_empty_in_memory_storage() .with_execution_mode(TxExecutionMode::VerifyExecute) .with_random_rich_accounts(1) - .with_custom_contracts(vec![( + .with_custom_contracts(vec![ContractToDeploy::new( precompile_contract_bytecode, precompiles_contract_address, - false, )]) .with_storage(storage) .build(); @@ -58,7 +58,7 @@ fn test_code_oracle() { // Firstly, let's ensure that the contract works. 
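// The recurring change in these tests, sketched (editorial):
// `Execute::contract_address` is now `Option<Address>`, so direct calls wrap
// the callee in `Some(..)`, leaving `None` for transactions with no recipient.
let execute = Execute {
    contract_address: Some(precompiles_contract_address),
    calldata: vec![],
    value: U256::zero(),
    factory_deps: vec![],
};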
let tx1 = account.get_l2_tx_for_execute( Execute { - contract_address: precompiles_contract_address, + contract_address: Some(precompiles_contract_address), calldata: call_code_oracle_function .encode_input(&[ Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), @@ -82,7 +82,7 @@ fn test_code_oracle() { // the decommitted bytecode gets erased (it shouldn't). let tx2 = account.get_l2_tx_for_execute( Execute { - contract_address: precompiles_contract_address, + contract_address: Some(precompiles_contract_address), calldata: call_code_oracle_function .encode_input(&[ Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), @@ -134,10 +134,9 @@ fn test_code_oracle_big_bytecode() { .with_empty_in_memory_storage() .with_execution_mode(TxExecutionMode::VerifyExecute) .with_random_rich_accounts(1) - .with_custom_contracts(vec![( + .with_custom_contracts(vec![ContractToDeploy::new( precompile_contract_bytecode, precompiles_contract_address, - false, )]) .with_storage(storage) .build(); @@ -152,7 +151,7 @@ fn test_code_oracle_big_bytecode() { // Firstly, let's ensure that the contract works. let tx1 = account.get_l2_tx_for_execute( Execute { - contract_address: precompiles_contract_address, + contract_address: Some(precompiles_contract_address), calldata: call_code_oracle_function .encode_input(&[ Token::FixedBytes(big_zkevm_bytecode_hash.0.to_vec()), @@ -198,10 +197,9 @@ fn refunds_in_code_oracle() { let mut vm = VmTesterBuilder::new() .with_execution_mode(TxExecutionMode::VerifyExecute) .with_random_rich_accounts(1) - .with_custom_contracts(vec![( + .with_custom_contracts(vec![ContractToDeploy::new( precompile_contract_bytecode.clone(), precompiles_contract_address, - false, )]) .with_storage(storage.clone()) .build(); @@ -212,7 +210,7 @@ fn refunds_in_code_oracle() { if decommit { let (_, is_fresh) = vm.vm.inner.world_diff_mut().decommit_opcode( &mut vm.vm.world, - &mut CircuitsTracer::default(), + &mut ((), CircuitsTracer::default()), h256_to_u256(normal_zkevm_bytecode_hash), ); assert!(is_fresh); @@ -220,7 +218,7 @@ fn refunds_in_code_oracle() { let tx = account.get_l2_tx_for_execute( Execute { - contract_address: precompiles_contract_address, + contract_address: Some(precompiles_contract_address), calldata: call_code_oracle_function .encode_input(&[ Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), diff --git a/core/lib/multivm/src/versions/vm_fast/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_fast/tests/gas_limit.rs index b7a2154bdc7..3f0a47b980e 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/gas_limit.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/gas_limit.rs @@ -18,7 +18,10 @@ fn test_tx_gas_limit_offset() { let gas_limit = 9999.into(); let tx = vm.rich_accounts[0].get_l2_tx_for_execute( - Execute::default(), + Execute { + contract_address: Some(Default::default()), + ..Default::default() + }, Some(Fee { gas_limit, ..Account::default_fee() diff --git a/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs index 3fcef71add0..62fa82f52f2 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/get_used_contracts.rs @@ -14,6 +14,7 @@ use crate::{ storage::ReadStorage, ExecutionResult, TxExecutionMode, VmExecutionMode, VmExecutionResultAndLogs, VmInterface, VmInterfaceExt, }, + versions::testonly::ContractToDeploy, vm_fast::{ tests::{ tester::{TxType, VmTester, VmTesterBuilder}, @@ -65,7 +66,7 @@ fn 
test_get_used_contracts() { let account2 = Account::random(); let tx2 = account2.get_l1_tx( Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, + contract_address: Some(CONTRACT_DEPLOYER_ADDRESS), calldata: big_calldata, value: Default::default(), factory_deps: vec![vec![1; 32]], @@ -116,14 +117,17 @@ struct ProxyCounterData { counter_bytecode_hash: U256, } -fn execute_proxy_counter(gas: u32) -> (VmTester, ProxyCounterData, VmExecutionResultAndLogs) { +fn execute_proxy_counter(gas: u32) -> (VmTester<()>, ProxyCounterData, VmExecutionResultAndLogs) { let counter_bytecode = inflated_counter_bytecode(); let counter_bytecode_hash = h256_to_u256(hash_bytecode(&counter_bytecode)); let counter_address = Address::repeat_byte(0x23); let mut vm = VmTesterBuilder::new() .with_empty_in_memory_storage() - .with_custom_contracts(vec![(counter_bytecode, counter_address, false)]) + .with_custom_contracts(vec![ContractToDeploy::new( + counter_bytecode, + counter_address, + )]) .with_execution_mode(TxExecutionMode::VerifyExecute) .with_random_rich_accounts(1) .build(); @@ -150,7 +154,7 @@ fn execute_proxy_counter(gas: u32) -> (VmTester, ProxyCounterData, VmExecutionRe let increment = proxy_counter_abi.function("increment").unwrap(); let increment_tx = account.get_l2_tx_for_execute( Execute { - contract_address: deploy_tx.address, + contract_address: Some(deploy_tx.address), calldata: increment .encode_input(&[Token::Uint(1.into()), Token::Uint(gas.into())]) .unwrap(), @@ -197,7 +201,7 @@ fn get_used_contracts_with_out_of_gas_far_call() { let increment = proxy_counter_abi.function("increment").unwrap(); let increment_tx = account.get_l2_tx_for_execute( Execute { - contract_address: data.proxy_counter_address, + contract_address: Some(data.proxy_counter_address), calldata: increment .encode_input(&[Token::Uint(1.into()), Token::Uint(u64::MAX.into())]) .unwrap(), diff --git a/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs index 3b58565098d..1abb1e39e19 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/l1_tx_execution.rs @@ -176,7 +176,7 @@ fn test_l1_tx_execution_high_gas_limit() { let mut tx = account.get_l1_tx( Execute { - contract_address: L1_MESSENGER_ADDRESS, + contract_address: Some(L1_MESSENGER_ADDRESS), value: 0.into(), factory_deps: vec![], calldata, diff --git a/core/lib/multivm/src/versions/vm_fast/tests/l2_blocks.rs b/core/lib/multivm/src/versions/vm_fast/tests/l2_blocks.rs index a374f63608b..fde94d9da6c 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/l2_blocks.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/l2_blocks.rs @@ -18,10 +18,8 @@ use crate::{ storage::ReadStorage, ExecutionResult, Halt, L2BlockEnv, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt, }, - vm_fast::{ - tests::tester::{default_l1_batch, VmTesterBuilder}, - vm::Vm, - }, + versions::testonly::default_l1_batch, + vm_fast::{tests::tester::VmTesterBuilder, vm::Vm}, vm_latest::{ constants::{TX_OPERATOR_L2_BLOCK_INFO_OFFSET, TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO}, utils::l2_blocks::get_l2_block_hash_key, @@ -37,7 +35,7 @@ fn get_l1_noop() -> Transaction { ..Default::default() }), execute: Execute { - contract_address: H160::zero(), + contract_address: Some(H160::zero()), calldata: vec![], value: U256::zero(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs 
b/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs index 122b3860117..f72e95da9f8 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/nonce_holder.rs @@ -5,6 +5,7 @@ use crate::{ ExecutionResult, Halt, TxExecutionMode, TxRevertReason, VmExecutionMode, VmInterfaceExt, VmRevertReason, }, + versions::testonly::ContractToDeploy, vm_fast::tests::{ tester::{Account, VmTesterBuilder}, utils::read_nonce_holder_tester, @@ -41,10 +42,9 @@ fn test_nonce_holder() { .with_empty_in_memory_storage() .with_execution_mode(TxExecutionMode::VerifyExecute) .with_deployer() - .with_custom_contracts(vec![( + .with_custom_contracts(vec![ContractToDeploy::account( read_nonce_holder_tester().to_vec(), account.address, - true, )]) .with_rich_accounts(vec![account.clone()]) .build(); @@ -59,7 +59,7 @@ fn test_nonce_holder() { vm.reset_state(true); let mut transaction = account.get_l2_tx_for_execute_with_nonce( Execute { - contract_address: account.address, + contract_address: Some(account.address), calldata: vec![12], value: Default::default(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_fast/tests/precompiles.rs b/core/lib/multivm/src/versions/vm_fast/tests/precompiles.rs index f77eeb4f126..b3ca1596217 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/precompiles.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/precompiles.rs @@ -3,7 +3,8 @@ use zksync_types::{Address, Execute}; use super::{tester::VmTesterBuilder, utils::read_precompiles_contract}; use crate::{ - interface::{TxExecutionMode, VmExecutionMode, VmInterface}, + interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, + versions::testonly::ContractToDeploy, vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, }; @@ -18,7 +19,7 @@ fn test_keccak() { .with_deployer() .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contract, address, true)]) + .with_custom_contracts(vec![ContractToDeploy::account(contract, address)]) .build(); // calldata for `doKeccak(1000)`. @@ -28,7 +29,7 @@ fn test_keccak() { let account = &mut vm.rich_accounts[0]; let tx = account.get_l2_tx_for_execute( Execute { - contract_address: address, + contract_address: Some(address), calldata: hex::decode(keccak1000_calldata).unwrap(), value: 0.into(), factory_deps: vec![], @@ -36,7 +37,8 @@ fn test_keccak() { None, ); vm.vm.push_transaction(tx); - let exec_result = vm.vm.inspect((), VmExecutionMode::OneTx); + + let exec_result = vm.vm.execute(VmExecutionMode::OneTx); assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); let keccak_count = exec_result.statistics.circuit_statistic.keccak256 @@ -55,7 +57,7 @@ fn test_sha256() { .with_deployer() .with_bootloader_gas_limit(BATCH_COMPUTATIONAL_GAS_LIMIT) .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contract, address, true)]) + .with_custom_contracts(vec![ContractToDeploy::account(contract, address)]) .build(); // calldata for `doSha256(1000)`. 
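// Sketch (editorial): the new `ContractToDeploy` helper replaces the old
// `(bytecode, address, is_account)` tuple; both constructors are used in this diff:
let plain = ContractToDeploy::new(contract.clone(), address);
let aa_account = ContractToDeploy::account(contract, address);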
@@ -65,7 +67,7 @@ fn test_sha256() { let account = &mut vm.rich_accounts[0]; let tx = account.get_l2_tx_for_execute( Execute { - contract_address: address, + contract_address: Some(address), calldata: hex::decode(sha1000_calldata).unwrap(), value: 0.into(), factory_deps: vec![], @@ -73,7 +75,8 @@ fn test_sha256() { None, ); vm.vm.push_transaction(tx); - let exec_result = vm.vm.inspect((), VmExecutionMode::OneTx); + + let exec_result = vm.vm.execute(VmExecutionMode::OneTx); assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); let sha_count = exec_result.statistics.circuit_statistic.sha256 @@ -95,7 +98,7 @@ fn test_ecrecover() { let account = &mut vm.rich_accounts[0]; let tx = account.get_l2_tx_for_execute( Execute { - contract_address: account.address, + contract_address: Some(account.address), calldata: vec![], value: 0.into(), factory_deps: vec![], @@ -103,7 +106,8 @@ fn test_ecrecover() { None, ); vm.vm.push_transaction(tx); - let exec_result = vm.vm.inspect((), VmExecutionMode::OneTx); + + let exec_result = vm.vm.execute(VmExecutionMode::OneTx); assert!(!exec_result.result.is_failed(), "{exec_result:#?}"); let ecrecover_count = exec_result.statistics.circuit_statistic.ecrecover diff --git a/core/lib/multivm/src/versions/vm_fast/tests/refunds.rs b/core/lib/multivm/src/versions/vm_fast/tests/refunds.rs index 5ad6e3fa4f3..1856995149a 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/refunds.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/refunds.rs @@ -3,6 +3,7 @@ use zksync_types::{Address, Execute, U256}; use crate::{ interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, + versions::testonly::ContractToDeploy, vm_fast::tests::{ tester::{DeployContractsTx, TxType, VmTesterBuilder}, utils::{read_expensive_contract, read_test_contract}, @@ -172,16 +173,15 @@ fn negative_pubdata_for_transaction() { .with_empty_in_memory_storage() .with_execution_mode(TxExecutionMode::VerifyExecute) .with_random_rich_accounts(1) - .with_custom_contracts(vec![( + .with_custom_contracts(vec![ContractToDeploy::new( expensive_contract_bytecode, expensive_contract_address, - false, )]) .build(); let expensive_tx = vm.rich_accounts[0].get_l2_tx_for_execute( Execute { - contract_address: expensive_contract_address, + contract_address: Some(expensive_contract_address), calldata: expensive_function .encode_input(&[Token::Uint(10.into())]) .unwrap(), @@ -200,7 +200,7 @@ fn negative_pubdata_for_transaction() { // This transaction cleans all initial writes in the contract, thus having negative `pubdata` impact. 
let clean_up_tx = vm.rich_accounts[0].get_l2_tx_for_execute( Execute { - contract_address: expensive_contract_address, + contract_address: Some(expensive_contract_address), calldata: cleanup_function.encode_input(&[]).unwrap(), value: U256::zero(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs index 68e49b202a9..88fe2dab5c9 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/require_eip712.rs @@ -1,5 +1,5 @@ use ethabi::Token; -use zksync_eth_signer::{EthereumSigner, TransactionParameters}; +use zksync_eth_signer::TransactionParameters; use zksync_system_constants::L2_BASE_TOKEN_ADDRESS; use zksync_types::{ fee::Fee, l2::L2Tx, transaction_request::TransactionRequest, @@ -12,13 +12,14 @@ use crate::{ interface::{ storage::ReadStorage, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt, }, + versions::testonly::ContractToDeploy, vm_fast::tests::{ tester::{Account, VmTester, VmTesterBuilder}, utils::read_many_owners_custom_account_contract, }, }; -impl VmTester { +impl VmTester<()> { pub(crate) fn get_eth_balance(&mut self, address: Address) -> U256 { let key = storage_key_for_standard_token_balance( AccountTreeId::new(L2_BASE_TOKEN_ADDRESS), @@ -37,8 +38,8 @@ impl VmTester { /// This test deploys 'buggy' account abstraction code, and then tries accessing it both with legacy /// and EIP712 transactions. /// Currently we support both, but in the future, we should allow only EIP712 transactions to access the AA accounts. -#[tokio::test] -async fn test_require_eip712() { +#[test] +fn test_require_eip712() { // Use 3 accounts: // - `private_address` - EOA account, where we have the key // - `account_address` - AA account, where the contract is deployed @@ -50,7 +51,10 @@ async fn test_require_eip712() { let (bytecode, contract) = read_many_owners_custom_account_contract(); let mut vm = VmTesterBuilder::new() .with_empty_in_memory_storage() - .with_custom_contracts(vec![(bytecode, account_abstraction.address, true)]) + .with_custom_contracts(vec![ContractToDeploy::account( + bytecode, + account_abstraction.address, + )]) .with_execution_mode(TxExecutionMode::VerifyExecute) .with_rich_accounts(vec![account_abstraction.clone(), private_account.clone()]) .build(); @@ -68,7 +72,7 @@ async fn test_require_eip712() { let tx = private_account.get_l2_tx_for_execute( Execute { - contract_address: account_abstraction.address, + contract_address: Some(account_abstraction.address), calldata: encoded_input, value: Default::default(), factory_deps: vec![], @@ -100,7 +104,7 @@ async fn test_require_eip712() { blob_versioned_hashes: None, }; - let aa_tx = private_account.sign_legacy_tx(aa_raw_tx).await; + let aa_tx = private_account.sign_legacy_tx(aa_raw_tx); let (tx_request, hash) = TransactionRequest::from_bytes(&aa_tx, L2ChainId::from(270)).unwrap(); let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000).unwrap(); @@ -125,7 +129,7 @@ async fn test_require_eip712() { // // Now send the 'classic' EIP712 transaction let tx_712 = L2Tx::new( - beneficiary.address, + Some(beneficiary.address), vec![], Nonce(1), Fee { @@ -147,7 +151,6 @@ async fn test_require_eip712() { let signature = private_account .get_pk_signer() .sign_typed_data(&domain, &transaction_request) - .await .unwrap(); let encoded_tx = transaction_request.get_signed_bytes(&signature).unwrap(); diff --git 
a/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs index a677a61c602..cff72d8ec5a 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/rollbacks.rs @@ -1,9 +1,12 @@ +use assert_matches::assert_matches; use ethabi::Token; use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams}; -use zksync_types::{Execute, U256}; +use zksync_types::{Address, Execute, U256}; +use zksync_vm_interface::VmInterfaceExt; use crate::{ - interface::TxExecutionMode, + interface::{ExecutionResult, TxExecutionMode}, + versions::testonly::ContractToDeploy, vm_fast::tests::{ tester::{DeployContractsTx, TransactionTestInfo, TxModifier, TxType, VmTesterBuilder}, utils::read_test_contract, @@ -83,7 +86,7 @@ fn test_vm_loadnext_rollbacks() { let loadnext_tx_1 = account.get_l2_tx_for_execute( Execute { - contract_address: address, + contract_address: Some(address), calldata: LoadnextContractExecutionParams { reads: 100, writes: 100, @@ -101,7 +104,7 @@ fn test_vm_loadnext_rollbacks() { let loadnext_tx_2 = account.get_l2_tx_for_execute( Execute { - contract_address: address, + contract_address: Some(address), calldata: LoadnextContractExecutionParams { reads: 100, writes: 100, @@ -142,3 +145,32 @@ fn test_vm_loadnext_rollbacks() { assert_eq!(result_without_rollbacks, result_with_rollbacks); } + +#[test] +fn rollback_in_call_mode() { + let counter_bytecode = read_test_contract(); + let counter_address = Address::repeat_byte(1); + + let mut vm = VmTesterBuilder::new() + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::EthCall) + .with_custom_contracts(vec![ContractToDeploy::new( + counter_bytecode, + counter_address, + )]) + .with_random_rich_accounts(1) + .build(); + let account = &mut vm.rich_accounts[0]; + let tx = account.get_test_contract_transaction(counter_address, true, None, false, TxType::L2); + + let (compression_result, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(tx, true); + compression_result.unwrap(); + assert_matches!( + vm_result.result, + ExecutionResult::Revert { output } + if output.to_string().contains("This method always reverts") + ); + assert_eq!(vm_result.logs.storage_logs, []); +} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/sekp256r1.rs b/core/lib/multivm/src/versions/vm_fast/tests/sekp256r1.rs index a61a0a2bd91..55ca372c4a9 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/sekp256r1.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/sekp256r1.rs @@ -48,7 +48,7 @@ fn test_sekp256r1() { let tx = account.get_l2_tx_for_execute( Execute { - contract_address: P256VERIFY_PRECOMPILE_ADDRESS, + contract_address: Some(P256VERIFY_PRECOMPILE_ADDRESS), calldata: [digest, encoded_r, encoded_s, x, y].concat(), value: U256::zero(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_fast/tests/storage.rs b/core/lib/multivm/src/versions/vm_fast/tests/storage.rs index 7fe15ca7bcd..2cfadb640e7 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/storage.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/storage.rs @@ -6,6 +6,7 @@ use crate::{ interface::{ TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, }, + versions::testonly::ContractToDeploy, vm_fast::tests::tester::VmTesterBuilder, }; @@ -23,14 +24,14 @@ fn test_storage(first_tx_calldata: Vec, second_tx_calldata: Vec) -> u32 
.with_execution_mode(TxExecutionMode::VerifyExecute) .with_deployer() .with_random_rich_accounts(1) - .with_custom_contracts(vec![(bytecode, test_contract_address, false)]) + .with_custom_contracts(vec![ContractToDeploy::new(bytecode, test_contract_address)]) .build(); let account = &mut vm.rich_accounts[0]; let tx1 = account.get_l2_tx_for_execute( Execute { - contract_address: test_contract_address, + contract_address: Some(test_contract_address), calldata: first_tx_calldata, value: 0.into(), factory_deps: vec![], @@ -40,7 +41,7 @@ fn test_storage(first_tx_calldata: Vec, second_tx_calldata: Vec) -> u32 let tx2 = account.get_l2_tx_for_execute( Execute { - contract_address: test_contract_address, + contract_address: Some(test_contract_address), calldata: second_tx_calldata, value: 0.into(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_fast/tests/tester/mod.rs b/core/lib/multivm/src/versions/vm_fast/tests/tester/mod.rs index 781069ddf49..212e569d510 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/tester/mod.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/tester/mod.rs @@ -1,5 +1,5 @@ pub(crate) use transaction_test_info::{ExpectedError, TransactionTestInfo, TxModifier}; -pub(crate) use vm_tester::{default_l1_batch, get_empty_storage, VmTester, VmTesterBuilder}; +pub(crate) use vm_tester::{get_empty_storage, VmTester, VmTesterBuilder}; pub(crate) use zksync_test_account::{Account, DeployContractsTx, TxType}; mod transaction_test_info; diff --git a/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs b/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs index ce45390260c..e6506ff225b 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/tester/transaction_test_info.rs @@ -1,6 +1,7 @@ use std::fmt; use zksync_types::{ExecuteTransactionCommon, Transaction, H160, U256}; +use zksync_vm2::interface::{Event, StateInterface}; use super::VmTester; use crate::{ @@ -190,7 +191,7 @@ impl TransactionTestInfo { struct VmStateDump { state: S, storage_writes: Vec<((H160, U256), U256)>, - events: Box<[zksync_vm2::Event]>, + events: Box<[Event]>, } impl PartialEq for VmStateDump { @@ -205,19 +206,13 @@ impl Vm { fn dump_state(&self) -> VmStateDump { VmStateDump { state: self.inner.dump_state(), - storage_writes: self - .inner - .world_diff() - .get_storage_state() - .iter() - .map(|(k, v)| (*k, *v)) - .collect(), - events: self.inner.world_diff().events().into(), + storage_writes: self.inner.get_storage_state().collect(), + events: self.inner.events().collect(), } } } -impl VmTester { +impl VmTester<()> { pub(crate) fn execute_and_verify_txs( &mut self, txs: &[TransactionTestInfo], diff --git a/core/lib/multivm/src/versions/vm_fast/tests/tester/vm_tester.rs b/core/lib/multivm/src/versions/vm_fast/tests/tester/vm_tester.rs index 8071bcf51d4..9549b32c4f1 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/tester/vm_tester.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/tester/vm_tester.rs @@ -3,38 +3,35 @@ use std::{cell::RefCell, rc::Rc}; use zksync_contracts::BaseSystemContracts; use zksync_test_account::{Account, TxType}; use zksync_types::{ - block::L2BlockHasher, - fee_model::BatchFeeInput, - get_code_key, get_is_account_key, - helpers::unix_timestamp_ms, - utils::{deployed_address_create, storage_key_for_eth_balance}, - AccountTreeId, Address, L1BatchNumber, L2BlockNumber, L2ChainId, Nonce, ProtocolVersionId, - StorageKey, U256, + 
block::L2BlockHasher, utils::deployed_address_create, AccountTreeId, Address, L1BatchNumber, + L2BlockNumber, Nonce, StorageKey, }; use zksync_utils::{bytecode::hash_bytecode, u256_to_h256}; -use zksync_vm2::WorldDiff; +use zksync_vm2::{interface::Tracer, WorldDiff}; use crate::{ interface::{ storage::{InMemoryStorage, StoragePtr}, L1BatchEnv, L2Block, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, VmInterface, - VmInterfaceExt, }, - versions::vm_fast::{tests::utils::read_test_contract, vm::Vm}, - vm_latest::{constants::BATCH_COMPUTATIONAL_GAS_LIMIT, utils::l2_blocks::load_last_l2_block}, + versions::{ + testonly::{default_l1_batch, default_system_env, make_account_rich, ContractToDeploy}, + vm_fast::{tests::utils::read_test_contract, vm::Vm}, + }, + vm_latest::utils::l2_blocks::load_last_l2_block, }; -pub(crate) struct VmTester { - pub(crate) vm: Vm<StoragePtr<InMemoryStorage>>, +pub(crate) struct VmTester<Tr> { + pub(crate) vm: Vm<StoragePtr<InMemoryStorage>, Tr>, pub(crate) storage: StoragePtr<InMemoryStorage>, pub(crate) deployer: Option<Account>, pub(crate) test_contract: Option<Address>,
pub(crate) fee_account: Address, pub(crate) rich_accounts: Vec<Account>, - pub(crate) custom_contracts: Vec<ContractsToDeploy>, + pub(crate) custom_contracts: Vec<ContractToDeploy>, } -impl VmTester { +impl<Tr: Tracer + Default + 'static> VmTester<Tr> { pub(crate) fn deploy_test_contract(&mut self) { let contract = read_test_contract(); let tx = self @@ -45,7 +42,7 @@ impl VmTester { .tx; let nonce = tx.nonce().unwrap().0.into(); self.vm.push_transaction(tx); - self.vm.execute(VmExecutionMode::OneTx); + self.vm.inspect(&mut Tr::default(), VmExecutionMode::OneTx); let deployed_address = deployed_address_create(self.deployer.as_ref().unwrap().address, nonce); self.test_contract = Some(deployed_address); @@ -63,10 +60,10 @@ impl VmTester { pub(crate) fn reset_state(&mut self, use_latest_l2_block: bool) { for account in self.rich_accounts.iter_mut() { account.nonce = Nonce(0); - make_account_rich(self.storage.clone(), account); + make_account_rich(&mut self.storage.borrow_mut(), account); } if let Some(deployer) = &self.deployer { - make_account_rich(self.storage.clone(), deployer); + make_account_rich(&mut self.storage.borrow_mut(), deployer); } if !self.custom_contracts.is_empty() { @@ -99,7 +96,7 @@ impl VmTester { }; } - let vm = Vm::new(l1_batch, self.vm.system_env.clone(), storage); + let vm = Vm::custom(l1_batch, self.vm.system_env.clone(), storage); if self.test_contract.is_some() { self.deploy_test_contract(); @@ -108,15 +105,13 @@ impl VmTester { } } -pub(crate) type ContractsToDeploy = (Vec<u8>, Address, bool); - pub(crate) struct VmTesterBuilder { storage: Option<InMemoryStorage>, l1_batch_env: Option<L1BatchEnv>, system_env: SystemEnv, deployer: Option<Account>, rich_accounts: Vec<Account>, - custom_contracts: Vec<ContractsToDeploy>, + custom_contracts: Vec<ContractToDeploy>, } impl Clone for VmTesterBuilder { @@ -132,21 +127,12 @@ impl Clone for VmTesterBuilder { } } -#[allow(dead_code)] impl VmTesterBuilder { pub(crate) fn new() -> Self { Self { storage: None, l1_batch_env: None, - system_env: SystemEnv { - zk_porter_available: false, - version: ProtocolVersionId::latest(), - base_system_smart_contracts: BaseSystemContracts::playground(), - bootloader_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, - execution_mode: TxExecutionMode::VerifyExecute, - default_validation_computational_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, - chain_id: L2ChainId::from(270), - }, + system_env: default_system_env(), deployer: None, rich_accounts: vec![], custom_contracts: vec![], @@ -158,11 +144,6 @@ impl VmTesterBuilder { self } - pub(crate) fn with_system_env(mut self, system_env: SystemEnv) -> Self { - self.system_env = system_env; - self - } - pub(crate) fn with_storage(mut self, storage: InMemoryStorage) -> Self { self.storage = Some(storage); self @@ -210,28 +191,28 @@ impl VmTesterBuilder { self } - pub(crate) fn with_custom_contracts(mut self, contracts: Vec<ContractsToDeploy>) -> Self { + pub(crate) fn with_custom_contracts(mut self, contracts: Vec<ContractToDeploy>) -> Self { self.custom_contracts = contracts; self } - pub(crate) fn build(self) -> VmTester { + pub(crate) fn build(self) -> VmTester<()> { let l1_batch_env = self .l1_batch_env .unwrap_or_else(|| default_l1_batch(L1BatchNumber(1))); let mut raw_storage = self.storage.unwrap_or_else(get_empty_storage); - insert_contracts(&mut raw_storage, &self.custom_contracts); + ContractToDeploy::insert_all(&self.custom_contracts, &mut raw_storage); let storage_ptr = Rc::new(RefCell::new(raw_storage)); for account in self.rich_accounts.iter() { - make_account_rich(storage_ptr.clone(), account); + make_account_rich(&mut storage_ptr.borrow_mut(), account); } if let Some(deployer) = &self.deployer { - make_account_rich(storage_ptr.clone(), deployer);
+ make_account_rich(&mut storage_ptr.borrow_mut(), deployer); } let fee_account = l1_batch_env.fee_account; - let vm = Vm::new(l1_batch_env, self.system_env, storage_ptr.clone()); + let vm = Vm::custom(l1_batch_env, self.system_env, storage_ptr.clone()); VmTester { vm, @@ -245,53 +226,6 @@ impl VmTesterBuilder { } } -pub(crate) fn default_l1_batch(number: L1BatchNumber) -> L1BatchEnv { - let timestamp = unix_timestamp_ms(); - L1BatchEnv { - previous_batch_hash: None, - number, - timestamp, - fee_input: BatchFeeInput::l1_pegged( - 50_000_000_000, // 50 gwei - 250_000_000, // 0.25 gwei - ), - fee_account: Address::random(), - enforced_base_fee: None, - first_l2_block: L2BlockEnv { - number: 1, - timestamp, - prev_block_hash: L2BlockHasher::legacy_hash(L2BlockNumber(0)), - max_virtual_blocks_to_create: 100, - }, - } -} - -pub(crate) fn make_account_rich(storage: StoragePtr<InMemoryStorage>, account: &Account) { - let key = storage_key_for_eth_balance(&account.address); - storage - .as_ref() - .borrow_mut() - .set_value(key, u256_to_h256(U256::from(10u64.pow(19)))); -} - pub(crate) fn get_empty_storage() -> InMemoryStorage { InMemoryStorage::with_system_contracts(hash_bytecode) } - -// Inserts the contracts into the test environment, bypassing the -// deployer system contract. Besides the reference to storage -// it accepts a `contracts` tuple of information about the contract -// and whether or not it is an account. -fn insert_contracts(raw_storage: &mut InMemoryStorage, contracts: &[ContractsToDeploy]) { - for (contract, address, is_account) in contracts { - let deployer_code_key = get_code_key(address); - raw_storage.set_value(deployer_code_key, hash_bytecode(contract)); - - if *is_account { - let is_account_key = get_is_account_key(address); - raw_storage.set_value(is_account_key, u256_to_h256(1_u32.into())); - } - - raw_storage.store_factory_dep(hash_bytecode(contract), contract.clone()); - } -} diff --git a/core/lib/multivm/src/versions/vm_fast/tests/tracing_execution_error.rs b/core/lib/multivm/src/versions/vm_fast/tests/tracing_execution_error.rs index 75144839006..89f0fa23620 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/tracing_execution_error.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/tracing_execution_error.rs @@ -2,6 +2,7 @@ use zksync_types::{Execute, H160}; use crate::{ interface::{TxExecutionMode, TxRevertReason, VmRevertReason}, + versions::testonly::ContractToDeploy, vm_fast::tests::{ tester::{ExpectedError, TransactionTestInfo, VmTesterBuilder}, utils::{get_execute_error_calldata, read_error_contract, BASE_SYSTEM_CONTRACTS}, @@ -14,7 +15,10 @@ fn test_tracing_of_execution_errors() { let mut vm = VmTesterBuilder::new() .with_empty_in_memory_storage() .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_custom_contracts(vec![(read_error_contract(), contract_address, false)]) + .with_custom_contracts(vec![ContractToDeploy::new( + read_error_contract(), + contract_address, + )]) .with_execution_mode(TxExecutionMode::VerifyExecute) .with_deployer() .with_random_rich_accounts(1) @@ -24,7 +28,7 @@ .build(); let account = &mut vm.rich_accounts[0]; let tx = account.get_l2_tx_for_execute( Execute { - contract_address, + contract_address: Some(contract_address), calldata: get_execute_error_calldata(), value: Default::default(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_fast/tests/transfer.rs b/core/lib/multivm/src/versions/vm_fast/tests/transfer.rs index 57877854031..ef510546f11 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/transfer.rs
+++ b/core/lib/multivm/src/versions/vm_fast/tests/transfer.rs @@ -6,6 +6,7 @@ use zksync_utils::u256_to_h256; use crate::{ interface::{TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt}, + versions::testonly::ContractToDeploy, vm_fast::tests::{ tester::{get_empty_storage, VmTesterBuilder}, utils::get_balance, @@ -21,7 +22,7 @@ fn test_send_or_transfer(test_option: TestOptions) { let test_bytecode = read_bytecode( "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json", ); - let recipeint_bytecode = read_bytecode( + let recipient_bytecode = read_bytecode( "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/Recipient.json", ); let test_abi = load_contract( @@ -62,15 +63,15 @@ fn test_send_or_transfer(test_option: TestOptions) { .with_deployer() .with_random_rich_accounts(1) .with_custom_contracts(vec![ - (test_bytecode, test_contract_address, false), - (recipeint_bytecode, recipient_address, false), + ContractToDeploy::new(test_bytecode, test_contract_address), + ContractToDeploy::new(recipient_bytecode, recipient_address), ]) .build(); let account = &mut vm.rich_accounts[0]; let tx = account.get_l2_tx_for_execute( Execute { - contract_address: test_contract_address, + contract_address: Some(test_contract_address), calldata, value: U256::zero(), factory_deps: vec![], @@ -110,7 +111,7 @@ fn test_reentrancy_protection_send_or_transfer(test_option: TestOptions) { let test_bytecode = read_bytecode( "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/TransferTest.json", ); - let reentrant_recipeint_bytecode = read_bytecode( + let reentrant_recipient_bytecode = read_bytecode( "etc/contracts-test-data/artifacts-zk/contracts/transfer/transfer.sol/ReentrantRecipient.json", ); let test_abi = load_contract( @@ -121,7 +122,7 @@ fn test_reentrancy_protection_send_or_transfer(test_option: TestOptions) { ); let test_contract_address = Address::random(); - let reentrant_recipeint_address = Address::random(); + let reentrant_recipient_address = Address::random(); let (value, calldata) = match test_option { TestOptions::Send(value) => ( @@ -130,7 +131,7 @@ fn test_reentrancy_protection_send_or_transfer(test_option: TestOptions) { .function("send") .unwrap() .encode_input(&[ - Token::Address(reentrant_recipeint_address), + Token::Address(reentrant_recipient_address), Token::Uint(value), ]) .unwrap(), @@ -141,7 +142,7 @@ fn test_reentrancy_protection_send_or_transfer(test_option: TestOptions) { .function("transfer") .unwrap() .encode_input(&[ - Token::Address(reentrant_recipeint_address), + Token::Address(reentrant_recipient_address), Token::Uint(value), ]) .unwrap(), @@ -154,12 +155,8 @@ fn test_reentrancy_protection_send_or_transfer(test_option: TestOptions) { .with_deployer() .with_random_rich_accounts(1) .with_custom_contracts(vec![ - (test_bytecode, test_contract_address, false), - ( - reentrant_recipeint_bytecode, - reentrant_recipeint_address, - false, - ), + ContractToDeploy::new(test_bytecode, test_contract_address), + ContractToDeploy::new(reentrant_recipient_bytecode, reentrant_recipient_address), ]) .build(); @@ -167,7 +164,7 @@ fn test_reentrancy_protection_send_or_transfer(test_option: TestOptions) { let account = &mut vm.rich_accounts[0]; let tx1 = account.get_l2_tx_for_execute( Execute { - contract_address: reentrant_recipeint_address, + contract_address: Some(reentrant_recipient_address), calldata: reentrant_recipient_abi .function("setX") .unwrap() @@ -188,7 +185,7 @@ fn 
test_reentrancy_protection_send_or_transfer(test_option: TestOptions) { let tx2 = account.get_l2_tx_for_execute( Execute { - contract_address: test_contract_address, + contract_address: Some(test_contract_address), calldata, value, factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_fast/tests/upgrade.rs b/core/lib/multivm/src/versions/vm_fast/tests/upgrade.rs index dd25c209740..ba4863f7c45 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/upgrade.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/upgrade.rs @@ -265,7 +265,7 @@ fn get_forced_deploy_tx(deployment: &[ForceDeployment]) -> Transaction { .expect("failed to encode parameters"); let execute = Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, + contract_address: Some(CONTRACT_DEPLOYER_ADDRESS), calldata, factory_deps: vec![], value: U256::zero(), @@ -315,7 +315,7 @@ fn get_complex_upgrade_tx( .unwrap(); let execute = Execute { - contract_address: COMPLEX_UPGRADER_ADDRESS, + contract_address: Some(COMPLEX_UPGRADER_ADDRESS), calldata: complex_upgrader_calldata, factory_deps: vec![], value: U256::zero(), diff --git a/core/lib/multivm/src/versions/vm_fast/tests/utils.rs b/core/lib/multivm/src/versions/vm_fast/tests/utils.rs index d91e1307651..5ab5aa0dec9 100644 --- a/core/lib/multivm/src/versions/vm_fast/tests/utils.rs +++ b/core/lib/multivm/src/versions/vm_fast/tests/utils.rs @@ -10,7 +10,7 @@ use zksync_types::{ U256, }; use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; -use zksync_vm2::{HeapId, StateInterface}; +use zksync_vm2::interface::{HeapId, StateInterface}; use crate::interface::storage::ReadStorage; diff --git a/core/lib/multivm/src/versions/vm_fast/transaction_data.rs b/core/lib/multivm/src/versions/vm_fast/transaction_data.rs index 502be0dc22c..2ec86eb3cea 100644 --- a/core/lib/multivm/src/versions/vm_fast/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_fast/transaction_data.rs @@ -22,7 +22,7 @@ use crate::vm_latest::{ pub(crate) struct TransactionData { pub(crate) tx_type: u8, pub(crate) from: Address, - pub(crate) to: Address, + pub(crate) to: Option<Address>,
pub(crate) gas_limit: U256, pub(crate) pubdata_price_limit: U256, pub(crate) max_fee_per_gas: U256, @@ -169,7 +169,7 @@ impl TransactionData { encode(&[Token::Tuple(vec![ Token::Uint(U256::from_big_endian(&self.tx_type.to_be_bytes())), Token::Address(self.from), - Token::Address(self.to), + Token::Address(self.to.unwrap_or_default()), Token::Uint(self.gas_limit), Token::Uint(self.pubdata_price_limit), Token::Uint(self.max_fee_per_gas), @@ -305,7 +305,7 @@ mod tests { let transaction = TransactionData { tx_type: 113, from: Address::random(), - to: Address::random(), + to: Some(Address::random()), gas_limit: U256::from(1u32), pubdata_price_limit: U256::from(1u32), max_fee_per_gas: U256::from(1u32), diff --git a/core/lib/multivm/src/versions/vm_fast/vm.rs b/core/lib/multivm/src/versions/vm_fast/vm.rs index 5a73ce49b06..10be6d88b04 100644 --- a/core/lib/multivm/src/versions/vm_fast/vm.rs +++ b/core/lib/multivm/src/versions/vm_fast/vm.rs @@ -1,6 +1,8 @@ -use std::{collections::HashMap, fmt}; +use std::{collections::HashMap, fmt, mem}; -use zk_evm_1_5_0::zkevm_opcode_defs::system_params::INITIAL_FRAME_FORMAL_EH_LOCATION; +use zk_evm_1_5_0::{ + aux_structures::LogQuery, zkevm_opcode_defs::system_params::INITIAL_FRAME_FORMAL_EH_LOCATION, +}; use zksync_contracts::SystemContractCode; use zksync_types::{ l1::is_l1_tx_type, @@ -11,13 +13,13 @@ use zksync_types::{ BYTES_PER_ENUMERATION_INDEX, }, AccountTreeId, StorageKey, StorageLog, StorageLogKind, StorageLogWithPreviousValue, - BOOTLOADER_ADDRESS, H160, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS, + BOOTLOADER_ADDRESS, H160, H256, KNOWN_CODES_STORAGE_ADDRESS, L1_MESSENGER_ADDRESS, L2_BASE_TOKEN_ADDRESS, U256, }; use zksync_utils::{bytecode::hash_bytecode, h256_to_u256, u256_to_h256}; use zksync_vm2::{ - decode::decode_program, CallframeInterface, ExecutionEnd, FatPointer, HeapId, Program, - Settings, StateInterface, Tracer, VirtualMachine, + interface::{CallframeInterface, HeapId, StateInterface, Tracer}, + ExecutionEnd, FatPointer, Program, Settings, StorageSlot, VirtualMachine, }; use super::{ @@ -31,11 +33,12 @@ use crate::{ glue::GlueInto, interface::{ - storage::ReadStorage, BytecodeCompressionError, BytecodeCompressionResult, - CurrentExecutionState, ExecutionResult, FinishedL1Batch, Halt, L1BatchEnv, L2BlockEnv, - Refunds, SystemEnv, TxRevertReason, VmEvent, VmExecutionLogs, VmExecutionMode, - VmExecutionResultAndLogs, VmExecutionStatistics, VmInterface, VmInterfaceHistoryEnabled, - VmMemoryMetrics, VmRevertReason, + storage::{ImmutableStorageView, ReadStorage, StoragePtr, StorageView}, + BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, + ExecutionResult, FinishedL1Batch, Halt, L1BatchEnv, L2BlockEnv, Refunds, SystemEnv, + TxRevertReason, VmEvent, VmExecutionLogs, VmExecutionMode, VmExecutionResultAndLogs, + VmExecutionStatistics, VmFactory, VmInterface, VmInterfaceHistoryEnabled, VmRevertReason, + VmTrackingContracts, }, utils::events::extract_l2tol1logs_from_l1_messenger, vm_fast::{ @@ -55,9 +58,41 @@ use crate::{ const VM_VERSION: MultiVMSubversion = MultiVMSubversion::IncreasedBootloaderMemory; -pub struct Vm<S> { - pub(crate) world: World<S>, - pub(crate) inner: VirtualMachine<CircuitsTracer, World<S>>, +type FullTracer<Tr> = (Tr, CircuitsTracer); + +#[derive(Debug)] +struct VmRunResult { + execution_result: ExecutionResult, + /// `true` if VM execution has terminated (as opposed to being stopped on a hook, e.g. when executing a single transaction + /// in a batch). Used for `execution_result == Revert { ..
}` to understand whether VM logs should be reverted. + execution_ended: bool, + refunds: Refunds, + /// This value is used in stats. It's defined in the old VM as the latest value used when computing refunds (see the refunds tracer for `vm_latest`). + /// This is **not** equal to the pubdata diff before and after VM execution; e.g., when executing a batch tip, + /// `pubdata_published` is always 0 (since no refunds are computed). + pubdata_published: u32, +} + +impl VmRunResult { + fn should_ignore_vm_logs(&self) -> bool { + match &self.execution_result { + ExecutionResult::Success { .. } => false, + ExecutionResult::Halt { .. } => true, + // Logs generated during reverts should only be ignored if the revert has reached the root (bootloader) call frame, + // which is only possible with `TxExecutionMode::EthCall`. + ExecutionResult::Revert { .. } => self.execution_ended, + } + } +} + +/// Fast VM wrapper. +/// +/// The wrapper is parametric by the storage and tracer types. Besides the [`Tracer`] trait, a tracer must have `'static` lifetime +/// and implement [`Default`] (the latter is necessary to complete batches). [`CircuitsTracer`] is currently always enabled; +/// you don't need to specify it explicitly. +pub struct Vm<S, Tr = ()> { + pub(crate) world: World<S, FullTracer<Tr>>, + pub(crate) inner: VirtualMachine<FullTracer<Tr>, World<S, FullTracer<Tr>>>, gas_for_account_validation: u32, pub(crate) bootloader_state: BootloaderState, pub(crate) batch_env: L1BatchEnv, @@ -67,38 +102,100 @@ pub struct Vm<S> { enforced_state_diffs: Option<Vec<StateDiffRecord>>, } -impl<S: ReadStorage> Vm<S> { +impl<S: ReadStorage, Tr: Tracer + Default + 'static> Vm<S, Tr> { + pub fn custom(batch_env: L1BatchEnv, system_env: SystemEnv, storage: S) -> Self { + let default_aa_code_hash = system_env + .base_system_smart_contracts + .default_aa + .hash + .into(); + + let program_cache = HashMap::from([World::convert_system_contract_code( + &system_env.base_system_smart_contracts.default_aa, + false, + )]); + + let (_, bootloader) = World::convert_system_contract_code( + &system_env.base_system_smart_contracts.bootloader, + true, + ); + let bootloader_memory = bootloader_initial_memory(&batch_env); + + let mut inner = VirtualMachine::new( + BOOTLOADER_ADDRESS, + bootloader, + H160::zero(), + &[], + system_env.bootloader_gas_limit, + Settings { + default_aa_code_hash, + // this will change after 1.5 + evm_interpreter_code_hash: default_aa_code_hash, + hook_address: get_vm_hook_position(VM_VERSION) * 32, + }, + ); + + inner.current_frame().set_stack_pointer(0); + // The bootloader writes results to high addresses in its heap, so it makes sense to preallocate it.
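Note on usage (an illustrative sketch, not part of this diff): with the wrapper now generic as `Vm<S, Tr>`, a caller picks the tracer by type and passes it to `inspect` by mutable reference, so its accumulated state can be read back after the run. `CountingTracer` below is an invented name; the sketch assumes the `Tracer` hooks have default bodies (which the `()` and tuple tracers used throughout this diff suggest) and that the paths match the imports shown above.

    use crate::interface::{storage::ReadStorage, VmExecutionMode, VmExecutionResultAndLogs, VmInterface};
    use crate::vm_fast::Vm; // paths assumed from this diff's imports

    #[derive(Debug, Default)]
    struct CountingTracer {
        hooks_seen: u64, // example of state accumulated during the run
    }

    // Assumption: all `Tracer` methods have default implementations, so an empty impl compiles.
    impl zksync_vm2::interface::Tracer for CountingTracer {}

    fn trace_one_tx<S: ReadStorage>(
        vm: &mut Vm<S, CountingTracer>,
        tx: zksync_types::Transaction,
    ) -> VmExecutionResultAndLogs {
        vm.push_transaction(tx);
        let mut tracer = CountingTracer::default();
        // `inspect` now borrows the tracer instead of consuming it.
        let result = vm.inspect(&mut tracer, VmExecutionMode::OneTx);
        println!("hooks observed: {}", tracer.hooks_seen);
        result
    }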
+ inner.current_frame().set_heap_bound(u32::MAX); + inner.current_frame().set_aux_heap_bound(u32::MAX); + inner + .current_frame() + .set_exception_handler(INITIAL_FRAME_FORMAL_EH_LOCATION); + + let mut this = Self { + world: World::new(storage, program_cache), + inner, + gas_for_account_validation: system_env.default_validation_computational_gas_limit, + bootloader_state: BootloaderState::new( + system_env.execution_mode, + bootloader_memory.clone(), + batch_env.first_l2_block, + ), + system_env, + batch_env, + snapshot: None, + #[cfg(test)] + enforced_state_diffs: None, + }; + this.write_to_bootloader_heap(bootloader_memory); + this + } + fn run( &mut self, execution_mode: VmExecutionMode, - tracer: &mut CircuitsTracer, + tracer: &mut (Tr, CircuitsTracer), track_refunds: bool, - ) -> (ExecutionResult, Refunds) { + ) -> VmRunResult { let mut refunds = Refunds { gas_refunded: 0, operator_suggested_refund: 0, }; let mut last_tx_result = None; - let mut pubdata_before = self.inner.world_diff().pubdata() as u32; + let mut pubdata_before = self.inner.pubdata() as u32; + let mut pubdata_published = 0; - let result = loop { + let (execution_result, execution_ended) = loop { let hook = match self.inner.run(&mut self.world, tracer) { ExecutionEnd::SuspendedOnHook(hook) => hook, - ExecutionEnd::ProgramFinished(output) => break ExecutionResult::Success { output }, + ExecutionEnd::ProgramFinished(output) => { + break (ExecutionResult::Success { output }, true); + } ExecutionEnd::Reverted(output) => { - break match TxRevertReason::parse_error(&output) { + let result = match TxRevertReason::parse_error(&output) { TxRevertReason::TxReverted(output) => ExecutionResult::Revert { output }, TxRevertReason::Halt(reason) => ExecutionResult::Halt { reason }, - } + }; + break (result, true); } ExecutionEnd::Panicked => { - break ExecutionResult::Halt { - reason: if self.gas_remaining() == 0 { - Halt::BootloaderOutOfGas - } else { - Halt::VMPanic - }, - } + let reason = if self.gas_remaining() == 0 { + Halt::BootloaderOutOfGas + } else { + Halt::VMPanic + }; + break (ExecutionResult::Halt { reason }, true); } }; @@ -108,7 +205,7 @@ impl Vm { } Hook::TxHasEnded => { if let VmExecutionMode::OneTx = execution_mode { - break last_tx_result.take().unwrap(); + break (last_tx_result.take().unwrap(), false); } } Hook::AskOperatorForRefund => { @@ -125,7 +222,8 @@ impl Vm { ) .as_u64(); - let pubdata_published = self.inner.world_diff().pubdata() as u32; + let pubdata_after = self.inner.pubdata() as u32; + pubdata_published = pubdata_after.saturating_sub(pubdata_before); refunds.operator_suggested_refund = compute_refund( &self.batch_env, @@ -133,7 +231,7 @@ impl Vm { gas_spent_on_pubdata.as_u64(), tx_gas_limit, gas_per_pubdata_byte.low_u32(), - pubdata_published.saturating_sub(pubdata_before), + pubdata_published, self.bootloader_state .last_l2_block() .txs @@ -142,7 +240,7 @@ impl Vm { .hash, ); - pubdata_before = pubdata_published; + pubdata_before = pubdata_after; let refund_value = refunds.operator_suggested_refund; self.write_to_bootloader_heap([( OPERATOR_REFUNDS_OFFSET + current_tx_index, @@ -186,8 +284,7 @@ impl Vm { unreachable!("We do not provide the pubdata when executing the block tip or a single transaction"); } - let events = - merge_events(self.inner.world_diff().events(), self.batch_env.number); + let events = merge_events(self.inner.events(), self.batch_env.number); let published_bytecodes = events .iter() @@ -239,7 +336,12 @@ impl Vm { } }; - (result, refunds) + VmRunResult { + execution_result, + 
execution_ended, + refunds, + pubdata_published, + } } fn get_hook_params(&self) -> [U256; 3] { @@ -364,24 +466,24 @@ impl Vm { } let storage = &mut self.world.storage; - let diffs = self.inner.world_diff().get_storage_changes().map( - move |((address, key), (initial_value, final_value))| { - let storage_key = StorageKey::new(AccountTreeId::new(address), u256_to_h256(key)); - StateDiffRecord { - address, - key, - derived_key: - zk_evm_1_5_0::aux_structures::LogQuery::derive_final_address_for_params( - &address, &key, - ), - enumeration_index: storage - .get_enumeration_index(&storage_key) - .unwrap_or_default(), - initial_value: initial_value.unwrap_or_default(), - final_value, - } - }, - ); + let diffs = + self.inner + .world_diff() + .get_storage_changes() + .map(move |((address, key), change)| { + let storage_key = + StorageKey::new(AccountTreeId::new(address), u256_to_h256(key)); + StateDiffRecord { + address, + key, + derived_key: LogQuery::derive_final_address_for_params(&address, &key), + enumeration_index: storage + .get_enumeration_index(&storage_key) + .unwrap_or_default(), + initial_value: change.before, + final_value: change.after, + } + }); diffs .filter(|diff| diff.address != L1_MESSENGER_ADDRESS) .collect() @@ -394,74 +496,12 @@ impl Vm { pub(super) fn gas_remaining(&mut self) -> u32 { self.inner.current_frame().gas() } -} - -// We don't implement `VmFactory` trait because, unlike old VMs, the new VM doesn't require storage to be writable; -// it maintains its own storage cache and a write buffer. -impl Vm { - pub fn new(batch_env: L1BatchEnv, system_env: SystemEnv, storage: S) -> Self { - let default_aa_code_hash = system_env - .base_system_smart_contracts - .default_aa - .hash - .into(); - - let program_cache = HashMap::from([World::convert_system_contract_code( - &system_env.base_system_smart_contracts.default_aa, - false, - )]); - - let (_, bootloader) = World::convert_system_contract_code( - &system_env.base_system_smart_contracts.bootloader, - true, - ); - let bootloader_memory = bootloader_initial_memory(&batch_env); - - let mut inner = VirtualMachine::new( - BOOTLOADER_ADDRESS, - bootloader, - H160::zero(), - vec![], - system_env.bootloader_gas_limit, - Settings { - default_aa_code_hash, - // this will change after 1.5 - evm_interpreter_code_hash: default_aa_code_hash, - hook_address: get_vm_hook_position(VM_VERSION) * 32, - }, - ); - - inner.current_frame().set_stack_pointer(0); - // The bootloader writes results to high addresses in its heap, so it makes sense to preallocate it. 
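The refund hook above samples the VM's pubdata counter and records only the saturating difference as `pubdata_published`, so a transaction with net-negative pubdata impact (for example, one that cleans up earlier initial writes) reports zero rather than underflowing. A toy restatement of that rule:

    // Toy restatement (std only) of the accounting in `Vm::run` above.
    fn published_delta(pubdata_before: u32, pubdata_after: u32) -> u32 {
        pubdata_after.saturating_sub(pubdata_before)
    }

    #[test]
    fn published_delta_examples() {
        assert_eq!(published_delta(100, 260), 160); // 160 bytes published
        assert_eq!(published_delta(260, 100), 0); // negative impact saturates to 0
    }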
- inner.current_frame().set_heap_bound(u32::MAX); - inner.current_frame().set_aux_heap_bound(u32::MAX); - inner - .current_frame() - .set_exception_handler(INITIAL_FRAME_FORMAL_EH_LOCATION); - - let mut this = Self { - world: World::new(storage, program_cache), - inner, - gas_for_account_validation: system_env.default_validation_computational_gas_limit, - bootloader_state: BootloaderState::new( - system_env.execution_mode, - bootloader_memory.clone(), - batch_env.first_l2_block, - ), - system_env, - batch_env, - snapshot: None, - #[cfg(test)] - enforced_state_diffs: None, - }; - this.write_to_bootloader_heap(bootloader_memory); - this - } // visible for testing pub(super) fn get_current_execution_state(&self) -> CurrentExecutionState { let world_diff = self.inner.world_diff(); - let events = merge_events(world_diff.events(), self.batch_env.number); + let vm = &self.inner; + let events = merge_events(vm.events(), self.batch_env.number); let user_l2_to_l1_logs = extract_l2tol1logs_from_l1_messenger(&events) .into_iter() @@ -473,33 +513,38 @@ impl Vm { events, deduplicated_storage_logs: world_diff .get_storage_changes() - .map(|((address, key), (_, value))| StorageLog { + .map(|((address, key), change)| StorageLog { key: StorageKey::new(AccountTreeId::new(address), u256_to_h256(key)), - value: u256_to_h256(value), + value: u256_to_h256(change.after), kind: StorageLogKind::RepeatedWrite, // Initialness doesn't matter here }) .collect(), used_contract_hashes: self.decommitted_hashes().collect(), - system_logs: world_diff - .l2_to_l1_logs() - .iter() - .map(|x| x.glue_into()) - .collect(), + system_logs: vm.l2_to_l1_logs().map(GlueInto::glue_into).collect(), user_l2_to_l1_logs, storage_refunds: world_diff.storage_refunds().to_vec(), pubdata_costs: world_diff.pubdata_costs().to_vec(), } } +} - fn delete_history_if_appropriate(&mut self) { - if self.snapshot.is_none() && !self.has_previous_far_calls() { - self.inner.delete_history(); - } +impl VmFactory> for Vm, Tr> +where + S: ReadStorage, + Tr: Tracer + Default + 'static, +{ + fn new( + batch_env: L1BatchEnv, + system_env: SystemEnv, + storage: StoragePtr>, + ) -> Self { + let storage = ImmutableStorageView::new(storage); + Self::custom(batch_env, system_env, storage) } } -impl VmInterface for Vm { - type TracerDispatcher = (); +impl VmInterface for Vm { + type TracerDispatcher = Tr; fn push_transaction(&mut self, tx: zksync_types::Transaction) { self.push_transaction_inner(tx, 0, true); @@ -507,7 +552,7 @@ impl VmInterface for Vm { fn inspect( &mut self, - (): Self::TracerDispatcher, + tracer: &mut Self::TracerDispatcher, execution_mode: VmExecutionMode, ) -> VmExecutionResultAndLogs { let mut track_refunds = false; @@ -517,17 +562,20 @@ impl VmInterface for Vm { track_refunds = true; } - let mut tracer = CircuitsTracer::default(); let start = self.inner.world_diff().snapshot(); - let pubdata_before = self.inner.world_diff().pubdata(); let gas_before = self.gas_remaining(); - let (result, refunds) = self.run(execution_mode, &mut tracer, track_refunds); - let ignore_world_diff = matches!(execution_mode, VmExecutionMode::OneTx) - && matches!(result, ExecutionResult::Halt { .. 
}); + let mut full_tracer = (mem::take(tracer), CircuitsTracer::default()); + let result = self.run(execution_mode, &mut full_tracer, track_refunds); + *tracer = full_tracer.0; // place the tracer back + + let ignore_world_diff = + matches!(execution_mode, VmExecutionMode::OneTx) && result.should_ignore_vm_logs(); // If the execution is halted, the VM changes are expected to be rolled back by the caller. // Earlier VMs return empty execution logs in this case, so we follow this behavior. + // Likewise, if a revert has reached the bootloader frame (possible with `TxExecutionMode::EthCall`; otherwise, the bootloader catches reverts), + // old VMs revert all logs; the new VM doesn't do that automatically, so we recreate this behavior here. let logs = if ignore_world_diff { VmExecutionLogs::default() } else { @@ -545,11 +593,11 @@ impl VmInterface for Vm { StorageLogKind::RepeatedWrite }, }, - previous_value: u256_to_h256(change.before.unwrap_or_default()), + previous_value: u256_to_h256(change.before), }) .collect(); let events = merge_events( - self.inner.world_diff().events_after(&start), + self.inner.world_diff().events_after(&start).iter().copied(), self.batch_env.number, ); let user_l2_to_l1_logs = extract_l2tol1logs_from_l1_messenger(&events) @@ -562,7 +610,7 @@ impl VmInterface for Vm { .world_diff() .l2_to_l1_logs_after(&start) .iter() - .map(|x| x.glue_into()) + .map(|&log| log.glue_into()) .collect(); VmExecutionLogs { storage_logs, @@ -573,35 +621,35 @@ impl VmInterface for Vm { } }; - let pubdata_after = self.inner.world_diff().pubdata(); - let circuit_statistic = tracer.circuit_statistic(); let gas_remaining = self.gas_remaining(); + let gas_used = gas_before - gas_remaining; + VmExecutionResultAndLogs { - result, + result: result.execution_result, logs, // TODO (PLA-936): Fill statistics; investigate whether they should be zeroed on `Halt` statistics: VmExecutionStatistics { + gas_used: gas_used.into(), + gas_remaining, + computational_gas_used: gas_used, // since 1.5.0, this always has the same value as `gas_used` + pubdata_published: result.pubdata_published, + circuit_statistic: full_tracer.1.circuit_statistic(), contracts_used: 0, cycles_used: 0, - gas_used: (gas_before - gas_remaining).into(), - gas_remaining, - computational_gas_used: 0, total_log_queries: 0, - pubdata_published: (pubdata_after - pubdata_before).max(0) as u32, - circuit_statistic, }, - refunds, + refunds: result.refunds, } } fn inspect_transaction_with_bytecode_compression( &mut self, - (): Self::TracerDispatcher, + tracer: &mut Self::TracerDispatcher, tx: zksync_types::Transaction, with_compression: bool, ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { self.push_transaction_inner(tx, 0, with_compression); - let result = self.inspect((), VmExecutionMode::OneTx); + let result = self.inspect(tracer, VmExecutionMode::OneTx); let compression_result = if self.has_unpublished_bytecodes() { Err(BytecodeCompressionError::BytecodeCompressionFailed) @@ -618,12 +666,8 @@ impl VmInterface for Vm { self.bootloader_state.start_new_l2_block(l2_block_env) } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - todo!("Unused during batch execution") - } - fn finish_batch(&mut self) -> FinishedL1Batch { - let result = self.inspect((), VmExecutionMode::Batch); + let result = self.inspect(&mut Tr::default(), VmExecutionMode::Batch); let execution_state = self.get_current_execution_state(); let bootloader_memory = self.bootloader_state.bootloader_memory(); FinishedL1Batch { @@ -648,21 +692,19 @@ impl 
VmInterface for Vm { #[derive(Debug)] struct VmSnapshot { - vm_snapshot: zksync_vm2::Snapshot, bootloader_snapshot: BootloaderStateSnapshot, gas_for_account_validation: u32, } -impl VmInterfaceHistoryEnabled for Vm { +impl VmInterfaceHistoryEnabled for Vm { fn make_snapshot(&mut self) { assert!( self.snapshot.is_none(), "cannot create a VM snapshot until a previous snapshot is rolled back to or popped" ); - self.delete_history_if_appropriate(); + self.inner.make_snapshot(); self.snapshot = Some(VmSnapshot { - vm_snapshot: self.inner.snapshot(), bootloader_snapshot: self.bootloader_state.get_snapshot(), gas_for_account_validation: self.gas_for_account_validation, }); @@ -670,25 +712,28 @@ impl VmInterfaceHistoryEnabled for Vm { fn rollback_to_the_latest_snapshot(&mut self) { let VmSnapshot { - vm_snapshot, bootloader_snapshot, gas_for_account_validation, } = self.snapshot.take().expect("no snapshots to rollback to"); - self.inner.rollback(vm_snapshot); + self.inner.rollback(); self.bootloader_state.apply_snapshot(bootloader_snapshot); self.gas_for_account_validation = gas_for_account_validation; - - self.delete_history_if_appropriate(); } fn pop_snapshot_no_rollback(&mut self) { + self.inner.pop_snapshot(); self.snapshot = None; - self.delete_history_if_appropriate(); } } -impl fmt::Debug for Vm { +impl VmTrackingContracts for Vm { + fn used_contract_hashes(&self) -> Vec { + self.decommitted_hashes().map(u256_to_h256).collect() + } +} + +impl fmt::Debug for Vm { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Vm") .field( @@ -721,58 +766,39 @@ impl World { } } - fn bytecode_to_program(bytecode: &[u8]) -> Program { - Program::new( - decode_program( - &bytecode - .chunks_exact(8) - .map(|chunk| u64::from_be_bytes(chunk.try_into().unwrap())) - .collect::>(), - false, - ), - bytecode - .chunks_exact(32) - .map(U256::from_big_endian) - .collect::>(), - ) - } - fn convert_system_contract_code( code: &SystemContractCode, is_bootloader: bool, ) -> (U256, Program) { ( h256_to_u256(code.hash), - Program::new( - decode_program( - &code - .code - .iter() - .flat_map(|x| x.0.into_iter().rev()) - .collect::>(), - is_bootloader, - ), - code.code.clone(), - ), + Program::from_words(code.code.clone(), is_bootloader), ) } } impl zksync_vm2::StorageInterface for World { - fn read_storage(&mut self, contract: H160, key: U256) -> Option { + fn read_storage(&mut self, contract: H160, key: U256) -> StorageSlot { let key = &StorageKey::new(AccountTreeId::new(contract), u256_to_h256(key)); - if self.storage.is_write_initial(key) { - None - } else { - Some(self.storage.read_value(key).as_bytes().into()) + let value = U256::from_big_endian(self.storage.read_value(key).as_bytes()); + // `is_write_initial` value can be true even if the slot has previously been written to / has non-zero value! + // This can happen during oneshot execution (i.e., executing a single transaction) since it emulates + // execution starting in the middle of a batch in the general case. Hence, a slot that was first written to in the batch + // must still be considered an initial write by the refund logic. 
+ let is_write_initial = self.storage.is_write_initial(key); + StorageSlot { + value, + is_write_initial, } } - fn cost_of_writing_storage(&mut self, initial_value: Option, new_value: U256) -> u32 { - let is_initial = initial_value.is_none(); - let initial_value = initial_value.unwrap_or_default(); + fn read_storage_value(&mut self, contract: H160, key: U256) -> U256 { + let key = &StorageKey::new(AccountTreeId::new(contract), u256_to_h256(key)); + U256::from_big_endian(self.storage.read_value(key).as_bytes()) + } - if initial_value == new_value { + fn cost_of_writing_storage(&mut self, slot: StorageSlot, new_value: U256) -> u32 { + if slot.value == new_value { return 0; } @@ -786,10 +812,9 @@ impl zksync_vm2::StorageInterface for World { // For value compression, we use a metadata byte which holds the length of the value and the operation from the // previous state to the new state, and the compressed value. The maximum for this is 33 bytes. // Total bytes for initial writes then becomes 65 bytes and repeated writes becomes 38 bytes. - let compressed_value_size = - compress_with_best_strategy(initial_value, new_value).len() as u32; + let compressed_value_size = compress_with_best_strategy(slot.value, new_value).len() as u32; - if is_initial { + if slot.is_write_initial { (BYTES_PER_DERIVED_KEY as u32) + compressed_value_size } else { (BYTES_PER_ENUMERATION_INDEX as u32) + compressed_value_size @@ -808,11 +833,12 @@ impl zksync_vm2::World for World { self.program_cache .entry(hash) .or_insert_with(|| { - Self::bytecode_to_program(self.bytecode_cache.entry(hash).or_insert_with(|| { + let bytecode = self.bytecode_cache.entry(hash).or_insert_with(|| { self.storage .load_factory_dep(u256_to_h256(hash)) .expect("vm tried to decommit nonexistent bytecode") - })) + }); + Program::new(bytecode, false) }) .clone() } diff --git a/core/lib/multivm/src/versions/vm_latest/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_latest/bootloader_state/utils.rs index 4931082d6da..23c079202c1 100644 --- a/core/lib/multivm/src/versions/vm_latest/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_latest/bootloader_state/utils.rs @@ -175,8 +175,8 @@ pub(super) fn assemble_tx_meta(execution_mode: TxExecutionMode, execute_tx: bool // Set 0 byte (execution mode) output[0] = match execution_mode { TxExecutionMode::VerifyExecute => 0x00, - TxExecutionMode::EstimateFee { .. } => 0x00, - TxExecutionMode::EthCall { .. } => 0x02, + TxExecutionMode::EstimateFee => 0x00, + TxExecutionMode::EthCall => 0x02, }; // Set 31 byte (marker for tx execution) diff --git a/core/lib/multivm/src/versions/vm_latest/implementation/execution.rs b/core/lib/multivm/src/versions/vm_latest/implementation/execution.rs index 66fc1a8bfd7..b8242fa7ca8 100644 --- a/core/lib/multivm/src/versions/vm_latest/implementation/execution.rs +++ b/core/lib/multivm/src/versions/vm_latest/implementation/execution.rs @@ -1,3 +1,5 @@ +use std::mem; + use zk_evm_1_5_0::aux_structures::Timestamp; use crate::{ @@ -20,7 +22,7 @@ use crate::{ impl Vm { pub(crate) fn inspect_inner( &mut self, - dispatcher: TracerDispatcher, + dispatcher: &mut TracerDispatcher, execution_mode: VmExecutionMode, custom_pubdata_tracer: Option>, ) -> VmExecutionResultAndLogs { @@ -44,7 +46,7 @@ impl Vm { /// Collect the result from the default tracers. 
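The `StorageSlot`-based pricing in `cost_of_writing_storage` above reduces to: writing back the current value is free; otherwise an initial write pays for publishing the full derived key, while a repeated write pays only for the short enumeration index, plus the best-strategy-compressed value in both cases. A minimal restatement, with the real constants (`BYTES_PER_DERIVED_KEY`, `BYTES_PER_ENUMERATION_INDEX`) and `compress_with_best_strategy(..).len()` abstracted into parameters:

    // Sketch only; u64 stands in for U256 to keep the example std-only.
    fn write_cost(old: u64, new: u64, is_initial: bool, key_bytes: u32, index_bytes: u32, compressed_len: u32) -> u32 {
        if old == new {
            0 // no-op writes publish nothing
        } else if is_initial {
            key_bytes + compressed_len // full derived key must be published
        } else {
            index_bytes + compressed_len // short repeated-write index suffices
        }
    }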
fn inspect_and_collect_results( &mut self, - dispatcher: TracerDispatcher, + dispatcher: &mut TracerDispatcher, execution_mode: VmExecutionMode, with_refund_tracer: bool, custom_pubdata_tracer: Option>, @@ -54,7 +56,7 @@ impl Vm { let mut tx_tracer: DefaultExecutionTracer = DefaultExecutionTracer::new( self.system_env.default_validation_computational_gas_limit, execution_mode, - dispatcher, + mem::take(dispatcher), self.storage.clone(), refund_tracers, custom_pubdata_tracer.or_else(|| { @@ -93,6 +95,7 @@ impl Vm { circuit_statistic_from_cycles(tx_tracer.circuits_tracer.statistics), ); let result = tx_tracer.result_tracer.into_result(); + *dispatcher = tx_tracer.dispatcher; let result = VmExecutionResultAndLogs { result, diff --git a/core/lib/multivm/src/versions/vm_latest/implementation/statistics.rs b/core/lib/multivm/src/versions/vm_latest/implementation/statistics.rs index 34c1e1f81da..c1cf1504356 100644 --- a/core/lib/multivm/src/versions/vm_latest/implementation/statistics.rs +++ b/core/lib/multivm/src/versions/vm_latest/implementation/statistics.rs @@ -51,7 +51,7 @@ impl Vm { } /// Returns the info about all oracles' sizes. - pub(crate) fn record_vm_memory_metrics_inner(&self) -> VmMemoryMetrics { + pub(crate) fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { VmMemoryMetrics { event_sink_inner: self.state.event_sink.get_size(), event_sink_history: self.state.event_sink.get_history_size(), diff --git a/core/lib/multivm/src/versions/vm_latest/tests/block_tip.rs b/core/lib/multivm/src/versions/vm_latest/tests/block_tip.rs index bed348afd2d..9909ca24937 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/block_tip.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/block_tip.rs @@ -164,7 +164,7 @@ fn execute_test(test_data: L1MessengerTestData) -> TestStatistics { for (i, data) in txs_data.into_iter().enumerate() { let tx = account.get_l2_tx_for_execute( Execute { - contract_address: CONTRACT_FORCE_DEPLOYER_ADDRESS, + contract_address: Some(CONTRACT_FORCE_DEPLOYER_ADDRESS), calldata: data, value: U256::zero(), factory_deps: vec![], @@ -196,7 +196,7 @@ fn execute_test(test_data: L1MessengerTestData) -> TestStatistics { ); let result = vm.vm.inspect_inner( - TracerDispatcher::default(), + &mut TracerDispatcher::default(), VmExecutionMode::Batch, Some(pubdata_tracer), ); diff --git a/core/lib/multivm/src/versions/vm_latest/tests/call_tracer.rs b/core/lib/multivm/src/versions/vm_latest/tests/call_tracer.rs index a4d0eb2d17e..e7f26b7faf8 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/call_tracer.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/call_tracer.rs @@ -34,7 +34,7 @@ fn test_max_depth() { let account = &mut vm.rich_accounts[0]; let tx = account.get_l2_tx_for_execute( Execute { - contract_address: address, + contract_address: Some(address), calldata: vec![], value: Default::default(), factory_deps: vec![], @@ -45,7 +45,9 @@ fn test_max_depth() { let result = Arc::new(OnceCell::new()); let call_tracer = CallTracer::new(result.clone()).into_tracer_pointer(); vm.vm.push_transaction(tx); - let res = vm.vm.inspect(call_tracer.into(), VmExecutionMode::OneTx); + let res = vm + .vm + .inspect(&mut call_tracer.into(), VmExecutionMode::OneTx); assert!(result.get().is_some()); assert!(res.result.is_failed()); } @@ -69,7 +71,7 @@ fn test_basic_behavior() { let account = &mut vm.rich_accounts[0]; let tx = account.get_l2_tx_for_execute( Execute { - contract_address: address, + contract_address: Some(address), calldata: hex::decode(increment_by_6_calldata).unwrap(), 
value: Default::default(), factory_deps: vec![], @@ -80,7 +82,9 @@ fn test_basic_behavior() { let result = Arc::new(OnceCell::new()); let call_tracer = CallTracer::new(result.clone()).into_tracer_pointer(); vm.vm.push_transaction(tx); - let res = vm.vm.inspect(call_tracer.into(), VmExecutionMode::OneTx); + let res = vm + .vm + .inspect(&mut call_tracer.into(), VmExecutionMode::OneTx); let call_tracer_result = result.get().unwrap(); diff --git a/core/lib/multivm/src/versions/vm_latest/tests/circuits.rs b/core/lib/multivm/src/versions/vm_latest/tests/circuits.rs index 02ec2dc58aa..c3c6816cbd8 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/circuits.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/circuits.rs @@ -22,7 +22,7 @@ fn test_circuits() { let account = &mut vm.rich_accounts[0]; let tx = account.get_l2_tx_for_execute( Execute { - contract_address: Address::random(), + contract_address: Some(Address::random()), calldata: Vec::new(), value: U256::from(1u8), factory_deps: vec![], @@ -30,7 +30,9 @@ fn test_circuits() { None, ); vm.vm.push_transaction(tx); - let res = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); + let res = vm + .vm + .inspect(&mut Default::default(), VmExecutionMode::OneTx); let s = res.statistics.circuit_statistic; // Check `circuit_statistic`. diff --git a/core/lib/multivm/src/versions/vm_latest/tests/code_oracle.rs b/core/lib/multivm/src/versions/vm_latest/tests/code_oracle.rs index 0708d67e27a..b15ef7fde2b 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/code_oracle.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/code_oracle.rs @@ -69,7 +69,7 @@ fn test_code_oracle() { // Firstly, let's ensure that the contract works. let tx1 = account.get_l2_tx_for_execute( Execute { - contract_address: precompiles_contract_address, + contract_address: Some(precompiles_contract_address), calldata: call_code_oracle_function .encode_input(&[ Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), @@ -93,7 +93,7 @@ fn test_code_oracle() { // the decommitted bytecode gets erased (it shouldn't). let tx2 = account.get_l2_tx_for_execute( Execute { - contract_address: precompiles_contract_address, + contract_address: Some(precompiles_contract_address), calldata: call_code_oracle_function .encode_input(&[ Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), @@ -169,7 +169,7 @@ fn test_code_oracle_big_bytecode() { // Firstly, let's ensure that the contract works. 
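Since `inspect_inner` now borrows the tracer dispatcher while `DefaultExecutionTracer::new` still consumes one by value, the code above takes the dispatcher out with `mem::take` (leaving a default in its place) and writes it back once the run finishes. The generic shape of that pattern, assuming only `Default`:

    // Take-and-restore sketch (std only): the slot briefly holds T::default()
    // while `run` owns the real value; the updated value is then handed back
    // through the &mut borrow.
    fn with_taken<T: Default, R>(slot: &mut T, run: impl FnOnce(T) -> (T, R)) -> R {
        let taken = std::mem::take(slot);
        let (restored, result) = run(taken);
        *slot = restored;
        result
    }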
let tx1 = account.get_l2_tx_for_execute( Execute { - contract_address: precompiles_contract_address, + contract_address: Some(precompiles_contract_address), calldata: call_code_oracle_function .encode_input(&[ Token::FixedBytes(big_zkevm_bytecode_hash.0.to_vec()), @@ -251,7 +251,7 @@ fn refunds_in_code_oracle() { let tx = account.get_l2_tx_for_execute( Execute { - contract_address: precompiles_contract_address, + contract_address: Some(precompiles_contract_address), calldata: call_code_oracle_function .encode_input(&[ Token::FixedBytes(normal_zkevm_bytecode_hash.0.to_vec()), diff --git a/core/lib/multivm/src/versions/vm_latest/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_latest/tests/gas_limit.rs index 34e1e2d25f3..cc9aac5bb91 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/gas_limit.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/gas_limit.rs @@ -21,7 +21,10 @@ fn test_tx_gas_limit_offset() { let gas_limit = 9999.into(); let tx = vm.rich_accounts[0].get_l2_tx_for_execute( - Execute::default(), + Execute { + contract_address: Some(Default::default()), + ..Default::default() + }, Some(Fee { gas_limit, ..Account::default_fee() diff --git a/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs index a42037a7f5b..ef19717a627 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/get_used_contracts.rs @@ -82,7 +82,7 @@ fn test_get_used_contracts() { let account2 = Account::random(); let tx2 = account2.get_l1_tx( Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, + contract_address: Some(CONTRACT_DEPLOYER_ADDRESS), calldata: big_calldata, value: Default::default(), factory_deps: vec![vec![1; 32]], @@ -208,7 +208,7 @@ fn execute_proxy_counter(gas: u32) -> (VmTester, U256, VmExecut let increment = proxy_counter_abi.function("increment").unwrap(); let increment_tx = account.get_l2_tx_for_execute( Execute { - contract_address: deploy_tx.address, + contract_address: Some(deploy_tx.address), calldata: increment .encode_input(&[Token::Uint(1.into()), Token::Uint(gas.into())]) .unwrap(), diff --git a/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs index dcb1bff06d0..0fc12848227 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/l1_tx_execution.rs @@ -173,7 +173,7 @@ fn test_l1_tx_execution_high_gas_limit() { let mut tx = account.get_l1_tx( Execute { - contract_address: L1_MESSENGER_ADDRESS, + contract_address: Some(L1_MESSENGER_ADDRESS), value: 0.into(), factory_deps: vec![], calldata, diff --git a/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs b/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs index 661286ca969..91d78c69a93 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/nonce_holder.rs @@ -64,7 +64,7 @@ fn test_nonce_holder() { let mut transaction_data: TransactionData = account .get_l2_tx_for_execute_with_nonce( Execute { - contract_address: account.address, + contract_address: Some(account.address), calldata: vec![12], value: Default::default(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_latest/tests/precompiles.rs b/core/lib/multivm/src/versions/vm_latest/tests/precompiles.rs index 2ab40faf22c..110b14146c7 100644 --- 
a/core/lib/multivm/src/versions/vm_latest/tests/precompiles.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/precompiles.rs @@ -31,7 +31,7 @@ fn test_keccak() { let account = &mut vm.rich_accounts[0]; let tx = account.get_l2_tx_for_execute( Execute { - contract_address: address, + contract_address: Some(address), calldata: hex::decode(keccak1000_calldata).unwrap(), value: Default::default(), factory_deps: vec![], @@ -39,7 +39,9 @@ fn test_keccak() { None, ); vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); + let _ = vm + .vm + .inspect(&mut Default::default(), VmExecutionMode::OneTx); let keccak_count = vm .vm @@ -75,7 +77,7 @@ fn test_sha256() { let account = &mut vm.rich_accounts[0]; let tx = account.get_l2_tx_for_execute( Execute { - contract_address: address, + contract_address: Some(address), calldata: hex::decode(sha1000_calldata).unwrap(), value: Default::default(), factory_deps: vec![], @@ -83,7 +85,9 @@ fn test_sha256() { None, ); vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); + let _ = vm + .vm + .inspect(&mut Default::default(), VmExecutionMode::OneTx); let sha_count = vm .vm @@ -112,7 +116,7 @@ fn test_ecrecover() { let account = &mut vm.rich_accounts[0]; let tx = account.get_l2_tx_for_execute( Execute { - contract_address: account.address, + contract_address: Some(account.address), calldata: Vec::new(), value: Default::default(), factory_deps: vec![], @@ -120,7 +124,9 @@ fn test_ecrecover() { None, ); vm.vm.push_transaction(tx); - let _ = vm.vm.inspect(Default::default(), VmExecutionMode::OneTx); + let _ = vm + .vm + .inspect(&mut Default::default(), VmExecutionMode::OneTx); let ecrecover_count = vm .vm diff --git a/core/lib/multivm/src/versions/vm_latest/tests/prestate_tracer.rs b/core/lib/multivm/src/versions/vm_latest/tests/prestate_tracer.rs index eb3104fd637..230b1d0ad87 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/prestate_tracer.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/prestate_tracer.rs @@ -40,7 +40,8 @@ fn test_prestate_tracer() { let prestate_tracer_result = Arc::new(OnceCell::default()); let prestate_tracer = PrestateTracer::new(false, prestate_tracer_result.clone()); let tracer_ptr = prestate_tracer.into_tracer_pointer(); - vm.vm.inspect(tracer_ptr.into(), VmExecutionMode::Batch); + vm.vm + .inspect(&mut tracer_ptr.into(), VmExecutionMode::Batch); let prestate_result = Arc::try_unwrap(prestate_tracer_result) .unwrap() @@ -88,7 +89,7 @@ fn test_prestate_tracer_diff_mode() { //enter ether to contract to see difference in the balance post execution let tx0 = Execute { - contract_address: vm.test_contract.unwrap(), + contract_address: Some(vm.test_contract.unwrap()), calldata: Default::default(), value: U256::from(100000), factory_deps: vec![], @@ -98,7 +99,7 @@ fn test_prestate_tracer_diff_mode() { .push_transaction(account.get_l2_tx_for_execute(tx0.clone(), None)); let tx1 = Execute { - contract_address: deployed_address2, + contract_address: Some(deployed_address2), calldata: Default::default(), value: U256::from(200000), factory_deps: vec![], @@ -110,7 +111,7 @@ fn test_prestate_tracer_diff_mode() { let prestate_tracer = PrestateTracer::new(true, prestate_tracer_result.clone()); let tracer_ptr = prestate_tracer.into_tracer_pointer(); vm.vm - .inspect(tracer_ptr.into(), VmExecutionMode::Bootloader); + .inspect(&mut tracer_ptr.into(), VmExecutionMode::Bootloader); let prestate_result = Arc::try_unwrap(prestate_tracer_result) .unwrap() 
diff --git a/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs b/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs index ca058d672d2..cc0085f2025 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/refunds.rs @@ -188,7 +188,7 @@ fn negative_pubdata_for_transaction() { let expensive_tx = vm.rich_accounts[0].get_l2_tx_for_execute( Execute { - contract_address: expensive_contract_address, + contract_address: Some(expensive_contract_address), calldata: expensive_function .encode_input(&[Token::Uint(10.into())]) .unwrap(), @@ -207,7 +207,7 @@ fn negative_pubdata_for_transaction() { // This transaction cleans all initial writes in the contract, thus having negative `pubdata` impact. let clean_up_tx = vm.rich_accounts[0].get_l2_tx_for_execute( Execute { - contract_address: expensive_contract_address, + contract_address: Some(expensive_contract_address), calldata: cleanup_function.encode_input(&[]).unwrap(), value: U256::zero(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs index 779e9b5c629..a6dc7118005 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/require_eip712.rs @@ -1,5 +1,5 @@ use ethabi::Token; -use zksync_eth_signer::{EthereumSigner, TransactionParameters}; +use zksync_eth_signer::TransactionParameters; use zksync_system_constants::L2_BASE_TOKEN_ADDRESS; use zksync_types::{ fee::Fee, l2::L2Tx, transaction_request::TransactionRequest, @@ -29,11 +29,11 @@ impl VmTester { } // TODO refactor this test it use too much internal details of the VM -#[tokio::test] +#[test] /// This test deploys 'buggy' account abstraction code, and then tries accessing it both with legacy /// and EIP712 transactions. /// Currently we support both, but in the future, we should allow only EIP712 transactions to access the AA accounts. 
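Across these tests, `Execute::contract_address` changes from `Address` to `Option<Address>`. Judging by the `self.to.unwrap_or_default()` in the `TransactionData` encoding earlier in this diff, `None` appears to model a to-less (deployment-style) transaction and falls back to the zero address when ABI-encoded; `encoded_to` below is an invented helper name illustrating that fallback:

    use zksync_types::Address;

    // Mirrors `Token::Address(self.to.unwrap_or_default())` from this diff.
    fn encoded_to(contract_address: Option<Address>) -> Address {
        contract_address.unwrap_or_default() // H160::zero() when None
    }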
-async fn test_require_eip712() { +fn test_require_eip712() { // Use 3 accounts: // - `private_address` - EOA account, where we have the key // - `account_address` - AA account, where the contract is deployed @@ -63,7 +63,7 @@ async fn test_require_eip712() { let tx = private_account.get_l2_tx_for_execute( Execute { - contract_address: account_abstraction.address, + contract_address: Some(account_abstraction.address), calldata: encoded_input, value: Default::default(), factory_deps: vec![], @@ -95,7 +95,7 @@ async fn test_require_eip712() { blob_versioned_hashes: None, }; - let aa_tx = private_account.sign_legacy_tx(aa_raw_tx).await; + let aa_tx = private_account.sign_legacy_tx(aa_raw_tx); let (tx_request, hash) = TransactionRequest::from_bytes(&aa_tx, L2ChainId::from(270)).unwrap(); let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000).unwrap(); @@ -120,7 +120,7 @@ async fn test_require_eip712() { // // Now send the 'classic' EIP712 transaction let tx_712 = L2Tx::new( - beneficiary.address, + Some(beneficiary.address), vec![], Nonce(1), Fee { @@ -142,7 +142,6 @@ async fn test_require_eip712() { let signature = private_account .get_pk_signer() .sign_typed_data(&domain, &transaction_request) - .await .unwrap(); let encoded_tx = transaction_request.get_signed_bytes(&signature).unwrap(); diff --git a/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs index 43e7baae3b2..00a5d6494fe 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/rollbacks.rs @@ -1,12 +1,14 @@ +use assert_matches::assert_matches; use ethabi::Token; use zksync_contracts::{get_loadnext_contract, test_contracts::LoadnextContractExecutionParams}; -use zksync_types::{get_nonce_key, Execute, U256}; +use zksync_types::{get_nonce_key, Address, Execute, U256}; use crate::{ interface::{ storage::WriteStorage, tracer::{TracerExecutionStatus, TracerExecutionStopReason}, - TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, + ExecutionResult, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceExt, + VmInterfaceHistoryEnabled, }, tracers::dynamic::vm_1_5_0::DynTracer, vm_latest::{ @@ -92,7 +94,7 @@ fn test_vm_loadnext_rollbacks() { let loadnext_tx_1 = account.get_l2_tx_for_execute( Execute { - contract_address: address, + contract_address: Some(address), calldata: LoadnextContractExecutionParams { reads: 100, writes: 100, @@ -110,7 +112,7 @@ fn test_vm_loadnext_rollbacks() { let loadnext_tx_2 = account.get_l2_tx_for_execute( Execute { - contract_address: address, + contract_address: Some(address), calldata: LoadnextContractExecutionParams { reads: 100, writes: 100, @@ -224,14 +226,11 @@ fn test_layered_rollback() { vm.vm.make_snapshot(); vm.vm.push_transaction(loadnext_transaction.clone()); - vm.vm.inspect( - MaxRecursionTracer { - max_recursion_depth: 15, - } - .into_tracer_pointer() - .into(), - VmExecutionMode::OneTx, - ); + let tracer = MaxRecursionTracer { + max_recursion_depth: 15, + } + .into_tracer_pointer(); + vm.vm.inspect(&mut tracer.into(), VmExecutionMode::OneTx); let nonce_val2 = vm .vm @@ -261,3 +260,37 @@ fn test_layered_rollback() { let result = vm.vm.execute(VmExecutionMode::OneTx); assert!(!result.result.is_failed(), "transaction must not fail"); } + +#[test] +fn rollback_in_call_mode() { + let counter_bytecode = read_test_contract(); + let counter_address = Address::repeat_byte(1); + + let mut vm = 
VmTesterBuilder::new(HistoryEnabled) + .with_empty_in_memory_storage() + .with_execution_mode(TxExecutionMode::EthCall) + .with_custom_contracts(vec![(counter_bytecode, counter_address, false)]) + .with_random_rich_accounts(1) + .build(); + let account = &mut vm.rich_accounts[0]; + let tx = account.get_test_contract_transaction(counter_address, true, None, false, TxType::L2); + + let (compression_result, vm_result) = vm + .vm + .execute_transaction_with_bytecode_compression(tx, true); + compression_result.unwrap(); + assert_matches!( + vm_result.result, + ExecutionResult::Revert { output } + if output.to_string().contains("This method always reverts") + ); + + let storage_logs = vm + .vm + .get_current_execution_state() + .deduplicated_storage_logs; + assert!( + storage_logs.iter().all(|log| !log.is_write()), + "{storage_logs:?}" + ); +} diff --git a/core/lib/multivm/src/versions/vm_latest/tests/sekp256r1.rs b/core/lib/multivm/src/versions/vm_latest/tests/sekp256r1.rs index 6cc731a1387..93be9506a3b 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/sekp256r1.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/sekp256r1.rs @@ -48,7 +48,7 @@ fn test_sekp256r1() { let tx = account.get_l2_tx_for_execute( Execute { - contract_address: P256VERIFY_PRECOMPILE_ADDRESS, + contract_address: Some(P256VERIFY_PRECOMPILE_ADDRESS), calldata: [digest, encoded_r, encoded_s, x, y].concat(), value: U256::zero(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_latest/tests/storage.rs b/core/lib/multivm/src/versions/vm_latest/tests/storage.rs index 0fe0b0220fa..126d174a646 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/storage.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/storage.rs @@ -50,7 +50,7 @@ fn test_storage(txs: Vec) -> u32 { let tx = account.get_l2_tx_for_execute( Execute { - contract_address: test_contract_address, + contract_address: Some(test_contract_address), calldata, value: 0.into(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_latest/tests/tracing_execution_error.rs b/core/lib/multivm/src/versions/vm_latest/tests/tracing_execution_error.rs index 58c5ef77dc4..2db37881352 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/tracing_execution_error.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/tracing_execution_error.rs @@ -27,7 +27,7 @@ fn test_tracing_of_execution_errors() { let tx = account.get_l2_tx_for_execute( Execute { - contract_address, + contract_address: Some(contract_address), calldata: get_execute_error_calldata(), value: Default::default(), factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_latest/tests/transfer.rs b/core/lib/multivm/src/versions/vm_latest/tests/transfer.rs index 31f6c3291ef..2c380623636 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/transfer.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/transfer.rs @@ -73,7 +73,7 @@ fn test_send_or_transfer(test_option: TestOptions) { let account = &mut vm.rich_accounts[0]; let tx = account.get_l2_tx_for_execute( Execute { - contract_address: test_contract_address, + contract_address: Some(test_contract_address), calldata, value: U256::zero(), factory_deps: vec![], @@ -169,7 +169,7 @@ fn test_reentrancy_protection_send_or_transfer(test_option: TestOptions) { let account = &mut vm.rich_accounts[0]; let tx1 = account.get_l2_tx_for_execute( Execute { - contract_address: reentrant_recipeint_address, + contract_address: Some(reentrant_recipeint_address), calldata: reentrant_recipient_abi .function("setX") 
.unwrap() @@ -190,7 +190,7 @@ fn test_reentrancy_protection_send_or_transfer(test_option: TestOptions) { let tx2 = account.get_l2_tx_for_execute( Execute { - contract_address: test_contract_address, + contract_address: Some(test_contract_address), calldata, value, factory_deps: vec![], diff --git a/core/lib/multivm/src/versions/vm_latest/tests/upgrade.rs b/core/lib/multivm/src/versions/vm_latest/tests/upgrade.rs index 7c3ebff4a77..d85a504de40 100644 --- a/core/lib/multivm/src/versions/vm_latest/tests/upgrade.rs +++ b/core/lib/multivm/src/versions/vm_latest/tests/upgrade.rs @@ -276,7 +276,7 @@ fn get_forced_deploy_tx(deployment: &[ForceDeployment]) -> Transaction { .expect("failed to encode parameters"); let execute = Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, + contract_address: Some(CONTRACT_DEPLOYER_ADDRESS), calldata, factory_deps: vec![], value: U256::zero(), @@ -326,7 +326,7 @@ fn get_complex_upgrade_tx( .unwrap(); let execute = Execute { - contract_address: COMPLEX_UPGRADER_ADDRESS, + contract_address: Some(COMPLEX_UPGRADER_ADDRESS), calldata: complex_upgrader_calldata, factory_deps: vec![], value: U256::zero(), diff --git a/core/lib/multivm/src/versions/vm_latest/types/internals/transaction_data.rs b/core/lib/multivm/src/versions/vm_latest/types/internals/transaction_data.rs index 502be0dc22c..2ec86eb3cea 100644 --- a/core/lib/multivm/src/versions/vm_latest/types/internals/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_latest/types/internals/transaction_data.rs @@ -22,7 +22,7 @@ use crate::vm_latest::{ pub(crate) struct TransactionData { pub(crate) tx_type: u8, pub(crate) from: Address, - pub(crate) to: Address, + pub(crate) to: Option<Address>
, pub(crate) gas_limit: U256, pub(crate) pubdata_price_limit: U256, pub(crate) max_fee_per_gas: U256, @@ -169,7 +169,7 @@ impl TransactionData { encode(&[Token::Tuple(vec![ Token::Uint(U256::from_big_endian(&self.tx_type.to_be_bytes())), Token::Address(self.from), - Token::Address(self.to), + Token::Address(self.to.unwrap_or_default()), Token::Uint(self.gas_limit), Token::Uint(self.pubdata_price_limit), Token::Uint(self.max_fee_per_gas), @@ -305,7 +305,7 @@ mod tests { let transaction = TransactionData { tx_type: 113, from: Address::random(), - to: Address::random(), + to: Some(Address::random()), gas_limit: U256::from(1u32), pubdata_price_limit: U256::from(1u32), max_fee_per_gas: U256::from(1u32), diff --git a/core/lib/multivm/src/versions/vm_latest/vm.rs b/core/lib/multivm/src/versions/vm_latest/vm.rs index a445a1d5140..8ccd600a79e 100644 --- a/core/lib/multivm/src/versions/vm_latest/vm.rs +++ b/core/lib/multivm/src/versions/vm_latest/vm.rs @@ -2,8 +2,9 @@ use circuit_sequencer_api_1_5_0::sort_storage_access::sort_storage_access_querie use zksync_types::{ l2_to_l1_log::{SystemL2ToL1Log, UserL2ToL1Log}, vm::VmVersion, - Transaction, + Transaction, H256, }; +use zksync_utils::u256_to_h256; use crate::{ glue::GlueInto, @@ -12,7 +13,7 @@ use crate::{ BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, - VmMemoryMetrics, + VmTrackingContracts, }, utils::events::extract_l2tol1logs_from_l1_messenger, vm_latest::{ @@ -125,7 +126,7 @@ impl VmInterface for Vm { /// Execute VM with custom tracers. fn inspect( &mut self, - tracer: Self::TracerDispatcher, + tracer: &mut Self::TracerDispatcher, execution_mode: VmExecutionMode, ) -> VmExecutionResultAndLogs { self.inspect_inner(tracer, execution_mode, None) @@ -138,7 +139,7 @@ impl VmInterface for Vm { /// Inspect transaction with optional bytecode compression. 
fn inspect_transaction_with_bytecode_compression( &mut self, - tracer: Self::TracerDispatcher, + tracer: &mut Self::TracerDispatcher, tx: Transaction, with_compression: bool, ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { @@ -160,12 +161,8 @@ impl VmInterface for Vm { } } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - self.record_vm_memory_metrics_inner() - } - fn finish_batch(&mut self) -> FinishedL1Batch { - let result = self.inspect(TracerDispatcher::default(), VmExecutionMode::Batch); + let result = self.inspect(&mut TracerDispatcher::default(), VmExecutionMode::Batch); let execution_state = self.get_current_execution_state(); let bootloader_memory = self.bootloader_state.bootloader_memory(); FinishedL1Batch { @@ -238,3 +235,12 @@ impl VmInterfaceHistoryEnabled for Vm { self.snapshots.pop(); } } + +impl VmTrackingContracts for Vm { + fn used_contract_hashes(&self) -> Vec<H256> { + self.get_used_contracts() + .into_iter() + .map(u256_to_h256) + .collect() + } +} diff --git a/core/lib/multivm/src/versions/vm_m5/test_utils.rs b/core/lib/multivm/src/versions/vm_m5/test_utils.rs index 785eb49835f..d7c0dfb9f6d 100644 --- a/core/lib/multivm/src/versions/vm_m5/test_utils.rs +++ b/core/lib/multivm/src/versions/vm_m5/test_utils.rs @@ -151,7 +151,7 @@ pub fn get_create_execute(code: &[u8], calldata: &[u8]) -> Execute { .expect("failed to encode parameters"); Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, + contract_address: Some(CONTRACT_DEPLOYER_ADDRESS), calldata, factory_deps: vec![code.to_vec()], value: U256::zero(), diff --git a/core/lib/multivm/src/versions/vm_m5/transaction_data.rs b/core/lib/multivm/src/versions/vm_m5/transaction_data.rs index 7ef739fd5bf..b64e3f77018 100644 --- a/core/lib/multivm/src/versions/vm_m5/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_m5/transaction_data.rs @@ -22,7 +22,7 @@ const L1_TX_TYPE: u8 = 255; pub struct TransactionData { pub tx_type: u8, pub from: Address, - pub to: Address, + pub to: Option<Address>
, pub gas_limit: U256, pub pubdata_price_limit: U256, pub max_fee_per_gas: U256, @@ -144,7 +144,7 @@ impl TransactionData { encode(&[Token::Tuple(vec![ Token::Uint(U256::from_big_endian(&self.tx_type.to_be_bytes())), Token::Address(self.from), - Token::Address(self.to), + Token::Address(self.to.unwrap_or_default()), Token::Uint(self.gas_limit), Token::Uint(self.pubdata_price_limit), Token::Uint(self.max_fee_per_gas), @@ -479,7 +479,7 @@ mod tests { let transaction = TransactionData { tx_type: 113, from: Address::random(), - to: Address::random(), + to: Some(Address::random()), gas_limit: U256::from(1u32), pubdata_price_limit: U256::from(1u32), max_fee_per_gas: U256::from(1u32), diff --git a/core/lib/multivm/src/versions/vm_m5/vm.rs b/core/lib/multivm/src/versions/vm_m5/vm.rs index df4baccaf15..5a26506f346 100644 --- a/core/lib/multivm/src/versions/vm_m5/vm.rs +++ b/core/lib/multivm/src/versions/vm_m5/vm.rs @@ -50,6 +50,10 @@ impl Vm { _phantom: Default::default(), } } + + pub(crate) fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { + VmMemoryMetrics::default() + } } impl VmInterface for Vm { @@ -66,7 +70,7 @@ impl VmInterface for Vm { fn inspect( &mut self, - _tracer: Self::TracerDispatcher, + _tracer: &mut Self::TracerDispatcher, execution_mode: VmExecutionMode, ) -> VmExecutionResultAndLogs { match execution_mode { @@ -90,7 +94,7 @@ impl VmInterface for Vm { fn inspect_transaction_with_bytecode_compression( &mut self, - _tracer: Self::TracerDispatcher, + _tracer: &mut Self::TracerDispatcher, tx: Transaction, _with_compression: bool, ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { @@ -100,20 +104,10 @@ impl VmInterface for Vm { self.system_env.execution_mode.glue_into(), ); // Bytecode compression isn't supported - (Ok(vec![].into()), self.inspect((), VmExecutionMode::OneTx)) - } - - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - VmMemoryMetrics { - event_sink_inner: 0, - event_sink_history: 0, - memory_inner: 0, - memory_history: 0, - decommittment_processor_inner: 0, - decommittment_processor_history: 0, - storage_inner: 0, - storage_history: 0, - } + ( + Ok(vec![].into()), + self.inspect(&mut (), VmExecutionMode::OneTx), + ) } fn finish_batch(&mut self) -> FinishedL1Batch { diff --git a/core/lib/multivm/src/versions/vm_m6/test_utils.rs b/core/lib/multivm/src/versions/vm_m6/test_utils.rs index ecad7d911b4..4bd39bc56dd 100644 --- a/core/lib/multivm/src/versions/vm_m6/test_utils.rs +++ b/core/lib/multivm/src/versions/vm_m6/test_utils.rs @@ -151,7 +151,7 @@ pub fn get_create_execute(code: &[u8], calldata: &[u8]) -> Execute { .expect("failed to encode parameters"); Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, + contract_address: Some(CONTRACT_DEPLOYER_ADDRESS), calldata, factory_deps: vec![code.to_vec()], value: U256::zero(), diff --git a/core/lib/multivm/src/versions/vm_m6/transaction_data.rs b/core/lib/multivm/src/versions/vm_m6/transaction_data.rs index 99ce4671c29..a8f80ea3255 100644 --- a/core/lib/multivm/src/versions/vm_m6/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_m6/transaction_data.rs @@ -23,7 +23,7 @@ pub(crate) const L1_TX_TYPE: u8 = 255; pub struct TransactionData { pub tx_type: u8, pub from: Address, - pub to: Address, + pub to: Option<Address>
, pub gas_limit: U256, pub pubdata_price_limit: U256, pub max_fee_per_gas: U256, @@ -171,7 +171,7 @@ impl TransactionData { encode(&[Token::Tuple(vec![ Token::Uint(U256::from_big_endian(&self.tx_type.to_be_bytes())), Token::Address(self.from), - Token::Address(self.to), + Token::Address(self.to.unwrap_or_default()), Token::Uint(self.gas_limit), Token::Uint(self.pubdata_price_limit), Token::Uint(self.max_fee_per_gas), @@ -592,7 +592,7 @@ mod tests { let transaction = TransactionData { tx_type: 113, from: Address::random(), - to: Address::random(), + to: Some(Address::random()), gas_limit: U256::from(1u32), pubdata_price_limit: U256::from(1u32), max_fee_per_gas: U256::from(1u32), diff --git a/core/lib/multivm/src/versions/vm_m6/vm.rs b/core/lib/multivm/src/versions/vm_m6/vm.rs index 7e19076a520..1fdc8ae64f8 100644 --- a/core/lib/multivm/src/versions/vm_m6/vm.rs +++ b/core/lib/multivm/src/versions/vm_m6/vm.rs @@ -50,6 +50,23 @@ impl Vm { system_env, } } + + pub(crate) fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { + VmMemoryMetrics { + event_sink_inner: self.vm.state.event_sink.get_size(), + event_sink_history: self.vm.state.event_sink.get_history_size(), + memory_inner: self.vm.state.memory.get_size(), + memory_history: self.vm.state.memory.get_history_size(), + decommittment_processor_inner: self.vm.state.decommittment_processor.get_size(), + decommittment_processor_history: self + .vm + .state + .decommittment_processor + .get_history_size(), + storage_inner: self.vm.state.storage.get_size(), + storage_history: self.vm.state.storage.get_history_size(), + } + } } impl VmInterface for Vm { @@ -66,7 +83,7 @@ impl VmInterface for Vm { fn inspect( &mut self, - tracer: Self::TracerDispatcher, + tracer: &mut Self::TracerDispatcher, execution_mode: VmExecutionMode, ) -> VmExecutionResultAndLogs { if let Some(storage_invocations) = tracer.storage_invocations { @@ -106,7 +123,7 @@ impl VmInterface for Vm { fn inspect_transaction_with_bytecode_compression( &mut self, - tracer: Self::TracerDispatcher, + tracer: &mut Self::TracerDispatcher, tx: Transaction, with_compression: bool, ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { @@ -186,23 +203,6 @@ impl VmInterface for Vm { } } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - VmMemoryMetrics { - event_sink_inner: self.vm.state.event_sink.get_size(), - event_sink_history: self.vm.state.event_sink.get_history_size(), - memory_inner: self.vm.state.memory.get_size(), - memory_history: self.vm.state.memory.get_history_size(), - decommittment_processor_inner: self.vm.state.decommittment_processor.get_size(), - decommittment_processor_history: self - .vm - .state - .decommittment_processor - .get_history_size(), - storage_inner: self.vm.state.storage.get_size(), - storage_history: self.vm.state.storage.get_history_size(), - } - } - fn finish_batch(&mut self) -> FinishedL1Batch { self.vm .execute_till_block_end( diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/utils.rs index 7bd488f90a9..14c895d7a0b 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/bootloader_state/utils.rs @@ -133,8 +133,8 @@ pub(super) fn assemble_tx_meta(execution_mode: TxExecutionMode, execute_tx: bool // Set 0 byte (execution mode) output[0] = match execution_mode { TxExecutionMode::VerifyExecute => 0x00, - TxExecutionMode::EstimateFee { .. 
} => 0x00, - TxExecutionMode::EthCall { .. } => 0x02, + TxExecutionMode::EstimateFee => 0x00, + TxExecutionMode::EthCall => 0x02, }; // Set 31 byte (marker for tx execution) diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/execution.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/execution.rs index cadd183735e..8196760a621 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/execution.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/execution.rs @@ -1,3 +1,5 @@ +use std::mem; + use zk_evm_1_3_3::aux_structures::Timestamp; use crate::{ @@ -19,7 +21,7 @@ use crate::{ impl Vm { pub(crate) fn inspect_inner( &mut self, - dispatcher: TracerDispatcher, + dispatcher: &mut TracerDispatcher, execution_mode: VmExecutionMode, ) -> VmExecutionResultAndLogs { let mut enable_refund_tracer = false; @@ -37,7 +39,7 @@ impl Vm { /// Collect the result from the default tracers. fn inspect_and_collect_results( &mut self, - dispatcher: TracerDispatcher, + dispatcher: &mut TracerDispatcher, execution_mode: VmExecutionMode, with_refund_tracer: bool, ) -> (VmExecutionStopReason, VmExecutionResultAndLogs) { @@ -47,7 +49,7 @@ impl Vm { DefaultExecutionTracer::new( self.system_env.default_validation_computational_gas_limit, execution_mode, - dispatcher, + mem::take(dispatcher), self.storage.clone(), refund_tracers, ); @@ -81,6 +83,7 @@ impl Vm { ); let result = tx_tracer.result_tracer.into_result(); + *dispatcher = tx_tracer.dispatcher; let result = VmExecutionResultAndLogs { result, diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/statistics.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/statistics.rs index dcda1457b76..a73c212db29 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/statistics.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/implementation/statistics.rs @@ -56,7 +56,7 @@ impl Vm { } /// Returns the info about all oracles' sizes. 
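The switch from `tracer: Self::TracerDispatcher` to `tracer: &mut Self::TracerDispatcher` above is implemented with a take-and-restore pattern: `mem::take(dispatcher)` moves the dispatcher out of the caller's slot (leaving a default in its place), the VM consumes it by value, and `*dispatcher = tx_tracer.dispatcher` writes the post-run state back so the caller can still inspect its tracers afterwards. A generic sketch of the same idea (names here are illustrative, not from the codebase):

use std::mem;

// The callee needs the value by ownership, but the caller keeps its binding
// and sees whatever state the callee accumulated.
fn run_with_taken<T: Default>(slot: &mut T, run: impl FnOnce(T) -> T) {
    let owned = mem::take(slot); // leaves `T::default()` behind in `slot`
    *slot = run(owned); // restore the (possibly updated) value
}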
- pub(crate) fn record_vm_memory_metrics_inner(&self) -> VmMemoryMetrics { + pub(crate) fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { VmMemoryMetrics { event_sink_inner: self.state.event_sink.get_size(), event_sink_history: self.state.event_sink.get_history_size(), diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/bootloader.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/bootloader.rs deleted file mode 100644 index 23b250d485b..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/bootloader.rs +++ /dev/null @@ -1,54 +0,0 @@ -use zksync_types::U256; - -use crate::interface::{Halt, TxExecutionMode, VmExecutionMode}; -use crate::vm_refunds_enhancement::constants::BOOTLOADER_HEAP_PAGE; -use crate::vm_refunds_enhancement::tests::tester::VmTesterBuilder; -use crate::vm_refunds_enhancement::tests::utils::{ - get_bootloader, verify_required_memory, BASE_SYSTEM_CONTRACTS, -}; - -use crate::interface::ExecutionResult; -use crate::vm_refunds_enhancement::HistoryEnabled; - -#[test] -fn test_dummy_bootloader() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!result.result.is_failed()); - - let correct_first_cell = U256::from_str_radix("123123123", 16).unwrap(); - verify_required_memory( - &vm.vm.state, - vec![(correct_first_cell, BOOTLOADER_HEAP_PAGE, 0)], - ); -} - -#[test] -fn test_bootloader_out_of_gas() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_gas_limit(10) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let res = vm.vm.execute(VmExecutionMode::Batch); - - assert_matches!( - res.result, - ExecutionResult::Halt { - reason: Halt::BootloaderOutOfGas - } - ); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/bytecode_publishing.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/bytecode_publishing.rs deleted file mode 100644 index b2c126dea00..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/bytecode_publishing.rs +++ /dev/null @@ -1,37 +0,0 @@ -use zksync_types::event::extract_long_l2_to_l1_messages; -use zksync_utils::bytecode::compress_bytecode; - -use crate::interface::{TxExecutionMode, VmExecutionMode}; -use crate::vm_refunds_enhancement::tests::tester::{DeployContractsTx, TxType, VmTesterBuilder}; -use crate::vm_refunds_enhancement::tests::utils::read_test_contract; -use crate::vm_refunds_enhancement::HistoryEnabled; - -#[test] -fn test_bytecode_publishing() { - // In this test, we aim to ensure that the contents of the compressed bytecodes - // are included as part of the L2->L1 long messages - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let compressed_bytecode = compress_bytecode(&counter).unwrap(); - - let 
DeployContractsTx { tx, .. } = account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - - let state = vm.vm.get_current_execution_state(); - let long_messages = extract_long_l2_to_l1_messages(&state.events); - assert!( - long_messages.contains(&compressed_bytecode), - "Bytecode not published" - ); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/call_tracer.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/call_tracer.rs deleted file mode 100644 index fb2d3389407..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/call_tracer.rs +++ /dev/null @@ -1,87 +0,0 @@ -use crate::interface::{TxExecutionMode, VmExecutionMode}; -use crate::vm_refunds_enhancement::constants::BLOCK_GAS_LIMIT; -use crate::vm_refunds_enhancement::tests::tester::VmTesterBuilder; -use crate::vm_refunds_enhancement::tests::utils::{read_max_depth_contract, read_test_contract}; -use crate::vm_refunds_enhancement::{CallTracer, HistoryEnabled}; -use once_cell::sync::OnceCell; -use std::sync::Arc; -use zksync_types::{Address, Execute}; - -// This test is ultra slow, so it's ignored by default. -#[test] -#[ignore] -fn test_max_depth() { - let contarct = read_max_depth_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contarct, address, true)]) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: vec![], - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result = Arc::new(OnceCell::new()); - let call_tracer = CallTracer::new(result.clone(), HistoryEnabled); - vm.vm.push_transaction(tx); - let res = vm - .vm - .inspect(vec![Box::new(call_tracer)], VmExecutionMode::OneTx); - assert!(result.get().is_some()); - assert!(res.result.is_failed()); -} - -#[test] -fn test_basic_behavior() { - let contarct = read_test_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contarct, address, true)]) - .build(); - - let increment_by_6_calldata = - "7cf5dab00000000000000000000000000000000000000000000000000000000000000006"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: hex::decode(increment_by_6_calldata).unwrap(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result = Arc::new(OnceCell::new()); - let call_tracer = CallTracer::new(result.clone(), HistoryEnabled); - vm.vm.push_transaction(tx); - let res = vm - .vm - .inspect(vec![Box::new(call_tracer)], VmExecutionMode::OneTx); - - let call_tracer_result = result.get().unwrap(); - - assert_eq!(call_tracer_result.len(), 1); - // Expect that there are a plenty of subcalls underneath. 
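The deleted `call_tracer` tests above share results out of the tracer through an `Arc<OnceCell<_>>`: the tracer receives a clone of the cell and sets it once when execution finishes, while the test keeps the original handle and reads it back. A self-contained sketch of that handoff, with `Vec<u32>` as a stand-in for the actual call-trace payload type:

use std::sync::Arc;
use once_cell::sync::OnceCell;

fn oncecell_handoff_sketch() {
    let result: Arc<OnceCell<Vec<u32>>> = Arc::new(OnceCell::new());
    let tracer_handle = Arc::clone(&result); // this clone would be moved into the tracer
    tracer_handle.set(vec![1, 2, 3]).unwrap(); // the tracer sets it once, at run end
    assert!(result.get().is_some()); // the test observes it through its own handle
}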
- let subcall = &call_tracer_result[0].calls; - assert!(subcall.len() > 10); - assert!(!res.result.is_failed()); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/default_aa.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/default_aa.rs deleted file mode 100644 index 92e043ae96f..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/default_aa.rs +++ /dev/null @@ -1,70 +0,0 @@ -use zksync_system_constants::L2_ETH_TOKEN_ADDRESS; -use zksync_types::system_contracts::{DEPLOYMENT_NONCE_INCREMENT, TX_NONCE_INCREMENT}; - -use zksync_types::{get_code_key, get_known_code_key, get_nonce_key, AccountTreeId, U256}; -use zksync_utils::u256_to_h256; - -use crate::interface::{TxExecutionMode, VmExecutionMode}; -use crate::vm_refunds_enhancement::tests::tester::{DeployContractsTx, TxType, VmTesterBuilder}; -use crate::vm_refunds_enhancement::tests::utils::{ - get_balance, read_test_contract, verify_required_storage, -}; -use crate::vm_refunds_enhancement::HistoryEnabled; - -#[test] -fn test_default_aa_interaction() { - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let DeployContractsTx { - tx, - bytecode_hash, - address, - } = account.get_deploy_tx(&counter, None, TxType::L2); - let maximal_fee = tx.gas_limit() * vm.vm.batch_env.base_fee(); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - vm.vm.get_current_execution_state(); - - // Both deployment and ordinary nonce should be incremented by one. - let account_nonce_key = get_nonce_key(&account.address); - let expected_nonce = TX_NONCE_INCREMENT + DEPLOYMENT_NONCE_INCREMENT; - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&bytecode_hash); - - // The contract should be deployed successfully. 
- let account_code_key = get_code_key(&address); - - let expected_slots = vec![ - (u256_to_h256(expected_nonce), account_nonce_key), - (u256_to_h256(U256::from(1u32)), known_codes_key), - (bytecode_hash, account_code_key), - ]; - - verify_required_storage(&vm.vm.state, expected_slots); - - let expected_fee = maximal_fee - - U256::from(result.refunds.gas_refunded) * U256::from(vm.vm.batch_env.base_fee()); - let operator_balance = get_balance( - AccountTreeId::new(L2_ETH_TOKEN_ADDRESS), - &vm.fee_account, - vm.vm.state.storage.storage.get_ptr(), - ); - - assert_eq!( - operator_balance, expected_fee, - "Operator did not receive his fee" - ); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/gas_limit.rs deleted file mode 100644 index 0ea1669cf21..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/gas_limit.rs +++ /dev/null @@ -1,47 +0,0 @@ -use zksync_types::fee::Fee; -use zksync_types::Execute; - -use crate::vm_refunds_enhancement::constants::{ - BOOTLOADER_HEAP_PAGE, TX_DESCRIPTION_OFFSET, TX_GAS_LIMIT_OFFSET, -}; -use crate::vm_refunds_enhancement::tests::tester::VmTesterBuilder; - -use crate::interface::TxExecutionMode; -use crate::vm_refunds_enhancement::HistoryDisabled; - -/// Checks that `TX_GAS_LIMIT_OFFSET` constant is correct. -#[test] -fn test_tx_gas_limit_offset() { - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let gas_limit = 9999.into(); - let tx = vm.rich_accounts[0].get_l2_tx_for_execute( - Execute { - contract_address: Default::default(), - calldata: vec![], - value: Default::default(), - factory_deps: None, - }, - Some(Fee { - gas_limit, - ..Default::default() - }), - ); - - vm.vm.push_transaction(tx); - - let gas_limit_from_memory = vm - .vm - .state - .memory - .read_slot( - BOOTLOADER_HEAP_PAGE as usize, - TX_DESCRIPTION_OFFSET + TX_GAS_LIMIT_OFFSET, - ) - .value; - assert_eq!(gas_limit_from_memory, gas_limit); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/get_used_contracts.rs deleted file mode 100644 index 8c121db3e43..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/get_used_contracts.rs +++ /dev/null @@ -1,104 +0,0 @@ -use std::collections::{HashMap, HashSet}; - -use itertools::Itertools; - -use crate::interface::storage::WriteStorage; -use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; -use zksync_test_account::Account; -use zksync_types::{Execute, U256}; -use zksync_utils::bytecode::hash_bytecode; -use zksync_utils::h256_to_u256; - -use crate::interface::{TxExecutionMode, VmExecutionMode}; -use crate::vm_refunds_enhancement::tests::tester::{TxType, VmTesterBuilder}; -use crate::vm_refunds_enhancement::tests::utils::{read_test_contract, BASE_SYSTEM_CONTRACTS}; -use crate::vm_refunds_enhancement::{HistoryDisabled, HistoryMode, Vm}; - -#[test] -fn test_get_used_contracts() { - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - assert!(known_bytecodes_without_aa_code(&vm.vm).is_empty()); - - // create and push and execute some not-empty factory deps transaction with success status - // to check that get_used_contracts() updates - let contract_code = 
read_test_contract(); - let mut account = Account::random(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 0 }); - vm.vm.push_transaction(tx.tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - assert!(vm - .vm - .get_used_contracts() - .contains(&h256_to_u256(tx.bytecode_hash))); - - // Note: Default_AA will be in the list of used contracts if l2 tx is used - assert_eq!( - vm.vm - .get_used_contracts() - .into_iter() - .collect::<HashSet<U256>>(), - known_bytecodes_without_aa_code(&vm.vm) - .keys() - .cloned() - .collect::<HashSet<U256>>() - ); - - // create push and execute some non-empty factory deps transaction that fails - // (known_bytecodes will be updated but we expect get_used_contracts() to not be updated) - - let calldata = [1, 2, 3]; - let big_calldata: Vec<u8> = calldata - .iter() - .cycle() - .take(calldata.len() * 1024) - .cloned() - .collect(); - let account2 = Account::random(); - let tx2 = account2.get_l1_tx( - Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, - calldata: big_calldata, - value: Default::default(), - factory_deps: Some(vec![vec![1; 32]]), - }, - 1, - ); - - vm.vm.push_transaction(tx2.clone()); - - let res2 = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(res2.result.is_failed()); - - for factory_dep in tx2.execute.factory_deps.unwrap() { - let hash = hash_bytecode(&factory_dep); - let hash_to_u256 = h256_to_u256(hash); - assert!(known_bytecodes_without_aa_code(&vm.vm) - .keys() - .contains(&hash_to_u256)); - assert!(!vm.vm.get_used_contracts().contains(&hash_to_u256)); - } -} - -fn known_bytecodes_without_aa_code<S: WriteStorage, H: HistoryMode>( - vm: &Vm<S, H>, -) -> HashMap<U256, Vec<U256>> { - let mut known_bytecodes_without_aa_code = vm - .state - .decommittment_processor - .known_bytecodes - .inner() - .clone(); - - known_bytecodes_without_aa_code - .remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash)) - .unwrap(); - - known_bytecodes_without_aa_code -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/invalid_bytecode.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/invalid_bytecode.rs deleted file mode 100644 index 88ed141630a..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/invalid_bytecode.rs +++ /dev/null @@ -1,120 +0,0 @@ -use zksync_types::H256; -use zksync_utils::h256_to_u256; - -use crate::vm_refunds_enhancement::tests::tester::VmTesterBuilder; -use crate::vm_refunds_enhancement::types::inputs::system_env::TxExecutionMode; -use crate::vm_refunds_enhancement::{HistoryEnabled, TxRevertReason}; - -// TODO this test requires a lot of hacks for bypassing the bytecode checks in the VM. -// Port it later, it's not significant.
for now - -#[test] -fn test_invalid_bytecode() { - let mut vm_builder = VmTesterBuilder::new(HistoryEnabled) - .with_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1); - let mut storage = vm_builder.take_storage(); - let mut vm = vm_builder.build(&mut storage); - - let block_gas_per_pubdata = vm_test_env - .block_context - .context - .block_gas_price_per_pubdata(); - - let mut test_vm_with_custom_bytecode_hash = - |bytecode_hash: H256, expected_revert_reason: Option| { - let mut oracle_tools = - OracleTools::new(vm_test_env.storage_ptr.as_mut(), HistoryEnabled); - - let (encoded_tx, predefined_overhead) = get_l1_tx_with_custom_bytecode_hash( - h256_to_u256(bytecode_hash), - block_gas_per_pubdata as u32, - ); - - run_vm_with_custom_factory_deps( - &mut oracle_tools, - vm_test_env.block_context.context, - &vm_test_env.block_properties, - encoded_tx, - predefined_overhead, - expected_revert_reason, - ); - }; - - let failed_to_mark_factory_deps = |msg: &str, data: Vec| { - TxRevertReason::FailedToMarkFactoryDependencies(VmRevertReason::General { - msg: msg.to_string(), - data, - }) - }; - - // Here we provide the correctly-formatted bytecode hash of - // odd length, so it should work. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - None, - ); - - // Here we provide correctly formatted bytecode of even length, so - // it should fail. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Code length in words must be odd", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 67, 111, 100, 101, 32, 108, 101, 110, - 103, 116, 104, 32, 105, 110, 32, 119, 111, 114, 100, 115, 32, 109, 117, 115, 116, - 32, 98, 101, 32, 111, 100, 100, - ], - )), - ); - - // Here we provide incorrectly formatted bytecode of odd length, so - // it should fail. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Incorrectly formatted bytecodeHash", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, - 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, - 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - )), - ); - - // Here we provide incorrectly formatted bytecode of odd length, so - // it should fail. 
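Judging purely by the fixtures in this deleted test, the bytecode hash layout being probed is: byte 0 = version (must be 1), byte 1 = a marker byte (must be 0 here), and bytes 2..4 = the big-endian bytecode length in 32-byte words, which must be odd. A sketch of that check, reconstructed from the test data rather than from the VM sources:

// Returns true iff a hash passes the three checks the fixtures exercise.
fn bytecode_hash_looks_valid(hash: [u8; 32]) -> bool {
    let len_in_words = u16::from_be_bytes([hash[2], hash[3]]);
    hash[0] == 1 && hash[1] == 0 && len_in_words % 2 == 1
}

The first fixture ([1, 0, 0, 1, ...]) passes; the remaining ones fail on the even word count, the marker byte, and the version byte respectively.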
- test_vm_with_custom_bytecode_hash( - H256([ - 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Incorrectly formatted bytecodeHash", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, - 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, - 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - )), - ); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/is_write_initial.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/is_write_initial.rs deleted file mode 100644 index d7b96133000..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/is_write_initial.rs +++ /dev/null @@ -1,42 +0,0 @@ -use crate::interface::storage::ReadStorage; -use zksync_types::get_nonce_key; - -use crate::interface::{TxExecutionMode, VmExecutionMode}; -use crate::vm_refunds_enhancement::tests::tester::{Account, TxType, VmTesterBuilder}; -use crate::vm_refunds_enhancement::tests::utils::read_test_contract; -use crate::vm_refunds_enhancement::HistoryDisabled; - -#[test] -fn test_is_write_initial_behaviour() { - // In this test, we check result of `is_write_initial` at different stages. - // The main idea is to check that `is_write_initial` storage uses the correct cache for initial_writes and doesn't - // messed up it with the repeated writes during the one batch execution. - - let mut account = Account::random(); - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let nonce_key = get_nonce_key(&account.address); - // Check that the next write to the nonce key will be initial. - assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); - - let contract_code = read_test_contract(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L2).tx; - - vm.vm.push_transaction(tx); - vm.vm.execute(VmExecutionMode::OneTx); - - // Check that `is_write_initial` still returns true for the nonce key. 
- assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/l1_tx_execution.rs deleted file mode 100644 index 138879cd7ed..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/l1_tx_execution.rs +++ /dev/null @@ -1,125 +0,0 @@ -use zksync_system_constants::BOOTLOADER_ADDRESS; -use zksync_types::l2_to_l1_log::L2ToL1Log; -use zksync_types::storage_writes_deduplicator::StorageWritesDeduplicator; -use zksync_types::{get_code_key, get_known_code_key, U256}; -use zksync_utils::u256_to_h256; - -use crate::interface::{TxExecutionMode, VmExecutionMode}; -use crate::vm_refunds_enhancement::tests::tester::{TxType, VmTesterBuilder}; -use crate::vm_refunds_enhancement::tests::utils::{ - read_test_contract, verify_required_storage, BASE_SYSTEM_CONTRACTS, -}; -use crate::vm_refunds_enhancement::types::internals::TransactionData; -use crate::vm_refunds_enhancement::HistoryEnabled; - -#[test] -fn test_l1_tx_execution() { - // In this test, we try to execute a contract deployment from L1 - // Here instead of marking code hash via the bootloader means, we will be - // using L1->L2 communication, the same it would likely be done during the priority mode. - - // There are always at least 3 initial writes here, because we pay fees from l1: - // - totalSupply of ETH token - // - balance of the refund recipient - // - balance of the bootloader - // - tx_rollout hash - - let basic_initial_writes = 1; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let contract_code = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let deploy_tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 1 }); - let tx_data: TransactionData = deploy_tx.tx.clone().into(); - - let required_l2_to_l1_logs = vec![L2ToL1Log { - shard_id: 0, - is_service: true, - tx_number_in_block: 0, - sender: BOOTLOADER_ADDRESS, - key: tx_data.tx_hash(0.into()), - value: u256_to_h256(U256::from(1u32)), - }]; - - vm.vm.push_transaction(deploy_tx.tx.clone()); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&deploy_tx.bytecode_hash); - - // The contract should be deployed successfully. 
- let account_code_key = get_code_key(&deploy_tx.address); - - let expected_slots = vec![ - (u256_to_h256(U256::from(1u32)), known_codes_key), - (deploy_tx.bytecode_hash, account_code_key), - ]; - assert!(!res.result.is_failed()); - - verify_required_storage(&vm.vm.state, expected_slots); - - assert_eq!(res.logs.user_l2_to_l1_logs, required_l2_to_l1_logs); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - true, - None, - false, - TxType::L1 { serial_id: 0 }, - ); - vm.vm.push_transaction(tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - let storage_logs = res.logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - - // Tx panicked - assert_eq!(res.initial_storage_writes - basic_initial_writes, 0); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - false, - None, - false, - TxType::L1 { serial_id: 0 }, - ); - vm.vm.push_transaction(tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); - let storage_logs = res.logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We changed one slot inside contract - assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); - - // No repeated writes - let repeated_writes = res.repeated_storage_writes; - assert_eq!(res.repeated_storage_writes, 0); - - vm.vm.push_transaction(tx); - let storage_logs = vm.vm.execute(VmExecutionMode::OneTx).logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We do the same storage write, it will be deduplicated, so still 4 initial write and 0 repeated - assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); - assert_eq!(res.repeated_storage_writes, repeated_writes); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - false, - Some(10.into()), - false, - TxType::L1 { serial_id: 1 }, - ); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - // Method is not payable tx should fail - assert!(result.result.is_failed(), "The transaction should fail"); - - let res = StorageWritesDeduplicator::apply_on_empty_state(&result.logs.storage_logs); - // There are only basic initial writes - assert_eq!(res.initial_storage_writes - basic_initial_writes, 2); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/l2_blocks.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/l2_blocks.rs deleted file mode 100644 index 269b6cf396c..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/l2_blocks.rs +++ /dev/null @@ -1,498 +0,0 @@ -//! -//! Tests for the bootloader -//! The description for each of the tests can be found in the corresponding `.yul` file. -//! 
- -use crate::interface::{ExecutionResult, Halt, L2BlockEnv, TxExecutionMode, VmExecutionMode}; -use crate::vm_refunds_enhancement::constants::{ - BOOTLOADER_HEAP_PAGE, TX_OPERATOR_L2_BLOCK_INFO_OFFSET, TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO, -}; -use crate::vm_refunds_enhancement::tests::tester::default_l1_batch; -use crate::vm_refunds_enhancement::tests::tester::VmTesterBuilder; -use crate::vm_refunds_enhancement::utils::l2_blocks::get_l2_block_hash_key; -use crate::vm_refunds_enhancement::{HistoryEnabled, HistoryMode, Vm}; -use zk_evm_1_3_3::aux_structures::Timestamp; -use crate::interface::storage::{ReadStorage, WriteStorage}; -use zksync_system_constants::{ - CURRENT_VIRTUAL_BLOCK_INFO_POSITION, REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, -}; -use zksync_types::block::{pack_block_info, unpack_block_info}; -use zksync_types::{ - block::{legacy_miniblock_hash, miniblock_hash}, - get_code_key, AccountTreeId, Execute, ExecuteTransactionCommon, L1BatchNumber, L1TxCommonData, - MiniblockNumber, StorageKey, Transaction, H160, H256, SYSTEM_CONTEXT_ADDRESS, - SYSTEM_CONTEXT_BLOCK_INFO_POSITION, SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, U256, -}; -use zksync_utils::{h256_to_u256, u256_to_h256}; - -fn get_l1_noop() -> Transaction { - Transaction { - common_data: ExecuteTransactionCommon::L1(L1TxCommonData { - sender: H160::random(), - gas_limit: U256::from(2000000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute: Execute { - contract_address: H160::zero(), - calldata: vec![], - value: U256::zero(), - factory_deps: None, - }, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -#[test] -fn test_l2_block_initialization_timestamp() { - // This test checks that the L2 block initialization works correctly. - // Here we check that that the first block must have timestamp that is greater or equal to the timestamp - // of the current batch. - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - // Override the timestamp of the current miniblock to be 0. - vm.vm.bootloader_state.push_l2_block(L2BlockEnv { - number: 1, - timestamp: 0, - prev_block_hash: legacy_miniblock_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }); - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt {reason: Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())} - ); -} - -#[test] -fn test_l2_block_initialization_number_non_zero() { - // This test checks that the L2 block initialization works correctly. - // Here we check that the first miniblock number can not be zero. 
- - let l1_batch = default_l1_batch(L1BatchNumber(1)); - let first_l2_block = L2BlockEnv { - number: 0, - timestamp: l1_batch.timestamp, - prev_block_hash: legacy_miniblock_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 0, first_l2_block, timestamp); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt { - reason: Halt::FailedToSetL2Block( - "L2 block number is never expected to be zero".to_string() - ) - } - ); -} - -fn test_same_l2_block( - expected_error: Option<Halt>, - override_timestamp: Option<u64>, - override_prev_block_hash: Option<H256>, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - vm.vm.push_transaction(l1_tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!res.result.is_failed()); - - let mut current_l2_block = vm.vm.batch_env.first_l2_block; - - if let Some(timestamp) = override_timestamp { - current_l2_block.timestamp = timestamp; - } - if let Some(prev_block_hash) = override_prev_block_hash { - current_l2_block.prev_block_hash = prev_block_hash; - } - - if (None, None) == (override_timestamp, override_prev_block_hash) { - current_l2_block.max_virtual_blocks_to_create = 0; - } - - vm.vm.push_transaction(l1_tx); - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 1, current_l2_block, timestamp); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_same_l2_block() { - // This test aims to test the case when there are multiple transactions inside the same L2 block.
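The `test_first_in_batch` helper further below seeds the `SYSTEM_CONTEXT` storage slots with `pack_block_info`, which packs a (block number, timestamp) pair into a single 256-bit word; `unpack_block_info` reverses it. A sketch of the packing, assuming the number sits in the upper 128 bits and the timestamp in the lower 128 (consistent with how the pair is written and read back in these tests):

use zksync_types::U256;

fn pack_block_info_sketch(number: u64, timestamp: u64) -> U256 {
    (U256::from(number) << 128) + U256::from(timestamp)
}

fn unpack_block_info_sketch(packed: U256) -> (u64, u64) {
    let number = (packed >> 128).low_u64(); // upper half, truncated to u64
    let timestamp = (packed % (U256::one() << 128)).low_u64(); // lower half
    (number, timestamp)
}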
- - // Case 1: Incorrect timestamp - test_same_l2_block( - Some(Halt::FailedToSetL2Block( - "The timestamp of the same L2 block must be same".to_string(), - )), - Some(0), - None, - ); - - // Case 2: Incorrect previous block hash - test_same_l2_block( - Some(Halt::FailedToSetL2Block( - "The previous hash of the same L2 block must be same".to_string(), - )), - None, - Some(H256::zero()), - ); - - // Case 3: Correct continuation of the same L2 block - test_same_l2_block(None, None, None); -} - -fn test_new_l2_block( - first_l2_block: L2BlockEnv, - overriden_second_block_number: Option<u32>, - overriden_second_block_timestamp: Option<u64>, - overriden_second_block_prev_block_hash: Option<H256>, - expected_error: Option<Halt>, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - l1_batch.first_l2_block = first_l2_block; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - // Firstly we execute the first transaction - vm.vm.push_transaction(l1_tx.clone()); - vm.vm.execute(VmExecutionMode::OneTx); - - let mut second_l2_block = vm.vm.batch_env.first_l2_block; - second_l2_block.number += 1; - second_l2_block.timestamp += 1; - second_l2_block.prev_block_hash = vm.vm.bootloader_state.last_l2_block().get_hash(); - - if let Some(block_number) = overriden_second_block_number { - second_l2_block.number = block_number; - } - if let Some(timestamp) = overriden_second_block_timestamp { - second_l2_block.timestamp = timestamp; - } - if let Some(prev_block_hash) = overriden_second_block_prev_block_hash { - second_l2_block.prev_block_hash = prev_block_hash; - } - - vm.vm.bootloader_state.push_l2_block(second_l2_block); - - vm.vm.push_transaction(l1_tx); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_new_l2_block() { - // This test is aimed to cover potential issue - - let correct_first_block = L2BlockEnv { - number: 1, - timestamp: 1, - prev_block_hash: legacy_miniblock_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }; - - // Case 1: Block number increasing by more than 1 - test_new_l2_block( - correct_first_block, - Some(3), - None, - None, - Some(Halt::FailedToSetL2Block( - "Invalid new L2 block number".to_string(), - )), - ); - - // Case 2: Timestamp not increasing - test_new_l2_block( - correct_first_block, - None, - Some(1), - None, - Some(Halt::FailedToSetL2Block("The timestamp of the new L2 block must be greater than the timestamp of the previous L2 block".to_string())), - ); - - // Case 3: Incorrect previous block hash - test_new_l2_block( - correct_first_block, - None, - None, - Some(H256::zero()), - Some(Halt::FailedToSetL2Block( - "The current L2 block hash is incorrect".to_string(), - )), - ); - - // Case 4: Correct new block - test_new_l2_block(correct_first_block, None, None, None, None); -} - -#[allow(clippy::too_many_arguments)] -fn test_first_in_batch( - miniblock_timestamp: u64, - miniblock_number: u32, - pending_txs_hash: H256, - batch_timestamp: u64, - new_batch_timestamp: u64, - batch_number: u32, - proposed_block: L2BlockEnv, - expected_error: Option<Halt>, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.number
+= 1; - l1_batch.timestamp = new_batch_timestamp; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_tx = get_l1_noop(); - - // Setting the values provided. - let storage_ptr = vm.vm.state.storage.storage.get_ptr(); - let miniblock_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - ); - let pending_txs_hash_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, - ); - let batch_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_BLOCK_INFO_POSITION, - ); - let prev_block_hash_position = get_l2_block_hash_key(miniblock_number - 1); - - storage_ptr.borrow_mut().set_value( - miniblock_info_slot, - u256_to_h256(pack_block_info( - miniblock_number as u64, - miniblock_timestamp, - )), - ); - storage_ptr - .borrow_mut() - .set_value(pending_txs_hash_slot, pending_txs_hash); - storage_ptr.borrow_mut().set_value( - batch_info_slot, - u256_to_h256(pack_block_info(batch_number as u64, batch_timestamp)), - ); - storage_ptr.borrow_mut().set_value( - prev_block_hash_position, - legacy_miniblock_hash(MiniblockNumber(miniblock_number - 1)), - ); - - // In order to skip checks from the Rust side of the VM, we firstly use some definitely correct L2 block info. - // And then override it with the user-provided value - - let last_l2_block = vm.vm.bootloader_state.last_l2_block(); - let new_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: last_l2_block.timestamp + 1, - prev_block_hash: last_l2_block.get_hash(), - max_virtual_blocks_to_create: last_l2_block.max_virtual_blocks_to_create, - }; - - vm.vm.bootloader_state.push_l2_block(new_l2_block); - vm.vm.push_transaction(l1_tx); - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 0, proposed_block, timestamp); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_first_in_batch() { - test_first_in_batch( - 1, - 1, - H256::zero(), - 1, - 2, - 1, - L2BlockEnv { - number: 2, - timestamp: 2, - prev_block_hash: miniblock_hash( - MiniblockNumber(1), - 1, - legacy_miniblock_hash(MiniblockNumber(0)), - H256::zero(), - ), - max_virtual_blocks_to_create: 1, - }, - None, - ); - - test_first_in_batch( - 8, - 1, - H256::zero(), - 5, - 12, - 1, - L2BlockEnv { - number: 2, - timestamp: 9, - prev_block_hash: miniblock_hash(MiniblockNumber(1), 8, legacy_miniblock_hash(MiniblockNumber(0)), H256::zero()), - max_virtual_blocks_to_create: 1 - }, - Some(Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())), - ); -} - -#[test] -fn test_l2_block_upgrade() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - vm.vm - .state - .storage - .storage - .get_ptr() - .borrow_mut() - .set_value(get_code_key(&SYSTEM_CONTEXT_ADDRESS), H256::default()); - - let l1_tx = get_l1_noop(); - // Firstly we execute the first transaction - 
vm.vm.push_transaction(l1_tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "No revert reason expected"); - let result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!result.result.is_failed(), "No revert reason expected"); -} - -#[test] -fn test_l2_block_upgrade_ending() { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch.clone()) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - let storage = vm.storage.clone(); - - storage - .borrow_mut() - .set_value(get_code_key(&SYSTEM_CONTEXT_ADDRESS), H256::default()); - - vm.vm.push_transaction(l1_tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(!result.result.is_failed(), "No revert reason expected"); - - let virtual_block_info = storage.borrow_mut().read_value(&StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - CURRENT_VIRTUAL_BLOCK_INFO_POSITION, - )); - - let (virtual_block_number, virtual_block_timestamp) = - unpack_block_info(h256_to_u256(virtual_block_info)); - - assert_eq!(virtual_block_number as u32, l1_batch.first_l2_block.number); - assert_eq!(virtual_block_timestamp, l1_batch.first_l2_block.timestamp); - vm.vm.push_transaction(l1_tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "No revert reason expected"); - let result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!result.result.is_failed(), "No revert reason expected"); -} - -fn set_manual_l2_block_info( - vm: &mut Vm, - tx_number: usize, - block_info: L2BlockEnv, - timestamp: Timestamp, -) { - let fictive_miniblock_position = - TX_OPERATOR_L2_BLOCK_INFO_OFFSET + TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO * tx_number; - - vm.state.memory.populate_page( - BOOTLOADER_HEAP_PAGE as usize, - vec![ - (fictive_miniblock_position, block_info.number.into()), - (fictive_miniblock_position + 1, block_info.timestamp.into()), - ( - fictive_miniblock_position + 2, - h256_to_u256(block_info.prev_block_hash), - ), - ( - fictive_miniblock_position + 3, - block_info.max_virtual_blocks_to_create.into(), - ), - ], - timestamp, - ) -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/mod.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/mod.rs deleted file mode 100644 index ffb38dd3725..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/mod.rs +++ /dev/null @@ -1,20 +0,0 @@ -mod bootloader; -mod default_aa; -// TODO - fix this test -// mod invalid_bytecode; -mod bytecode_publishing; -mod call_tracer; -mod gas_limit; -mod get_used_contracts; -mod is_write_initial; -mod l1_tx_execution; -mod l2_blocks; -mod nonce_holder; -mod refunds; -mod require_eip712; -mod rollbacks; -mod simple_execution; -mod tester; -mod tracing_execution_error; -mod upgrade; -mod utils; diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/nonce_holder.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/nonce_holder.rs deleted file mode 100644 index 21959461906..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/nonce_holder.rs +++ /dev/null @@ -1,181 +0,0 @@ -use zksync_types::{Execute, Nonce}; - -use crate::interface::TxExecutionMode; -use crate::interface::VmRevertReason; -use crate::interface::{ExecutionResult, Halt, TxRevertReason, 
VmExecutionMode}; -use crate::vm_refunds_enhancement::tests::tester::{Account, VmTesterBuilder}; -use crate::vm_refunds_enhancement::tests::utils::read_nonce_holder_tester; -use crate::vm_refunds_enhancement::types::internals::TransactionData; -use crate::vm_refunds_enhancement::HistoryEnabled; - -pub enum NonceHolderTestMode { - SetValueUnderNonce, - IncreaseMinNonceBy5, - IncreaseMinNonceTooMuch, - LeaveNonceUnused, - IncreaseMinNonceBy1, - SwitchToArbitraryOrdering, -} - -impl From<NonceHolderTestMode> for u8 { - fn from(mode: NonceHolderTestMode) -> u8 { - match mode { - NonceHolderTestMode::SetValueUnderNonce => 0, - NonceHolderTestMode::IncreaseMinNonceBy5 => 1, - NonceHolderTestMode::IncreaseMinNonceTooMuch => 2, - NonceHolderTestMode::LeaveNonceUnused => 3, - NonceHolderTestMode::IncreaseMinNonceBy1 => 4, - NonceHolderTestMode::SwitchToArbitraryOrdering => 5, - } - } -} - -#[test] -fn test_nonce_holder() { - let mut account = Account::random(); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_custom_contracts(vec![( - read_nonce_holder_tester().to_vec(), - account.address, - true, - )]) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let mut run_nonce_test = |nonce: u32, - test_mode: NonceHolderTestMode, - error_message: Option<String>, - comment: &'static str| { - // In this test we have to reset the VM state after each case: once the bootloader fails during transaction validation, - // it will fail again and again. At the same time we have to keep the same storage, because we want to keep the nonce holder contract state. - // The easiest way in terms of lifetimes is to reuse vm_builder to achieve it. - vm.reset_state(true); - let mut transaction_data: TransactionData = account - .get_l2_tx_for_execute_with_nonce( - Execute { - contract_address: account.address, - calldata: vec![12], - value: Default::default(), - factory_deps: None, - }, - None, - Nonce(nonce), - ) - .into(); - - transaction_data.signature = vec![test_mode.into()]; - vm.vm.push_raw_transaction(transaction_data, 0, 0, true); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(msg) = error_message { - let expected_error = - TxRevertReason::Halt(Halt::ValidationFailed(VmRevertReason::General { - msg, - data: vec![], - })); - let ExecutionResult::Halt { reason } = result.result else { - panic!("Expected revert, got {:?}", result.result); - }; - assert_eq!( - reason.to_string(), - expected_error.to_string(), - "{}", - comment - ); - } else { - assert!(!result.result.is_failed(), "{}", comment); - } - }; - // Test 1: trying to set a value under a non-sequential nonce (the mode encoding is sketched below).
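// Hedged aside, not from the original file: each case below selects the NonceHolder
// tester's behavior by encoding the test mode as the first signature byte, matching
// the `From<NonceHolderTestMode> for u8` impl above.
let mode_byte: u8 = NonceHolderTestMode::SetValueUnderNonce.into();
assert_eq!(mode_byte, 0); // this byte ends up as `transaction_data.signature[0]`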
- run_nonce_test( - 1u32, - NonceHolderTestMode::SetValueUnderNonce, - Some("Previous nonce has not been used".to_string()), - "Allowed to set a value under a non-sequential nonce", - ); - - // Test 2: increase min nonce by 1 with sequential nonce ordering: - run_nonce_test( - 0u32, - NonceHolderTestMode::IncreaseMinNonceBy1, - None, - "Failed to increment nonce by 1 for sequential account", - ); - - // Test 3: correctly set a value under the nonce with sequential nonce ordering: - run_nonce_test( - 1u32, - NonceHolderTestMode::SetValueUnderNonce, - None, - "Failed to set a value under a sequential nonce", - ); - - // Test 4: migrate to arbitrary nonce ordering: - run_nonce_test( - 2u32, - NonceHolderTestMode::SwitchToArbitraryOrdering, - None, - "Failed to switch to arbitrary ordering", - ); - - // Test 5: increase min nonce by 5 - run_nonce_test( - 6u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - None, - "Failed to increase min nonce by 5", - ); - - // Test 6: since the nonces in range [6,10] are no longer allowed, the - // tx with nonce 10 should not be allowed - run_nonce_test( - 10u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - Some("Reusing the same nonce twice".to_string()), - "Allowed to reuse a nonce below the minimal one", - ); - - // Test 7: we should be able to use nonce 13 - run_nonce_test( - 13u32, - NonceHolderTestMode::SetValueUnderNonce, - None, - "Did not allow to use unused nonce 13", - ); - - // Test 8: we should not be able to reuse nonce 13 - run_nonce_test( - 13u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - Some("Reusing the same nonce twice".to_string()), - "Allowed to reuse the same nonce twice", - ); - - // Test 9: we should be able to simply use nonce 14, while bumping the minimal nonce by 5 - run_nonce_test( - 14u32, - NonceHolderTestMode::IncreaseMinNonceBy5, - None, - "Did not allow to use a bumped nonce", - ); - - // Test 10: do not allow bumping the nonce by too much - run_nonce_test( - 16u32, - NonceHolderTestMode::IncreaseMinNonceTooMuch, - Some("The value for incrementing the nonce is too high".to_string()), - "Allowed incrementing min nonce too much", - ); - - // Test 11: do not allow leaving a nonce unmarked as used - run_nonce_test( - 16u32, - NonceHolderTestMode::LeaveNonceUnused, - Some("The nonce was not set as used".to_string()), - "Allowed to leave nonce as unused", - ); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/refunds.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/refunds.rs deleted file mode 100644 index 54c281a9939..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/refunds.rs +++ /dev/null @@ -1,172 +0,0 @@ -use crate::interface::{TxExecutionMode, VmExecutionMode}; -use crate::vm_refunds_enhancement::tests::tester::{DeployContractsTx, TxType, VmTesterBuilder}; -use crate::vm_refunds_enhancement::tests::utils::read_test_contract; - -use crate::vm_refunds_enhancement::types::internals::TransactionData; -use crate::vm_refunds_enhancement::HistoryEnabled; - -#[test] -fn test_predetermined_refunded_gas() { - // In this test, we compare the execution of the bootloader with a predefined - // refunded gas value and without one - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_batch = vm.vm.batch_env.clone(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let DeployContractsTx { - tx, - 
bytecode_hash: _, - address: _, - } = account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(!result.result.is_failed()); - - // If either the refund suggested by the operator or the final refund is zero, - // the operator's refund has no impact at all, so this test would not - // make much sense. - assert!( - result.refunds.operator_suggested_refund > 0, - "The operator's refund is 0" - ); - assert!(result.refunds.gas_refunded > 0, "The final refund is 0"); - - let result_without_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_without_predefined_refunds = vm.vm.get_current_execution_state(); - assert!(!result_without_predefined_refunds.result.is_failed()); - - // Here we want to provide the same refund from the operator and check that it's the correct one. - // We execute the whole block without the refund tracer, because it would eventually override the provided refund. - // But the overall result should be the same - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let tx: TransactionData = tx.into(); - let block_gas_per_pubdata_byte = vm.vm.batch_env.block_gas_price_per_pubdata(); - // Overhead - let overhead = tx.overhead_gas(block_gas_per_pubdata_byte as u32); - vm.vm - .push_raw_transaction(tx.clone(), overhead, result.refunds.gas_refunded, true); - - let result_with_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_with_predefined_refunds = vm.vm.get_current_execution_state(); - - assert!(!result_with_predefined_refunds.result.is_failed()); - - // We need to sort these lists as those are flattened from HashMaps - current_state_with_predefined_refunds - .used_contract_hashes - .sort(); - current_state_without_predefined_refunds - .used_contract_hashes - .sort(); - - assert_eq!( - current_state_with_predefined_refunds.events, - current_state_without_predefined_refunds.events - ); - - assert_eq!( - current_state_with_predefined_refunds.user_l2_to_l1_logs, - current_state_without_predefined_refunds.user_l2_to_l1_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.system_logs, - current_state_without_predefined_refunds.system_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.deduplicated_events_logs, - current_state_without_predefined_refunds.deduplicated_events_logs - ); - - assert_eq!( - current_state_with_predefined_refunds.storage_log_queries, - current_state_without_predefined_refunds.storage_log_queries - ); - assert_eq!( - current_state_with_predefined_refunds.used_contract_hashes, - current_state_without_predefined_refunds.used_contract_hashes - ); - - // In this test we provide a different refund from the operator. - // We still can't use the refund tracer, because it would override the refund. - // But we can check that the logs and events have changed (a small sanity sketch follows). 
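// Hedged sketch, not from the original file: a bound the refund plumbing above is
// expected to respect. `assert_refund_sane` and `tx_gas_limit` are illustrative
// names; call sites are omitted.
fn assert_refund_sane(refund: u64, tx_gas_limit: u64) {
    // A refund can never exceed what the transaction was allowed to spend.
    assert!(refund <= tx_gas_limit, "refund {refund} exceeds gas limit {tx_gas_limit}");
}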
- let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let changed_operator_suggested_refund = result.refunds.gas_refunded + 1000; - vm.vm - .push_raw_transaction(tx, overhead, changed_operator_suggested_refund, true); - let result = vm.vm.execute(VmExecutionMode::Batch); - let mut current_state_with_changed_predefined_refunds = vm.vm.get_current_execution_state(); - - assert!(!result.result.is_failed()); - current_state_with_changed_predefined_refunds - .used_contract_hashes - .sort(); - current_state_without_predefined_refunds - .used_contract_hashes - .sort(); - - assert_eq!( - current_state_with_changed_predefined_refunds.events.len(), - current_state_without_predefined_refunds.events.len() - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.events, - current_state_without_predefined_refunds.events - ); - - assert_eq!( - current_state_with_changed_predefined_refunds.user_l2_to_l1_logs, - current_state_without_predefined_refunds.user_l2_to_l1_logs - ); - - assert_eq!( - current_state_with_changed_predefined_refunds.system_logs, - current_state_without_predefined_refunds.system_logs - ); - - assert_eq!( - current_state_with_changed_predefined_refunds.deduplicated_events_logs, - current_state_without_predefined_refunds.deduplicated_events_logs - ); - - assert_eq!( - current_state_with_changed_predefined_refunds - .storage_log_queries - .len(), - current_state_without_predefined_refunds - .storage_log_queries - .len() - ); - - assert_ne!( - current_state_with_changed_predefined_refunds.storage_log_queries, - current_state_without_predefined_refunds.storage_log_queries - ); - assert_eq!( - current_state_with_changed_predefined_refunds.used_contract_hashes, - current_state_without_predefined_refunds.used_contract_hashes - ); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/require_eip712.rs deleted file mode 100644 index 03a704841b0..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/require_eip712.rs +++ /dev/null @@ -1,163 +0,0 @@ -use std::convert::TryInto; - -use ethabi::Token; - -use zksync_eth_signer::raw_ethereum_tx::TransactionParameters; -use zksync_eth_signer::EthereumSigner; -use zksync_system_constants::L2_ETH_TOKEN_ADDRESS; -use zksync_types::fee::Fee; -use zksync_types::l2::L2Tx; -use zksync_types::transaction_request::TransactionRequest; -use zksync_types::utils::storage_key_for_standard_token_balance; -use zksync_types::{ - AccountTreeId, Address, Eip712Domain, Execute, L2ChainId, Nonce, Transaction, U256, -}; - -use crate::interface::{TxExecutionMode, VmExecutionMode}; -use crate::vm_refunds_enhancement::tests::tester::{Account, VmTester, VmTesterBuilder}; -use crate::vm_refunds_enhancement::tests::utils::read_many_owners_custom_account_contract; -use crate::vm_refunds_enhancement::HistoryDisabled; - -impl VmTester<HistoryDisabled> { - pub(crate) fn get_eth_balance(&mut self, address: Address) -> U256 { - let key = storage_key_for_standard_token_balance( - AccountTreeId::new(L2_ETH_TOKEN_ADDRESS), - &address, - ); - self.vm.state.storage.storage.read_from_storage(&key) - } -} - -// TODO: refactor this test; it uses too many internal details of the VM -#[tokio::test] -/// This test deploys 'buggy' account abstraction code, and then tries accessing it both with legacy -/// and 
EIP712 transactions. -/// Currently we support both, but in the future, we should allow only EIP712 transactions to access the AA accounts. -async fn test_require_eip712() { - // Use 3 accounts: - // - private_address - EOA account, where we have the key - // - account_address - AA account, where the contract is deployed - // - beneficiary - an EOA account, where we'll try to transfer the tokens. - let account_abstraction = Account::random(); - let mut private_account = Account::random(); - let beneficiary = Account::random(); - - let (bytecode, contract) = read_many_owners_custom_account_contract(); - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_custom_contracts(vec![(bytecode, account_abstraction.address, true)]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account_abstraction.clone(), private_account.clone()]) - .build(); - - assert_eq!(vm.get_eth_balance(beneficiary.address), U256::from(0)); - - let chain_id: u32 = 270; - - // First, let's set the owners of the AA account to the private_address. - // (so that messages signed by private_address, are authorized to act on behalf of the AA account). - let set_owners_function = contract.function("setOwners").unwrap(); - let encoded_input = set_owners_function - .encode_input(&[Token::Array(vec![Token::Address(private_account.address)])]) - .unwrap(); - - let tx = private_account.get_l2_tx_for_execute( - Execute { - contract_address: account_abstraction.address, - calldata: encoded_input, - value: Default::default(), - factory_deps: None, - }, - None, - ); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - let private_account_balance = vm.get_eth_balance(private_account.address); - - // And now let's do the transfer from the 'account abstraction' to 'beneficiary' (using 'legacy' transaction). - // Normally this would not work - unless the operator is malicious. - let aa_raw_tx = TransactionParameters { - nonce: U256::from(0), - to: Some(beneficiary.address), - gas: U256::from(100000000), - gas_price: Some(U256::from(10000000)), - value: U256::from(888000088), - data: vec![], - chain_id: 270, - transaction_type: None, - access_list: None, - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - }; - - let aa_tx = private_account.sign_legacy_tx(aa_raw_tx).await; - let (tx_request, hash) = TransactionRequest::from_bytes(&aa_tx, L2ChainId::from(270)).unwrap(); - - let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000).unwrap(); - l2_tx.set_input(aa_tx, hash); - // Pretend that operator is malicious and sets the initiator to the AA account. - l2_tx.common_data.initiator_address = account_abstraction.address; - let transaction: Transaction = l2_tx.try_into().unwrap(); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(888000088) - ); - // Make sure that the tokens were transferred from the AA account. 
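// Hedged aside, not from the original file: the beneficiary balance asserted further
// below (916375026) is just the sum of the two transfers this test performs.
assert_eq!(888_000_088u64 + 28_374_938u64, 916_375_026u64);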
- assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); - - // // Now send the 'classic' EIP712 transaction - let tx_712 = L2Tx::new( - beneficiary.address, - vec![], - Nonce(1), - Fee { - gas_limit: U256::from(1000000000), - max_fee_per_gas: U256::from(1000000000), - max_priority_fee_per_gas: U256::from(1000000000), - gas_per_pubdata_limit: U256::from(1000000000), - }, - account_abstraction.address, - U256::from(28374938), - None, - Default::default(), - ); - - let transaction_request: TransactionRequest = tx_712.into(); - - let domain = Eip712Domain::new(L2ChainId::from(chain_id)); - let signature = private_account - .get_pk_signer() - .sign_typed_data(&domain, &transaction_request) - .await - .unwrap(); - let encoded_tx = transaction_request.get_signed_bytes(&signature, L2ChainId::from(chain_id)); - - let (aa_txn_request, aa_hash) = - TransactionRequest::from_bytes(&encoded_tx, L2ChainId::from(chain_id)).unwrap(); - - let mut l2_tx = L2Tx::from_request(aa_txn_request, 100000).unwrap(); - l2_tx.set_input(encoded_tx, aa_hash); - - let transaction: Transaction = l2_tx.try_into().unwrap(); - vm.vm.push_transaction(transaction); - vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - vm.get_eth_balance(beneficiary.address), - U256::from(916375026) - ); - assert_eq!( - private_account_balance, - vm.get_eth_balance(private_account.address) - ); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/rollbacks.rs deleted file mode 100644 index 8107ddcdabf..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/rollbacks.rs +++ /dev/null @@ -1,259 +0,0 @@ -use ethabi::Token; - -use zksync_contracts::get_loadnext_contract; -use zksync_contracts::test_contracts::LoadnextContractExecutionParams; - -use crate::interface::storage::WriteStorage; -use zksync_types::{get_nonce_key, Execute, U256}; - -use crate::interface::{TxExecutionMode, VmExecutionMode}; -use crate::vm_refunds_enhancement::tests::tester::{ - DeployContractsTx, TransactionTestInfo, TxModifier, TxType, VmTesterBuilder, -}; -use crate::vm_refunds_enhancement::tests::utils::read_test_contract; -use crate::vm_refunds_enhancement::types::internals::ZkSyncVmState; -use crate::vm_refunds_enhancement::{ - BootloaderState, DynTracer, HistoryEnabled, HistoryMode, TracerExecutionStatus, - TracerExecutionStopReason, VmTracer, -}; - -#[test] -fn test_vm_rollbacks() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let mut account = vm.rich_accounts[0].clone(); - let counter = read_test_contract(); - let tx_0 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_1 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - let tx_2 = account.get_deploy_tx(&counter, None, TxType::L2).tx; - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(tx_0.clone(), false), - TransactionTestInfo::new_processed(tx_1.clone(), false), - TransactionTestInfo::new_processed(tx_2.clone(), false), - ]); - - // reset vm - vm.reset_with_empty_storage(); - - let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignatureLength.into()), - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongMagicValue.into()), - 
TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignature.into()), - // The correct nonce is 0, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_0.clone(), false), - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), - // The correct nonce is 1, this tx will fail - TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_1, false), - // The correct nonce is 2, this tx will fail - TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()), - // This tx will succeed - TransactionTestInfo::new_processed(tx_2.clone(), false), - // This tx will fail - TransactionTestInfo::new_rejected(tx_2, TxModifier::NonceReused.into()), - TransactionTestInfo::new_rejected(tx_0, TxModifier::NonceReused.into()), - ]); - - assert_eq!(result_without_rollbacks, result_with_rollbacks); -} - -#[test] -fn test_vm_loadnext_rollbacks() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let mut account = vm.rich_accounts[0].clone(); - - let loadnext_contract = get_loadnext_contract(); - let loadnext_constructor_data = &[Token::Uint(U256::from(100))]; - let DeployContractsTx { - tx: loadnext_deploy_tx, - address, - .. - } = account.get_deploy_tx_with_factory_deps( - &loadnext_contract.bytecode, - Some(loadnext_constructor_data), - loadnext_contract.factory_deps.clone(), - TxType::L2, - ); - - let loadnext_tx_1 = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let loadnext_tx_2 = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: LoadnextContractExecutionParams { - reads: 100, - writes: 100, - events: 100, - hashes: 500, - recursive_calls: 10, - deploys: 60, - } - .to_bytes(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result_without_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_1.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), false), - ]); - - // reset vm - vm.reset_with_empty_storage(); - - let result_with_rollbacks = vm.execute_and_verify_txs(&vec![ - TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false), - TransactionTestInfo::new_processed(loadnext_tx_1.clone(), true), - TransactionTestInfo::new_rejected( - loadnext_deploy_tx.clone(), - TxModifier::NonceReused.into(), - ), - TransactionTestInfo::new_processed(loadnext_tx_1, false), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true), - TransactionTestInfo::new_rejected(loadnext_deploy_tx, TxModifier::NonceReused.into()), - TransactionTestInfo::new_processed(loadnext_tx_2, false), - ]); - - assert_eq!(result_without_rollbacks, result_with_rollbacks); -} - -// Testing tracer that does not allow the recursion to go deeper than a certain limit -struct 
MaxRecursionTracer { - max_recursion_depth: usize, -} - -/// Tracer that observes the current callstack depth and stops the VM execution -/// once the configured recursion limit is exceeded. -impl<S: WriteStorage, H: HistoryMode> DynTracer<S, H> for MaxRecursionTracer {} - -impl<S: WriteStorage, H: HistoryMode> VmTracer<S, H> for MaxRecursionTracer { - fn finish_cycle( - &mut self, - state: &mut ZkSyncVmState<S, H>, - _bootloader_state: &mut BootloaderState, - ) -> TracerExecutionStatus { - let current_depth = state.local_state.callstack.depth(); - - if current_depth > self.max_recursion_depth { - TracerExecutionStatus::Stop(TracerExecutionStopReason::Finish) - } else { - TracerExecutionStatus::Continue - } - } -} - -#[test] -fn test_layered_rollback() { - // This test checks that the layered rollbacks work correctly, i.e. - // a rollback by the operator will always revert all the changes - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - let loadnext_contract = get_loadnext_contract().bytecode; - - let DeployContractsTx { - tx: deploy_tx, - address, - .. - } = account.get_deploy_tx( - &loadnext_contract, - Some(&[Token::Uint(0.into())]), - TxType::L2, - ); - vm.vm.push_transaction(deploy_tx); - let deployment_res = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!deployment_res.result.is_failed(), "transaction failed"); - - let loadnext_transaction = account.get_loadnext_transaction( - address, - LoadnextContractExecutionParams { - writes: 1, - recursive_calls: 20, - ..LoadnextContractExecutionParams::empty() - }, - TxType::L2, - ); - - let nonce_val = vm - .vm - .state - .storage - .storage - .read_from_storage(&get_nonce_key(&account.address)); - - vm.vm.make_snapshot(); - - vm.vm.push_transaction(loadnext_transaction.clone()); - vm.vm.inspect( - vec![Box::new(MaxRecursionTracer { - max_recursion_depth: 15, - })], - VmExecutionMode::OneTx, - ); - - let nonce_val2 = vm - .vm - .state - .storage - .storage - .read_from_storage(&get_nonce_key(&account.address)); - - // The tracer stopped after the validation has passed, so the nonce has already been increased - assert_eq!(nonce_val + U256::one(), nonce_val2, "nonce did not change"); - - vm.vm.rollback_to_the_latest_snapshot(); - - let nonce_val_after_rollback = vm - .vm - .state - .storage - .storage - .read_from_storage(&get_nonce_key(&account.address)); - - assert_eq!( - nonce_val, nonce_val_after_rollback, - "nonce changed after rollback" - ); - - vm.vm.push_transaction(loadnext_transaction); - let result = vm.vm.inspect(vec![], VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "transaction must not fail"); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/simple_execution.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/simple_execution.rs deleted file mode 100644 index eb5e3879837..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/simple_execution.rs +++ /dev/null @@ -1,77 +0,0 @@ -use crate::interface::{ExecutionResult, VmExecutionMode}; -use crate::vm_refunds_enhancement::tests::tester::{TxType, VmTesterBuilder}; -use crate::vm_refunds_enhancement::HistoryDisabled; - -#[test] -fn estimate_fee() { - let mut vm_tester = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - vm_tester.deploy_test_contract(); - let account = &mut vm_tester.rich_accounts[0]; - - let tx = 
account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L2, - ); - - vm_tester.vm.push_transaction(tx); - - let result = vm_tester.vm.execute(VmExecutionMode::OneTx); - assert_matches!(result.result, ExecutionResult::Success { .. }); -} - -#[test] -fn simple_execute() { - let mut vm_tester = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - vm_tester.deploy_test_contract(); - - let account = &mut vm_tester.rich_accounts[0]; - - let tx1 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - - let tx2 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - true, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - - let tx3 = account.get_test_contract_transaction( - vm_tester.test_contract.unwrap(), - false, - Default::default(), - false, - TxType::L1 { serial_id: 1 }, - ); - let vm = &mut vm_tester.vm; - vm.push_transaction(tx1); - vm.push_transaction(tx2); - vm.push_transaction(tx3); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Success { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Revert { .. }); - let tx = vm.execute(VmExecutionMode::OneTx); - assert_matches!(tx.result, ExecutionResult::Success { .. }); - let block_tip = vm.execute(VmExecutionMode::Batch); - assert_matches!(block_tip.result, ExecutionResult::Success { .. }); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/inner_state.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/inner_state.rs deleted file mode 100644 index 3158fc49444..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/inner_state.rs +++ /dev/null @@ -1,127 +0,0 @@ -use std::collections::HashMap; - -use zk_evm_1_3_3::aux_structures::Timestamp; -use zk_evm_1_3_3::vm_state::VmLocalState; -use crate::interface::storage::WriteStorage; - -use zksync_types::{StorageKey, StorageLogQuery, StorageValue, U256}; - -use crate::vm_refunds_enhancement::old_vm::event_sink::InMemoryEventSink; -use crate::vm_refunds_enhancement::old_vm::history_recorder::{ - AppDataFrameManagerWithHistory, HistoryRecorder, -}; -use crate::vm_refunds_enhancement::{HistoryEnabled, HistoryMode, SimpleMemory, Vm}; - -#[derive(Clone, Debug)] -pub(crate) struct ModifiedKeysMap(HashMap); - -// We consider hashmaps to be equal even if there is a key -// that is not present in one but has zero value in another. -impl PartialEq for ModifiedKeysMap { - fn eq(&self, other: &Self) -> bool { - for (key, value) in self.0.iter() { - if *value != other.0.get(key).cloned().unwrap_or_default() { - return false; - } - } - for (key, value) in other.0.iter() { - if *value != self.0.get(key).cloned().unwrap_or_default() { - return false; - } - } - true - } -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct DecommitterTestInnerState { - /// There is no way to "truly" compare the storage pointer, - /// so we just compare the modified keys. This is reasonable enough. 
- pub(crate) modified_storage_keys: ModifiedKeysMap, - pub(crate) known_bytecodes: HistoryRecorder>, H>, - pub(crate) decommitted_code_hashes: HistoryRecorder, HistoryEnabled>, -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct StorageOracleInnerState { - /// There is no way to "truly" compare the storage pointer, - /// so we just compare the modified keys. This is reasonable enough. - pub(crate) modified_storage_keys: ModifiedKeysMap, - - pub(crate) frames_stack: AppDataFrameManagerWithHistory, H>, - - pub(crate) pre_paid_changes: HistoryRecorder, H>, - pub(crate) paid_changes: HistoryRecorder, H>, - pub(crate) initial_values: HistoryRecorder, H>, - pub(crate) returned_refunds: HistoryRecorder, H>, -} - -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct PrecompileProcessorTestInnerState { - pub(crate) timestamp_history: HistoryRecorder, H>, -} - -/// A struct that encapsulates the state of the VM's oracles -/// The state is to be used in tests. -#[derive(Clone, PartialEq, Debug)] -pub(crate) struct VmInstanceInnerState { - event_sink: InMemoryEventSink, - precompile_processor_state: PrecompileProcessorTestInnerState, - memory: SimpleMemory, - decommitter_state: DecommitterTestInnerState, - storage_oracle_state: StorageOracleInnerState, - local_state: VmLocalState, -} - -impl Vm { - // Dump inner state of the VM. - pub(crate) fn dump_inner_state(&self) -> VmInstanceInnerState { - let event_sink = self.state.event_sink.clone(); - let precompile_processor_state = PrecompileProcessorTestInnerState { - timestamp_history: self.state.precompiles_processor.timestamp_history.clone(), - }; - let memory = self.state.memory.clone(); - let decommitter_state = DecommitterTestInnerState { - modified_storage_keys: ModifiedKeysMap( - self.state - .decommittment_processor - .get_storage() - .borrow() - .modified_storage_keys() - .clone(), - ), - known_bytecodes: self.state.decommittment_processor.known_bytecodes.clone(), - decommitted_code_hashes: self - .state - .decommittment_processor - .get_decommitted_code_hashes_with_history() - .clone(), - }; - let storage_oracle_state = StorageOracleInnerState { - modified_storage_keys: ModifiedKeysMap( - self.state - .storage - .storage - .get_ptr() - .borrow() - .modified_storage_keys() - .clone(), - ), - frames_stack: self.state.storage.frames_stack.clone(), - pre_paid_changes: self.state.storage.pre_paid_changes.clone(), - paid_changes: self.state.storage.paid_changes.clone(), - initial_values: self.state.storage.initial_values.clone(), - returned_refunds: self.state.storage.returned_refunds.clone(), - }; - let local_state = self.state.local_state.clone(); - - VmInstanceInnerState { - event_sink, - precompile_processor_state, - memory, - decommitter_state, - storage_oracle_state, - local_state, - } - } -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/mod.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/mod.rs deleted file mode 100644 index dfe8905a7e0..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/mod.rs +++ /dev/null @@ -1,7 +0,0 @@ -pub(crate) use transaction_test_info::{ExpectedError, TransactionTestInfo, TxModifier}; -pub(crate) use vm_tester::{default_l1_batch, InMemoryStorageView, VmTester, VmTesterBuilder}; -pub(crate) use zksync_test_account::{Account, DeployContractsTx, TxType}; - -mod inner_state; -mod transaction_test_info; -mod vm_tester; diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/transaction_test_info.rs 
b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/transaction_test_info.rs deleted file mode 100644 index 8f7ecc0a733..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/transaction_test_info.rs +++ /dev/null @@ -1,217 +0,0 @@ -use zksync_types::{ExecuteTransactionCommon, Transaction}; - -use crate::interface::VmRevertReason; -use crate::interface::{ - CurrentExecutionState, ExecutionResult, Halt, TxRevertReason, VmExecutionMode, - VmExecutionResultAndLogs, -}; -use crate::vm_refunds_enhancement::tests::tester::vm_tester::VmTester; -use crate::vm_refunds_enhancement::HistoryEnabled; - -#[derive(Debug, Clone)] -pub(crate) enum TxModifier { - WrongSignatureLength, - WrongSignature, - WrongMagicValue, - WrongNonce, - NonceReused, -} - -#[derive(Debug, Clone)] -pub(crate) enum TxExpectedResult { - Rejected { error: ExpectedError }, - Processed { rollback: bool }, -} - -#[derive(Debug, Clone)] -pub(crate) struct TransactionTestInfo { - tx: Transaction, - result: TxExpectedResult, -} - -#[derive(Debug, Clone)] -pub(crate) struct ExpectedError { - pub(crate) revert_reason: TxRevertReason, - pub(crate) modifier: Option, -} - -impl From for ExpectedError { - fn from(value: TxModifier) -> Self { - let revert_reason = match value { - TxModifier::WrongSignatureLength => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Signature length is incorrect".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 29, 83, 105, 103, 110, 97, 116, 117, 114, 101, 32, - 108, 101, 110, 103, 116, 104, 32, 105, 115, 32, 105, 110, 99, 111, 114, 114, 101, 99, - 116, 0, 0, 0, - ], - }) - } - TxModifier::WrongSignature => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Account validation returned invalid magic value. 
Most often this means that the signature is incorrect".to_string(), - data: vec![], - }) - } - TxModifier::WrongMagicValue => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "v is neither 27 nor 28".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 22, 118, 32, 105, 115, 32, 110, 101, 105, 116, 104, - 101, 114, 32, 50, 55, 32, 110, 111, 114, 32, 50, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - }) - - } - TxModifier::WrongNonce => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Incorrect nonce".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 73, 110, 99, 111, 114, 114, 101, 99, 116, 32, 110, - 111, 110, 99, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - }) - } - TxModifier::NonceReused => { - Halt::ValidationFailed(VmRevertReason::General { - msg: "Reusing the same nonce twice".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 82, 101, 117, 115, 105, 110, 103, 32, 116, 104, - 101, 32, 115, 97, 109, 101, 32, 110, 111, 110, 99, 101, 32, 116, 119, 105, 99, 101, 0, - 0, 0, 0, - ], - }) - } - }; - - ExpectedError { - revert_reason: TxRevertReason::Halt(revert_reason), - modifier: Some(value), - } - } -} - -impl TransactionTestInfo { - pub(crate) fn new_rejected( - mut transaction: Transaction, - expected_error: ExpectedError, - ) -> Self { - transaction.common_data = match transaction.common_data { - ExecuteTransactionCommon::L2(mut data) => { - if let Some(modifier) = &expected_error.modifier { - match modifier { - TxModifier::WrongSignatureLength => { - data.signature = data.signature[..data.signature.len() - 20].to_vec() - } - TxModifier::WrongSignature => data.signature = vec![27u8; 65], - TxModifier::WrongMagicValue => data.signature = vec![1u8; 65], - TxModifier::WrongNonce => { - // Do not need to modify signature for nonce error - } - TxModifier::NonceReused => { - // Do not need to modify signature for nonce error - } - } - } - ExecuteTransactionCommon::L2(data) - } - _ => panic!("L1 transactions are not supported"), - }; - - Self { - tx: transaction, - result: TxExpectedResult::Rejected { - error: expected_error, - }, - } - } - - pub(crate) fn new_processed(transaction: Transaction, should_be_rollbacked: bool) -> Self { - Self { - tx: transaction, - result: TxExpectedResult::Processed { - rollback: should_be_rollbacked, - }, - } - } - - fn verify_result(&self, result: &VmExecutionResultAndLogs) { - match &self.result { - TxExpectedResult::Rejected { error } => match &result.result { - ExecutionResult::Success { .. 
} => { - panic!("Transaction should be reverted {:?}", self.tx.nonce()) - } - ExecutionResult::Revert { output } => match &error.revert_reason { - TxRevertReason::TxReverted(expected) => { - assert_eq!(output, expected) - } - _ => { - panic!("Error types mismatch"); - } - }, - ExecutionResult::Halt { reason } => match &error.revert_reason { - TxRevertReason::Halt(expected) => { - assert_eq!(reason, expected) - } - _ => { - panic!("Error types mismatch"); - } - }, - }, - TxExpectedResult::Processed { .. } => { - assert!(!result.result.is_failed()); - } - } - } - - fn should_rollback(&self) -> bool { - match &self.result { - TxExpectedResult::Rejected { .. } => true, - TxExpectedResult::Processed { rollback } => *rollback, - } - } -} - -impl VmTester { - pub(crate) fn execute_and_verify_txs( - &mut self, - txs: &[TransactionTestInfo], - ) -> CurrentExecutionState { - for tx_test_info in txs { - self.execute_tx_and_verify(tx_test_info.clone()); - } - self.vm.execute(VmExecutionMode::Batch); - let mut state = self.vm.get_current_execution_state(); - state.used_contract_hashes.sort(); - state - } - - pub(crate) fn execute_tx_and_verify( - &mut self, - tx_test_info: TransactionTestInfo, - ) -> VmExecutionResultAndLogs { - let inner_state_before = self.vm.dump_inner_state(); - self.vm.make_snapshot(); - self.vm.push_transaction(tx_test_info.tx.clone()); - let result = self.vm.execute(VmExecutionMode::OneTx); - tx_test_info.verify_result(&result); - if tx_test_info.should_rollback() { - self.vm.rollback_to_the_latest_snapshot(); - let inner_state_after = self.vm.dump_inner_state(); - assert_eq!( - inner_state_before, inner_state_after, - "Inner state before and after rollback should be equal" - ); - } - result - } -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/vm_tester.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/vm_tester.rs deleted file mode 100644 index 800af517ed3..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tester/vm_tester.rs +++ /dev/null @@ -1,300 +0,0 @@ -use zksync_contracts::BaseSystemContracts; -use crate::interface::storage::{InMemoryStorage, StoragePtr, StorageView, WriteStorage}; - -use zksync_types::block::legacy_miniblock_hash; -use zksync_types::helpers::unix_timestamp_ms; -use zksync_types::utils::{deployed_address_create, storage_key_for_eth_balance}; -use zksync_types::{ - get_code_key, get_is_account_key, Address, L1BatchNumber, L2ChainId, MiniblockNumber, Nonce, - ProtocolVersionId, U256, -}; -use zksync_utils::bytecode::hash_bytecode; -use zksync_utils::u256_to_h256; - -use crate::vm_refunds_enhancement::constants::BLOCK_GAS_LIMIT; - -use crate::interface::{ - L1BatchEnv, L2Block, L2BlockEnv, SystemEnv, TxExecutionMode, VmExecutionMode, -}; -use crate::vm_refunds_enhancement::tests::tester::Account; -use crate::vm_refunds_enhancement::tests::tester::TxType; -use crate::vm_refunds_enhancement::tests::utils::read_test_contract; -use crate::vm_refunds_enhancement::utils::l2_blocks::load_last_l2_block; -use crate::vm_refunds_enhancement::{HistoryMode, Vm}; - -pub(crate) type InMemoryStorageView = StorageView; - -pub(crate) struct VmTester { - pub(crate) vm: Vm, - pub(crate) storage: StoragePtr, - pub(crate) fee_account: Address, - pub(crate) deployer: Option, - pub(crate) test_contract: Option
, - pub(crate) rich_accounts: Vec, - pub(crate) custom_contracts: Vec, - history_mode: H, -} - -impl VmTester { - pub(crate) fn deploy_test_contract(&mut self) { - let contract = read_test_contract(); - let tx = self - .deployer - .as_mut() - .expect("You have to initialize builder with deployer") - .get_deploy_tx(&contract, None, TxType::L2) - .tx; - let nonce = tx.nonce().unwrap().0.into(); - self.vm.push_transaction(tx); - self.vm.execute(VmExecutionMode::OneTx); - let deployed_address = - deployed_address_create(self.deployer.as_ref().unwrap().address, nonce); - self.test_contract = Some(deployed_address); - } - - pub(crate) fn reset_with_empty_storage(&mut self) { - self.storage = StorageView::new(get_empty_storage()).to_rc_ptr(); - self.reset_state(false); - } - - /// Reset the state of the VM to the initial state. - /// If `use_latest_l2_block` is true, then the VM will use the latest L2 block from storage, - /// otherwise it will use the first L2 block of l1 batch env - pub(crate) fn reset_state(&mut self, use_latest_l2_block: bool) { - for account in self.rich_accounts.iter_mut() { - account.nonce = Nonce(0); - make_account_rich(self.storage.clone(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(self.storage.clone(), deployer); - } - - if !self.custom_contracts.is_empty() { - println!("Inserting custom contracts is not yet supported") - // insert_contracts(&mut self.storage, &self.custom_contracts); - } - - let mut l1_batch = self.vm.batch_env.clone(); - if use_latest_l2_block { - let last_l2_block = load_last_l2_block(self.storage.clone()).unwrap_or(L2Block { - number: 0, - timestamp: 0, - hash: legacy_miniblock_hash(MiniblockNumber(0)), - }); - l1_batch.first_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: std::cmp::max(last_l2_block.timestamp + 1, l1_batch.timestamp), - prev_block_hash: last_l2_block.hash, - max_virtual_blocks_to_create: 1, - }; - } - - let vm = Vm::new( - l1_batch, - self.vm.system_env.clone(), - self.storage.clone(), - self.history_mode.clone(), - ); - - if self.test_contract.is_some() { - self.deploy_test_contract(); - } - - self.vm = vm; - } -} - -pub(crate) type ContractsToDeploy = (Vec, Address, bool); - -pub(crate) struct VmTesterBuilder { - history_mode: H, - storage: Option, - l1_batch_env: Option, - system_env: SystemEnv, - deployer: Option, - rich_accounts: Vec, - custom_contracts: Vec, -} - -impl Clone for VmTesterBuilder { - fn clone(&self) -> Self { - Self { - history_mode: self.history_mode.clone(), - storage: None, - l1_batch_env: self.l1_batch_env.clone(), - system_env: self.system_env.clone(), - deployer: self.deployer.clone(), - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - } - } -} - -#[allow(dead_code)] -impl VmTesterBuilder { - pub(crate) fn new(history_mode: H) -> Self { - Self { - history_mode, - storage: None, - l1_batch_env: None, - system_env: SystemEnv { - zk_porter_available: false, - version: ProtocolVersionId::latest(), - base_system_smart_contracts: BaseSystemContracts::playground(), - gas_limit: BLOCK_GAS_LIMIT, - execution_mode: TxExecutionMode::VerifyExecute, - default_validation_computational_gas_limit: BLOCK_GAS_LIMIT, - chain_id: L2ChainId::from(270), - }, - deployer: None, - rich_accounts: vec![], - custom_contracts: vec![], - } - } - - pub(crate) fn with_l1_batch_env(mut self, l1_batch_env: L1BatchEnv) -> Self { - self.l1_batch_env = Some(l1_batch_env); - self - } - - pub(crate) fn with_system_env(mut self, 
system_env: SystemEnv) -> Self { - self.system_env = system_env; - self - } - - pub(crate) fn with_storage(mut self, storage: InMemoryStorage) -> Self { - self.storage = Some(storage); - self - } - - pub(crate) fn with_base_system_smart_contracts( - mut self, - base_system_smart_contracts: BaseSystemContracts, - ) -> Self { - self.system_env.base_system_smart_contracts = base_system_smart_contracts; - self - } - - pub(crate) fn with_gas_limit(mut self, gas_limit: u32) -> Self { - self.system_env.gas_limit = gas_limit; - self - } - - pub(crate) fn with_execution_mode(mut self, execution_mode: TxExecutionMode) -> Self { - self.system_env.execution_mode = execution_mode; - self - } - - pub(crate) fn with_empty_in_memory_storage(mut self) -> Self { - self.storage = Some(get_empty_storage()); - self - } - - pub(crate) fn with_random_rich_accounts(mut self, number: u32) -> Self { - for _ in 0..number { - let account = Account::random(); - self.rich_accounts.push(account); - } - self - } - - pub(crate) fn with_rich_accounts(mut self, accounts: Vec) -> Self { - self.rich_accounts.extend(accounts); - self - } - - pub(crate) fn with_deployer(mut self) -> Self { - let deployer = Account::random(); - self.deployer = Some(deployer); - self - } - - pub(crate) fn with_custom_contracts(mut self, contracts: Vec) -> Self { - self.custom_contracts = contracts; - self - } - - pub(crate) fn build(self) -> VmTester { - let l1_batch_env = self - .l1_batch_env - .unwrap_or_else(|| default_l1_batch(L1BatchNumber(1))); - - let mut raw_storage = self.storage.unwrap_or_else(get_empty_storage); - insert_contracts(&mut raw_storage, &self.custom_contracts); - let storage_ptr = StorageView::new(raw_storage).to_rc_ptr(); - for account in self.rich_accounts.iter() { - make_account_rich(storage_ptr.clone(), account); - } - if let Some(deployer) = &self.deployer { - make_account_rich(storage_ptr.clone(), deployer); - } - let fee_account = l1_batch_env.fee_account; - - let vm = Vm::new( - l1_batch_env, - self.system_env, - storage_ptr.clone(), - self.history_mode.clone(), - ); - - VmTester { - vm, - storage: storage_ptr, - fee_account, - deployer: self.deployer, - test_contract: None, - rich_accounts: self.rich_accounts.clone(), - custom_contracts: self.custom_contracts.clone(), - history_mode: self.history_mode, - } - } -} - -pub(crate) fn default_l1_batch(number: L1BatchNumber) -> L1BatchEnv { - let timestamp = unix_timestamp_ms(); - L1BatchEnv { - previous_batch_hash: None, - number, - timestamp, - l1_gas_price: 50_000_000_000, // 50 gwei - fair_l2_gas_price: 250_000_000, // 0.25 gwei - fee_account: Address::random(), - enforced_base_fee: None, - first_l2_block: L2BlockEnv { - number: 1, - timestamp, - prev_block_hash: legacy_miniblock_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 100, - }, - } -} - -pub(crate) fn make_account_rich(storage: StoragePtr, account: &Account) { - let key = storage_key_for_eth_balance(&account.address); - storage - .as_ref() - .borrow_mut() - .set_value(key, u256_to_h256(U256::from(10u64.pow(19)))); -} - -pub(crate) fn get_empty_storage() -> InMemoryStorage { - InMemoryStorage::with_system_contracts(hash_bytecode) -} - -// Inserts the contracts into the test environment, bypassing the -// deployer system contract. Besides the reference to storage -// it accepts a `contracts` tuple of information about the contract -// and whether or not it is an account. 
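// Hedged usage sketch, not part of the original file (values are illustrative):
#[test]
fn insert_contracts_usage_sketch() {
    let mut raw_storage = get_empty_storage();
    // A `ContractsToDeploy` entry is (bytecode, address, is_account); passing `true`
    // additionally marks the address as an account (via `get_is_account_key`).
    insert_contracts(&mut raw_storage, &[(read_test_contract(), Address::random(), true)]);
}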
-fn insert_contracts(raw_storage: &mut InMemoryStorage, contracts: &[ContractsToDeploy]) { - for (contract, address, is_account) in contracts { - let deployer_code_key = get_code_key(address); - raw_storage.set_value(deployer_code_key, hash_bytecode(contract)); - - if *is_account { - let is_account_key = get_is_account_key(address); - raw_storage.set_value(is_account_key, u256_to_h256(1_u32.into())); - } - - raw_storage.store_factory_dep(hash_bytecode(contract), contract.clone()); - } -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tracing_execution_error.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tracing_execution_error.rs deleted file mode 100644 index a839f4708ad..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/tracing_execution_error.rs +++ /dev/null @@ -1,53 +0,0 @@ -use zksync_types::{Execute, H160}; - -use crate::interface::TxExecutionMode; -use crate::interface::{TxRevertReason, VmRevertReason}; -use crate::vm_refunds_enhancement::tests::tester::{ - ExpectedError, TransactionTestInfo, VmTesterBuilder, -}; -use crate::vm_refunds_enhancement::tests::utils::{ - get_execute_error_calldata, read_error_contract, BASE_SYSTEM_CONTRACTS, -}; -use crate::vm_refunds_enhancement::HistoryEnabled; - -#[test] -fn test_tracing_of_execution_errors() { - let contract_address = H160::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_custom_contracts(vec![(read_error_contract(), contract_address, false)]) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_random_rich_accounts(1) - .build(); - - let account = &mut vm.rich_accounts[0]; - - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address, - calldata: get_execute_error_calldata(), - value: Default::default(), - factory_deps: Some(vec![]), - }, - None, - ); - - vm.execute_tx_and_verify(TransactionTestInfo::new_rejected( - tx, - ExpectedError { - revert_reason: TxRevertReason::TxReverted(VmRevertReason::General { - msg: "short".to_string(), - data: vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 115, 104, 111, 114, 116, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, - ], - }), - modifier: None, - }, - )); -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/upgrade.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/upgrade.rs deleted file mode 100644 index cbbec9a83d5..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/upgrade.rs +++ /dev/null @@ -1,342 +0,0 @@ -use zk_evm_1_3_3::aux_structures::Timestamp; - -use zksync_types::{ - ethabi::Contract, - Execute, COMPLEX_UPGRADER_ADDRESS, CONTRACT_DEPLOYER_ADDRESS, CONTRACT_FORCE_DEPLOYER_ADDRESS, - REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, - {ethabi::Token, Address, ExecuteTransactionCommon, Transaction, H256, U256}, - {get_code_key, get_known_code_key, H160}, -}; - -use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use zksync_contracts::{deployer_contract, load_contract, load_sys_contract, read_bytecode}; -use crate::interface::storage::WriteStorage; -use zksync_test_account::TxType; - -use crate::interface::{ExecutionResult, Halt, TxExecutionMode, 
VmExecutionMode}; -use crate::vm_refunds_enhancement::tests::tester::VmTesterBuilder; -use crate::vm_refunds_enhancement::tests::utils::verify_required_storage; -use crate::vm_refunds_enhancement::HistoryEnabled; -use zksync_types::protocol_version::ProtocolUpgradeTxCommonData; - -use super::utils::read_test_contract; - -/// In this test we ensure that the requirements for protocol upgrade transactions are enforced by the bootloader: -/// - This transaction must be the only one in block -/// - If present, this transaction must be the first one in block -#[test] -fn test_protocol_upgrade_is_first() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let bytecode_hash = hash_bytecode(&read_test_contract()); - - // Here we just use some random transaction of protocol upgrade type: - let protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecodehash to - address: H160::random(), - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - let normal_l1_transaction = vm.rich_accounts[0] - .get_deploy_tx(&read_test_contract(), None, TxType::L1 { serial_id: 0 }) - .tx; - - let expected_error = - Halt::UnexpectedVMBehavior("Assertion error: Protocol upgrade tx not first".to_string()); - - vm.vm.make_snapshot(); - // Test 1: there must be only one system transaction in block - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - - vm.vm.execute(VmExecutionMode::OneTx); - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error.clone() - } - ); - - // Test 2: the protocol upgrade tx must be the first one in block - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(normal_l1_transaction.clone()); - vm.vm.push_transaction(protocol_upgrade_transaction.clone()); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert_eq!( - result.result, - ExecutionResult::Halt { - reason: expected_error - } - ); - - vm.vm.rollback_to_the_latest_snapshot(); - vm.vm.make_snapshot(); - vm.vm.push_transaction(protocol_upgrade_transaction); - vm.vm.push_transaction(normal_l1_transaction); - - vm.vm.execute(VmExecutionMode::OneTx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); -} - -/// In this test we try to test how force deployments could be done via protocol upgrade transactions. -#[test] -fn test_force_deploy_upgrade() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - let bytecode_hash = hash_bytecode(&read_test_contract()); - - let known_code_key = get_known_code_key(&bytecode_hash); - // It is generally expected that all the keys will be set as known prior to the protocol upgrade. 
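// Hedged sketch (helper not in the original test): the pattern used below for marking
// a bytecode hash as known, so that force-deploying it does not require publishing it.
// The `S: WriteStorage` bound is assumed from the imports above.
fn mark_bytecode_as_known<S: WriteStorage>(storage: &mut S, bytecode_hash: H256) {
    // The tests store 1 under the known-codes key as the "known" marker.
    storage.set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into()));
}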
- storage_view - .borrow_mut() - .set_value(known_code_key, u256_to_h256(1.into())); - drop(storage_view); - - let address_to_deploy = H160::random(); - // Here we just use some random transaction of protocol upgrade type: - let transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecodehash to - address: address_to_deploy, - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - vm.vm.push_transaction(transaction); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = vec![(bytecode_hash, get_code_key(&address_to_deploy))]; - - // Verify that the bytecode has been set correctly - verify_required_storage(&vm.vm.state, expected_slots); -} - -/// Here we show how the work with the complex upgrader could be done -#[test] -fn test_complex_upgrader() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - - let bytecode_hash = hash_bytecode(&read_complex_upgrade()); - let msg_sender_test_hash = hash_bytecode(&read_msg_sender_test()); - - // Let's assume that the bytecode for the implementation of the complex upgrade - // is already deployed in some address in userspace - let upgrade_impl = H160::random(); - let account_code_key = get_code_key(&upgrade_impl); - - storage_view - .borrow_mut() - .set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); - storage_view.borrow_mut().set_value( - get_known_code_key(&msg_sender_test_hash), - u256_to_h256(1.into()), - ); - storage_view - .borrow_mut() - .set_value(account_code_key, bytecode_hash); - drop(storage_view); - - vm.vm.state.decommittment_processor.populate( - vec![ - ( - h256_to_u256(bytecode_hash), - bytes_to_be_words(read_complex_upgrade()), - ), - ( - h256_to_u256(msg_sender_test_hash), - bytes_to_be_words(read_msg_sender_test()), - ), - ], - Timestamp(0), - ); - - let address_to_deploy1 = H160::random(); - let address_to_deploy2 = H160::random(); - - let transaction = get_complex_upgrade_tx( - upgrade_impl, - address_to_deploy1, - address_to_deploy2, - bytecode_hash, - ); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = vec![ - (bytecode_hash, get_code_key(&address_to_deploy1)), - (bytecode_hash, get_code_key(&address_to_deploy2)), - ]; - - // Verify that the bytecode has been set correctly - verify_required_storage(&vm.vm.state, expected_slots); -} - -#[derive(Debug, Clone)] -struct ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash: H256, - // The address on which to deploy the bytecodehash to - address: Address, - // Whether to run the constructor on the force deployment - call_constructor: bool, - // The value with which to initialize a contract - value: U256, - // The constructor calldata - input: Vec<u8>, -} - -fn get_forced_deploy_tx(deployment: &[ForceDeployment]) -> Transaction { - let deployer = deployer_contract(); - let contract_function =
deployer.function("forceDeployOnAddresses").unwrap(); - - let encoded_deployments: Vec<_> = deployment - .iter() - .map(|deployment| { - Token::Tuple(vec![ - Token::FixedBytes(deployment.bytecode_hash.as_bytes().to_vec()), - Token::Address(deployment.address), - Token::Bool(deployment.call_constructor), - Token::Uint(deployment.value), - Token::Bytes(deployment.input.clone()), - ]) - }) - .collect(); - - let params = [Token::Array(encoded_deployments)]; - - let calldata = contract_function - .encode_input(&params) - .expect("failed to encode parameters"); - - let execute = Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, - calldata, - factory_deps: None, - value: U256::zero(), - }; - - Transaction { - common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, - gas_limit: U256::from(200_000_000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -// Returns the transaction that performs a complex protocol upgrade. -// The first param is the address of the implementation of the complex upgrade -// in user-space, while the next 3 params are params of the implementation itself -// For the explanation of the parameters, please refer to: -// etc/contracts-test-data/complex-upgrade/complex-upgrade.sol -fn get_complex_upgrade_tx( - implementation_address: Address, - address1: Address, - address2: Address, - bytecode_hash: H256, -) -> Transaction { - let impl_contract = get_complex_upgrade_abi(); - let impl_function = impl_contract.function("someComplexUpgrade").unwrap(); - let impl_calldata = impl_function - .encode_input(&[ - Token::Address(address1), - Token::Address(address2), - Token::FixedBytes(bytecode_hash.as_bytes().to_vec()), - ]) - .unwrap(); - - let complex_upgrader = get_complex_upgrader_abi(); - let upgrade_function = complex_upgrader.function("upgrade").unwrap(); - let complex_upgrader_calldata = upgrade_function - .encode_input(&[ - Token::Address(implementation_address), - Token::Bytes(impl_calldata), - ]) - .unwrap(); - - let execute = Execute { - contract_address: COMPLEX_UPGRADER_ADDRESS, - calldata: complex_upgrader_calldata, - factory_deps: None, - value: U256::zero(), - }; - - Transaction { - common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, - gas_limit: U256::from(200_000_000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -fn read_complex_upgrade() -> Vec<u8> { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json") -} - -fn read_msg_sender_test() -> Vec<u8> { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/msg-sender.sol/MsgSenderTest.json") -} - -fn get_complex_upgrade_abi() -> Contract { - load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json" - ) -} - -fn get_complex_upgrader_abi() -> Contract { - load_sys_contract("ComplexUpgrader") -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/utils.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/utils.rs deleted file mode 100644 index ffbb9d89260..00000000000 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/tests/utils.rs +++
/dev/null @@ -1,106 +0,0 @@ -use ethabi::Contract; -use once_cell::sync::Lazy; - -use crate::vm_refunds_enhancement::tests::tester::InMemoryStorageView; -use zksync_contracts::{ - load_contract, read_bytecode, read_zbin_bytecode, BaseSystemContracts, SystemContractCode, -}; -use crate::interface::storage::{StoragePtr, WriteStorage}; -use zksync_types::utils::storage_key_for_standard_token_balance; -use zksync_types::{AccountTreeId, Address, StorageKey, H256, U256}; -use zksync_utils::bytecode::hash_bytecode; -use zksync_utils::{bytes_to_be_words, h256_to_u256, u256_to_h256}; - -use crate::vm_refunds_enhancement::types::internals::ZkSyncVmState; -use crate::vm_refunds_enhancement::HistoryMode; - -pub(crate) static BASE_SYSTEM_CONTRACTS: Lazy<BaseSystemContracts> = - Lazy::new(BaseSystemContracts::load_from_disk); - -// Probably make it a part of vm tester -pub(crate) fn verify_required_storage<H: HistoryMode>( - state: &ZkSyncVmState<InMemoryStorageView, H>, - required_values: Vec<(H256, StorageKey)>, -) { - for (required_value, key) in required_values { - let current_value = state.storage.storage.read_from_storage(&key); - - assert_eq!( - u256_to_h256(current_value), - required_value, - "Invalid value at key {key:?}" - ); - } -} - -pub(crate) fn verify_required_memory<H: HistoryMode>( - state: &ZkSyncVmState<InMemoryStorageView, H>, - required_values: Vec<(U256, u32, u32)>, -) { - for (required_value, memory_page, cell) in required_values { - let current_value = state - .memory - .read_slot(memory_page as usize, cell as usize) - .value; - assert_eq!(current_value, required_value); - } -} - -pub(crate) fn get_balance( - token_id: AccountTreeId, - account: &Address, - main_storage: StoragePtr<InMemoryStorageView>, -) -> U256 { - let key = storage_key_for_standard_token_balance(token_id, account); - h256_to_u256(main_storage.borrow_mut().read_value(&key)) -} - -pub(crate) fn read_test_contract() -> Vec<u8> { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json") -} - -pub(crate) fn get_bootloader(test: &str) -> SystemContractCode { - let bootloader_code = read_zbin_bytecode(format!( - "contracts/system-contracts/bootloader/tests/artifacts/{}.yul.zbin", - test - )); - - let bootloader_hash = hash_bytecode(&bootloader_code); - SystemContractCode { - code: bytes_to_be_words(bootloader_code), - hash: bootloader_hash, - } -} - -pub(crate) fn read_nonce_holder_tester() -> Vec<u8> { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/custom-account/nonce-holder-test.sol/NonceHolderTest.json") -} - -pub(crate) fn read_error_contract() -> Vec<u8> { - read_bytecode( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ) -} - -pub(crate) fn get_execute_error_calldata() -> Vec<u8> { - let test_contract = load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json", - ); - - let function = test_contract.function("require_short").unwrap(); - - function - .encode_input(&[]) - .expect("failed to encode parameters") -} - -pub(crate) fn read_many_owners_custom_account_contract() -> (Vec<u8>, Contract) { - let path = "etc/contracts-test-data/artifacts-zk/contracts/custom-account/many-owners-custom-account.sol/ManyOwnersCustomAccount.json"; - (read_bytecode(path), load_contract(path)) -} - -pub(crate) fn read_max_depth_contract() -> Vec<u8> { - read_zbin_bytecode( - "core/tests/ts-integration/contracts/zkasm/artifacts/deep_stak.zkasm/deep_stak.zkasm.zbin", - ) -} diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/types/internals/transaction_data.rs
b/core/lib/multivm/src/versions/vm_refunds_enhancement/types/internals/transaction_data.rs index 205090ba633..22ab09296c9 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/types/internals/transaction_data.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/types/internals/transaction_data.rs @@ -22,7 +22,7 @@ use crate::vm_refunds_enhancement::{ pub(crate) struct TransactionData { pub(crate) tx_type: u8, pub(crate) from: Address, - pub(crate) to: Address, + pub(crate) to: Option<Address>
, pub(crate) gas_limit: U256, pub(crate) pubdata_price_limit: U256, pub(crate) max_fee_per_gas: U256, @@ -169,7 +169,7 @@ impl TransactionData { encode(&[Token::Tuple(vec![ Token::Uint(U256::from_big_endian(&self.tx_type.to_be_bytes())), Token::Address(self.from), - Token::Address(self.to), + Token::Address(self.to.unwrap_or_default()), Token::Uint(self.gas_limit), Token::Uint(self.pubdata_price_limit), Token::Uint(self.max_fee_per_gas), @@ -325,7 +325,7 @@ mod tests { let transaction = TransactionData { tx_type: 113, from: Address::random(), - to: Address::random(), + to: Some(Address::random()), gas_limit: U256::from(1u32), pubdata_price_limit: U256::from(1u32), max_fee_per_gas: U256::from(1u32), diff --git a/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs b/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs index 119abf052b9..d87fd4d104d 100644 --- a/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs +++ b/core/lib/multivm/src/versions/vm_refunds_enhancement/vm.rs @@ -8,7 +8,6 @@ use crate::{ BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled, - VmMemoryMetrics, }, vm_latest::HistoryEnabled, vm_refunds_enhancement::{ @@ -83,7 +82,7 @@ impl VmInterface for Vm { /// Execute VM with custom tracers. fn inspect( &mut self, - dispatcher: Self::TracerDispatcher, + dispatcher: &mut Self::TracerDispatcher, execution_mode: VmExecutionMode, ) -> VmExecutionResultAndLogs { self.inspect_inner(dispatcher, execution_mode) @@ -96,7 +95,7 @@ impl VmInterface for Vm { /// Inspect transaction with optional bytecode compression. fn inspect_transaction_with_bytecode_compression( &mut self, - dispatcher: Self::TracerDispatcher, + dispatcher: &mut Self::TracerDispatcher, tx: Transaction, with_compression: bool, ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { @@ -118,12 +117,8 @@ impl VmInterface for Vm { } } - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { - self.record_vm_memory_metrics_inner() - } - fn finish_batch(&mut self) -> FinishedL1Batch { - let result = self.inspect(TracerDispatcher::default(), VmExecutionMode::Batch); + let result = self.inspect(&mut TracerDispatcher::default(), VmExecutionMode::Batch); let execution_state = self.get_current_execution_state(); let bootloader_memory = self.bootloader_state.bootloader_memory(); FinishedL1Batch { diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/utils.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/utils.rs index 2ccedcc6aa9..3e2474835fa 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/utils.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/bootloader_state/utils.rs @@ -133,8 +133,8 @@ pub(super) fn assemble_tx_meta(execution_mode: TxExecutionMode, execute_tx: bool // Set 0 byte (execution mode) output[0] = match execution_mode { TxExecutionMode::VerifyExecute => 0x00, - TxExecutionMode::EstimateFee { .. } => 0x00, - TxExecutionMode::EthCall { .. 
} => 0x02, + TxExecutionMode::EstimateFee => 0x00, + TxExecutionMode::EthCall => 0x02, }; // Set 31 byte (marker for tx execution) diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/execution.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/execution.rs index 42709c345ea..c48d48edd3b 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/execution.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/execution.rs @@ -1,3 +1,5 @@ +use std::mem; + use zk_evm_1_3_3::aux_structures::Timestamp; use crate::{ @@ -21,7 +23,7 @@ use crate::{ impl Vm { pub(crate) fn inspect_inner( &mut self, - tracer: TracerDispatcher, + tracer: &mut TracerDispatcher, execution_mode: VmExecutionMode, ) -> VmExecutionResultAndLogs { let mut enable_refund_tracer = false; @@ -40,7 +42,7 @@ impl Vm { /// Collect the result from the default tracers. fn inspect_and_collect_results( &mut self, - dispatcher: TracerDispatcher, + dispatcher: &mut TracerDispatcher, execution_mode: VmExecutionMode, enable_refund_tracer: bool, ) -> (VmExecutionStopReason, VmExecutionResultAndLogs) { @@ -50,7 +52,7 @@ impl Vm { DefaultExecutionTracer::new( self.system_env.default_validation_computational_gas_limit, execution_mode, - dispatcher, + mem::take(dispatcher), refund_tracer, self.storage.clone(), ); @@ -89,6 +91,7 @@ impl Vm { }; tx_tracer.dispatcher.save_results(&mut result); + *dispatcher = tx_tracer.dispatcher; (stop_reason, result) } diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/statistics.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/statistics.rs index d082085a155..dbd8813035e 100644 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/statistics.rs +++ b/core/lib/multivm/src/versions/vm_virtual_blocks/implementation/statistics.rs @@ -56,7 +56,7 @@ impl Vm { } /// Returns the info about all oracles' sizes. 
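// [Editorial aside, illustrative only, not part of the diff] The switch to `&mut Self::TracerDispatcher` together with the `mem::take`/write-back above lets the VM move the dispatcher into the tracer for the duration of one run and then hand it back, so callers keep ownership (and accumulated results) across `inspect` calls. A standalone sketch of that ownership dance with hypothetical types:
use std::mem;

#[derive(Default)]
struct Dispatcher {
    results: Vec<String>,
}

struct RunTracer {
    dispatcher: Dispatcher,
}

fn inspect(dispatcher: &mut Dispatcher) {
    // Move the dispatcher in, leaving a Default::default() behind...
    let mut tracer = RunTracer { dispatcher: mem::take(dispatcher) };
    tracer.dispatcher.results.push("run finished".to_string());
    // ...then hand it back so the caller sees the saved results.
    *dispatcher = tracer.dispatcher;
}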
- pub(crate) fn record_vm_memory_metrics_inner(&self) -> VmMemoryMetrics { + pub(crate) fn record_vm_memory_metrics(&self) -> VmMemoryMetrics { VmMemoryMetrics { event_sink_inner: self.state.event_sink.get_size(), event_sink_history: self.state.event_sink.get_history_size(), diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/bootloader.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/bootloader.rs deleted file mode 100644 index a30b5a58f63..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/bootloader.rs +++ /dev/null @@ -1,53 +0,0 @@ -use zksync_types::U256; - -use crate::interface::{ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterface}; -use crate::vm_virtual_blocks::constants::BOOTLOADER_HEAP_PAGE; -use crate::vm_virtual_blocks::tests::tester::VmTesterBuilder; -use crate::vm_virtual_blocks::tests::utils::{ - get_bootloader, verify_required_memory, BASE_SYSTEM_CONTRACTS, -}; - -use crate::vm_latest::HistoryEnabled; - -#[test] -fn test_dummy_bootloader() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!result.result.is_failed()); - - let correct_first_cell = U256::from_str_radix("123123123", 16).unwrap(); - verify_required_memory( - &vm.vm.state, - vec![(correct_first_cell, BOOTLOADER_HEAP_PAGE, 0)], - ); -} - -#[test] -fn test_bootloader_out_of_gas() { - let mut base_system_contracts = BASE_SYSTEM_CONTRACTS.clone(); - base_system_contracts.bootloader = get_bootloader("dummy"); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(base_system_contracts) - .with_gas_limit(10) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - let res = vm.vm.execute(VmExecutionMode::Batch); - - assert_matches!( - res.result, - ExecutionResult::Halt { - reason: Halt::BootloaderOutOfGas - } - ); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/bytecode_publishing.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/bytecode_publishing.rs deleted file mode 100644 index 773aa77e150..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/bytecode_publishing.rs +++ /dev/null @@ -1,37 +0,0 @@ -use zksync_types::event::extract_long_l2_to_l1_messages; -use zksync_utils::bytecode::compress_bytecode; - -use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterface}; -use crate::vm_latest::HistoryEnabled; -use crate::vm_virtual_blocks::tests::tester::{DeployContractsTx, TxType, VmTesterBuilder}; -use crate::vm_virtual_blocks::tests::utils::read_test_contract; - -#[test] -fn test_bytecode_publishing() { - // In this test, we aim to ensure that the contents of the compressed bytecodes - // are included as part of the L2->L1 long messages - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - - let compressed_bytecode = compress_bytecode(&counter).unwrap(); - - let DeployContractsTx { tx, .. 
} = account.get_deploy_tx(&counter, None, TxType::L2); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - - let state = vm.vm.get_current_execution_state(); - let long_messages = extract_long_l2_to_l1_messages(&state.events); - assert!( - long_messages.contains(&compressed_bytecode), - "Bytecode not published" - ); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/call_tracer.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/call_tracer.rs deleted file mode 100644 index 7ee647ee1f7..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/call_tracer.rs +++ /dev/null @@ -1,88 +0,0 @@ -use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterface}; -use crate::tracers::CallTracer; -use crate::vm_latest::HistoryEnabled; -use crate::vm_virtual_blocks::constants::BLOCK_GAS_LIMIT; -use crate::vm_virtual_blocks::tests::tester::VmTesterBuilder; -use crate::vm_virtual_blocks::tests::utils::{read_max_depth_contract, read_test_contract}; -use crate::vm_virtual_blocks::tracers::traits::ToTracerPointer; -use once_cell::sync::OnceCell; -use std::sync::Arc; -use zksync_types::{Address, Execute}; - -// This test is ultra slow, so it's ignored by default. -#[test] -#[ignore] -fn test_max_depth() { - let contract = read_max_depth_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contract, address, true)]) - .build(); - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: vec![], - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result = Arc::new(OnceCell::new()); - let call_tracer = CallTracer::new(result.clone()).into_tracer_pointer(); - vm.vm.push_transaction(tx); - let res = vm.vm.inspect(call_tracer.into(), VmExecutionMode::OneTx); - assert!(result.get().is_some()); - assert!(res.result.is_failed()); -} - -#[test] -fn test_basic_behavior() { - let contract = read_test_contract(); - let address = Address::random(); - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_random_rich_accounts(1) - .with_deployer() - .with_gas_limit(BLOCK_GAS_LIMIT) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_custom_contracts(vec![(contract, address, true)]) - .build(); - - let increment_by_6_calldata = - "7cf5dab00000000000000000000000000000000000000000000000000000000000000006"; - - let account = &mut vm.rich_accounts[0]; - let tx = account.get_l2_tx_for_execute( - Execute { - contract_address: address, - calldata: hex::decode(increment_by_6_calldata).unwrap(), - value: Default::default(), - factory_deps: None, - }, - None, - ); - - let result = Arc::new(OnceCell::new()); - let call_tracer = CallTracer::new(result.clone()); - vm.vm.push_transaction(tx); - let res = vm.vm.inspect( - call_tracer.into_tracer_pointer().into(), - VmExecutionMode::OneTx, - ); - - let call_tracer_result = result.get().unwrap(); - - assert_eq!(call_tracer_result.len(), 1); - // Expect that there are plenty of subcalls underneath.
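// [Editorial aside, illustrative only, not part of the diff] The call tracer tests above hand results back through an Arc<OnceCell>, so the caller keeps a handle while the tracer itself is moved into the VM. Standalone sketch of that handoff (hypothetical tracer logic):
use once_cell::sync::OnceCell;
use std::sync::Arc;

fn main() {
    let result: Arc<OnceCell<Vec<u32>>> = Arc::new(OnceCell::new());
    let tracer_handle = result.clone();
    // ...the tracer runs inside the VM and publishes its calls exactly once:
    tracer_handle.set(vec![1, 2, 3]).unwrap();
    // ...after which the caller reads them through its own handle.
    assert!(result.get().is_some());
}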
- let subcall = &call_tracer_result[0].calls; - assert!(subcall.len() > 10); - assert!(!res.result.is_failed()); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/default_aa.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/default_aa.rs deleted file mode 100644 index 02a69a6a5d2..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/default_aa.rs +++ /dev/null @@ -1,70 +0,0 @@ -use zksync_system_constants::L2_ETH_TOKEN_ADDRESS; -use zksync_types::system_contracts::{DEPLOYMENT_NONCE_INCREMENT, TX_NONCE_INCREMENT}; - -use zksync_types::{get_code_key, get_known_code_key, get_nonce_key, AccountTreeId, U256}; -use zksync_utils::u256_to_h256; - -use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterface}; -use crate::vm_latest::HistoryEnabled; -use crate::vm_virtual_blocks::tests::tester::{DeployContractsTx, TxType, VmTesterBuilder}; -use crate::vm_virtual_blocks::tests::utils::{ - get_balance, read_test_contract, verify_required_storage, -}; - -#[test] -fn test_default_aa_interaction() { - // In this test, we aim to test whether a simple account interaction (without any fee logic) - // will work. The account will try to deploy a simple contract from integration tests. - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let counter = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let DeployContractsTx { - tx, - bytecode_hash, - address, - } = account.get_deploy_tx(&counter, None, TxType::L2); - let maximal_fee = tx.gas_limit() * vm.vm.batch_env.base_fee(); - - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "Transaction wasn't successful"); - - vm.vm.execute(VmExecutionMode::Batch); - vm.vm.get_current_execution_state(); - - // Both deployment and ordinary nonce should be incremented by one. - let account_nonce_key = get_nonce_key(&account.address); - let expected_nonce = TX_NONCE_INCREMENT + DEPLOYMENT_NONCE_INCREMENT; - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&bytecode_hash); - - // The contract should be deployed successfully. 
- let account_code_key = get_code_key(&address); - - let expected_slots = vec![ - (u256_to_h256(expected_nonce), account_nonce_key), - (u256_to_h256(U256::from(1u32)), known_codes_key), - (bytecode_hash, account_code_key), - ]; - - verify_required_storage(&vm.vm.state, expected_slots); - - let expected_fee = maximal_fee - - U256::from(result.refunds.gas_refunded) * U256::from(vm.vm.batch_env.base_fee()); - let operator_balance = get_balance( - AccountTreeId::new(L2_ETH_TOKEN_ADDRESS), - &vm.fee_account, - vm.vm.state.storage.storage.get_ptr(), - ); - - assert_eq!( - operator_balance, expected_fee, - "Operator did not receive his fee" - ); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/gas_limit.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/gas_limit.rs deleted file mode 100644 index 01ebe4c0d22..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/gas_limit.rs +++ /dev/null @@ -1,47 +0,0 @@ -use zksync_types::fee::Fee; -use zksync_types::Execute; - -use crate::vm_virtual_blocks::constants::{ - BOOTLOADER_HEAP_PAGE, TX_DESCRIPTION_OFFSET, TX_GAS_LIMIT_OFFSET, -}; -use crate::vm_virtual_blocks::tests::tester::VmTesterBuilder; - -use crate::interface::{TxExecutionMode, VmInterface}; -use crate::vm_latest::HistoryDisabled; - -/// Checks that `TX_GAS_LIMIT_OFFSET` constant is correct. -#[test] -fn test_tx_gas_limit_offset() { - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let gas_limit = 9999.into(); - let tx = vm.rich_accounts[0].get_l2_tx_for_execute( - Execute { - contract_address: Default::default(), - calldata: vec![], - value: Default::default(), - factory_deps: None, - }, - Some(Fee { - gas_limit, - ..Default::default() - }), - ); - - vm.vm.push_transaction(tx); - - let gas_limit_from_memory = vm - .vm - .state - .memory - .read_slot( - BOOTLOADER_HEAP_PAGE as usize, - TX_DESCRIPTION_OFFSET + TX_GAS_LIMIT_OFFSET, - ) - .value; - assert_eq!(gas_limit_from_memory, gas_limit); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/get_used_contracts.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/get_used_contracts.rs deleted file mode 100644 index 06d8191310b..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/get_used_contracts.rs +++ /dev/null @@ -1,106 +0,0 @@ -use std::collections::{HashMap, HashSet}; - -use itertools::Itertools; - -use crate::HistoryMode; -use crate::interface::storage::WriteStorage; -use zksync_system_constants::CONTRACT_DEPLOYER_ADDRESS; -use zksync_test_account::Account; -use zksync_types::{Execute, U256}; -use zksync_utils::bytecode::hash_bytecode; -use zksync_utils::h256_to_u256; - -use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterface}; -use crate::vm_latest::HistoryDisabled; -use crate::vm_virtual_blocks::tests::tester::{TxType, VmTesterBuilder}; -use crate::vm_virtual_blocks::tests::utils::{read_test_contract, BASE_SYSTEM_CONTRACTS}; -use crate::vm_virtual_blocks::Vm; - -#[test] -fn test_get_used_contracts() { - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .build(); - - assert!(known_bytecodes_without_aa_code(&vm.vm).is_empty()); - - // create and push and execute some not-empty factory deps transaction with success status - // to check that get_used_contracts() updates - let contract_code = 
read_test_contract(); - let mut account = Account::random(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 0 }); - vm.vm.push_transaction(tx.tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed()); - - assert!(vm - .vm - .get_used_contracts() - .contains(&h256_to_u256(tx.bytecode_hash))); - - // Note: Default_AA will be in the list of used contracts if l2 tx is used - assert_eq!( - vm.vm - .get_used_contracts() - .into_iter() - .collect::<HashSet<U256>>(), - known_bytecodes_without_aa_code(&vm.vm) - .keys() - .cloned() - .collect::<HashSet<U256>>() - ); - - // create push and execute some non-empty factory deps transaction that fails - // (known_bytecodes will be updated but we expect get_used_contracts() to not be updated) - - let calldata = [1, 2, 3]; - let big_calldata: Vec<u8> = calldata - .iter() - .cycle() - .take(calldata.len() * 1024) - .cloned() - .collect(); - let account2 = Account::random(); - let tx2 = account2.get_l1_tx( - Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, - calldata: big_calldata, - value: Default::default(), - factory_deps: Some(vec![vec![1; 32]]), - }, - 1, - ); - - vm.vm.push_transaction(tx2.clone()); - - let res2 = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(res2.result.is_failed()); - - for factory_dep in tx2.execute.factory_deps.unwrap() { - let hash = hash_bytecode(&factory_dep); - let hash_to_u256 = h256_to_u256(hash); - assert!(known_bytecodes_without_aa_code(&vm.vm) - .keys() - .contains(&hash_to_u256)); - assert!(!vm.vm.get_used_contracts().contains(&hash_to_u256)); - } -} - -fn known_bytecodes_without_aa_code<S: WriteStorage, H: HistoryMode>( - vm: &Vm<S, H>, -) -> HashMap<U256, Vec<U256>> { - let mut known_bytecodes_without_aa_code = vm - .state - .decommittment_processor - .known_bytecodes - .inner() - .clone(); - - known_bytecodes_without_aa_code - .remove(&h256_to_u256(BASE_SYSTEM_CONTRACTS.default_aa.hash)) - .unwrap(); - - known_bytecodes_without_aa_code -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/invalid_bytecode.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/invalid_bytecode.rs deleted file mode 100644 index f8074c1db10..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/invalid_bytecode.rs +++ /dev/null @@ -1,120 +0,0 @@ -use zksync_types::H256; -use zksync_utils::h256_to_u256; - -use crate::interface::TxExecutionMode; -use crate::vm_virtual_blocks::tests::tester::VmTesterBuilder; -use crate::vm_virtual_blocks::{HistoryEnabled, TxRevertReason}; - -// TODO this test requires a lot of hacks for bypassing the bytecode checks in the VM. -// Port it later, it's not significant.
for now - -#[test] -fn test_invalid_bytecode() { - let mut vm_builder = VmTesterBuilder::new(HistoryEnabled) - .with_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1); - let mut storage = vm_builder.take_storage(); - let mut vm = vm_builder.build(&mut storage); - - let block_gas_per_pubdata = vm_test_env - .block_context - .context - .block_gas_price_per_pubdata(); - - let mut test_vm_with_custom_bytecode_hash = - |bytecode_hash: H256, expected_revert_reason: Option<TxRevertReason>| { - let mut oracle_tools = - OracleTools::new(vm_test_env.storage_ptr.as_mut(), HistoryEnabled); - - let (encoded_tx, predefined_overhead) = get_l1_tx_with_custom_bytecode_hash( - h256_to_u256(bytecode_hash), - block_gas_per_pubdata as u32, - ); - - run_vm_with_custom_factory_deps( - &mut oracle_tools, - vm_test_env.block_context.context, - &vm_test_env.block_properties, - encoded_tx, - predefined_overhead, - expected_revert_reason, - ); - }; - - let failed_to_mark_factory_deps = |msg: &str, data: Vec<u8>| { - TxRevertReason::FailedToMarkFactoryDependencies(VmRevertReason::General { - msg: msg.to_string(), - data, - }) - }; - - // Here we provide the correctly-formatted bytecode hash of - // odd length, so it should work. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - None, - ); - - // Here we provide correctly formatted bytecode of even length, so - // it should fail. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 0, 2, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Code length in words must be odd", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 67, 111, 100, 101, 32, 108, 101, 110, - 103, 116, 104, 32, 105, 110, 32, 119, 111, 114, 100, 115, 32, 109, 117, 115, 116, - 32, 98, 101, 32, 111, 100, 100, - ], - )), - ); - - // Here we provide incorrectly formatted bytecode of odd length, so - // it should fail. - test_vm_with_custom_bytecode_hash( - H256([ - 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Incorrectly formatted bytecodeHash", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, - 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, - 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - )), - ); - - // Here we provide incorrectly formatted bytecode of odd length, so - // it should fail.
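// [Editorial aside, illustrative only, not part of the diff] The four cases in this test probe the bytecode-hash format rules enforced when factory deps are marked: byte 0 is a version marker (1 in these vectors), byte 1 must be zero, and bytes 2..4 hold the code length in 32-byte words (big-endian), which must be odd. A rough check capturing those rules, stated as an assumption read off the test vectors rather than the repo's validation code:
fn is_well_formed_bytecode_hash(hash: &[u8; 32]) -> bool {
    let version_ok = hash[0] == 1 && hash[1] == 0;
    let len_in_words = u16::from_be_bytes([hash[2], hash[3]]);
    version_ok && len_in_words % 2 == 1
}
// Against the vectors here: [1,0,0,1,..] passes; [1,0,2,2,..] fails (even word
// count); [1,1,0,1,..] and [2,0,0,1,..] fail the format check.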
- test_vm_with_custom_bytecode_hash( - H256([ - 2, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, - ]), - Some(failed_to_mark_factory_deps( - "Incorrectly formatted bytecodeHash", - vec![ - 8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 34, 73, 110, 99, 111, 114, 114, 101, 99, - 116, 108, 121, 32, 102, 111, 114, 109, 97, 116, 116, 101, 100, 32, 98, 121, 116, - 101, 99, 111, 100, 101, 72, 97, 115, 104, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - ], - )), - ); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/is_write_initial.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/is_write_initial.rs deleted file mode 100644 index 2c7ef4a8d11..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/is_write_initial.rs +++ /dev/null @@ -1,42 +0,0 @@ -use crate::interface::storage::ReadStorage; -use zksync_types::get_nonce_key; - -use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterface}; -use crate::vm_latest::HistoryDisabled; -use crate::vm_virtual_blocks::tests::tester::{Account, TxType, VmTesterBuilder}; -use crate::vm_virtual_blocks::tests::utils::read_test_contract; - -#[test] -fn test_is_write_initial_behaviour() { - // In this test, we check the result of `is_write_initial` at different stages. - // The main idea is to check that `is_write_initial` storage uses the correct cache for initial_writes and doesn't - // mess it up with the repeated writes during one batch execution. - - let mut account = Account::random(); - let mut vm = VmTesterBuilder::new(HistoryDisabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let nonce_key = get_nonce_key(&account.address); - // Check that the next write to the nonce key will be initial. - assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); - - let contract_code = read_test_contract(); - let tx = account.get_deploy_tx(&contract_code, None, TxType::L2).tx; - - vm.vm.push_transaction(tx); - vm.vm.execute(VmExecutionMode::OneTx); - - // Check that `is_write_initial` still returns true for the nonce key.
- assert!(vm - .storage - .as_ref() - .borrow_mut() - .is_write_initial(&nonce_key)); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/l1_tx_execution.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/l1_tx_execution.rs deleted file mode 100644 index 64d9f98ddb3..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/l1_tx_execution.rs +++ /dev/null @@ -1,125 +0,0 @@ -use zksync_system_constants::BOOTLOADER_ADDRESS; -use zksync_types::l2_to_l1_log::L2ToL1Log; -use zksync_types::storage_writes_deduplicator::StorageWritesDeduplicator; -use zksync_types::{get_code_key, get_known_code_key, U256}; -use zksync_utils::u256_to_h256; - -use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterface}; -use crate::vm_latest::HistoryEnabled; -use crate::vm_virtual_blocks::tests::tester::{TxType, VmTesterBuilder}; -use crate::vm_virtual_blocks::tests::utils::{ - read_test_contract, verify_required_storage, BASE_SYSTEM_CONTRACTS, -}; -use crate::vm_virtual_blocks::types::internals::TransactionData; - -#[test] -fn test_l1_tx_execution() { - // In this test, we try to execute a contract deployment from L1 - // Here instead of marking code hash via the bootloader means, we will be - // using L1->L2 communication, the same it would likely be done during the priority mode. - - // There are always at least 3 initial writes here, because we pay fees from l1: - // - totalSupply of ETH token - // - balance of the refund recipient - // - balance of the bootloader - // - tx_rollout hash - - let basic_initial_writes = 1; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone()) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let contract_code = read_test_contract(); - let account = &mut vm.rich_accounts[0]; - let deploy_tx = account.get_deploy_tx(&contract_code, None, TxType::L1 { serial_id: 1 }); - let tx_data: TransactionData = deploy_tx.tx.clone().into(); - - let required_l2_to_l1_logs = vec![L2ToL1Log { - shard_id: 0, - is_service: true, - tx_number_in_block: 0, - sender: BOOTLOADER_ADDRESS, - key: tx_data.tx_hash(0.into()), - value: u256_to_h256(U256::from(1u32)), - }]; - - vm.vm.push_transaction(deploy_tx.tx.clone()); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - // The code hash of the deployed contract should be marked as republished. - let known_codes_key = get_known_code_key(&deploy_tx.bytecode_hash); - - // The contract should be deployed successfully. 
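// [Editorial aside, illustrative only, not part of the diff] The assertions below distinguish "initial" from "repeated" storage writes: a slot written for the first time ever counts as an initial write, a slot that already held a committed value counts as repeated, and multiple writes to one slot within a batch are deduplicated first. A simplified model of that counting (an assumption-level sketch, not StorageWritesDeduplicator itself):
use std::collections::HashSet;

fn classify_writes(pre_state: &HashSet<u64>, written_keys: &[u64]) -> (usize, usize) {
    // Deduplicate: a slot touched several times in one batch is counted once.
    let unique: HashSet<u64> = written_keys.iter().copied().collect();
    let initial = unique.iter().filter(|k| !pre_state.contains(k)).count();
    (initial, unique.len() - initial)
}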
- let account_code_key = get_code_key(&deploy_tx.address); - - let expected_slots = vec![ - (u256_to_h256(U256::from(1u32)), known_codes_key), - (deploy_tx.bytecode_hash, account_code_key), - ]; - assert!(!res.result.is_failed()); - - verify_required_storage(&vm.vm.state, expected_slots); - - assert_eq!(res.logs.l2_to_l1_logs, required_l2_to_l1_logs); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - true, - None, - false, - TxType::L1 { serial_id: 0 }, - ); - vm.vm.push_transaction(tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - let storage_logs = res.logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - - // Tx panicked - assert_eq!(res.initial_storage_writes - basic_initial_writes, 0); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - false, - None, - false, - TxType::L1 { serial_id: 0 }, - ); - vm.vm.push_transaction(tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); - let storage_logs = res.logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We changed one slot inside contract - assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); - - // No repeated writes - let repeated_writes = res.repeated_storage_writes; - assert_eq!(res.repeated_storage_writes, 0); - - vm.vm.push_transaction(tx); - let storage_logs = vm.vm.execute(VmExecutionMode::OneTx).logs.storage_logs; - let res = StorageWritesDeduplicator::apply_on_empty_state(&storage_logs); - // We do the same storage write, it will be deduplicated, so still 4 initial write and 0 repeated - assert_eq!(res.initial_storage_writes - basic_initial_writes, 1); - assert_eq!(res.repeated_storage_writes, repeated_writes); - - let tx = account.get_test_contract_transaction( - deploy_tx.address, - false, - Some(10.into()), - false, - TxType::L1 { serial_id: 1 }, - ); - vm.vm.push_transaction(tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - // Method is not payable tx should fail - assert!(result.result.is_failed(), "The transaction should fail"); - - let res = StorageWritesDeduplicator::apply_on_empty_state(&result.logs.storage_logs); - // There are only basic initial writes - assert_eq!(res.initial_storage_writes - basic_initial_writes, 2); -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/l2_blocks.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/l2_blocks.rs deleted file mode 100644 index cba534deeaf..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/l2_blocks.rs +++ /dev/null @@ -1,502 +0,0 @@ -//! -//! Tests for the bootloader -//! The description for each of the tests can be found in the corresponding `.yul` file. -//! 
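// [Editorial aside, illustrative only, not part of the diff] Several tests in this file seed the system context by writing packed block info, e.g. `u256_to_h256(pack_block_info(number, timestamp))`. The packing convention assumed here is: block number in the high 128 bits and timestamp in the low 128 bits of one 256-bit word. A standalone sketch of that layout over a 32-byte big-endian buffer:
fn pack_block_info_sketch(number: u64, timestamp: u64) -> [u8; 32] {
    let mut packed = [0u8; 32];
    packed[8..16].copy_from_slice(&number.to_be_bytes()); // low half of the high u128
    packed[24..32].copy_from_slice(&timestamp.to_be_bytes()); // low half of the low u128
    packed
}

fn unpack_block_info_sketch(packed: [u8; 32]) -> (u64, u64) {
    let number = u64::from_be_bytes(packed[8..16].try_into().unwrap());
    let timestamp = u64::from_be_bytes(packed[24..32].try_into().unwrap());
    (number, timestamp)
}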
- -use crate::interface::{ - ExecutionResult, Halt, L2BlockEnv, TxExecutionMode, VmExecutionMode, VmInterface, -}; -use crate::vm_latest::HistoryEnabled; -use crate::vm_virtual_blocks::constants::{ - BOOTLOADER_HEAP_PAGE, TX_OPERATOR_L2_BLOCK_INFO_OFFSET, TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO, -}; -use crate::vm_virtual_blocks::tests::tester::default_l1_batch; -use crate::vm_virtual_blocks::tests::tester::VmTesterBuilder; -use crate::vm_virtual_blocks::utils::l2_blocks::get_l2_block_hash_key; -use crate::vm_virtual_blocks::Vm; -use crate::HistoryMode; -use zk_evm_1_3_3::aux_structures::Timestamp; -use crate::interface::storage::{ReadStorage, WriteStorage}; -use zksync_system_constants::{ - CURRENT_VIRTUAL_BLOCK_INFO_POSITION, REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, -}; -use zksync_types::block::{pack_block_info, unpack_block_info}; -use zksync_types::{ - block::{legacy_miniblock_hash, miniblock_hash}, - get_code_key, AccountTreeId, Execute, ExecuteTransactionCommon, L1BatchNumber, L1TxCommonData, - MiniblockNumber, StorageKey, Transaction, H160, H256, SYSTEM_CONTEXT_ADDRESS, - SYSTEM_CONTEXT_BLOCK_INFO_POSITION, SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, U256, -}; -use zksync_utils::{h256_to_u256, u256_to_h256}; - -fn get_l1_noop() -> Transaction { - Transaction { - common_data: ExecuteTransactionCommon::L1(L1TxCommonData { - sender: H160::random(), - gas_limit: U256::from(2000000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute: Execute { - contract_address: H160::zero(), - calldata: vec![], - value: U256::zero(), - factory_deps: None, - }, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -#[test] -fn test_l2_block_initialization_timestamp() { - // This test checks that the L2 block initialization works correctly. - // Here we check that the first block must have a timestamp that is greater than or equal to the timestamp - // of the current batch. - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - // Override the timestamp of the current miniblock to be 0. - vm.vm.bootloader_state.push_l2_block(L2BlockEnv { - number: 1, - timestamp: 0, - prev_block_hash: legacy_miniblock_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }); - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt {reason: Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())} - ); -} - -#[test] -fn test_l2_block_initialization_number_non_zero() { - // This test checks that the L2 block initialization works correctly. - // Here we check that the first miniblock number can not be zero.
- - let l1_batch = default_l1_batch(L1BatchNumber(1)); - let first_l2_block = L2BlockEnv { - number: 0, - timestamp: l1_batch.timestamp, - prev_block_hash: legacy_miniblock_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - vm.vm.push_transaction(l1_tx); - - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 0, first_l2_block, timestamp); - - let res = vm.vm.execute(VmExecutionMode::OneTx); - - assert_eq!( - res.result, - ExecutionResult::Halt { - reason: Halt::FailedToSetL2Block( - "L2 block number is never expected to be zero".to_string() - ) - } - ); -} - -fn test_same_l2_block( - expected_error: Option<Halt>, - override_timestamp: Option<u64>, - override_prev_block_hash: Option<H256>, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - vm.vm.push_transaction(l1_tx.clone()); - let res = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!res.result.is_failed()); - - let mut current_l2_block = vm.vm.batch_env.first_l2_block; - - if let Some(timestamp) = override_timestamp { - current_l2_block.timestamp = timestamp; - } - if let Some(prev_block_hash) = override_prev_block_hash { - current_l2_block.prev_block_hash = prev_block_hash; - } - - if (None, None) == (override_timestamp, override_prev_block_hash) { - current_l2_block.max_virtual_blocks_to_create = 0; - } - - vm.vm.push_transaction(l1_tx); - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 1, current_l2_block, timestamp); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_same_l2_block() { - // This test aims to test the case when there are multiple transactions inside the same L2 block.
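// [Editorial aside, illustrative only, not part of the diff] The cases below boil down to a transition rule for L2 block info: a transaction either continues the same L2 block (identical number, timestamp, and previous-block hash) or opens a new one (number incremented by exactly 1, strictly increasing timestamp, previous hash equal to the hash of the last block). A sketch of that invariant with a simplified block struct mirroring L2BlockEnv's fields (an assumption-level model, not the bootloader check itself):
#[derive(Clone, Copy)]
struct BlockInfo {
    number: u32,
    timestamp: u64,
    prev_hash: [u8; 32],
}

fn is_valid_transition(last: BlockInfo, last_hash: [u8; 32], next: BlockInfo) -> bool {
    let same_block = next.number == last.number
        && next.timestamp == last.timestamp
        && next.prev_hash == last.prev_hash;
    let new_block = next.number == last.number + 1
        && next.timestamp > last.timestamp
        && next.prev_hash == last_hash;
    same_block || new_block
}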
- - // Case 1: Incorrect timestamp - test_same_l2_block( - Some(Halt::FailedToSetL2Block( - "The timestamp of the same L2 block must be same".to_string(), - )), - Some(0), - None, - ); - - // Case 2: Incorrect previous block hash - test_same_l2_block( - Some(Halt::FailedToSetL2Block( - "The previous hash of the same L2 block must be same".to_string(), - )), - None, - Some(H256::zero()), - ); - - // Case 3: Correct continuation of the same L2 block - test_same_l2_block(None, None, None); -} - -fn test_new_l2_block( - first_l2_block: L2BlockEnv, - overriden_second_block_number: Option<u32>, - overriden_second_block_timestamp: Option<u64>, - overriden_second_block_prev_block_hash: Option<H256>, - expected_error: Option<Halt>, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - l1_batch.first_l2_block = first_l2_block; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - // Firstly we execute the first transaction - vm.vm.push_transaction(l1_tx.clone()); - vm.vm.execute(VmExecutionMode::OneTx); - - let mut second_l2_block = vm.vm.batch_env.first_l2_block; - second_l2_block.number += 1; - second_l2_block.timestamp += 1; - second_l2_block.prev_block_hash = vm.vm.bootloader_state.last_l2_block().get_hash(); - - if let Some(block_number) = overriden_second_block_number { - second_l2_block.number = block_number; - } - if let Some(timestamp) = overriden_second_block_timestamp { - second_l2_block.timestamp = timestamp; - } - if let Some(prev_block_hash) = overriden_second_block_prev_block_hash { - second_l2_block.prev_block_hash = prev_block_hash; - } - - vm.vm.bootloader_state.push_l2_block(second_l2_block); - - vm.vm.push_transaction(l1_tx); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_new_l2_block() { - // This test is aimed to cover potential issue - - let correct_first_block = L2BlockEnv { - number: 1, - timestamp: 1, - prev_block_hash: legacy_miniblock_hash(MiniblockNumber(0)), - max_virtual_blocks_to_create: 1, - }; - - // Case 1: Block number increasing by more than 1 - test_new_l2_block( - correct_first_block, - Some(3), - None, - None, - Some(Halt::FailedToSetL2Block( - "Invalid new L2 block number".to_string(), - )), - ); - - // Case 2: Timestamp not increasing - test_new_l2_block( - correct_first_block, - None, - Some(1), - None, - Some(Halt::FailedToSetL2Block("The timestamp of the new L2 block must be greater than the timestamp of the previous L2 block".to_string())), - ); - - // Case 3: Incorrect previous block hash - test_new_l2_block( - correct_first_block, - None, - None, - Some(H256::zero()), - Some(Halt::FailedToSetL2Block( - "The current L2 block hash is incorrect".to_string(), - )), - ); - - // Case 4: Correct new block - test_new_l2_block(correct_first_block, None, None, None, None); -} - -#[allow(clippy::too_many_arguments)] -fn test_first_in_batch( - miniblock_timestamp: u64, - miniblock_number: u32, - pending_txs_hash: H256, - batch_timestamp: u64, - new_batch_timestamp: u64, - batch_number: u32, - proposed_block: L2BlockEnv, - expected_error: Option<Halt>, -) { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.number
+= 1; - l1_batch.timestamp = new_batch_timestamp; - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_l1_batch_env(l1_batch) - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - let l1_tx = get_l1_noop(); - - // Setting the values provided. - let storage_ptr = vm.vm.state.storage.storage.get_ptr(); - let miniblock_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - ); - let pending_txs_hash_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, - ); - let batch_info_slot = StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - SYSTEM_CONTEXT_BLOCK_INFO_POSITION, - ); - let prev_block_hash_position = get_l2_block_hash_key(miniblock_number - 1); - - storage_ptr.borrow_mut().set_value( - miniblock_info_slot, - u256_to_h256(pack_block_info( - miniblock_number as u64, - miniblock_timestamp, - )), - ); - storage_ptr - .borrow_mut() - .set_value(pending_txs_hash_slot, pending_txs_hash); - storage_ptr.borrow_mut().set_value( - batch_info_slot, - u256_to_h256(pack_block_info(batch_number as u64, batch_timestamp)), - ); - storage_ptr.borrow_mut().set_value( - prev_block_hash_position, - legacy_miniblock_hash(MiniblockNumber(miniblock_number - 1)), - ); - - // In order to skip checks from the Rust side of the VM, we firstly use some definitely correct L2 block info. - // And then override it with the user-provided value - - let last_l2_block = vm.vm.bootloader_state.last_l2_block(); - let new_l2_block = L2BlockEnv { - number: last_l2_block.number + 1, - timestamp: last_l2_block.timestamp + 1, - prev_block_hash: last_l2_block.get_hash(), - max_virtual_blocks_to_create: last_l2_block.max_virtual_blocks_to_create, - }; - - vm.vm.bootloader_state.push_l2_block(new_l2_block); - vm.vm.push_transaction(l1_tx); - let timestamp = Timestamp(vm.vm.state.local_state.timestamp); - set_manual_l2_block_info(&mut vm.vm, 0, proposed_block, timestamp); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - if let Some(err) = expected_error { - assert_eq!(result.result, ExecutionResult::Halt { reason: err }); - } else { - assert_eq!(result.result, ExecutionResult::Success { output: vec![] }); - } -} - -#[test] -fn test_l2_block_first_in_batch() { - test_first_in_batch( - 1, - 1, - H256::zero(), - 1, - 2, - 1, - L2BlockEnv { - number: 2, - timestamp: 2, - prev_block_hash: miniblock_hash( - MiniblockNumber(1), - 1, - legacy_miniblock_hash(MiniblockNumber(0)), - H256::zero(), - ), - max_virtual_blocks_to_create: 1, - }, - None, - ); - - test_first_in_batch( - 8, - 1, - H256::zero(), - 5, - 12, - 1, - L2BlockEnv { - number: 2, - timestamp: 9, - prev_block_hash: miniblock_hash(MiniblockNumber(1), 8, legacy_miniblock_hash(MiniblockNumber(0)), H256::zero()), - max_virtual_blocks_to_create: 1 - }, - Some(Halt::FailedToSetL2Block("The timestamp of the L2 block must be greater than or equal to the timestamp of the current batch".to_string())), - ); -} - -#[test] -fn test_l2_block_upgrade() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - vm.vm - .state - .storage - .storage - .get_ptr() - .borrow_mut() - .set_value(get_code_key(&SYSTEM_CONTEXT_ADDRESS), H256::default()); - - let l1_tx = get_l1_noop(); - // Firstly we execute the first transaction - 
vm.vm.push_transaction(l1_tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "No revert reason expected"); - let result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!result.result.is_failed(), "No revert reason expected"); -} - -#[test] -fn test_l2_block_upgrade_ending() { - let mut l1_batch = default_l1_batch(L1BatchNumber(1)); - l1_batch.timestamp = 1; - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_l1_batch_env(l1_batch.clone()) - .with_random_rich_accounts(1) - .build(); - - let l1_tx = get_l1_noop(); - - let storage = vm.storage.clone(); - - storage - .borrow_mut() - .set_value(get_code_key(&SYSTEM_CONTEXT_ADDRESS), H256::default()); - - vm.vm.push_transaction(l1_tx.clone()); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - assert!(!result.result.is_failed(), "No revert reason expected"); - - let virtual_block_info = storage.borrow_mut().read_value(&StorageKey::new( - AccountTreeId::new(SYSTEM_CONTEXT_ADDRESS), - CURRENT_VIRTUAL_BLOCK_INFO_POSITION, - )); - - let (virtual_block_number, virtual_block_timestamp) = - unpack_block_info(h256_to_u256(virtual_block_info)); - - assert_eq!(virtual_block_number as u32, l1_batch.first_l2_block.number); - assert_eq!(virtual_block_timestamp, l1_batch.first_l2_block.timestamp); - vm.vm.push_transaction(l1_tx); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!(!result.result.is_failed(), "No revert reason expected"); - let result = vm.vm.execute(VmExecutionMode::Batch); - assert!(!result.result.is_failed(), "No revert reason expected"); -} - -fn set_manual_l2_block_info<S: WriteStorage, H: HistoryMode>( - vm: &mut Vm<S, H>, - tx_number: usize, - block_info: L2BlockEnv, - timestamp: Timestamp, -) { - let fictive_miniblock_position = - TX_OPERATOR_L2_BLOCK_INFO_OFFSET + TX_OPERATOR_SLOTS_PER_L2_BLOCK_INFO * tx_number; - - vm.state.memory.populate_page( - BOOTLOADER_HEAP_PAGE as usize, - vec![ - (fictive_miniblock_position, block_info.number.into()), - (fictive_miniblock_position + 1, block_info.timestamp.into()), - ( - fictive_miniblock_position + 2, - h256_to_u256(block_info.prev_block_hash), - ), - ( - fictive_miniblock_position + 3, - block_info.max_virtual_blocks_to_create.into(), - ), - ], - timestamp, - ) -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/mod.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/mod.rs deleted file mode 100644 index ffb38dd3725..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/mod.rs +++ /dev/null @@ -1,20 +0,0 @@ -mod bootloader; -mod default_aa; -// TODO - fix this test -// mod invalid_bytecode; -mod bytecode_publishing; -mod call_tracer; -mod gas_limit; -mod get_used_contracts; -mod is_write_initial; -mod l1_tx_execution; -mod l2_blocks; -mod nonce_holder; -mod refunds; -mod require_eip712; -mod rollbacks; -mod simple_execution; -mod tester; -mod tracing_execution_error; -mod upgrade; -mod utils; diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/nonce_holder.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/nonce_holder.rs deleted file mode 100644 index 162a3f46cb1..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/nonce_holder.rs +++ /dev/null @@ -1,182 +0,0 @@ -use zksync_types::{Execute, Nonce}; - -use crate::interface::{ - ExecutionResult, Halt, TxExecutionMode, TxRevertReason, VmExecutionMode, VmInterface, - VmRevertReason, -}; -use crate::vm_latest::HistoryEnabled; -use
crate::vm_virtual_blocks::tests::tester::{Account, VmTesterBuilder}; -use crate::vm_virtual_blocks::tests::utils::read_nonce_holder_tester; -use crate::vm_virtual_blocks::types::internals::TransactionData; - -pub enum NonceHolderTestMode { - SetValueUnderNonce, - IncreaseMinNonceBy5, - IncreaseMinNonceTooMuch, - LeaveNonceUnused, - IncreaseMinNonceBy1, - SwitchToArbitraryOrdering, -} - -impl From for u8 { - fn from(mode: NonceHolderTestMode) -> u8 { - match mode { - NonceHolderTestMode::SetValueUnderNonce => 0, - NonceHolderTestMode::IncreaseMinNonceBy5 => 1, - NonceHolderTestMode::IncreaseMinNonceTooMuch => 2, - NonceHolderTestMode::LeaveNonceUnused => 3, - NonceHolderTestMode::IncreaseMinNonceBy1 => 4, - NonceHolderTestMode::SwitchToArbitraryOrdering => 5, - } - } -} - -#[test] -fn test_nonce_holder() { - let mut account = Account::random(); - - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_deployer() - .with_custom_contracts(vec![( - read_nonce_holder_tester().to_vec(), - account.address, - true, - )]) - .with_rich_accounts(vec![account.clone()]) - .build(); - - let mut run_nonce_test = |nonce: u32, - test_mode: NonceHolderTestMode, - error_message: Option, - comment: &'static str| { - // In this test we have to reset VM state after each test case. Because once bootloader failed during the validation of the transaction, - // it will fail again and again. At the same time we have to keep the same storage, because we want to keep the nonce holder contract state. - // The easiest way in terms of lifetimes is to reuse vm_builder to achieve it. - vm.reset_state(true); - let mut transaction_data: TransactionData = account - .get_l2_tx_for_execute_with_nonce( - Execute { - contract_address: account.address, - calldata: vec![12], - value: Default::default(), - factory_deps: None, - }, - None, - Nonce(nonce), - ) - .into(); - - transaction_data.signature = vec![test_mode.into()]; - vm.vm.push_raw_transaction(transaction_data, 0, 0, true); - let result = vm.vm.execute(VmExecutionMode::OneTx); - - if let Some(msg) = error_message { - let expected_error = - TxRevertReason::Halt(Halt::ValidationFailed(VmRevertReason::General { - msg, - data: vec![], - })); - let ExecutionResult::Halt { reason } = result.result else { - panic!("Expected revert, got {:?}", result.result); - }; - assert_eq!( - reason.to_string(), - expected_error.to_string(), - "{}", - comment - ); - } else { - assert!(!result.result.is_failed(), "{}", comment); - } - }; - // Test 1: trying to set value under non sequential nonce value. 
-    run_nonce_test(
-        1u32,
-        NonceHolderTestMode::SetValueUnderNonce,
-        Some("Previous nonce has not been used".to_string()),
-        "Allowed to set value under non sequential value",
-    );
-
-    // Test 2: increase min nonce by 1 with sequential nonce ordering:
-    run_nonce_test(
-        0u32,
-        NonceHolderTestMode::IncreaseMinNonceBy1,
-        None,
-        "Failed to increment nonce by 1 for sequential account",
-    );
-
-    // Test 3: correctly set value under nonce with sequential nonce ordering:
-    run_nonce_test(
-        1u32,
-        NonceHolderTestMode::SetValueUnderNonce,
-        None,
-        "Failed to set value under nonce sequential value",
-    );
-
-    // Test 5: migrate to arbitrary nonce ordering:
-    run_nonce_test(
-        2u32,
-        NonceHolderTestMode::SwitchToArbitraryOrdering,
-        None,
-        "Failed to switch to arbitrary ordering",
-    );
-
-    // Test 6: increase min nonce by 5
-    run_nonce_test(
-        6u32,
-        NonceHolderTestMode::IncreaseMinNonceBy5,
-        None,
-        "Failed to increase min nonce by 5",
-    );
-
-    // Test 7: since the nonces in range [6,10] are no longer allowed, the
-    // tx with nonce 10 should not be allowed
-    run_nonce_test(
-        10u32,
-        NonceHolderTestMode::IncreaseMinNonceBy5,
-        Some("Reusing the same nonce twice".to_string()),
-        "Allowed to reuse nonce below the minimal one",
-    );
-
-    // Test 8: we should be able to use nonce 13
-    run_nonce_test(
-        13u32,
-        NonceHolderTestMode::SetValueUnderNonce,
-        None,
-        "Did not allow to use unused nonce 10",
-    );
-
-    // Test 9: we should not be able to reuse nonce 13
-    run_nonce_test(
-        13u32,
-        NonceHolderTestMode::IncreaseMinNonceBy5,
-        Some("Reusing the same nonce twice".to_string()),
-        "Allowed to reuse the same nonce twice",
-    );
-
-    // Test 10: we should be able to simply use nonce 14, while bumping the minimal nonce by 5
-    run_nonce_test(
-        14u32,
-        NonceHolderTestMode::IncreaseMinNonceBy5,
-        None,
-        "Did not allow to use a bumped nonce",
-    );
-
-    // Test 11: do not allow bumping the nonce by too much
-    run_nonce_test(
-        16u32,
-        NonceHolderTestMode::IncreaseMinNonceTooMuch,
-        Some("The value for incrementing the nonce is too high".to_string()),
-        "Allowed for incrementing min nonce too much",
-    );
-
-    // Test 12: do not allow leaving a nonce unmarked as used
-    run_nonce_test(
-        16u32,
-        NonceHolderTestMode::LeaveNonceUnused,
-        Some("The nonce was not set as used".to_string()),
-        "Allowed to leave nonce as unused",
-    );
-}
diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/refunds.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/refunds.rs
deleted file mode 100644
index d0b3b7cbee3..00000000000
--- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/refunds.rs
+++ /dev/null
@@ -1,152 +0,0 @@
-use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterface};
-use crate::vm_virtual_blocks::tests::tester::{DeployContractsTx, TxType, VmTesterBuilder};
-use crate::vm_virtual_blocks::tests::utils::read_test_contract;
-
-use crate::vm_latest::HistoryEnabled;
-use crate::vm_virtual_blocks::types::internals::TransactionData;
-
-#[test]
-fn test_predetermined_refunded_gas() {
-    // In this test, we compare the execution of the bootloader with the predefined
-    // refunded gas and without it.
-
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_random_rich_accounts(1)
-        .build();
-    let l1_batch = vm.vm.batch_env.clone();
-
-    let counter = read_test_contract();
-    let account = &mut vm.rich_accounts[0];
-
-    let DeployContractsTx {
-        tx,
-        bytecode_hash: _,
-        address: _,
-    } = account.get_deploy_tx(&counter, None, TxType::L2);
-    vm.vm.push_transaction(tx.clone());
-    let result = vm.vm.execute(VmExecutionMode::OneTx);
-
-    assert!(!result.result.is_failed());
-
-    // If the refund provided by the operator or the final refund is 0,
-    // the operator's refund has no impact at all, and so this test does not
-    // make much sense.
-    assert!(
-        result.refunds.operator_suggested_refund > 0,
-        "The operator's refund is 0"
-    );
-    assert!(result.refunds.gas_refunded > 0, "The final refund is 0");
-
-    let result_without_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch);
-    let mut current_state_without_predefined_refunds = vm.vm.get_current_execution_state();
-    assert!(!result_without_predefined_refunds.result.is_failed(),);
-
-    // Here we want to provide the same refund from the operator and check that it's the correct one.
-    // We execute the whole block without the refund tracer, because the refund tracer would eventually
-    // override the provided refund. The overall result should be the same.
-
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_l1_batch_env(l1_batch.clone())
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_rich_accounts(vec![account.clone()])
-        .build();
-
-    let tx: TransactionData = tx.into();
-    let block_gas_per_pubdata_byte = vm.vm.batch_env.block_gas_price_per_pubdata();
-    // Overhead
-    let overhead = tx.overhead_gas(block_gas_per_pubdata_byte as u32);
-    vm.vm
-        .push_raw_transaction(tx.clone(), overhead, result.refunds.gas_refunded, true);
-
-    let result_with_predefined_refunds = vm.vm.execute(VmExecutionMode::Batch);
-    let mut current_state_with_predefined_refunds = vm.vm.get_current_execution_state();
-
-    assert!(!result_with_predefined_refunds.result.is_failed());
-
-    // We need to sort these lists, as they are flattened from HashMaps.
-    current_state_with_predefined_refunds
-        .used_contract_hashes
-        .sort();
-    current_state_without_predefined_refunds
-        .used_contract_hashes
-        .sort();
-
-    assert_eq!(
-        current_state_with_predefined_refunds.events,
-        current_state_without_predefined_refunds.events
-    );
-
-    assert_eq!(
-        current_state_with_predefined_refunds.l2_to_l1_logs,
-        current_state_without_predefined_refunds.l2_to_l1_logs
-    );
-
-    assert_eq!(
-        current_state_with_predefined_refunds.storage_log_queries,
-        current_state_without_predefined_refunds.storage_log_queries
-    );
-    assert_eq!(
-        current_state_with_predefined_refunds.used_contract_hashes,
-        current_state_without_predefined_refunds.used_contract_hashes
-    );
-
-    // In this test we provide a different refund from the operator.
-    // We still can't use the refund tracer, because it would override the refund.
-    // But we can check that the logs and events have changed.
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_l1_batch_env(l1_batch)
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_rich_accounts(vec![account.clone()])
-        .build();
-
-    let changed_operator_suggested_refund = result.refunds.gas_refunded + 1000;
-    vm.vm
-        .push_raw_transaction(tx, overhead, changed_operator_suggested_refund, true);
-    let result = vm.vm.execute(VmExecutionMode::Batch);
-    let mut current_state_with_changed_predefined_refunds = vm.vm.get_current_execution_state();
-
-    assert!(!result.result.is_failed());
-    current_state_with_changed_predefined_refunds
-        .used_contract_hashes
-        .sort();
-    current_state_without_predefined_refunds
-        .used_contract_hashes
-        .sort();
-
-    assert_eq!(
-        current_state_with_changed_predefined_refunds.events.len(),
-        current_state_without_predefined_refunds.events.len()
-    );
-
-    assert_ne!(
-        current_state_with_changed_predefined_refunds.events,
-        current_state_without_predefined_refunds.events
-    );
-
-    assert_eq!(
-        current_state_with_changed_predefined_refunds.l2_to_l1_logs,
-        current_state_without_predefined_refunds.l2_to_l1_logs
-    );
-
-    assert_eq!(
-        current_state_with_changed_predefined_refunds
-            .storage_log_queries
-            .len(),
-        current_state_without_predefined_refunds
-            .storage_log_queries
-            .len()
-    );
-
-    assert_ne!(
-        current_state_with_changed_predefined_refunds.storage_log_queries,
-        current_state_without_predefined_refunds.storage_log_queries
-    );
-    assert_eq!(
-        current_state_with_changed_predefined_refunds.used_contract_hashes,
-        current_state_without_predefined_refunds.used_contract_hashes
-    );
-}
diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/require_eip712.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/require_eip712.rs
deleted file mode 100644
index 988841e90ce..00000000000
--- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/require_eip712.rs
+++ /dev/null
@@ -1,161 +0,0 @@
-use std::convert::TryInto;
-
-use ethabi::Token;
-
-use zksync_eth_signer::raw_ethereum_tx::TransactionParameters;
-use zksync_eth_signer::EthereumSigner;
-use zksync_system_constants::L2_ETH_TOKEN_ADDRESS;
-use zksync_types::fee::Fee;
-use zksync_types::l2::L2Tx;
-use zksync_types::transaction_request::TransactionRequest;
-use zksync_types::utils::storage_key_for_standard_token_balance;
-use zksync_types::{AccountTreeId, Address, Eip712Domain, Execute, Nonce, Transaction, U256};
-
-use crate::interface::{TxExecutionMode, VmExecutionMode, VmInterface};
-use crate::vm_latest::HistoryDisabled;
-use crate::vm_virtual_blocks::tests::tester::{Account, VmTester, VmTesterBuilder};
-use crate::vm_virtual_blocks::tests::utils::read_many_owners_custom_account_contract;
-
-impl VmTester<HistoryDisabled> {
-    pub(crate) fn get_eth_balance(&mut self, address: Address) -> U256 {
-        let key = storage_key_for_standard_token_balance(
-            AccountTreeId::new(L2_ETH_TOKEN_ADDRESS),
-            &address,
-        );
-        self.vm.state.storage.storage.read_from_storage(&key)
-    }
-}
-
-// TODO: refactor this test; it uses too many internal details of the VM
-#[tokio::test]
-/// This test deploys 'buggy' account abstraction code, and then tries accessing it both with legacy
-/// and EIP712 transactions.
-/// Currently we support both, but in the future we should allow only EIP712 transactions to access the AA accounts.
-async fn test_require_eip712() {
-    // Use 3 accounts:
-    // - private_address - EOA account, where we have the key
-    // - account_address - AA account, where the contract is deployed
-    // - beneficiary - an EOA account, where we'll try to transfer the tokens.
-    let account_abstraction = Account::random();
-    let mut private_account = Account::random();
-    let beneficiary = Account::random();
-
-    let (bytecode, contract) = read_many_owners_custom_account_contract();
-    let mut vm = VmTesterBuilder::new(HistoryDisabled)
-        .with_empty_in_memory_storage()
-        .with_custom_contracts(vec![(bytecode, account_abstraction.address, true)])
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_rich_accounts(vec![account_abstraction.clone(), private_account.clone()])
-        .build();
-
-    assert_eq!(vm.get_eth_balance(beneficiary.address), U256::from(0));
-
-    let chain_id: u32 = 270;
-
-    // First, let's set the owners of the AA account to the private_address
-    // (so that messages signed by private_address are authorized to act on behalf of the AA account).
-    let set_owners_function = contract.function("setOwners").unwrap();
-    let encoded_input = set_owners_function
-        .encode_input(&[Token::Array(vec![Token::Address(private_account.address)])])
-        .unwrap();
-
-    let tx = private_account.get_l2_tx_for_execute(
-        Execute {
-            contract_address: account_abstraction.address,
-            calldata: encoded_input,
-            value: Default::default(),
-            factory_deps: None,
-        },
-        None,
-    );
-
-    vm.vm.push_transaction(tx);
-    let result = vm.vm.execute(VmExecutionMode::OneTx);
-    assert!(!result.result.is_failed());
-
-    let private_account_balance = vm.get_eth_balance(private_account.address);
-
-    // And now let's do the transfer from the 'account abstraction' to 'beneficiary' (using a 'legacy' transaction).
-    // Normally this would not work - unless the operator is malicious.
-    let aa_raw_tx = TransactionParameters {
-        nonce: U256::from(0),
-        to: Some(beneficiary.address),
-        gas: U256::from(100000000),
-        gas_price: Some(U256::from(10000000)),
-        value: U256::from(888000088),
-        data: vec![],
-        chain_id: 270,
-        transaction_type: None,
-        access_list: None,
-        max_fee_per_gas: U256::from(1000000000),
-        max_priority_fee_per_gas: U256::from(1000000000),
-    };
-
-    let aa_tx = private_account.sign_legacy_tx(aa_raw_tx).await;
-    let (tx_request, hash) = TransactionRequest::from_bytes(&aa_tx, 270.into()).unwrap();
-
-    let mut l2_tx: L2Tx = L2Tx::from_request(tx_request, 10000).unwrap();
-    l2_tx.set_input(aa_tx, hash);
-    // Pretend that the operator is malicious and sets the initiator to the AA account.
-    l2_tx.common_data.initiator_address = account_abstraction.address;
-    let transaction: Transaction = l2_tx.try_into().unwrap();
-
-    vm.vm.push_transaction(transaction);
-    let result = vm.vm.execute(VmExecutionMode::OneTx);
-    assert!(!result.result.is_failed());
-    assert_eq!(
-        vm.get_eth_balance(beneficiary.address),
-        U256::from(888000088)
-    );
-    // Make sure that the tokens were transferred from the AA account.
-    assert_eq!(
-        private_account_balance,
-        vm.get_eth_balance(private_account.address)
-    );
-
-    // Now send the 'classic' EIP712 transaction
-    let tx_712 = L2Tx::new(
-        beneficiary.address,
-        vec![],
-        Nonce(1),
-        Fee {
-            gas_limit: U256::from(1000000000),
-            max_fee_per_gas: U256::from(1000000000),
-            max_priority_fee_per_gas: U256::from(1000000000),
-            gas_per_pubdata_limit: U256::from(1000000000),
-        },
-        account_abstraction.address,
-        U256::from(28374938),
-        None,
-        Default::default(),
-    );
-
-    let transaction_request: TransactionRequest = tx_712.into();
-
-    let domain = Eip712Domain::new(chain_id.into());
-    let signature = private_account
-        .get_pk_signer()
-        .sign_typed_data(&domain, &transaction_request)
-        .await
-        .unwrap();
-    let encoded_tx = transaction_request.get_signed_bytes(&signature, chain_id.into());
-
-    let (aa_txn_request, aa_hash) =
-        TransactionRequest::from_bytes(&encoded_tx, chain_id.into()).unwrap();
-
-    let mut l2_tx = L2Tx::from_request(aa_txn_request, 100000).unwrap();
-    l2_tx.set_input(encoded_tx, aa_hash);
-
-    let transaction: Transaction = l2_tx.try_into().unwrap();
-    vm.vm.push_transaction(transaction);
-    vm.vm.execute(VmExecutionMode::OneTx);
-
-    assert_eq!(
-        vm.get_eth_balance(beneficiary.address),
-        U256::from(916375026)
-    );
-    assert_eq!(
-        private_account_balance,
-        vm.get_eth_balance(private_account.address)
-    );
-}
diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/rollbacks.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/rollbacks.rs
deleted file mode 100644
index 240b7188377..00000000000
--- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/rollbacks.rs
+++ /dev/null
@@ -1,146 +0,0 @@
-use ethabi::Token;
-
-use zksync_contracts::get_loadnext_contract;
-use zksync_contracts::test_contracts::LoadnextContractExecutionParams;
-
-use zksync_types::{Execute, U256};
-
-use crate::interface::TxExecutionMode;
-use crate::vm_latest::HistoryEnabled;
-use crate::vm_virtual_blocks::tests::tester::{
-    DeployContractsTx, TransactionTestInfo, TxModifier, TxType, VmTesterBuilder,
-};
-use crate::vm_virtual_blocks::tests::utils::read_test_contract;
-
-#[test]
-fn test_vm_rollbacks() {
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_random_rich_accounts(1)
-        .build();
-
-    let mut account = vm.rich_accounts[0].clone();
-    let counter = read_test_contract();
-    let tx_0 = account.get_deploy_tx(&counter, None, TxType::L2).tx;
-    let tx_1 = account.get_deploy_tx(&counter, None, TxType::L2).tx;
-    let tx_2 = account.get_deploy_tx(&counter, None, TxType::L2).tx;
-
-    let result_without_rollbacks = vm.execute_and_verify_txs(&vec![
-        TransactionTestInfo::new_processed(tx_0.clone(), false),
-        TransactionTestInfo::new_processed(tx_1.clone(), false),
-        TransactionTestInfo::new_processed(tx_2.clone(), false),
-    ]);
-
-    // reset vm
-    vm.reset_with_empty_storage();
-
-    let result_with_rollbacks = vm.execute_and_verify_txs(&vec![
-        TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignatureLength.into()),
-        TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongMagicValue.into()),
-        TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::WrongSignature.into()),
-        // The correct nonce is 0, this tx will fail
-        TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()),
-        // This tx will succeed
-        TransactionTestInfo::new_processed(tx_0.clone(), false),
-        // The correct nonce is 1, this tx will fail
-        TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()),
-        // The correct nonce is 1, this tx will fail
-        TransactionTestInfo::new_rejected(tx_2.clone(), TxModifier::WrongNonce.into()),
-        // This tx will succeed
-        TransactionTestInfo::new_processed(tx_1, false),
-        // The correct nonce is 2, this tx will fail
-        TransactionTestInfo::new_rejected(tx_0.clone(), TxModifier::NonceReused.into()),
-        // This tx will succeed
-        TransactionTestInfo::new_processed(tx_2.clone(), false),
-        // This tx will fail
-        TransactionTestInfo::new_rejected(tx_2, TxModifier::NonceReused.into()),
-        TransactionTestInfo::new_rejected(tx_0, TxModifier::NonceReused.into()),
-    ]);
-
-    assert_eq!(result_without_rollbacks, result_with_rollbacks);
-}
-
-#[test]
-fn test_vm_loadnext_rollbacks() {
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_random_rich_accounts(1)
-        .build();
-    let mut account = vm.rich_accounts[0].clone();
-
-    let loadnext_contract = get_loadnext_contract();
-    let loadnext_constructor_data = &[Token::Uint(U256::from(100))];
-    let DeployContractsTx {
-        tx: loadnext_deploy_tx,
-        address,
-        ..
-    } = account.get_deploy_tx_with_factory_deps(
-        &loadnext_contract.bytecode,
-        Some(loadnext_constructor_data),
-        loadnext_contract.factory_deps.clone(),
-        TxType::L2,
-    );
-
-    let loadnext_tx_1 = account.get_l2_tx_for_execute(
-        Execute {
-            contract_address: address,
-            calldata: LoadnextContractExecutionParams {
-                reads: 100,
-                writes: 100,
-                events: 100,
-                hashes: 500,
-                recursive_calls: 10,
-                deploys: 60,
-            }
-            .to_bytes(),
-            value: Default::default(),
-            factory_deps: None,
-        },
-        None,
-    );
-
-    let loadnext_tx_2 = account.get_l2_tx_for_execute(
-        Execute {
-            contract_address: address,
-            calldata: LoadnextContractExecutionParams {
-                reads: 100,
-                writes: 100,
-                events: 100,
-                hashes: 500,
-                recursive_calls: 10,
-                deploys: 60,
-            }
-            .to_bytes(),
-            value: Default::default(),
-            factory_deps: None,
-        },
-        None,
-    );
-
-    let result_without_rollbacks = vm.execute_and_verify_txs(&vec![
-        TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false),
-        TransactionTestInfo::new_processed(loadnext_tx_1.clone(), false),
-        TransactionTestInfo::new_processed(loadnext_tx_2.clone(), false),
-    ]);
-
-    // reset vm
-    vm.reset_with_empty_storage();
-
-    let result_with_rollbacks = vm.execute_and_verify_txs(&vec![
-        TransactionTestInfo::new_processed(loadnext_deploy_tx.clone(), false),
-        TransactionTestInfo::new_processed(loadnext_tx_1.clone(), true),
-        TransactionTestInfo::new_rejected(
-            loadnext_deploy_tx.clone(),
-            TxModifier::NonceReused.into(),
-        ),
-        TransactionTestInfo::new_processed(loadnext_tx_1, false),
-        TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true),
-        TransactionTestInfo::new_processed(loadnext_tx_2.clone(), true),
-        TransactionTestInfo::new_rejected(loadnext_deploy_tx, TxModifier::NonceReused.into()),
-        TransactionTestInfo::new_processed(loadnext_tx_2, false),
-    ]);
-
-    assert_eq!(result_without_rollbacks, result_with_rollbacks);
-}
diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/simple_execution.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/simple_execution.rs
deleted file mode 100644
index c4eac73499f..00000000000
--- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/simple_execution.rs
+++ /dev/null
@@ -1,77 +0,0 @@
-use crate::interface::{ExecutionResult, VmExecutionMode, VmInterface};
-use crate::vm_latest::HistoryDisabled;
-use crate::vm_virtual_blocks::tests::tester::{TxType, VmTesterBuilder};
-
-#[test]
-fn estimate_fee() {
-    let mut vm_tester = VmTesterBuilder::new(HistoryDisabled)
-        .with_empty_in_memory_storage()
-        .with_deployer()
-        .with_random_rich_accounts(1)
-        .build();
-
-    vm_tester.deploy_test_contract();
-    let account = &mut vm_tester.rich_accounts[0];
-
-    let tx = account.get_test_contract_transaction(
-        vm_tester.test_contract.unwrap(),
-        false,
-        Default::default(),
-        false,
-        TxType::L2,
-    );
-
-    vm_tester.vm.push_transaction(tx);
-
-    let result = vm_tester.vm.execute(VmExecutionMode::OneTx);
-    assert_matches!(result.result, ExecutionResult::Success { .. });
-}
-
-#[test]
-fn simple_execute() {
-    let mut vm_tester = VmTesterBuilder::new(HistoryDisabled)
-        .with_empty_in_memory_storage()
-        .with_deployer()
-        .with_random_rich_accounts(1)
-        .build();
-
-    vm_tester.deploy_test_contract();
-
-    let account = &mut vm_tester.rich_accounts[0];
-
-    let tx1 = account.get_test_contract_transaction(
-        vm_tester.test_contract.unwrap(),
-        false,
-        Default::default(),
-        false,
-        TxType::L1 { serial_id: 1 },
-    );
-
-    let tx2 = account.get_test_contract_transaction(
-        vm_tester.test_contract.unwrap(),
-        true,
-        Default::default(),
-        false,
-        TxType::L1 { serial_id: 1 },
-    );
-
-    let tx3 = account.get_test_contract_transaction(
-        vm_tester.test_contract.unwrap(),
-        false,
-        Default::default(),
-        false,
-        TxType::L1 { serial_id: 1 },
-    );
-    let vm = &mut vm_tester.vm;
-    vm.push_transaction(tx1);
-    vm.push_transaction(tx2);
-    vm.push_transaction(tx3);
-    let tx = vm.execute(VmExecutionMode::OneTx);
-    assert_matches!(tx.result, ExecutionResult::Success { .. });
-    let tx = vm.execute(VmExecutionMode::OneTx);
-    assert_matches!(tx.result, ExecutionResult::Revert { .. });
-    let tx = vm.execute(VmExecutionMode::OneTx);
-    assert_matches!(tx.result, ExecutionResult::Success { .. });
-    let block_tip = vm.execute(VmExecutionMode::Batch);
-    assert_matches!(block_tip.result, ExecutionResult::Success { .. });
-}
diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/inner_state.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/inner_state.rs
deleted file mode 100644
index a5c0db9468b..00000000000
--- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/inner_state.rs
+++ /dev/null
@@ -1,119 +0,0 @@
-use std::collections::HashMap;
-
-use zk_evm_1_3_3::aux_structures::Timestamp;
-use zk_evm_1_3_3::vm_state::VmLocalState;
-use crate::interface::storage::WriteStorage;
-
-use zksync_types::{StorageKey, StorageLogQuery, StorageValue, U256};
-
-use crate::vm_virtual_blocks::old_vm::event_sink::InMemoryEventSink;
-use crate::vm_virtual_blocks::old_vm::history_recorder::{
-    AppDataFrameManagerWithHistory, HistoryRecorder,
-};
-use crate::vm_virtual_blocks::{HistoryEnabled, HistoryMode, SimpleMemory, Vm};
-use crate::HistoryMode as CommonHistoryMode;
-
-#[derive(Clone, Debug)]
-pub(crate) struct ModifiedKeysMap(HashMap<StorageKey, StorageValue>);
-
-// We consider hashmaps to be equal even if there is a key
-// that is not present in one but has zero value in another.
-impl PartialEq for ModifiedKeysMap {
-    fn eq(&self, other: &Self) -> bool {
-        for (key, value) in self.0.iter() {
-            if *value != other.0.get(key).cloned().unwrap_or_default() {
-                return false;
-            }
-        }
-        for (key, value) in other.0.iter() {
-            if *value != self.0.get(key).cloned().unwrap_or_default() {
-                return false;
-            }
-        }
-        true
-    }
-}
-
-#[derive(Clone, PartialEq, Debug)]
-pub(crate) struct DecommitterTestInnerState<H: HistoryMode> {
-    /// There is no way to "truly" compare the storage pointer,
-    /// so we just compare the modified keys. This is reasonable enough.
-    pub(crate) modified_storage_keys: ModifiedKeysMap,
-    pub(crate) known_bytecodes: HistoryRecorder<HashMap<U256, Vec<U256>>, H>,
-    pub(crate) decommitted_code_hashes: HistoryRecorder<HashMap<U256, u32>, HistoryEnabled>,
-}
-
-#[derive(Clone, PartialEq, Debug)]
-pub(crate) struct StorageOracleInnerState<H: HistoryMode> {
-    /// There is no way to "truly" compare the storage pointer,
-    /// so we just compare the modified keys. This is reasonable enough.
-    pub(crate) modified_storage_keys: ModifiedKeysMap,
-
-    pub(crate) frames_stack: AppDataFrameManagerWithHistory<Box<StorageLogQuery>, H>,
-}
-
-#[derive(Clone, PartialEq, Debug)]
-pub(crate) struct PrecompileProcessorTestInnerState<H: HistoryMode> {
-    pub(crate) timestamp_history: HistoryRecorder<Vec<Timestamp>, H>,
-}
-
-/// A struct that encapsulates the state of the VM's oracles.
-/// The state is to be used in tests.
-#[derive(Clone, PartialEq, Debug)]
-pub(crate) struct VmInstanceInnerState<H: HistoryMode> {
-    event_sink: InMemoryEventSink<H>,
-    precompile_processor_state: PrecompileProcessorTestInnerState<H>,
-    memory: SimpleMemory<H>,
-    decommitter_state: DecommitterTestInnerState<H>,
-    storage_oracle_state: StorageOracleInnerState<H>,
-    local_state: VmLocalState,
-}
-
-impl<S: WriteStorage, H: CommonHistoryMode> Vm<S, H> {
-    // Dump the inner state of the VM.
-    pub(crate) fn dump_inner_state(&self) -> VmInstanceInnerState<H::VmVirtualBlocksMode> {
-        let event_sink = self.state.event_sink.clone();
-        let precompile_processor_state = PrecompileProcessorTestInnerState {
-            timestamp_history: self.state.precompiles_processor.timestamp_history.clone(),
-        };
-        let memory = self.state.memory.clone();
-        let decommitter_state = DecommitterTestInnerState {
-            modified_storage_keys: ModifiedKeysMap(
-                self.state
-                    .decommittment_processor
-                    .get_storage()
-                    .borrow()
-                    .modified_storage_keys()
-                    .clone(),
-            ),
-            known_bytecodes: self.state.decommittment_processor.known_bytecodes.clone(),
-            decommitted_code_hashes: self
-                .state
-                .decommittment_processor
-                .get_decommitted_code_hashes_with_history()
-                .clone(),
-        };
-        let storage_oracle_state = StorageOracleInnerState {
-            modified_storage_keys: ModifiedKeysMap(
-                self.state
-                    .storage
-                    .storage
-                    .get_ptr()
-                    .borrow()
-                    .modified_storage_keys()
-                    .clone(),
-            ),
-            frames_stack: self.state.storage.frames_stack.clone(),
-        };
-        let local_state = self.state.local_state.clone();
-
-        VmInstanceInnerState {
-            event_sink,
-            precompile_processor_state,
-            memory,
-            decommitter_state,
-            storage_oracle_state,
-            local_state,
-        }
-    }
-}
diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/mod.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/mod.rs
deleted file mode 100644
index dfe8905a7e0..00000000000
--- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/mod.rs
+++ /dev/null
@@ -1,7 +0,0 @@
-pub(crate) use transaction_test_info::{ExpectedError, TransactionTestInfo, TxModifier};
-pub(crate) use vm_tester::{default_l1_batch, InMemoryStorageView, VmTester, VmTesterBuilder};
-pub(crate) use zksync_test_account::{Account, DeployContractsTx, TxType};
-
-mod inner_state;
-mod transaction_test_info;
-mod vm_tester;
diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/transaction_test_info.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/transaction_test_info.rs
deleted file mode 100644
index 15d3d98ab1d..00000000000
--- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/transaction_test_info.rs
+++ /dev/null
@@ -1,216 +0,0 @@
-use zksync_types::{ExecuteTransactionCommon, Transaction};
-
-use crate::interface::{
-    CurrentExecutionState, ExecutionResult, Halt, TxRevertReason, VmExecutionMode,
-    VmExecutionResultAndLogs, VmInterface, VmInterfaceHistoryEnabled, VmRevertReason,
-};
-use crate::vm_latest::HistoryEnabled;
-use crate::vm_virtual_blocks::tests::tester::vm_tester::VmTester;
-
-#[derive(Debug, Clone)]
-pub(crate) enum TxModifier {
-    WrongSignatureLength,
-    WrongSignature,
-    WrongMagicValue,
-    WrongNonce,
-    NonceReused,
-}
-
-#[derive(Debug, Clone)]
-pub(crate) enum TxExpectedResult {
-    Rejected { error: ExpectedError },
-    Processed { rollback: bool },
-}
-
-#[derive(Debug, Clone)]
-pub(crate) struct TransactionTestInfo {
-    tx: Transaction,
-    result: TxExpectedResult,
-}
-
-#[derive(Debug, Clone)]
-pub(crate) struct ExpectedError {
-    pub(crate) revert_reason: TxRevertReason,
-    pub(crate) modifier: Option<TxModifier>,
-}
-
-impl From<TxModifier> for ExpectedError {
-    fn from(value: TxModifier) -> Self {
-        let revert_reason = match value {
-            TxModifier::WrongSignatureLength => {
-                Halt::ValidationFailed(VmRevertReason::General {
-                    msg: "Signature length is incorrect".to_string(),
-                    data: vec![
-                        8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                        0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 29, 83, 105, 103, 110, 97, 116, 117, 114, 101, 32,
-                        108, 101, 110, 103, 116, 104, 32, 105, 115, 32, 105, 110, 99, 111, 114, 114, 101, 99,
-                        116, 0, 0, 0,
-                    ],
-                })
-            }
-            TxModifier::WrongSignature => {
-                Halt::ValidationFailed(VmRevertReason::General {
-                    msg: "Account validation returned invalid magic value. Most often this means that the signature is incorrect".to_string(),
-                    data: vec![],
-                })
-            }
-            TxModifier::WrongMagicValue => {
-                Halt::ValidationFailed(VmRevertReason::General {
-                    msg: "v is neither 27 nor 28".to_string(),
-                    data: vec![
-                        8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                        0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 22, 118, 32, 105, 115, 32, 110, 101, 105, 116, 104,
-                        101, 114, 32, 50, 55, 32, 110, 111, 114, 32, 50, 56, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                    ],
-                })
-            }
-            TxModifier::WrongNonce => {
-                Halt::ValidationFailed(VmRevertReason::General {
-                    msg: "Incorrect nonce".to_string(),
-                    data: vec![
-                        8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                        0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 15, 73, 110, 99, 111, 114, 114, 101, 99, 116, 32, 110,
-                        111, 110, 99, 101, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                    ],
-                })
-            }
-            TxModifier::NonceReused => {
-                Halt::ValidationFailed(VmRevertReason::General {
-                    msg: "Reusing the same nonce twice".to_string(),
-                    data: vec![
-                        8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                        0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                        0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 28, 82, 101, 117, 115, 105, 110, 103, 32, 116, 104,
-                        101, 32, 115, 97, 109, 101, 32, 110, 111, 110, 99, 101, 32, 116, 119, 105, 99, 101, 0,
-                        0, 0, 0,
-                    ],
-                })
-            }
-        };
-
-        ExpectedError {
-            revert_reason: TxRevertReason::Halt(revert_reason),
-            modifier: Some(value),
-        }
-    }
-}
-
-impl TransactionTestInfo {
-    pub(crate) fn new_rejected(
-        mut transaction: Transaction,
-        expected_error: ExpectedError,
-    ) -> Self {
-        transaction.common_data = match transaction.common_data {
-            ExecuteTransactionCommon::L2(mut data) => {
-                if let Some(modifier) = &expected_error.modifier {
-                    match modifier {
-                        TxModifier::WrongSignatureLength => {
-                            data.signature = data.signature[..data.signature.len() - 20].to_vec()
-                        }
-                        TxModifier::WrongSignature => data.signature = vec![27u8; 65],
-                        TxModifier::WrongMagicValue => data.signature = vec![1u8; 65],
-                        TxModifier::WrongNonce => {
-                            // There is no need to modify the signature for a nonce error
-                        }
-                        TxModifier::NonceReused => {
-                            // There is no need to modify the signature for a nonce error
-                        }
-                    }
-                }
-                ExecuteTransactionCommon::L2(data)
-            }
-            _ => panic!("L1 transactions are not supported"),
-        };
-
-        Self {
-            tx: transaction,
-            result: TxExpectedResult::Rejected {
-                error: expected_error,
-            },
-        }
-    }
-
-    pub(crate) fn new_processed(transaction: Transaction, should_be_rollbacked: bool) -> Self {
-        Self {
-            tx: transaction,
-            result: TxExpectedResult::Processed {
-                rollback: should_be_rollbacked,
-            },
-        }
-    }
-
-    fn verify_result(&self, result: &VmExecutionResultAndLogs) {
-        match &self.result {
-            TxExpectedResult::Rejected { error } => match &result.result {
-                ExecutionResult::Success { .. } => {
-                    panic!("Transaction should be reverted {:?}", self.tx.nonce())
-                }
-                ExecutionResult::Revert { output } => match &error.revert_reason {
-                    TxRevertReason::TxReverted(expected) => {
-                        assert_eq!(output, expected)
-                    }
-                    _ => {
-                        panic!("Error types mismatch");
-                    }
-                },
-                ExecutionResult::Halt { reason } => match &error.revert_reason {
-                    TxRevertReason::Halt(expected) => {
-                        assert_eq!(reason, expected)
-                    }
-                    _ => {
-                        panic!("Error types mismatch");
-                    }
-                },
-            },
-            TxExpectedResult::Processed { .. } => {
-                assert!(!result.result.is_failed());
-            }
-        }
-    }
-
-    fn should_rollback(&self) -> bool {
-        match &self.result {
-            TxExpectedResult::Rejected { .. } => true,
-            TxExpectedResult::Processed { rollback } => *rollback,
-        }
-    }
-}
-
-impl VmTester<HistoryEnabled> {
-    pub(crate) fn execute_and_verify_txs(
-        &mut self,
-        txs: &[TransactionTestInfo],
-    ) -> CurrentExecutionState {
-        for tx_test_info in txs {
-            self.execute_tx_and_verify(tx_test_info.clone());
-        }
-        self.vm.execute(VmExecutionMode::Batch);
-        let mut state = self.vm.get_current_execution_state();
-        state.used_contract_hashes.sort();
-        state
-    }
-
-    pub(crate) fn execute_tx_and_verify(
-        &mut self,
-        tx_test_info: TransactionTestInfo,
-    ) -> VmExecutionResultAndLogs {
-        let inner_state_before = self.vm.dump_inner_state();
-        self.vm.make_snapshot();
-        self.vm.push_transaction(tx_test_info.tx.clone());
-        let result = self.vm.execute(VmExecutionMode::OneTx);
-        tx_test_info.verify_result(&result);
-        if tx_test_info.should_rollback() {
-            self.vm.rollback_to_the_latest_snapshot();
-            let inner_state_after = self.vm.dump_inner_state();
-            assert_eq!(
-                inner_state_before, inner_state_after,
-                "Inner state before and after rollback should be equal"
-            );
-        }
-        result
-    }
-}
diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/vm_tester.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/vm_tester.rs
deleted file mode 100644
index 9fe0635eba3..00000000000
--- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tester/vm_tester.rs
+++ /dev/null
@@ -1,291 +0,0 @@
-use std::marker::PhantomData;
-use zksync_contracts::BaseSystemContracts;
-use crate::interface::storage::{InMemoryStorage, StoragePtr, StorageView, WriteStorage};
-
-use crate::HistoryMode;
-use zksync_types::block::legacy_miniblock_hash;
-use zksync_types::helpers::unix_timestamp_ms;
-use zksync_types::utils::{deployed_address_create, storage_key_for_eth_balance};
-use zksync_types::{
-    get_code_key, get_is_account_key, Address, L1BatchNumber, MiniblockNumber, Nonce,
-    ProtocolVersionId, U256,
-};
-use zksync_utils::bytecode::hash_bytecode;
-use zksync_utils::u256_to_h256;
-
-use crate::vm_virtual_blocks::constants::BLOCK_GAS_LIMIT;
-
-use crate::interface::{L1BatchEnv, L2Block, L2BlockEnv, SystemEnv, VmExecutionMode};
-use crate::interface::{TxExecutionMode, VmInterface};
-use crate::vm_virtual_blocks::tests::tester::Account;
-use crate::vm_virtual_blocks::tests::tester::TxType;
-use crate::vm_virtual_blocks::tests::utils::read_test_contract;
-use crate::vm_virtual_blocks::utils::l2_blocks::load_last_l2_block;
-use crate::vm_virtual_blocks::Vm;
-
-pub(crate) type InMemoryStorageView = StorageView<InMemoryStorage>;
-
-pub(crate) struct VmTester<H: HistoryMode> {
-    pub(crate) vm: Vm<InMemoryStorageView, H>,
-    pub(crate) storage: StoragePtr<InMemoryStorageView>,
-    pub(crate) fee_account: Address,
-    pub(crate) deployer: Option<Account>,
-    pub(crate) test_contract: Option<Address>,
-    pub(crate) rich_accounts: Vec<Account>,
-    pub(crate) custom_contracts: Vec<ContractsToDeploy>,
-    _phantom: PhantomData<H>,
-}
-
-impl<H: HistoryMode> VmTester<H> {
-    pub(crate) fn deploy_test_contract(&mut self) {
-        let contract = read_test_contract();
-        let tx = self
-            .deployer
-            .as_mut()
-            .expect("You have to initialize builder with deployer")
-            .get_deploy_tx(&contract, None, TxType::L2)
-            .tx;
-        let nonce = tx.nonce().unwrap().0.into();
-        self.vm.push_transaction(tx);
-        self.vm.execute(VmExecutionMode::OneTx);
-        let deployed_address =
-            deployed_address_create(self.deployer.as_ref().unwrap().address, nonce);
-        self.test_contract = Some(deployed_address);
-    }
-
-    pub(crate) fn reset_with_empty_storage(&mut self) {
-        self.storage = StorageView::new(get_empty_storage()).to_rc_ptr();
-        self.reset_state(false);
-    }
-
-    /// Reset the state of the VM to the initial state.
-    /// If `use_latest_l2_block` is true, then the VM will use the latest L2 block from storage,
-    /// otherwise it will use the first L2 block of the L1 batch env.
-    pub(crate) fn reset_state(&mut self, use_latest_l2_block: bool) {
-        for account in self.rich_accounts.iter_mut() {
-            account.nonce = Nonce(0);
-            make_account_rich(self.storage.clone(), account);
-        }
-        if let Some(deployer) = &self.deployer {
-            make_account_rich(self.storage.clone(), deployer);
-        }
-
-        if !self.custom_contracts.is_empty() {
-            println!("Inserting custom contracts is not yet supported")
-            // insert_contracts(&mut self.storage, &self.custom_contracts);
-        }
-
-        let mut l1_batch = self.vm.batch_env.clone();
-        if use_latest_l2_block {
-            let last_l2_block = load_last_l2_block(self.storage.clone()).unwrap_or(L2Block {
-                number: 0,
-                timestamp: 0,
-                hash: legacy_miniblock_hash(MiniblockNumber(0)),
-            });
-            l1_batch.first_l2_block = L2BlockEnv {
-                number: last_l2_block.number + 1,
-                timestamp: std::cmp::max(last_l2_block.timestamp + 1, l1_batch.timestamp),
-                prev_block_hash: last_l2_block.hash,
-                max_virtual_blocks_to_create: 1,
-            };
-        }
-
-        let vm = Vm::new(l1_batch, self.vm.system_env.clone(), self.storage.clone());
-
-        if self.test_contract.is_some() {
-            self.deploy_test_contract();
-        }
-
-        self.vm = vm;
-    }
-}
-
-pub(crate) type ContractsToDeploy = (Vec<u8>, Address, bool);
-
-pub(crate) struct VmTesterBuilder<H: HistoryMode> {
-    _phantom: PhantomData<H>,
-    storage: Option<InMemoryStorage>,
-    l1_batch_env: Option<L1BatchEnv>,
-    system_env: SystemEnv,
-    deployer: Option<Account>,
-    rich_accounts: Vec<Account>,
-    custom_contracts: Vec<ContractsToDeploy>,
-}
-
-impl<H: HistoryMode> Clone for VmTesterBuilder<H> {
-    fn clone(&self) -> Self {
-        Self {
-            _phantom: PhantomData,
-            storage: None,
-            l1_batch_env: self.l1_batch_env.clone(),
-            system_env: self.system_env.clone(),
-            deployer: self.deployer.clone(),
-            rich_accounts: self.rich_accounts.clone(),
-            custom_contracts: self.custom_contracts.clone(),
-        }
-    }
-}
-
-#[allow(dead_code)]
-impl<H: HistoryMode> VmTesterBuilder<H> {
-    pub(crate) fn new(_: H) -> Self {
-        Self {
-            _phantom: PhantomData,
-            storage: None,
-            l1_batch_env: None,
-            system_env: SystemEnv {
-                zk_porter_available: false,
-                version: ProtocolVersionId::latest(),
-                base_system_smart_contracts: BaseSystemContracts::playground(),
-                gas_limit: BLOCK_GAS_LIMIT,
-                execution_mode: TxExecutionMode::VerifyExecute,
-                default_validation_computational_gas_limit: BLOCK_GAS_LIMIT,
-                chain_id: 270.into(),
-            },
-            deployer: None,
-            rich_accounts: vec![],
-            custom_contracts: vec![],
-        }
-    }
-
-    pub(crate) fn with_l1_batch_env(mut self, l1_batch_env: L1BatchEnv) -> Self {
-        self.l1_batch_env = Some(l1_batch_env);
-        self
-    }
-
-    pub(crate) fn with_system_env(mut self, system_env: SystemEnv) -> Self {
-        self.system_env = system_env;
-        self
-    }
-
-    pub(crate) fn with_storage(mut self, storage: InMemoryStorage) -> Self {
-        self.storage = Some(storage);
-        self
-    }
-
-    pub(crate) fn with_base_system_smart_contracts(
-        mut self,
-        base_system_smart_contracts: BaseSystemContracts,
-    ) -> Self {
-        self.system_env.base_system_smart_contracts = base_system_smart_contracts;
-        self
-    }
-
-    pub(crate) fn with_gas_limit(mut self, gas_limit: u32) -> Self {
-        self.system_env.gas_limit = gas_limit;
-        self
-    }
-
-    pub(crate) fn with_execution_mode(mut self, execution_mode: TxExecutionMode) -> Self {
-        self.system_env.execution_mode = execution_mode;
-        self
-    }
-
-    pub(crate) fn with_empty_in_memory_storage(mut self) -> Self {
-        self.storage = Some(get_empty_storage());
-        self
-    }
-
-    pub(crate) fn with_random_rich_accounts(mut self, number: u32) -> Self {
-        for _ in 0..number {
-            let account = Account::random();
-            self.rich_accounts.push(account);
-        }
-        self
-    }
-
-    pub(crate) fn with_rich_accounts(mut self, accounts: Vec<Account>) -> Self {
-        self.rich_accounts.extend(accounts);
-        self
-    }
-
-    pub(crate) fn with_deployer(mut self) -> Self {
-        let deployer = Account::random();
-        self.deployer = Some(deployer);
-        self
-    }
-
-    pub(crate) fn with_custom_contracts(mut self, contracts: Vec<ContractsToDeploy>) -> Self {
-        self.custom_contracts = contracts;
-        self
-    }
-
-    pub(crate) fn build(self) -> VmTester<H> {
-        let l1_batch_env = self
-            .l1_batch_env
-            .unwrap_or_else(|| default_l1_batch(L1BatchNumber(1)));
-
-        let mut raw_storage = self.storage.unwrap_or_else(get_empty_storage);
-        insert_contracts(&mut raw_storage, &self.custom_contracts);
-        let storage_ptr = StorageView::new(raw_storage).to_rc_ptr();
-        for account in self.rich_accounts.iter() {
-            make_account_rich(storage_ptr.clone(), account);
-        }
-        if let Some(deployer) = &self.deployer {
-            make_account_rich(storage_ptr.clone(), deployer);
-        }
-        let fee_account = l1_batch_env.fee_account;
-
-        let vm = Vm::new(l1_batch_env, self.system_env, storage_ptr.clone());
-
-        VmTester {
-            vm,
-            storage: storage_ptr,
-            fee_account,
-            deployer: self.deployer,
-            test_contract: None,
-            rich_accounts: self.rich_accounts.clone(),
-            custom_contracts: self.custom_contracts.clone(),
-            _phantom: PhantomData,
-        }
-    }
-}
-
-pub(crate) fn default_l1_batch(number: L1BatchNumber) -> L1BatchEnv {
-    let timestamp = unix_timestamp_ms();
-    L1BatchEnv {
-        previous_batch_hash: None,
-        number,
-        timestamp,
-        l1_gas_price: 50_000_000_000,   // 50 gwei
-        fair_l2_gas_price: 250_000_000, // 0.25 gwei
-        fee_account: Address::random(),
-        enforced_base_fee: None,
-        first_l2_block: L2BlockEnv {
-            number: 1,
-            timestamp,
-            prev_block_hash: legacy_miniblock_hash(MiniblockNumber(0)),
-            max_virtual_blocks_to_create: 100,
-        },
-    }
-}
-
-pub(crate) fn make_account_rich(storage: StoragePtr<InMemoryStorageView>, account: &Account) {
-    let key = storage_key_for_eth_balance(&account.address);
-    storage
-        .as_ref()
-        .borrow_mut()
-        .set_value(key, u256_to_h256(U256::from(10u64.pow(19))));
-}
-
-pub(crate) fn get_empty_storage() -> InMemoryStorage {
-    InMemoryStorage::with_system_contracts(hash_bytecode)
-}
-
-// Inserts the contracts into the test environment, bypassing the
-// deployer system contract. Besides the reference to storage,
-// it accepts a `contracts` tuple of information about the contract
-// and whether or not it is an account.
-fn insert_contracts(raw_storage: &mut InMemoryStorage, contracts: &[ContractsToDeploy]) {
-    for (contract, address, is_account) in contracts {
-        let deployer_code_key = get_code_key(address);
-        raw_storage.set_value(deployer_code_key, hash_bytecode(contract));
-
-        if *is_account {
-            let is_account_key = get_is_account_key(address);
-            raw_storage.set_value(is_account_key, u256_to_h256(1_u32.into()));
-        }
-
-        raw_storage.store_factory_dep(hash_bytecode(contract), contract.clone());
-    }
-}
diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tracing_execution_error.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tracing_execution_error.rs
deleted file mode 100644
index 8258abe0685..00000000000
--- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/tracing_execution_error.rs
+++ /dev/null
@@ -1,52 +0,0 @@
-use crate::interface::{TxExecutionMode, TxRevertReason, VmRevertReason};
-use zksync_types::{Execute, H160};
-
-use crate::vm_latest::HistoryEnabled;
-use crate::vm_virtual_blocks::tests::tester::{
-    ExpectedError, TransactionTestInfo, VmTesterBuilder,
-};
-use crate::vm_virtual_blocks::tests::utils::{
-    get_execute_error_calldata, read_error_contract, BASE_SYSTEM_CONTRACTS,
-};
-
-#[test]
-fn test_tracing_of_execution_errors() {
-    let contract_address = H160::random();
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_base_system_smart_contracts(BASE_SYSTEM_CONTRACTS.clone())
-        .with_custom_contracts(vec![(read_error_contract(), contract_address, false)])
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_deployer()
-        .with_random_rich_accounts(1)
-        .build();
-
-    let account = &mut vm.rich_accounts[0];
-
-    let tx = account.get_l2_tx_for_execute(
-        Execute {
-            contract_address,
-            calldata: get_execute_error_calldata(),
-            value: Default::default(),
-            factory_deps: Some(vec![]),
-        },
-        None,
-    );
-
-    vm.execute_tx_and_verify(TransactionTestInfo::new_rejected(
-        tx,
-        ExpectedError {
-            revert_reason: TxRevertReason::TxReverted(VmRevertReason::General {
-                msg: "short".to_string(),
-                data: vec![
-                    8, 195, 121, 160, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 32, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 5, 115, 104, 111, 114, 116,
-                    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-                    0,
-                ],
-            }),
-            modifier: None,
-        },
-    ));
-}
diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/upgrade.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/upgrade.rs
deleted file mode 100644
index 8b3fa0ea291..00000000000
--- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/upgrade.rs
+++ /dev/null
@@ -1,344 +0,0 @@
-use zk_evm_1_3_3::aux_structures::Timestamp;
-
-use zksync_types::{
-    ethabi::Contract,
-    Execute, COMPLEX_UPGRADER_ADDRESS, CONTRACT_DEPLOYER_ADDRESS, CONTRACT_FORCE_DEPLOYER_ADDRESS,
-    REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE,
-    {ethabi::Token, Address, ExecuteTransactionCommon, Transaction, H256, U256},
-    {get_code_key, get_known_code_key, H160},
-};
-
-use zksync_utils::{bytecode::hash_bytecode, bytes_to_be_words, h256_to_u256, u256_to_h256};
-
-use zksync_contracts::{deployer_contract, load_contract, load_sys_contract, read_bytecode};
-use crate::interface::storage::WriteStorage;
-use zksync_test_account::TxType;
-
-use crate::interface::{
-    ExecutionResult, Halt, TxExecutionMode, VmExecutionMode, VmInterface, VmInterfaceHistoryEnabled,
-};
-use crate::vm_latest::HistoryEnabled;
-use crate::vm_virtual_blocks::tests::tester::VmTesterBuilder;
-use crate::vm_virtual_blocks::tests::utils::verify_required_storage;
-use zksync_types::protocol_version::ProtocolUpgradeTxCommonData;
-
-use super::utils::read_test_contract;
-
-/// In this test we ensure that the requirements for protocol upgrade transactions are enforced by the bootloader:
-/// - This transaction must be the only one in a block
-/// - If present, this transaction must be the first one in a block
-#[test]
-fn test_protocol_upgrade_is_first() {
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_random_rich_accounts(1)
-        .build();
-
-    let bytecode_hash = hash_bytecode(&read_test_contract());
-
-    // Here we just use some random transaction of protocol upgrade type:
-    let protocol_upgrade_transaction = get_forced_deploy_tx(&[ForceDeployment {
-        // The bytecode hash to put on an address
-        bytecode_hash,
-        // The address on which to deploy the bytecode hash
-        address: H160::random(),
-        // Whether to run the constructor on the force deployment
-        call_constructor: false,
-        // The value with which to initialize a contract
-        value: U256::zero(),
-        // The constructor calldata
-        input: vec![],
-    }]);
-
-    let normal_l1_transaction = vm.rich_accounts[0]
-        .get_deploy_tx(&read_test_contract(), None, TxType::L1 { serial_id: 0 })
-        .tx;
-
-    let expected_error =
-        Halt::UnexpectedVMBehavior("Assertion error: Protocol upgrade tx not first".to_string());
-
-    vm.vm.make_snapshot();
-    // Test 1: there must be only one system transaction in a block
-    vm.vm.push_transaction(protocol_upgrade_transaction.clone());
-    vm.vm.push_transaction(normal_l1_transaction.clone());
-    vm.vm.push_transaction(protocol_upgrade_transaction.clone());
-
-    vm.vm.execute(VmExecutionMode::OneTx);
-    vm.vm.execute(VmExecutionMode::OneTx);
-    let result = vm.vm.execute(VmExecutionMode::OneTx);
-    assert_eq!(
-        result.result,
-        ExecutionResult::Halt {
-            reason: expected_error.clone()
-        }
-    );
-
-    // Test 2: the protocol upgrade tx must be the first one in a block
-    vm.vm.rollback_to_the_latest_snapshot();
-    vm.vm.make_snapshot();
-    vm.vm.push_transaction(normal_l1_transaction.clone());
-    vm.vm.push_transaction(protocol_upgrade_transaction.clone());
-
-    vm.vm.execute(VmExecutionMode::OneTx);
-    let result = vm.vm.execute(VmExecutionMode::OneTx);
-    assert_eq!(
-        result.result,
-        ExecutionResult::Halt {
-            reason: expected_error
-        }
-    );
-
-    vm.vm.rollback_to_the_latest_snapshot();
-    vm.vm.make_snapshot();
-    vm.vm.push_transaction(protocol_upgrade_transaction);
-    vm.vm.push_transaction(normal_l1_transaction);
-
-    vm.vm.execute(VmExecutionMode::OneTx);
-    let result = vm.vm.execute(VmExecutionMode::OneTx);
-    assert!(!result.result.is_failed());
-}
-
-/// In this test we check how force deployments can be done via protocol upgrade transactions.
-#[test]
-fn test_force_deploy_upgrade() {
-    let mut vm = VmTesterBuilder::new(HistoryEnabled)
-        .with_empty_in_memory_storage()
-        .with_execution_mode(TxExecutionMode::VerifyExecute)
-        .with_random_rich_accounts(1)
-        .build();
-
-    let storage_view = vm.storage.clone();
-    let bytecode_hash = hash_bytecode(&read_test_contract());
-
-    let known_code_key = get_known_code_key(&bytecode_hash);
-    // It is generally expected that all the keys will be set as known prior to the protocol upgrade.
- storage_view - .borrow_mut() - .set_value(known_code_key, u256_to_h256(1.into())); - drop(storage_view); - - let address_to_deploy = H160::random(); - // Here we just use some random transaction of protocol upgrade type: - let transaction = get_forced_deploy_tx(&[ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash, - // The address on which to deploy the bytecodehash to - address: address_to_deploy, - // Whether to run the constructor on the force deployment - call_constructor: false, - // The value with which to initialize a contract - value: U256::zero(), - // The constructor calldata - input: vec![], - }]); - - vm.vm.push_transaction(transaction); - - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = vec![(bytecode_hash, get_code_key(&address_to_deploy))]; - - // Verify that the bytecode has been set correctly - verify_required_storage(&vm.vm.state, expected_slots); -} - -/// Here we show how the work with the complex upgrader could be done -#[test] -fn test_complex_upgrader() { - let mut vm = VmTesterBuilder::new(HistoryEnabled) - .with_empty_in_memory_storage() - .with_execution_mode(TxExecutionMode::VerifyExecute) - .with_random_rich_accounts(1) - .build(); - - let storage_view = vm.storage.clone(); - - let bytecode_hash = hash_bytecode(&read_complex_upgrade()); - let msg_sender_test_hash = hash_bytecode(&read_msg_sender_test()); - - // Let's assume that the bytecode for the implementation of the complex upgrade - // is already deployed in some address in userspace - let upgrade_impl = H160::random(); - let account_code_key = get_code_key(&upgrade_impl); - - storage_view - .borrow_mut() - .set_value(get_known_code_key(&bytecode_hash), u256_to_h256(1.into())); - storage_view.borrow_mut().set_value( - get_known_code_key(&msg_sender_test_hash), - u256_to_h256(1.into()), - ); - storage_view - .borrow_mut() - .set_value(account_code_key, bytecode_hash); - drop(storage_view); - - vm.vm.state.decommittment_processor.populate( - vec![ - ( - h256_to_u256(bytecode_hash), - bytes_to_be_words(read_complex_upgrade()), - ), - ( - h256_to_u256(msg_sender_test_hash), - bytes_to_be_words(read_msg_sender_test()), - ), - ], - Timestamp(0), - ); - - let address_to_deploy1 = H160::random(); - let address_to_deploy2 = H160::random(); - - let transaction = get_complex_upgrade_tx( - upgrade_impl, - address_to_deploy1, - address_to_deploy2, - bytecode_hash, - ); - - vm.vm.push_transaction(transaction); - let result = vm.vm.execute(VmExecutionMode::OneTx); - assert!( - !result.result.is_failed(), - "The force upgrade was not successful" - ); - - let expected_slots = vec![ - (bytecode_hash, get_code_key(&address_to_deploy1)), - (bytecode_hash, get_code_key(&address_to_deploy2)), - ]; - - // Verify that the bytecode has been set correctly - verify_required_storage(&vm.vm.state, expected_slots); -} - -#[derive(Debug, Clone)] -struct ForceDeployment { - // The bytecode hash to put on an address - bytecode_hash: H256, - // The address on which to deploy the bytecodehash to - address: Address, - // Whether to run the constructor on the force deployment - call_constructor: bool, - // The value with which to initialize a contract - value: U256, - // The constructor calldata - input: Vec, -} - -fn get_forced_deploy_tx(deployment: &[ForceDeployment]) -> Transaction { - let deployer = deployer_contract(); - let contract_function = 
deployer.function("forceDeployOnAddresses").unwrap(); - - let encoded_deployments: Vec<_> = deployment - .iter() - .map(|deployment| { - Token::Tuple(vec![ - Token::FixedBytes(deployment.bytecode_hash.as_bytes().to_vec()), - Token::Address(deployment.address), - Token::Bool(deployment.call_constructor), - Token::Uint(deployment.value), - Token::Bytes(deployment.input.clone()), - ]) - }) - .collect(); - - let params = [Token::Array(encoded_deployments)]; - - let calldata = contract_function - .encode_input(¶ms) - .expect("failed to encode parameters"); - - let execute = Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, - calldata, - factory_deps: None, - value: U256::zero(), - }; - - Transaction { - common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, - gas_limit: U256::from(200_000_000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -// Returns the transaction that performs a complex protocol upgrade. -// The first param is the address of the implementation of the complex upgrade -// in user-space, while the next 3 params are params of the implenentaiton itself -// For the explanatation for the parameters, please refer to: -// etc/contracts-test-data/complex-upgrade/complex-upgrade.sol -fn get_complex_upgrade_tx( - implementation_address: Address, - address1: Address, - address2: Address, - bytecode_hash: H256, -) -> Transaction { - let impl_contract = get_complex_upgrade_abi(); - let impl_function = impl_contract.function("someComplexUpgrade").unwrap(); - let impl_calldata = impl_function - .encode_input(&[ - Token::Address(address1), - Token::Address(address2), - Token::FixedBytes(bytecode_hash.as_bytes().to_vec()), - ]) - .unwrap(); - - let complex_upgrader = get_complex_upgrader_abi(); - let upgrade_function = complex_upgrader.function("upgrade").unwrap(); - let complex_upgrader_calldata = upgrade_function - .encode_input(&[ - Token::Address(implementation_address), - Token::Bytes(impl_calldata), - ]) - .unwrap(); - - let execute = Execute { - contract_address: COMPLEX_UPGRADER_ADDRESS, - calldata: complex_upgrader_calldata, - factory_deps: None, - value: U256::zero(), - }; - - Transaction { - common_data: ExecuteTransactionCommon::ProtocolUpgrade(ProtocolUpgradeTxCommonData { - sender: CONTRACT_FORCE_DEPLOYER_ADDRESS, - gas_limit: U256::from(200_000_000u32), - gas_per_pubdata_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), - ..Default::default() - }), - execute, - received_timestamp_ms: 0, - raw_bytes: None, - } -} - -fn read_complex_upgrade() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json") -} - -fn read_msg_sender_test() -> Vec { - read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/msg-sender.sol/MsgSenderTest.json") -} - -fn get_complex_upgrade_abi() -> Contract { - load_contract( - "etc/contracts-test-data/artifacts-zk/contracts/complex-upgrade/complex-upgrade.sol/ComplexUpgrade.json" - ) -} - -fn get_complex_upgrader_abi() -> Contract { - load_sys_contract("ComplexUpgrader") -} diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/utils.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/utils.rs deleted file mode 100644 index e3db232ffce..00000000000 --- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/utils.rs +++ /dev/null @@ -1,106 
diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/utils.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/tests/utils.rs
deleted file mode 100644
index e3db232ffce..00000000000
--- a/core/lib/multivm/src/versions/vm_virtual_blocks/tests/utils.rs
+++ /dev/null
@@ -1,106 +0,0 @@
-use ethabi::Contract;
-use once_cell::sync::Lazy;
-
-use crate::vm_virtual_blocks::tests::tester::InMemoryStorageView;
-use zksync_contracts::{
-    load_contract, read_bytecode, read_zbin_bytecode, BaseSystemContracts, SystemContractCode,
-};
-use crate::interface::storage::{StoragePtr, WriteStorage};
-use zksync_types::utils::storage_key_for_standard_token_balance;
-use zksync_types::{AccountTreeId, Address, StorageKey, H256, U256};
-use zksync_utils::bytecode::hash_bytecode;
-use zksync_utils::{bytes_to_be_words, h256_to_u256, u256_to_h256};
-
-use crate::vm_virtual_blocks::types::internals::ZkSyncVmState;
-use crate::vm_virtual_blocks::HistoryMode;
-
-pub(crate) static BASE_SYSTEM_CONTRACTS: Lazy<BaseSystemContracts> =
-    Lazy::new(BaseSystemContracts::load_from_disk);
-
-// Probably make it a part of vm tester
-pub(crate) fn verify_required_storage<H: HistoryMode>(
-    state: &ZkSyncVmState<InMemoryStorageView, H>,
-    required_values: Vec<(H256, StorageKey)>,
-) {
-    for (required_value, key) in required_values {
-        let current_value = state.storage.storage.read_from_storage(&key);
-
-        assert_eq!(
-            u256_to_h256(current_value),
-            required_value,
-            "Invalid value at key {key:?}"
-        );
-    }
-}
-
-pub(crate) fn verify_required_memory<H: HistoryMode>(
-    state: &ZkSyncVmState<InMemoryStorageView, H>,
-    required_values: Vec<(U256, u32, u32)>,
-) {
-    for (required_value, memory_page, cell) in required_values {
-        let current_value = state
-            .memory
-            .read_slot(memory_page as usize, cell as usize)
-            .value;
-        assert_eq!(current_value, required_value);
-    }
-}
-
-pub(crate) fn get_balance<S: WriteStorage>(
-    token_id: AccountTreeId,
-    account: &Address,
-    main_storage: StoragePtr<S>,
-) -> U256 {
-    let key = storage_key_for_standard_token_balance(token_id, account);
-    h256_to_u256(main_storage.borrow_mut().read_value(&key))
-}
-
-pub(crate) fn read_test_contract() -> Vec<u8> {
-    read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json")
-}
-
-pub(crate) fn get_bootloader(test: &str) -> SystemContractCode {
-    let bootloader_code = read_zbin_bytecode(format!(
-        "contracts/system-contracts/bootloader/tests/artifacts/{}.yul.zbin",
-        test
-    ));
-
-    let bootloader_hash = hash_bytecode(&bootloader_code);
-    SystemContractCode {
-        code: bytes_to_be_words(bootloader_code),
-        hash: bootloader_hash,
-    }
-}
-
-pub(crate) fn read_nonce_holder_tester() -> Vec<u8> {
-    read_bytecode("etc/contracts-test-data/artifacts-zk/contracts/custom-account/nonce-holder-test.sol/NonceHolderTest.json")
-}
-
-pub(crate) fn read_error_contract() -> Vec<u8> {
-    read_bytecode(
-        "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json",
-    )
-}
-
-pub(crate) fn get_execute_error_calldata() -> Vec<u8> {
-    let test_contract = load_contract(
-        "etc/contracts-test-data/artifacts-zk/contracts/error/error.sol/SimpleRequire.json",
-    );
-
-    let function = test_contract.function("require_short").unwrap();
-
-    function
-        .encode_input(&[])
-        .expect("failed to encode parameters")
-}
-
-pub(crate) fn read_many_owners_custom_account_contract() -> (Vec<u8>, Contract) {
-    let path = "etc/contracts-test-data/artifacts-zk/contracts/custom-account/many-owners-custom-account.sol/ManyOwnersCustomAccount.json";
-    (read_bytecode(path), load_contract(path))
-}
-
-pub(crate) fn read_max_depth_contract() -> Vec<u8> {
-    read_zbin_bytecode(
-        "core/tests/ts-integration/contracts/zkasm/artifacts/deep_stak.zkasm/deep_stak.zkasm.zbin",
-    )
-}
diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/types/internals/transaction_data.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/types/internals/transaction_data.rs
index b42950399f6..c96004163a6 100644
--- a/core/lib/multivm/src/versions/vm_virtual_blocks/types/internals/transaction_data.rs
+++ b/core/lib/multivm/src/versions/vm_virtual_blocks/types/internals/transaction_data.rs
@@ -22,7 +22,7 @@ use crate::vm_virtual_blocks::{
 pub(crate) struct TransactionData {
     pub(crate) tx_type: u8,
     pub(crate) from: Address,
-    pub(crate) to: Address,
+    pub(crate) to: Option<Address>,
     pub(crate) gas_limit: U256,
     pub(crate) pubdata_price_limit: U256,
     pub(crate) max_fee_per_gas: U256,
@@ -169,7 +169,7 @@ impl TransactionData {
         encode(&[Token::Tuple(vec![
             Token::Uint(U256::from_big_endian(&self.tx_type.to_be_bytes())),
             Token::Address(self.from),
-            Token::Address(self.to),
+            Token::Address(self.to.unwrap_or_default()),
             Token::Uint(self.gas_limit),
             Token::Uint(self.pubdata_price_limit),
             Token::Uint(self.max_fee_per_gas),
@@ -325,7 +325,7 @@ mod tests {
         let transaction = TransactionData {
             tx_type: 113,
             from: Address::random(),
-            to: Address::random(),
+            to: Some(Address::random()),
             gas_limit: U256::from(1u32),
             pubdata_price_limit: U256::from(1u32),
             max_fee_per_gas: U256::from(1u32),
diff --git a/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs b/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs
index 0ecdd6797f4..28c09590f2a 100644
--- a/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs
+++ b/core/lib/multivm/src/versions/vm_virtual_blocks/vm.rs
@@ -8,7 +8,6 @@ use crate::{
         BytecodeCompressionError, BytecodeCompressionResult, CurrentExecutionState,
         FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode,
         VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceHistoryEnabled,
-        VmMemoryMetrics,
     },
     vm_latest::HistoryEnabled,
     vm_virtual_blocks::{
@@ -83,7 +82,7 @@ impl<S: WriteStorage, H: HistoryMode> VmInterface for Vm<S, H> {
     /// Execute VM with custom tracers.
     fn inspect(
         &mut self,
-        tracer: TracerDispatcher<S, H::VmVirtualBlocksMode>,
+        tracer: &mut TracerDispatcher<S, H::VmVirtualBlocksMode>,
         execution_mode: VmExecutionMode,
     ) -> VmExecutionResultAndLogs {
         self.inspect_inner(tracer, execution_mode)
@@ -96,7 +95,7 @@ impl<S: WriteStorage, H: HistoryMode> VmInterface for Vm<S, H> {
     /// Inspect transaction with optional bytecode compression.
     fn inspect_transaction_with_bytecode_compression(
         &mut self,
-        tracer: TracerDispatcher<S, H::VmVirtualBlocksMode>,
+        tracer: &mut TracerDispatcher<S, H::VmVirtualBlocksMode>,
         tx: Transaction,
         with_compression: bool,
     ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) {
@@ -118,12 +117,8 @@ impl<S: WriteStorage, H: HistoryMode> VmInterface for Vm<S, H> {
         }
     }
 
-    fn record_vm_memory_metrics(&self) -> VmMemoryMetrics {
-        self.record_vm_memory_metrics_inner()
-    }
-
     fn finish_batch(&mut self) -> FinishedL1Batch {
-        let result = self.inspect(TracerDispatcher::default(), VmExecutionMode::Batch);
+        let result = self.inspect(&mut TracerDispatcher::default(), VmExecutionMode::Batch);
         let execution_state = self.get_current_execution_state();
         let bootloader_memory = self.bootloader_state.bootloader_memory();
         FinishedL1Batch {
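The signature change above is the core of this refactor: `inspect` now borrows the tracer dispatcher instead of consuming it, so callers keep ownership and can read out any shared state their tracers captured after the run. A sketch of what a call site looks like after the change (assuming some `vm` implementing the updated `VmInterface`; not code from this diff):

    // Sketch only: `vm` is any VM implementing the updated `VmInterface`.
    let mut tracer = TracerDispatcher::default();
    let result = vm.inspect(&mut tracer, VmExecutionMode::OneTx);
    // `tracer` is still owned here; shared state its tracers captured
    // (e.g. via `Arc<OnceCell<_>>`) can be inspected after execution.
    assert!(!result.result.is_failed());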
diff --git a/core/lib/multivm/src/vm_instance.rs b/core/lib/multivm/src/vm_instance.rs
index cedb4bc8276..ac5693b6161 100644
--- a/core/lib/multivm/src/vm_instance.rs
+++ b/core/lib/multivm/src/vm_instance.rs
@@ -1,21 +1,30 @@
-use zksync_types::vm::{FastVmMode, VmVersion};
+use std::mem;
+
+use zksync_types::{vm::VmVersion, Transaction};
+use zksync_vm2::interface::Tracer;
 
 use crate::{
     glue::history_mode::HistoryMode,
     interface::{
         storage::{ImmutableStorageView, ReadStorage, StoragePtr, StorageView},
+        utils::ShadowVm,
         BytecodeCompressionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv,
         VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface,
         VmInterfaceHistoryEnabled, VmMemoryMetrics,
     },
     tracers::TracerDispatcher,
-    versions::shadow::ShadowVm,
+    vm_latest::HistoryEnabled,
 };
 
-pub type ShadowedFastVm<S, H> = ShadowVm<S, crate::vm_fast::Vm<ImmutableStorageView<S>, H>>;
-
+/// Enumeration encompassing all supported legacy VM versions.
+///
+/// # Important
+///
+/// Methods with a tracer arg take the provided tracer, replacing it with the default value. Legacy tracers
+/// are adapted for this workflow (previously, tracers were passed by value), so they provide means to extract state after execution
+/// if necessary (e.g., using `Arc<OnceCell<_>>`).
 #[derive(Debug)]
-pub enum VmInstance<S: ReadStorage, H: HistoryMode> {
+pub enum LegacyVmInstance<S: ReadStorage, H: HistoryMode> {
     VmM5(crate::vm_m5::Vm<StorageView<S>, H>),
     VmM6(crate::vm_m6::Vm<StorageView<S>, H>),
     Vm1_3_2(crate::vm_1_3_2::Vm<StorageView<S>, H>),
@@ -25,74 +34,66 @@ pub enum VmInstance<S: ReadStorage, H: HistoryMode> {
     Vm1_4_1(crate::vm_1_4_1::Vm<StorageView<S>, H>),
     Vm1_4_2(crate::vm_1_4_2::Vm<StorageView<S>, H>),
     Vm1_5_0(crate::vm_latest::Vm<StorageView<S>, H>),
-    VmFast(crate::vm_fast::Vm<ImmutableStorageView<S>>),
-    ShadowedVmFast(ShadowedFastVm<S, H>),
 }
 
-macro_rules! dispatch_vm {
+macro_rules! dispatch_legacy_vm {
     ($self:ident.$function:ident($($params:tt)*)) => {
         match $self {
-            VmInstance::VmM5(vm) => vm.$function($($params)*),
-            VmInstance::VmM6(vm) => vm.$function($($params)*),
-            VmInstance::Vm1_3_2(vm) => vm.$function($($params)*),
-            VmInstance::VmVirtualBlocks(vm) => vm.$function($($params)*),
-            VmInstance::VmVirtualBlocksRefundsEnhancement(vm) => vm.$function($($params)*),
-            VmInstance::VmBoojumIntegration(vm) => vm.$function($($params)*),
-            VmInstance::Vm1_4_1(vm) => vm.$function($($params)*),
-            VmInstance::Vm1_4_2(vm) => vm.$function($($params)*),
-            VmInstance::Vm1_5_0(vm) => vm.$function($($params)*),
-            VmInstance::VmFast(vm) => vm.$function($($params)*),
-            VmInstance::ShadowedVmFast(vm) => vm.$function($($params)*),
+            Self::VmM5(vm) => vm.$function($($params)*),
+            Self::VmM6(vm) => vm.$function($($params)*),
+            Self::Vm1_3_2(vm) => vm.$function($($params)*),
+            Self::VmVirtualBlocks(vm) => vm.$function($($params)*),
+            Self::VmVirtualBlocksRefundsEnhancement(vm) => vm.$function($($params)*),
+            Self::VmBoojumIntegration(vm) => vm.$function($($params)*),
+            Self::Vm1_4_1(vm) => vm.$function($($params)*),
+            Self::Vm1_4_2(vm) => vm.$function($($params)*),
+            Self::Vm1_5_0(vm) => vm.$function($($params)*),
         }
     };
 }
 
-impl<S: ReadStorage, H: HistoryMode> VmInterface for VmInstance<S, H> {
+impl<S: ReadStorage, H: HistoryMode> VmInterface for LegacyVmInstance<S, H> {
     type TracerDispatcher = TracerDispatcher<StorageView<S>, H>;
 
     /// Push tx into memory for the future execution
-    fn push_transaction(&mut self, tx: zksync_types::Transaction) {
-        dispatch_vm!(self.push_transaction(tx))
+    fn push_transaction(&mut self, tx: Transaction) {
+        dispatch_legacy_vm!(self.push_transaction(tx))
     }
 
     /// Execute next transaction with custom tracers
     fn inspect(
         &mut self,
-        dispatcher: Self::TracerDispatcher,
+        dispatcher: &mut Self::TracerDispatcher,
         execution_mode: VmExecutionMode,
     ) -> VmExecutionResultAndLogs {
-        dispatch_vm!(self.inspect(dispatcher.into(), execution_mode))
+        dispatch_legacy_vm!(self.inspect(&mut mem::take(dispatcher).into(), execution_mode))
     }
 
     fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) {
-        dispatch_vm!(self.start_new_l2_block(l2_block_env))
+        dispatch_legacy_vm!(self.start_new_l2_block(l2_block_env))
     }
 
     /// Inspect transaction with optional bytecode compression.
     fn inspect_transaction_with_bytecode_compression(
         &mut self,
-        dispatcher: Self::TracerDispatcher,
-        tx: zksync_types::Transaction,
+        dispatcher: &mut Self::TracerDispatcher,
+        tx: Transaction,
         with_compression: bool,
     ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) {
-        dispatch_vm!(self.inspect_transaction_with_bytecode_compression(
-            dispatcher.into(),
+        dispatch_legacy_vm!(self.inspect_transaction_with_bytecode_compression(
+            &mut mem::take(dispatcher).into(),
             tx,
             with_compression
         ))
     }
 
-    fn record_vm_memory_metrics(&self) -> VmMemoryMetrics {
-        dispatch_vm!(self.record_vm_memory_metrics())
-    }
-
     /// Return the results of execution of all batch
     fn finish_batch(&mut self) -> FinishedL1Batch {
-        dispatch_vm!(self.finish_batch())
+        dispatch_legacy_vm!(self.finish_batch())
     }
 }
 
-impl<S: ReadStorage, H: HistoryMode> VmFactory<StorageView<S>> for VmInstance<S, H> {
+impl<S: ReadStorage, H: HistoryMode> VmFactory<StorageView<S>> for LegacyVmInstance<S, H> {
     fn new(
         batch_env: L1BatchEnv,
         system_env: SystemEnv,
@@ -104,21 +105,21 @@ impl<S: ReadStorage, H: HistoryMode> VmFactory<StorageView<S>> for VmInstance<S, H>
     }
 }
 
-impl<S: ReadStorage, H: HistoryMode> VmInterfaceHistoryEnabled for VmInstance<S, H> {
+impl<S: ReadStorage, H: HistoryMode> VmInterfaceHistoryEnabled for LegacyVmInstance<S, H> {
     fn make_snapshot(&mut self) {
-        dispatch_vm!(self.make_snapshot())
+        dispatch_legacy_vm!(self.make_snapshot());
     }
 
     fn rollback_to_the_latest_snapshot(&mut self) {
-        dispatch_vm!(self.rollback_to_the_latest_snapshot())
+        dispatch_legacy_vm!(self.rollback_to_the_latest_snapshot());
     }
 
     fn pop_snapshot_no_rollback(&mut self) {
-        dispatch_vm!(self.pop_snapshot_no_rollback())
+        dispatch_legacy_vm!(self.pop_snapshot_no_rollback());
     }
 }
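`&mut mem::take(dispatcher).into()` above is the bridge between the new by-reference API and the legacy by-value dispatchers: the tracers are moved out of the caller's slot (a default value is left behind, matching the doc comment above) and converted into the version-specific type. A reduced, self-contained sketch of the pattern with illustrative names, not types from this codebase:

    use std::mem;

    #[derive(Default, Debug)]
    struct Dispatcher(Vec<&'static str>);

    #[derive(Debug)]
    struct LegacyDispatcher(Vec<&'static str>);

    impl From<Dispatcher> for LegacyDispatcher {
        fn from(d: Dispatcher) -> Self {
            LegacyDispatcher(d.0)
        }
    }

    fn inspect(dispatcher: &mut Dispatcher) {
        // Move the tracers out (leaving `Dispatcher::default()` in place)
        // and convert into the by-value type a legacy API expects.
        let legacy: LegacyDispatcher = mem::take(dispatcher).into();
        legacy_inspect(legacy);
    }

    fn legacy_inspect(d: LegacyDispatcher) {
        println!("running with {} tracers", d.0.len());
    }

    fn main() {
        let mut dispatcher = Dispatcher(vec!["call_tracer"]);
        inspect(&mut dispatcher);
        assert!(dispatcher.0.is_empty()); // the tracers were taken
    }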
 
-impl<S: ReadStorage, H: HistoryMode> VmInstance<S, H> {
+impl<S: ReadStorage, H: HistoryMode> LegacyVmInstance<S, H> {
     pub fn new_with_specific_version(
         l1_batch_env: L1BatchEnv,
         system_env: SystemEnv,
@@ -133,7 +134,7 @@ impl<S: ReadStorage, H: HistoryMode> VmInstance<S, H> {
                     storage_view,
                     crate::vm_m5::vm_instance::MultiVMSubversion::V1,
                 );
-                VmInstance::VmM5(vm)
+                Self::VmM5(vm)
             }
             VmVersion::M5WithRefunds => {
                 let vm = crate::vm_m5::Vm::new_with_subversion(
@@ -142,7 +143,7 @@ impl<S: ReadStorage, H: HistoryMode> VmInstance<S, H> {
                    storage_view,
                    crate::vm_m5::vm_instance::MultiVMSubversion::V2,
                );
-                VmInstance::VmM5(vm)
+                Self::VmM5(vm)
             }
             VmVersion::M6Initial => {
                 let vm = crate::vm_m6::Vm::new_with_subversion(
@@ -151,7 +152,7 @@ impl<S: ReadStorage, H: HistoryMode> VmInstance<S, H> {
                     storage_view,
                     crate::vm_m6::vm_instance::MultiVMSubversion::V1,
                 );
-                VmInstance::VmM6(vm)
+                Self::VmM6(vm)
             }
             VmVersion::M6BugWithCompressionFixed => {
                 let vm = crate::vm_m6::Vm::new_with_subversion(
@@ -160,33 +161,33 @@ impl<S: ReadStorage, H: HistoryMode> VmInstance<S, H> {
                     storage_view,
                     crate::vm_m6::vm_instance::MultiVMSubversion::V2,
                 );
-                VmInstance::VmM6(vm)
+                Self::VmM6(vm)
             }
             VmVersion::Vm1_3_2 => {
                 let vm = crate::vm_1_3_2::Vm::new(l1_batch_env, system_env, storage_view);
-                VmInstance::Vm1_3_2(vm)
+                Self::Vm1_3_2(vm)
             }
             VmVersion::VmVirtualBlocks => {
                 let vm = crate::vm_virtual_blocks::Vm::new(l1_batch_env, system_env, storage_view);
-                VmInstance::VmVirtualBlocks(vm)
+                Self::VmVirtualBlocks(vm)
             }
             VmVersion::VmVirtualBlocksRefundsEnhancement => {
                 let vm =
                     crate::vm_refunds_enhancement::Vm::new(l1_batch_env, system_env, storage_view);
-                VmInstance::VmVirtualBlocksRefundsEnhancement(vm)
+                Self::VmVirtualBlocksRefundsEnhancement(vm)
             }
             VmVersion::VmBoojumIntegration => {
                 let vm =
                     crate::vm_boojum_integration::Vm::new(l1_batch_env, system_env, storage_view);
-                VmInstance::VmBoojumIntegration(vm)
+                Self::VmBoojumIntegration(vm)
             }
             VmVersion::Vm1_4_1 => {
                 let vm = crate::vm_1_4_1::Vm::new(l1_batch_env, system_env, storage_view);
-                VmInstance::Vm1_4_1(vm)
+                Self::Vm1_4_1(vm)
             }
             VmVersion::Vm1_4_2 => {
                 let vm = crate::vm_1_4_2::Vm::new(l1_batch_env, system_env, storage_view);
-                VmInstance::Vm1_4_2(vm)
+                Self::Vm1_4_2(vm)
             }
             VmVersion::Vm1_5_0SmallBootloaderMemory => {
                 let vm = crate::vm_latest::Vm::new_with_subversion(
@@ -195,7 +196,7 @@ impl<S: ReadStorage, H: HistoryMode> VmInstance<S, H> {
                     storage_view,
                     crate::vm_latest::MultiVMSubversion::SmallBootloaderMemory,
                 );
-                VmInstance::Vm1_5_0(vm)
+                Self::Vm1_5_0(vm)
             }
             VmVersion::Vm1_5_0IncreasedBootloaderMemory => {
                 let vm = crate::vm_latest::Vm::new_with_subversion(
@@ -204,31 +205,126 @@ impl<S: ReadStorage, H: HistoryMode> VmInstance<S, H> {
                     storage_view,
                     crate::vm_latest::MultiVMSubversion::IncreasedBootloaderMemory,
                 );
-                VmInstance::Vm1_5_0(vm)
+                Self::Vm1_5_0(vm)
             }
         }
     }
 
-    /// Creates a VM that may use the fast VM depending on the protocol version in `system_env` and `mode`.
-    pub fn maybe_fast(
+    /// Returns memory-related oracle metrics.
+    pub fn record_vm_memory_metrics(&self) -> VmMemoryMetrics {
+        dispatch_legacy_vm!(self.record_vm_memory_metrics())
+    }
+}
+
+/// Fast VM shadowed by the latest legacy VM.
+pub type ShadowedFastVm<S, Tr> = ShadowVm<
+    S,
+    crate::vm_latest::Vm<StorageView<S>, HistoryEnabled>,
+    crate::vm_fast::Vm<ImmutableStorageView<S>, Tr>,
+>;
+
+/// Fast VM variants.
+#[derive(Debug)]
+pub enum FastVmInstance<S: ReadStorage, Tr> {
+    /// Fast VM running in isolation.
+    Fast(crate::vm_fast::Vm<ImmutableStorageView<S>, Tr>),
+    /// Fast VM shadowed by the latest legacy VM.
+    Shadowed(ShadowedFastVm<S, Tr>),
+}
+
+macro_rules! dispatch_fast_vm {
+    ($self:ident.$function:ident($($params:tt)*)) => {
+        match $self {
+            Self::Fast(vm) => vm.$function($($params)*),
+            Self::Shadowed(vm) => vm.$function($($params)*),
+        }
+    };
+}
+
+impl<S: ReadStorage, Tr: Tracer + Default> VmInterface for FastVmInstance<S, Tr> {
+    type TracerDispatcher = (
+        crate::vm_latest::TracerDispatcher<StorageView<S>, HistoryEnabled>,
+        Tr,
+    );
+
+    fn push_transaction(&mut self, tx: Transaction) {
+        dispatch_fast_vm!(self.push_transaction(tx));
+    }
+
+    fn inspect(
+        &mut self,
+        tracer: &mut Self::TracerDispatcher,
+        execution_mode: VmExecutionMode,
+    ) -> VmExecutionResultAndLogs {
+        match self {
+            Self::Fast(vm) => vm.inspect(&mut tracer.1, execution_mode),
+            Self::Shadowed(vm) => vm.inspect(tracer, execution_mode),
+        }
+    }
+
+    fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) {
+        dispatch_fast_vm!(self.start_new_l2_block(l2_block_env));
+    }
+
+    fn inspect_transaction_with_bytecode_compression(
+        &mut self,
+        tracer: &mut Self::TracerDispatcher,
+        tx: Transaction,
+        with_compression: bool,
+    ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) {
+        match self {
+            Self::Fast(vm) => vm.inspect_transaction_with_bytecode_compression(
+                &mut tracer.1,
+                tx,
+                with_compression,
+            ),
+            Self::Shadowed(vm) => {
+                vm.inspect_transaction_with_bytecode_compression(tracer, tx, with_compression)
+            }
+        }
+    }
+
+    fn finish_batch(&mut self) -> FinishedL1Batch {
+        dispatch_fast_vm!(self.finish_batch())
+    }
+}
+
+impl<S: ReadStorage, Tr: Tracer + Default> VmInterfaceHistoryEnabled
+    for FastVmInstance<S, Tr>
+{
+    fn make_snapshot(&mut self) {
+        dispatch_fast_vm!(self.make_snapshot());
+    }
+
+    fn rollback_to_the_latest_snapshot(&mut self) {
+        dispatch_fast_vm!(self.rollback_to_the_latest_snapshot());
+    }
+
+    fn pop_snapshot_no_rollback(&mut self) {
+        dispatch_fast_vm!(self.pop_snapshot_no_rollback());
+    }
+}
+
+impl<S: ReadStorage, Tr: Tracer + Default> FastVmInstance<S, Tr> {
+    /// Creates an isolated fast VM.
+    pub fn fast(
         l1_batch_env: L1BatchEnv,
         system_env: SystemEnv,
         storage_view: StoragePtr<StorageView<S>>,
-        mode: FastVmMode,
     ) -> Self {
-        let vm_version = system_env.version.into();
-        match vm_version {
-            VmVersion::Vm1_5_0IncreasedBootloaderMemory => match mode {
-                FastVmMode::Old => Self::new(l1_batch_env, system_env, storage_view),
-                FastVmMode::New => {
-                    let storage = ImmutableStorageView::new(storage_view);
-                    Self::VmFast(crate::vm_fast::Vm::new(l1_batch_env, system_env, storage))
-                }
-                FastVmMode::Shadow => {
-                    Self::ShadowedVmFast(ShadowVm::new(l1_batch_env, system_env, storage_view))
-                }
-            },
-            _ => Self::new(l1_batch_env, system_env, storage_view),
-        }
+        Self::Fast(crate::vm_fast::Vm::new(
+            l1_batch_env,
+            system_env,
+            storage_view,
+        ))
+    }
+
+    /// Creates a shadowed fast VM.
+    pub fn shadowed(
+        l1_batch_env: L1BatchEnv,
+        system_env: SystemEnv,
+        storage_view: StoragePtr<StorageView<S>>,
+    ) -> Self {
+        Self::Shadowed(ShadowedFastVm::new(l1_batch_env, system_env, storage_view))
     }
 }
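With `maybe_fast` gone, choosing between the isolated and shadowed fast VM is now the caller's responsibility. A plausible selection helper, sketched under the assumption that the caller still carries a `FastVmMode`-style toggle (this diff removes that decision from `vm_instance.rs` itself, so the helper below is illustrative only):

    // Sketch: mode-based construction now lives at the call site.
    fn create_fast_vm<S: ReadStorage, Tr: Tracer + Default>(
        shadow: bool,
        l1_batch_env: L1BatchEnv,
        system_env: SystemEnv,
        storage_view: StoragePtr<StorageView<S>>,
    ) -> FastVmInstance<S, Tr> {
        if shadow {
            // Runs the fast VM and the latest legacy VM side by side,
            // cross-checking their outputs.
            FastVmInstance::shadowed(l1_batch_env, system_env, storage_view)
        } else {
            FastVmInstance::fast(l1_batch_env, system_env, storage_view)
        }
    }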
diff --git a/core/lib/object_store/src/file.rs b/core/lib/object_store/src/file.rs
index decba534d23..308cd65427f 100644
--- a/core/lib/object_store/src/file.rs
+++ b/core/lib/object_store/src/file.rs
@@ -43,6 +43,7 @@ impl FileBackedObjectStore {
             Bucket::ProofsFri,
             Bucket::StorageSnapshot,
             Bucket::TeeVerifierInput,
+            Bucket::VmDumps,
         ] {
             let bucket_path = format!("{base_dir}/{bucket}");
             fs::create_dir_all(&bucket_path).await?;
diff --git a/core/lib/object_store/src/objects.rs b/core/lib/object_store/src/objects.rs
index f5bb3706d9d..ff5fae2a81f 100644
--- a/core/lib/object_store/src/objects.rs
+++ b/core/lib/object_store/src/objects.rs
@@ -95,7 +95,6 @@ where
     type Key<'a> = SnapshotStorageLogsStorageKey;
 
     fn encode_key(key: Self::Key<'_>) -> String {
-        // FIXME: should keys be separated by version?
         format!(
             "snapshot_l1_batch_{}_storage_logs_part_{:0>4}.proto.gzip",
             key.l1_batch_number, key.chunk_id
diff --git a/core/lib/object_store/src/raw.rs b/core/lib/object_store/src/raw.rs
index 3c5a89f160a..740e8d76e24 100644
--- a/core/lib/object_store/src/raw.rs
+++ b/core/lib/object_store/src/raw.rs
@@ -20,6 +20,7 @@ pub enum Bucket {
     StorageSnapshot,
     DataAvailability,
     TeeVerifierInput,
+    VmDumps,
 }
 
 impl Bucket {
@@ -39,6 +40,7 @@ impl Bucket {
             Self::StorageSnapshot => "storage_logs_snapshots",
             Self::DataAvailability => "data_availability",
             Self::TeeVerifierInput => "tee_verifier_inputs",
+            Self::VmDumps => "vm_dumps",
         }
     }
 }
diff --git a/core/lib/protobuf_config/src/api.rs b/core/lib/protobuf_config/src/api.rs
index f4718c9f796..a0c3825228a 100644
--- a/core/lib/protobuf_config/src/api.rs
+++ b/core/lib/protobuf_config/src/api.rs
@@ -84,6 +84,7 @@ impl ProtoRepr for proto::Web3JsonRpc {
                 &self.estimate_gas_acceptable_overestimation,
             )
            .context("acceptable_overestimation")?,
+            estimate_gas_optimize_search: self.estimate_gas_optimize_search.unwrap_or(false),
             max_tx_size: required(&self.max_tx_size)
                 .and_then(|x| Ok((*x).try_into()?))
                 .context("max_tx_size")?,
@@ -167,6 +168,7 @@ impl ProtoRepr for proto::Web3JsonRpc {
             estimate_gas_acceptable_overestimation: Some(
                 this.estimate_gas_acceptable_overestimation,
             ),
+            estimate_gas_optimize_search: Some(this.estimate_gas_optimize_search),
             max_tx_size: Some(this.max_tx_size.try_into().unwrap()),
             vm_execution_cache_misses_limit: this
                 .vm_execution_cache_misses_limit
diff --git a/core/lib/protobuf_config/src/consensus.rs b/core/lib/protobuf_config/src/consensus.rs
index f5eb5c5b2f1..81cad437fe4 100644
--- a/core/lib/protobuf_config/src/consensus.rs
+++ b/core/lib/protobuf_config/src/consensus.rs
@@ -71,6 +71,13 @@ impl ProtoRepr for proto::GenesisSpec {
                 .map(|x| parse_h160(x))
                 .transpose()
                 .context("registry_address")?,
+            seed_peers: self
+                .seed_peers
+                .iter()
+                .enumerate()
+                .map(|(i, e)| e.read().context(i))
+                .collect::<Result<_, _>>()
+                .context("seed_peers")?,
         })
     }
     fn build(this: &Self::Type) -> Self {
@@ -81,6 +88,11 @@ impl ProtoRepr for proto::GenesisSpec {
             attesters: this.attesters.iter().map(ProtoRepr::build).collect(),
             leader: Some(this.leader.0.clone()),
             registry_address: this.registry_address.map(|a| format!("{:?}", a)),
+            seed_peers: this
+                .seed_peers
+                .iter()
+                .map(|(k, v)| proto::NodeAddr::build(&(k.clone(), v.clone())))
+                .collect(),
         }
     }
 }
@@ -99,15 +111,25 @@ impl ProtoRepr for proto::RpcConfig {
     }
 }
 
+impl ProtoRepr for proto::NodeAddr {
+    type Type = (NodePublicKey, Host);
+    fn read(&self) -> anyhow::Result<Self::Type> {
+        Ok((
+            NodePublicKey(required(&self.key).context("key")?.clone()),
+            Host(required(&self.addr).context("addr")?.clone()),
+        ))
+    }
+    fn build(this: &Self::Type) -> Self {
+        Self {
+            key: Some(this.0 .0.clone()),
+            addr: Some(this.1 .0.clone()),
+        }
+    }
+}
+
 impl ProtoRepr for proto::Config {
     type Type = ConsensusConfig;
     fn read(&self) -> anyhow::Result<Self::Type> {
-        let read_addr = |e: &proto::NodeAddr| {
-            let key = NodePublicKey(required(&e.key).context("key")?.clone());
-            let addr = Host(required(&e.addr).context("addr")?.clone());
-            anyhow::Ok((key, addr))
-        };
-
         let max_payload_size = required(&self.max_payload_size)
             .and_then(|x| Ok((*x).try_into()?))
             .context("max_payload_size")?;
@@ -144,10 +166,17 @@ impl ProtoRepr for proto::Config {
             .gossip_static_outbound
             .iter()
             .enumerate()
-            .map(|(i, e)| read_addr(e).context(i))
-            .collect::<Result<_, _>>()?,
+            .map(|(i, e)| e.read().context(i))
+            .collect::<Result<_, _>>()
+            .context("gossip_static_outbound")?,
             genesis_spec: read_optional_repr(&self.genesis_spec),
             rpc: read_optional_repr(&self.rpc_config),
+            debug_page_addr: self
+                .debug_page_addr
+                .as_ref()
+                .map(|x| Ok::<_, anyhow::Error>(x.parse()?))
+                .transpose()
+                .context("debug_page_addr")?,
         })
     }
 
@@ -168,13 +197,11 @@ impl ProtoRepr for proto::Config {
             gossip_static_outbound: this
                 .gossip_static_outbound
                 .iter()
-                .map(|x| proto::NodeAddr {
-                    key: Some(x.0 .0.clone()),
-                    addr: Some(x.1 .0.clone()),
-                })
+                .map(|(k, v)| proto::NodeAddr::build(&(k.clone(), v.clone())))
                 .collect(),
             genesis_spec: this.genesis_spec.as_ref().map(ProtoRepr::build),
             rpc_config: this.rpc.as_ref().map(ProtoRepr::build),
+            debug_page_addr: this.debug_page_addr.as_ref().map(|x| x.to_string()),
         }
     }
 }
diff --git a/core/lib/protobuf_config/src/da_client.rs b/core/lib/protobuf_config/src/da_client.rs
index ba6b8a0695b..597557db886 100644
--- a/core/lib/protobuf_config/src/da_client.rs
+++ b/core/lib/protobuf_config/src/da_client.rs
@@ -3,7 +3,7 @@ use zksync_config::{
     configs::{
         da_client::{
             eigen_da::EigenDAConfig,
-            DAClient::{Avail, EigenDA, ObjectStore},
+            DAClientConfig::{Avail, EigenDA, ObjectStore},
         },
         {self},
     },
@@ -27,7 +27,6 @@ impl ProtoRepr for proto::DataAvailabilityClient {
                 bridge_api_url: required(&conf.bridge_api_url)
                     .context("bridge_api_url")?
                     .clone(),
-                seed: required(&conf.seed).context("seed")?.clone(),
                 app_id: *required(&conf.app_id).context("app_id")?,
                 timeout: *required(&conf.timeout).context("timeout")? as usize,
                 max_retries: *required(&conf.max_retries).context("max_retries")? as usize,
@@ -44,17 +43,16 @@ impl ProtoRepr for proto::DataAvailabilityClient {
             }),
         };
 
-        Ok(configs::DAClientConfig { client })
+        Ok(client)
     }
 
     fn build(this: &Self::Type) -> Self {
-        match &this.client {
+        match &this {
             Avail(config) => Self {
                 config: Some(proto::data_availability_client::Config::Avail(
                     proto::AvailConfig {
                         api_node_url: Some(config.api_node_url.clone()),
                         bridge_api_url: Some(config.bridge_api_url.clone()),
-                        seed: Some(config.seed.clone()),
                         app_id: Some(config.app_id),
                         timeout: Some(config.timeout as u64),
                         max_retries: Some(config.max_retries as u64),
diff --git a/core/lib/protobuf_config/src/en.rs b/core/lib/protobuf_config/src/en.rs
index 9c07d1d3929..9d1a3931060 100644
--- a/core/lib/protobuf_config/src/en.rs
+++ b/core/lib/protobuf_config/src/en.rs
@@ -1,4 +1,7 @@
-use std::{num::NonZeroUsize, str::FromStr};
+use std::{
+    num::{NonZeroU64, NonZeroUsize},
+    str::FromStr,
+};
 
 use anyhow::Context;
 use zksync_basic_types::{url::SensitiveUrl, L1ChainId, L2ChainId};
@@ -36,6 +39,9 @@ impl ProtoRepr for proto::ExternalNode {
                 .as_ref()
                 .map(|a| a.parse().context("gateway_url"))
                 .transpose()?,
+            bridge_addresses_refresh_interval_sec: self
+                .bridge_addresses_refresh_interval_sec
+                .and_then(NonZeroU64::new),
         })
     }
 
@@ -55,6 +61,9 @@ impl ProtoRepr for proto::ExternalNode {
                 .gateway_url
                 .as_ref()
                 .map(|a| a.expose_str().to_string()),
+            bridge_addresses_refresh_interval_sec: this
+                .bridge_addresses_refresh_interval_sec
+                .map(|a| a.get()),
         }
     }
 }
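The `and_then(NonZeroU64::new)` above makes a configured `0` behave like an absent value rather than a zero-second refresh loop, since `NonZeroU64::new(0)` is `None`. A small illustration using only the standard library:

    use std::num::NonZeroU64;

    fn main() {
        // A zero interval collapses to `None`, i.e. "use the default".
        let raw: Option<u64> = Some(0);
        assert_eq!(raw.and_then(NonZeroU64::new), None);

        // Any positive interval survives the conversion.
        let raw: Option<u64> = Some(30);
        assert_eq!(raw.and_then(NonZeroU64::new), NonZeroU64::new(30));
    }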
diff --git a/core/lib/protobuf_config/src/eth.rs b/core/lib/protobuf_config/src/eth.rs
index 273b7f4e344..c1d95bd30d2 100644
--- a/core/lib/protobuf_config/src/eth.rs
+++ b/core/lib/protobuf_config/src/eth.rs
@@ -115,6 +115,9 @@ impl ProtoRepr for proto::Sender {
                 .parse(),
             tx_aggregation_only_prove_and_execute: self.tx_aggregation_paused.unwrap_or(false),
             tx_aggregation_paused: self.tx_aggregation_only_prove_and_execute.unwrap_or(false),
+            time_in_mempool_in_l1_blocks_cap: self
+                .time_in_mempool_in_l1_blocks_cap
+                .unwrap_or(Self::Type::default_time_in_mempool_in_l1_blocks_cap()),
         })
     }
 
@@ -147,6 +150,7 @@ impl ProtoRepr for proto::Sender {
             ),
             tx_aggregation_only_prove_and_execute: Some(this.tx_aggregation_only_prove_and_execute),
             tx_aggregation_paused: Some(this.tx_aggregation_paused),
+            time_in_mempool_in_l1_blocks_cap: Some(this.time_in_mempool_in_l1_blocks_cap),
         }
     }
 }
@@ -161,9 +165,9 @@ impl ProtoRepr for proto::GasAdjuster {
                 .and_then(|x| Ok((*x).try_into()?))
                 .context("max_base_fee_samples")?,
             pricing_formula_parameter_a: *required(&self.pricing_formula_parameter_a)
-                .context("pricing_formula_parameter_a")?,
+                .unwrap_or(&Self::Type::default_pricing_formula_parameter_a()),
             pricing_formula_parameter_b: *required(&self.pricing_formula_parameter_b)
-                .context("pricing_formula_parameter_b")?,
+                .unwrap_or(&Self::Type::default_pricing_formula_parameter_b()),
             internal_l1_pricing_multiplier: *required(&self.internal_l1_pricing_multiplier)
                 .context("internal_l1_pricing_multiplier")?,
             internal_enforced_l1_gas_price: self.internal_enforced_l1_gas_price,
diff --git a/core/lib/protobuf_config/src/lib.rs b/core/lib/protobuf_config/src/lib.rs
index a4822edbe8e..a0f4d45214f 100644
--- a/core/lib/protobuf_config/src/lib.rs
+++ b/core/lib/protobuf_config/src/lib.rs
@@ -77,7 +77,10 @@ pub fn decode_yaml_repr<T: ProtoRepr>(
 ) -> anyhow::Result<T::Type> {
     let yaml = std::fs::read_to_string(path).with_context(|| path.display().to_string())?;
     let d = serde_yaml::Deserializer::from_str(&yaml);
-    let this: T = zksync_protobuf::serde::deserialize_proto_with_options(d, deny_unknown_fields)?;
+    let this: T = zksync_protobuf::serde::Deserialize {
+        deny_unknown_fields,
+    }
+    .proto(d)?;
     this.read()
 }
 
diff --git a/core/lib/protobuf_config/src/proto/config/api.proto b/core/lib/protobuf_config/src/proto/config/api.proto
index e08677adc44..68475e442fd 100644
--- a/core/lib/protobuf_config/src/proto/config/api.proto
+++ b/core/lib/protobuf_config/src/proto/config/api.proto
@@ -40,6 +40,8 @@ message Web3JsonRpc {
   repeated MaxResponseSizeOverride max_response_body_size_overrides = 31;
   repeated string api_namespaces = 32; // Optional, if empty all namespaces are available
   optional bool extended_api_tracing = 33; // optional, default false
+  optional bool estimate_gas_optimize_search = 34; // optional, default false
+
   reserved 15; reserved "l1_to_l2_transactions_compatibility_mode";
   reserved 11; reserved "request_timeout";
   reserved 12; reserved "account_pks";
diff --git a/core/lib/protobuf_config/src/proto/config/da_client.proto b/core/lib/protobuf_config/src/proto/config/da_client.proto
index 9db95bf811b..04e00e985e7 100644
--- a/core/lib/protobuf_config/src/proto/config/da_client.proto
+++ b/core/lib/protobuf_config/src/proto/config/da_client.proto
@@ -7,10 +7,10 @@ import "zksync/config/object_store.proto";
 message AvailConfig {
   optional string api_node_url = 1;
   optional string bridge_api_url = 2;
-  optional string seed = 3;
   optional uint32 app_id = 4;
   optional uint64 timeout = 5;
   optional uint64 max_retries = 6;
+  reserved 3; reserved "seed";
 }
 
 message EigenDaConfig {
diff --git a/core/lib/protobuf_config/src/proto/config/en.proto b/core/lib/protobuf_config/src/proto/config/en.proto
index d8a13d31d4b..69412704ea0 100644
--- a/core/lib/protobuf_config/src/proto/config/en.proto
+++ b/core/lib/protobuf_config/src/proto/config/en.proto
@@ -10,4 +10,5 @@ message ExternalNode {
   optional uint64 main_node_rate_limit_rps = 6; // optional
   optional config.genesis.L1BatchCommitDataGeneratorMode l1_batch_commit_data_generator_mode = 7; // optional, default to rollup
   optional string gateway_url = 8; // optional
+  optional uint64 bridge_addresses_refresh_interval_sec = 9; // optional
 }
diff --git a/core/lib/protobuf_config/src/proto/config/eth_sender.proto b/core/lib/protobuf_config/src/proto/config/eth_sender.proto
index b102a08be04..6438573e08d 100644
--- a/core/lib/protobuf_config/src/proto/config/eth_sender.proto
+++ b/core/lib/protobuf_config/src/proto/config/eth_sender.proto
@@ -48,6 +48,7 @@ message Sender {
   reserved 19; reserved "proof_loading_mode";
   optional bool tx_aggregation_paused = 20; // required
   optional bool tx_aggregation_only_prove_and_execute = 21; // required
+  optional uint32 time_in_mempool_in_l1_blocks_cap = 22; // optional
 }
 
 message GasAdjuster {
diff --git a/core/lib/protobuf_config/src/proto/config/prover_job_monitor.proto b/core/lib/protobuf_config/src/proto/config/prover_job_monitor.proto
index 7b505aa3bcf..9aabf6e3483 100644
--- a/core/lib/protobuf_config/src/proto/config/prover_job_monitor.proto
+++ b/core/lib/protobuf_config/src/proto/config/prover_job_monitor.proto
@@ -17,4 +17,5 @@ message ProverJobMonitor {
   optional uint64 prover_queue_reporter_run_interval_ms = 12; // optional; ms
   optional uint64 witness_generator_queue_reporter_run_interval_ms = 13; // optional; ms
   optional uint64 witness_job_queuer_run_interval_ms = 14; // optional; ms
+  optional uint32 http_port = 15; // required; u32
 }
diff --git a/core/lib/protobuf_config/src/proto/config/secrets.proto b/core/lib/protobuf_config/src/proto/config/secrets.proto
index b711d81d575..17b915b3f08 100644
--- a/core/lib/protobuf_config/src/proto/config/secrets.proto
+++ b/core/lib/protobuf_config/src/proto/config/secrets.proto
@@ -19,9 +19,19 @@ message ConsensusSecrets {
   optional string attester_key = 3; // required for attester nodes; AttesterSecretKey
 }
 
+message AvailSecret {
+  optional string seed_phrase = 1;
+}
+
+message DataAvailabilitySecrets {
+  oneof da_secrets {
+    AvailSecret avail = 1;
+  }
+}
+
 message Secrets {
   optional DatabaseSecrets database = 1; // optional secrets for database
   optional L1Secrets l1 = 2; // optional secrets for l1 communication
   optional ConsensusSecrets consensus = 3; // optional secrets for consensus
+  optional DataAvailabilitySecrets da = 4; // optional secrets for data availability
 }
-
diff --git a/core/lib/protobuf_config/src/proto/core/consensus.proto b/core/lib/protobuf_config/src/proto/core/consensus.proto
index 835ead1ab65..92527df739a 100644
--- a/core/lib/protobuf_config/src/proto/core/consensus.proto
+++ b/core/lib/protobuf_config/src/proto/core/consensus.proto
@@ -31,10 +31,10 @@ package zksync.core.consensus;
 
 import "zksync/std.proto";
 
-// (public key, ip address) of a gossip network node.
+// (public key, host address) of a gossip network node.
 message NodeAddr {
   optional string key = 1; // required; NodePublicKey
-  optional string addr = 2; // required; IpAddr
+  optional string addr = 2; // required; Host
 }
 
 // Weighted member of a validator committee.
@@ -58,6 +58,7 @@ message GenesisSpec {
   repeated WeightedAttester attesters = 5; // can be empty; attester committee.
   // Currently not in consensus genesis, but still a part of the global configuration.
   optional string registry_address = 6; // optional; H160
+  repeated NodeAddr seed_peers = 7;
 }
 
 // Per peer connection RPC rate limits.
@@ -102,5 +103,9 @@ message Config {
   // RPC rate limits configuration.
   // If missing, defaults are used.
   optional RpcConfig rpc_config = 9; // optional
+
+  // IP:port to expose the debug page.
+  // Use `127.0.0.1:<port>` to only allow local access to the page.
+  optional string debug_page_addr = 11; // required; IpAddr
 }
diff --git a/core/lib/protobuf_config/src/prover_job_monitor.rs b/core/lib/protobuf_config/src/prover_job_monitor.rs
index a1c5a7c0599..a174d088240 100644
--- a/core/lib/protobuf_config/src/prover_job_monitor.rs
+++ b/core/lib/protobuf_config/src/prover_job_monitor.rs
@@ -95,6 +95,9 @@ impl ProtoRepr for proto::ProverJobMonitor {
                     .or_else(|| Some(Self::Type::default_witness_job_queuer_run_interval_ms())),
             )
             .context("witness_job_queuer_run_interval_ms")?,
+            http_port: required(&self.http_port)
+                .and_then(|x| Ok((*x).try_into()?))
+                .context("http_port")?,
         })
     }
 
@@ -126,6 +129,7 @@ impl ProtoRepr for proto::ProverJobMonitor {
                 this.witness_generator_queue_reporter_run_interval_ms,
             ),
             witness_job_queuer_run_interval_ms: Some(this.witness_job_queuer_run_interval_ms),
+            http_port: Some(this.http_port.into()),
         }
     }
 }
diff --git a/core/lib/protobuf_config/src/secrets.rs b/core/lib/protobuf_config/src/secrets.rs
index 7d10bef88a5..58735148007 100644
--- a/core/lib/protobuf_config/src/secrets.rs
+++ b/core/lib/protobuf_config/src/secrets.rs
@@ -2,15 +2,22 @@ use std::str::FromStr;
 
 use anyhow::Context;
 use secrecy::ExposeSecret;
-use zksync_basic_types::url::SensitiveUrl;
+use zksync_basic_types::{seed_phrase::SeedPhrase, url::SensitiveUrl};
 use zksync_config::configs::{
     consensus::{AttesterSecretKey, ConsensusSecrets, NodeSecretKey, ValidatorSecretKey},
-    secrets::Secrets,
+    da_client::avail::AvailSecrets,
+    secrets::{DataAvailabilitySecrets, Secrets},
     DatabaseSecrets, L1Secrets,
 };
 use zksync_protobuf::{required, ProtoRepr};
 
-use crate::{proto::secrets as proto, read_optional_repr};
+use crate::{
+    proto::{
+        secrets as proto,
+        secrets::{data_availability_secrets::DaSecrets, AvailSecret},
+    },
+    read_optional_repr,
+};
 
 impl ProtoRepr for proto::Secrets {
     type Type = Secrets;
@@ -20,6 +27,7 @@ impl ProtoRepr for proto::Secrets {
             consensus: read_optional_repr(&self.consensus),
             database: read_optional_repr(&self.database),
             l1: read_optional_repr(&self.l1),
+            data_availability: read_optional_repr(&self.da),
         })
     }
 
@@ -28,6 +36,7 @@ impl ProtoRepr for proto::Secrets {
             database: this.database.as_ref().map(ProtoRepr::build),
             l1: this.l1.as_ref().map(ProtoRepr::build),
             consensus: this.consensus.as_ref().map(ProtoRepr::build),
+            da: this.data_availability.as_ref().map(ProtoRepr::build),
         }
     }
 }
@@ -87,6 +96,53 @@ impl ProtoRepr for proto::L1Secrets {
     }
 }
 
+impl ProtoRepr for proto::DataAvailabilitySecrets {
+    type Type = DataAvailabilitySecrets;
+
+    fn read(&self) -> anyhow::Result<Self::Type> {
+        let secrets = required(&self.da_secrets).context("config")?;
+
+        let client = match secrets {
+            DaSecrets::Avail(avail_secret) => DataAvailabilitySecrets::Avail(AvailSecrets {
+                seed_phrase: Some(
+                    SeedPhrase::from_str(
+                        required(&avail_secret.seed_phrase).context("seed_phrase")?,
+                    )
+                    .unwrap(),
+                ),
+            }),
+        };
+
+        Ok(client)
+    }
+
+    fn build(this: &Self::Type) -> Self {
+        let secrets = match &this {
+            DataAvailabilitySecrets::Avail(config) => {
+                let seed_phrase = if config.seed_phrase.is_some() {
+                    Some(
+                        config
+                            .clone()
+                            .seed_phrase
+                            .unwrap()
+                            .0
+                            .expose_secret()
+                            .to_string(),
+                    )
+                } else {
+                    None
+                };
+
+                Some(DaSecrets::Avail(AvailSecret { seed_phrase }))
+            }
+        };
+
+        Self {
+            da_secrets: secrets,
+        }
+    }
+}
+
 impl ProtoRepr for proto::ConsensusSecrets {
     type Type = ConsensusSecrets;
     fn read(&self) -> anyhow::Result<Self::Type> {
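The `build` arm above spells out the secret extraction with an `is_some()`/`unwrap()` dance and a full `clone()` of the config. The same mapping can be written by borrowing, shown here as a self-contained sketch with mock types (not the PR's code; `secrecy` 0.8-style API assumed):

    use secrecy::{ExposeSecret, Secret};

    // Mock stand-ins for the config types used in the diff.
    struct SeedPhrase(pub Secret<String>);
    struct AvailSecrets {
        seed_phrase: Option<SeedPhrase>,
    }

    fn main() {
        let config = AvailSecrets {
            seed_phrase: Some(SeedPhrase(Secret::new("correct horse battery staple".into()))),
        };
        // Borrow instead of cloning the whole config; map instead of is_some()/unwrap().
        let seed_phrase: Option<String> = config
            .seed_phrase
            .as_ref()
            .map(|phrase| phrase.0.expose_secret().to_string());
        assert!(seed_phrase.is_some());
    }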
diff --git a/core/lib/prover_interface/Cargo.toml b/core/lib/prover_interface/Cargo.toml
index 8c73c2c6ac3..889b80b4fbe 100644
--- a/core/lib/prover_interface/Cargo.toml
+++ b/core/lib/prover_interface/Cargo.toml
@@ -20,7 +20,7 @@ circuit_sequencer_api_1_5_0.workspace = true
 serde.workspace = true
 strum = { workspace = true, features = ["derive"] }
-serde_with = { workspace = true, features = ["base64"] }
+serde_with = { workspace = true, features = ["base64", "hex"] }
 chrono = { workspace = true, features = ["serde"] }
 
 [dev-dependencies]
diff --git a/core/lib/prover_interface/src/api.rs b/core/lib/prover_interface/src/api.rs
index bc95345bbba..776cd3141cb 100644
--- a/core/lib/prover_interface/src/api.rs
+++ b/core/lib/prover_interface/src/api.rs
@@ -2,6 +2,7 @@
 //! This module defines the types used in the API.
 
 use serde::{Deserialize, Serialize};
+use serde_with::{hex::Hex, serde_as};
 use zksync_types::{
     protocol_version::{L1VerifierConfig, ProtocolSemanticVersion},
     tee_types::TeeType,
@@ -71,8 +72,11 @@ pub struct VerifyProofRequest(pub Box<FinalProof>);
 #[derive(Debug, PartialEq, Serialize, Deserialize)]
 pub struct SubmitTeeProofRequest(pub Box<L1BatchTeeProofForL1>);
 
+#[serde_as]
 #[derive(Debug, PartialEq, Serialize, Deserialize)]
 pub struct RegisterTeeAttestationRequest {
+    #[serde_as(as = "Hex")]
     pub attestation: Vec<u8>,
+    #[serde_as(as = "Hex")]
     pub pubkey: Vec<u8>,
 }
diff --git a/core/lib/prover_interface/src/outputs.rs b/core/lib/prover_interface/src/outputs.rs
index 9672bfb2142..60a9eaba760 100644
--- a/core/lib/prover_interface/src/outputs.rs
+++ b/core/lib/prover_interface/src/outputs.rs
@@ -2,6 +2,7 @@ use core::fmt;
 
 use circuit_sequencer_api_1_5_0::proof::FinalProof;
 use serde::{Deserialize, Serialize};
+use serde_with::{hex::Hex, serde_as};
 use zksync_object_store::{serialize_using_bincode, Bucket, StoredObject};
 use zksync_types::{protocol_version::ProtocolSemanticVersion, tee_types::TeeType, L1BatchNumber};
 
@@ -14,14 +15,18 @@ pub struct L1BatchProofForL1 {
 }
 
 /// A "final" TEE proof that can be sent to the L1 contract.
+#[serde_as]
 #[derive(Clone, PartialEq, Serialize, Deserialize)]
 pub struct L1BatchTeeProofForL1 {
     // signature generated within the TEE enclave, using the privkey corresponding to the pubkey
+    #[serde_as(as = "Hex")]
     pub signature: Vec<u8>,
     // pubkey used for signature verification; each key pair is attested by the TEE attestation
     // stored in the db
+    #[serde_as(as = "Hex")]
     pub pubkey: Vec<u8>,
     // data that was signed
+    #[serde_as(as = "Hex")]
     pub proof: Vec<u8>,
     // type of TEE used for attestation
     pub tee_type: TeeType,
diff --git a/core/lib/prover_interface/tests/job_serialization.rs b/core/lib/prover_interface/tests/job_serialization.rs
index a2aee0c2733..ead59749abe 100644
--- a/core/lib/prover_interface/tests/job_serialization.rs
+++ b/core/lib/prover_interface/tests/job_serialization.rs
@@ -167,9 +167,9 @@ fn test_proof_request_serialization() {
 #[test]
 fn test_tee_proof_request_serialization() {
     let tee_proof_str = r#"{
-        "signature": [ 0, 1, 2, 3, 4 ],
-        "pubkey": [ 5, 6, 7, 8, 9 ],
-        "proof": [ 10, 11, 12, 13, 14 ],
+        "signature": "0001020304",
+        "pubkey": "0506070809",
+        "proof": "0A0B0C0D0E",
         "tee_type": "sgx"
     }"#;
     let tee_proof_result = serde_json::from_str::<SubmitTeeProofRequest>(tee_proof_str).unwrap();
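The `#[serde_as(as = "Hex")]` annotations switch the JSON encoding of these byte vectors from integer arrays to hex strings, which is exactly what the updated test fixture above asserts. A self-contained sketch of the mechanism, assuming `serde_with` with the `hex` feature enabled:

    use serde::{Deserialize, Serialize};
    use serde_with::{hex::Hex, serde_as};

    #[serde_as]
    #[derive(Debug, PartialEq, Serialize, Deserialize)]
    struct Proof {
        #[serde_as(as = "Hex")]
        signature: Vec<u8>,
    }

    fn main() {
        let proof = Proof { signature: vec![0, 1, 2, 3, 4] };
        let json = serde_json::to_string(&proof).unwrap();
        // Serializes as a hex string rather than a JSON array of numbers.
        assert_eq!(json, r#"{"signature":"0001020304"}"#);
        // Round-trips back to the same bytes (uppercase hex is accepted too).
        assert_eq!(serde_json::from_str::<Proof>(&json).unwrap(), proof);
    }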
diff --git a/core/lib/tee_verifier/src/lib.rs b/core/lib/tee_verifier/src/lib.rs
index 8728a4e5274..68b25416d66 100644
--- a/core/lib/tee_verifier/src/lib.rs
+++ b/core/lib/tee_verifier/src/lib.rs
@@ -14,10 +14,11 @@ use zksync_merkle_tree::{
 use zksync_multivm::{
     interface::{
         storage::{InMemoryStorage, ReadStorage, StorageView},
-        FinishedL1Batch, L2BlockEnv, VmFactory, VmInterface, VmInterfaceHistoryEnabled,
+        FinishedL1Batch, L2BlockEnv, VmFactory, VmInterface, VmInterfaceExt,
+        VmInterfaceHistoryEnabled,
     },
     vm_latest::HistoryEnabled,
-    VmInstance,
+    LegacyVmInstance,
 };
 use zksync_prover_interface::inputs::{
     StorageLogMetadata, V1TeeVerifierInput, WitnessInputMerklePaths,
@@ -69,7 +70,7 @@ impl Verify for V1TeeVerifierInput {
         let storage_view = Rc::new(RefCell::new(StorageView::new(&raw_storage)));
         let batch_number = self.l1_batch_env.number;
 
-        let vm = VmInstance::new(self.l1_batch_env, self.system_env, storage_view);
+        let vm = LegacyVmInstance::new(self.l1_batch_env, self.system_env, storage_view);
 
         let vm_out = execute_vm(self.l2_blocks_execution_data, vm)?;
 
@@ -157,7 +158,7 @@ fn get_bowp_and_set_initial_values(
 /// Executes the VM and returns `FinishedL1Batch` on success.
 fn execute_vm<S: ReadStorage>(
     l2_blocks_execution_data: Vec<L2BlockExecutionData>,
-    mut vm: VmInstance<S, HistoryEnabled>,
+    mut vm: LegacyVmInstance<S, HistoryEnabled>,
 ) -> anyhow::Result<FinishedL1Batch> {
     let next_l2_blocks_data = l2_blocks_execution_data.iter().skip(1);
 
@@ -239,12 +240,12 @@ fn generate_tree_instructions(
 
 fn execute_tx<S: ReadStorage>(
     tx: &Transaction,
-    vm: &mut VmInstance<S, HistoryEnabled>,
+    vm: &mut LegacyVmInstance<S, HistoryEnabled>,
 ) -> anyhow::Result<()> {
     // Attempt to run VM with bytecode compression on.
     vm.make_snapshot();
     if vm
-        .inspect_transaction_with_bytecode_compression(Default::default(), tx.clone(), true)
+        .execute_transaction_with_bytecode_compression(tx.clone(), true)
         .0
         .is_ok()
     {
@@ -255,7 +256,7 @@ fn execute_tx<S: ReadStorage>(
     // If failed with bytecode compression, attempt to run without bytecode compression.
     vm.rollback_to_the_latest_snapshot();
     if vm
-        .inspect_transaction_with_bytecode_compression(Default::default(), tx.clone(), false)
+        .execute_transaction_with_bytecode_compression(tx.clone(), false)
         .0
         .is_err()
     {
diff --git a/core/lib/types/Cargo.toml b/core/lib/types/Cargo.toml
index 55cbef761ad..54c38384a7a 100644
--- a/core/lib/types/Cargo.toml
+++ b/core/lib/types/Cargo.toml
@@ -28,6 +28,7 @@ once_cell.workspace = true
 rlp.workspace = true
 serde.workspace = true
 serde_json.workspace = true
+serde_with = { workspace = true, features = ["hex"] }
 bigdecimal.workspace = true
 strum = { workspace = true, features = ["derive"] }
 thiserror.workspace = true
diff --git a/core/lib/types/src/api/mod.rs b/core/lib/types/src/api/mod.rs
index 916fae6a35b..1e5a1b3fe65 100644
--- a/core/lib/types/src/api/mod.rs
+++ b/core/lib/types/src/api/mod.rs
@@ -1,6 +1,7 @@
 use chrono::{DateTime, Utc};
 use serde::{de, Deserialize, Deserializer, Serialize, Serializer};
 use serde_json::Value;
+use serde_with::{hex::Hex, serde_as};
 use strum::Display;
 use zksync_basic_types::{
     tee_types::TeeType,
@@ -12,7 +13,10 @@ use zksync_contracts::BaseSystemContractsHashes;
 pub use crate::transaction_request::{
     Eip712Meta, SerializationTransactionError, TransactionRequest,
 };
-use crate::{protocol_version::L1VerifierConfig, Address, L2BlockNumber, ProtocolVersionId};
+use crate::{
+    debug_flat_call::DebugCallFlat, protocol_version::L1VerifierConfig, Address, L2BlockNumber,
+    ProtocolVersionId,
+};
 
 pub mod en;
 pub mod state_override;
@@ -601,6 +605,7 @@ pub struct ResultDebugCall {
 }
 
 #[derive(Debug, Default, Serialize, Deserialize, Clone, PartialEq)]
+#[serde(rename_all = "camelCase")]
 pub enum DebugCallType {
     #[default]
     Call,
@@ -700,19 +705,20 @@ impl ProtocolVersion {
     }
 }
 
-#[derive(Debug, Serialize, Deserialize, Clone)]
+#[derive(Debug, Serialize, Deserialize, Clone, Copy)]
 #[serde(rename_all = "camelCase")]
 pub enum SupportedTracers {
     CallTracer,
+    FlatCallTracer,
 }
 
-#[derive(Debug, Serialize, Deserialize, Clone, Default)]
+#[derive(Debug, Serialize, Deserialize, Clone, Default, Copy)]
 #[serde(rename_all = "camelCase")]
 pub struct CallTracerConfig {
     pub only_top_call: bool,
 }
 
-#[derive(Debug, Serialize, Deserialize, Clone)]
+#[derive(Debug, Serialize, Deserialize, Clone, Copy)]
 #[serde(rename_all = "camelCase")]
 pub struct TracerConfig {
     pub tracer: SupportedTracers,
@@ -720,6 +726,17 @@ pub struct TracerConfig {
     pub tracer_config: CallTracerConfig,
 }
 
+impl Default for TracerConfig {
+    fn default() -> Self {
+        TracerConfig {
+            tracer: SupportedTracers::CallTracer,
+            tracer_config: CallTracerConfig {
+                only_top_call: false,
+            },
+        }
+    }
+}
+
 #[derive(Debug, Clone, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub enum BlockStatus {
@@ -727,6 +744,54 @@ pub enum BlockStatus {
     Verified,
 }
 
+/// Result tracers need to have a nested `result` field for compatibility. So we have two
+/// different structs: one for block tracing and one for transaction and call tracing.
+#[derive(Debug, Serialize, Deserialize, Clone)]
+#[serde(untagged)]
+pub enum CallTracerBlockResult {
+    CallTrace(Vec<ResultDebugCall>),
+    FlatCallTrace(Vec<DebugCallFlat>),
+}
+
+impl CallTracerBlockResult {
+    pub fn unwrap_flat(self) -> Vec<DebugCallFlat> {
+        match self {
+            Self::CallTrace(_) => panic!("Result is a FlatCallTrace"),
+            Self::FlatCallTrace(trace) => trace,
+        }
+    }
+
+    pub fn unwrap_default(self) -> Vec<ResultDebugCall> {
+        match self {
+            Self::CallTrace(trace) => trace,
+            Self::FlatCallTrace(_) => panic!("Result is a CallTrace"),
+        }
+    }
+}
+
+#[derive(Debug, Serialize, Deserialize, Clone)]
+#[serde(untagged)]
+pub enum CallTracerResult {
+    CallTrace(DebugCall),
+    FlatCallTrace(Vec<DebugCallFlat>),
+}
+
+impl CallTracerResult {
+    pub fn unwrap_flat(self) -> Vec<DebugCallFlat> {
+        match self {
+            Self::CallTrace(_) => panic!("Result is a FlatCallTrace"),
+            Self::FlatCallTrace(trace) => trace,
+        }
+    }
+
+    pub fn unwrap_default(self) -> DebugCall {
+        match self {
+            Self::CallTrace(trace) => trace,
+            Self::FlatCallTrace(_) => panic!("Result is a CallTrace"),
+        }
+    }
+}
+
 #[derive(Debug, Clone, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct BlockDetailsBase {
@@ -784,15 +849,20 @@ pub struct Proof {
     pub storage_proof: Vec<StorageProof>,
 }
 
+#[serde_as]
 #[derive(Debug, Clone, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct TeeProof {
     pub l1_batch_number: L1BatchNumber,
     pub tee_type: Option<TeeType>,
+    #[serde_as(as = "Option<Hex>")]
     pub pubkey: Option<Vec<u8>>,
+    #[serde_as(as = "Option<Hex>")]
     pub signature: Option<Vec<u8>>,
+    #[serde_as(as = "Option<Hex>")]
     pub proof: Option<Vec<u8>>,
     pub proved_at: DateTime<Utc>,
+    #[serde_as(as = "Option<Hex>")]
     pub attestation: Option<Vec<u8>>,
 }
diff --git a/core/lib/types/src/api/state_override.rs b/core/lib/types/src/api/state_override.rs
index a2497a65c53..f2986610840 100644
--- a/core/lib/types/src/api/state_override.rs
+++ b/core/lib/types/src/api/state_override.rs
@@ -21,6 +21,11 @@ impl StateOverride {
         self.0.get(address)
     }
 
+    /// Gets mutable overrides for the specified account.
+    pub fn get_mut(&mut self, address: &Address) -> Option<&mut OverrideAccount> {
+        self.0.get_mut(address)
+    }
+
     /// Iterates over all account overrides.
     pub fn iter(&self) -> impl Iterator<Item = (&Address, &OverrideAccount)> + '_ {
         self.0.iter()
@@ -48,6 +53,12 @@ impl Bytecode {
     }
 }
 
+impl AsRef<[u8]> for Bytecode {
+    fn as_ref(&self) -> &[u8] {
+        &self.0 .0
+    }
+}
+
 impl Serialize for Bytecode {
     fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
         self.0.serialize(serializer)
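A short sketch of how these untagged enums are meant to be consumed on the client side: the tracer requested in `TracerConfig` determines which variant comes back, and the `unwrap_*` helpers assert that expectation. The `trace_block` call below is hypothetical, standing in for an RPC client method returning `CallTracerBlockResult`; it is not an API introduced by this diff:

    // Sketch only: request flat traces, then assert the flat variant.
    let config = TracerConfig {
        tracer: SupportedTracers::FlatCallTracer,
        tracer_config: CallTracerConfig { only_top_call: false },
    };
    let result = trace_block(block_number, config)?;
    // We asked for the flat tracer, so the flat variant is expected;
    // `unwrap_flat` panics if the server returned the nested format.
    let flat_calls = result.unwrap_flat();
    for call in &flat_calls {
        println!("{:?} -> {:?}", call.trace_address, call.transaction_hash);
    }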
diff --git a/core/lib/types/src/commitment/mod.rs b/core/lib/types/src/commitment/mod.rs
index 8bb2a16d3e5..0e9912688a9 100644
--- a/core/lib/types/src/commitment/mod.rs
+++ b/core/lib/types/src/commitment/mod.rs
@@ -92,7 +92,7 @@ pub struct L1BatchMetadata {
     /// commitment to the transactions in the batch.
     pub bootloader_initial_content_commitment: Option<H256>,
     pub state_diffs_compressed: Vec<u8>,
-    pub da_blob_id: Option<Vec<u8>>
+    pub da_blob_id: Option<Vec<u8>>,
 }
 
 impl L1BatchMetadata {
diff --git a/core/lib/types/src/debug_flat_call.rs b/core/lib/types/src/debug_flat_call.rs
index b5c0d79c857..89a008b5fb5 100644
--- a/core/lib/types/src/debug_flat_call.rs
+++ b/core/lib/types/src/debug_flat_call.rs
@@ -1,26 +1,24 @@
 use serde::{Deserialize, Serialize};
 use zksync_basic_types::{web3::Bytes, U256};
 
-use crate::{
-    api::{DebugCall, DebugCallType, ResultDebugCall},
-    Address,
-};
+use crate::{api::DebugCallType, Address, H256};
 
 #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct DebugCallFlat {
     pub action: Action,
-    pub result: CallResult,
+    pub result: Option<CallResult>,
     pub subtraces: usize,
-    pub traceaddress: Vec<usize>,
-    pub error: Option<String>,
-    pub revert_reason: Option<String>,
+    pub trace_address: Vec<usize>,
+    pub transaction_position: usize,
+    pub transaction_hash: H256,
+    pub r#type: DebugCallType,
 }
 
 #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
 #[serde(rename_all = "camelCase")]
 pub struct Action {
-    pub r#type: DebugCallType,
+    pub call_type: DebugCallType,
     pub from: Address,
     pub to: Address,
     pub gas: U256,
@@ -34,222 +32,3 @@ pub struct CallResult {
     pub output: Bytes,
     pub gas_used: U256,
 }
-
-pub fn flatten_debug_calls(calls: Vec<ResultDebugCall>) -> Vec<DebugCallFlat> {
-    let mut flattened_calls = Vec::new();
-    for (index, result_debug_call) in calls.into_iter().enumerate() {
-        let mut trace_address = vec![index]; // Initialize the trace address with the index of the top-level call
-        flatten_call_recursive(
-            &result_debug_call.result,
-            &mut flattened_calls,
-            &mut trace_address,
-        );
-    }
-    flattened_calls
-}
-
-fn flatten_call_recursive(
-    call: &DebugCall,
-    flattened_calls: &mut Vec<DebugCallFlat>,
-    trace_address: &mut Vec<usize>,
-) {
-    let flat_call = DebugCallFlat {
-        action: Action {
-            r#type: call.r#type.clone(),
-            from: call.from,
-            to: call.to,
-            gas: call.gas,
-            value: call.value,
-            input: call.input.clone(),
-        },
-        result: CallResult {
-            output: call.output.clone(),
-            gas_used: call.gas_used,
-        },
-        subtraces: call.calls.len(),
-        traceaddress: trace_address.clone(), // Clone the current trace address
-        error: call.error.clone(),
-        revert_reason: call.revert_reason.clone(),
-    };
-    flattened_calls.push(flat_call);
-
-    // Process nested calls
-    for (index, nested_call) in call.calls.iter().enumerate() {
-        trace_address.push(index); // Update the trace address for the nested call
-        flatten_call_recursive(nested_call, flattened_calls, trace_address);
-        trace_address.pop(); // Reset the trace address after processing the nested call (prevents the vector from filling up)
-    }
-}
-
-#[cfg(test)]
-mod test {
-    use super::*;
-    use crate::{
-        api::{DebugCall, DebugCallType, ResultDebugCall},
-        Address, BOOTLOADER_ADDRESS,
-    };
-
-    #[test]
-    fn test_flatten_debug_call() {
-        let result_debug_trace: Vec<ResultDebugCall> = [1, 1]
-            .map(|_| ResultDebugCall {
-                result: new_testing_debug_call(),
-            })
-            .into();
-
-        let debug_call_flat = flatten_debug_calls(result_debug_trace);
-        let expected_debug_call_flat = expected_flat_trace();
-        assert_eq!(debug_call_flat, expected_debug_call_flat);
-    }
-
-    fn new_testing_debug_call() -> DebugCall {
-        DebugCall {
-            r#type: DebugCallType::Call,
-            from: Address::zero(),
-            to: BOOTLOADER_ADDRESS,
-            gas: 1000.into(),
-            gas_used: 1000.into(),
-            value: 0.into(),
-            output: vec![].into(),
-            input: vec![].into(),
-            error: None,
-            revert_reason: None,
-            calls: new_testing_trace(),
-        }
-    }
-
-    fn new_testing_trace() -> Vec<DebugCall> {
-        let first_call_trace = DebugCall {
-            from: Address::zero(),
-            to: Address::zero(),
-            gas: 100.into(),
-            gas_used: 42.into(),
-            ..DebugCall::default()
-        };
-        let second_call_trace = DebugCall {
-            from: Address::zero(),
-            to: Address::zero(),
-            value: 123.into(),
-            gas: 58.into(),
-            gas_used: 10.into(),
-            input: Bytes(b"input".to_vec()),
-            output: Bytes(b"output".to_vec()),
-            ..DebugCall::default()
-        };
-        [first_call_trace, second_call_trace].into()
-    }
-
-    fn expected_flat_trace() -> Vec<DebugCallFlat> {
-        [
-            DebugCallFlat {
-                action: Action {
-                    r#type: DebugCallType::Call,
-                    from: Address::zero(),
-                    to: BOOTLOADER_ADDRESS,
-                    gas: 1000.into(),
-                    value: 0.into(),
-                    input: vec![].into(),
-                },
-                result: CallResult {
-                    output: vec![].into(),
-                    gas_used: 1000.into(),
-                },
-                subtraces: 2,
-                traceaddress: [0].into(),
-                error: None,
-                revert_reason: None,
-            },
-            DebugCallFlat {
-                action: Action {
-                    r#type: DebugCallType::Call,
-                    from: Address::zero(),
-                    to: Address::zero(),
-                    gas: 100.into(),
-                    value: 0.into(),
-                    input: vec![].into(),
-                },
-                result: CallResult {
-                    output: vec![].into(),
-                    gas_used: 42.into(),
-                },
-                subtraces: 0,
-                traceaddress: [0, 0].into(),
-                error: None,
-                revert_reason: None,
-            },
-            DebugCallFlat {
-                action: Action {
-                    r#type: DebugCallType::Call,
-                    from: Address::zero(),
-                    to: Address::zero(),
-                    gas: 58.into(),
-                    value: 123.into(),
-                    input: b"input".to_vec().into(),
-                },
-                result: CallResult {
-                    output: b"output".to_vec().into(),
-                    gas_used: 10.into(),
-                },
-                subtraces: 0,
-                traceaddress: [0, 1].into(),
-                error: None,
-                revert_reason: None,
-            },
-            DebugCallFlat {
-                action: Action {
-                    r#type: DebugCallType::Call,
-                    from: Address::zero(),
-                    to: BOOTLOADER_ADDRESS,
-                    gas: 1000.into(),
-                    value: 0.into(),
-                    input: vec![].into(),
-                },
-                result: CallResult {
-                    output: vec![].into(),
-                    gas_used: 1000.into(),
-                },
-                subtraces: 2,
-                traceaddress: [1].into(),
-                error: None,
-                revert_reason: None,
-            },
-            DebugCallFlat {
-                action: Action {
-                    r#type: DebugCallType::Call,
-                    from: Address::zero(),
-                    to: Address::zero(),
-                    gas: 100.into(),
-                    value: 0.into(),
-                    input: vec![].into(),
-                },
-                result: CallResult {
-                    output: vec![].into(),
-                    gas_used: 42.into(),
-                },
-                subtraces: 0,
-                traceaddress: [1, 0].into(),
-                error: None,
-                revert_reason: None,
-            },
-            DebugCallFlat {
-                action: Action {
-                    r#type: DebugCallType::Call,
-                    from: Address::zero(),
-                    to: Address::zero(),
-                    gas: 58.into(),
-                    value: 123.into(),
-                    input: b"input".to_vec().into(),
-                },
-                result: CallResult {
-                    output: b"output".to_vec().into(),
-                    gas_used: 10.into(),
-                },
-                subtraces: 0,
-                traceaddress: [1, 1].into(),
-                error: None,
-                revert_reason: None,
-            },
-        ]
-        .into()
-    }
-}
diff --git a/core/lib/types/src/l1/mod.rs b/core/lib/types/src/l1/mod.rs
index 05f08987a2d..e8144c75db2 100644
--- a/core/lib/types/src/l1/mod.rs
+++ b/core/lib/types/src/l1/mod.rs
@@ -274,7 +274,9 @@ impl From<L1Tx> for abi::NewPriorityRequest {
             transaction: abi::L2CanonicalTransaction {
                 tx_type: PRIORITY_OPERATION_L2_TX_TYPE.into(),
                 from: address_to_u256(&t.common_data.sender),
-                to: address_to_u256(&t.execute.contract_address),
+                // Unwrap used here because the contract address should always be present for L1 transactions.
+                // TODO: Consider restricting the contract address to not be optional in L1Tx.
+                to: address_to_u256(&t.execute.contract_address.unwrap()),
                 gas_limit: t.common_data.gas_limit,
                 gas_per_pubdata_byte_limit: t.common_data.gas_per_pubdata_limit,
                 max_fee_per_gas: t.common_data.max_fee_per_gas,
@@ -345,7 +347,7 @@ impl TryFrom<abi::NewPriorityRequest> for L1Tx {
         };
 
         let execute = Execute {
-            contract_address: u256_to_account_address(&req.transaction.to),
+            contract_address: Some(u256_to_account_address(&req.transaction.to)),
             calldata: req.transaction.data,
             factory_deps: req.factory_deps,
             value: req.transaction.value,
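Once `contract_address` becomes optional, only L1 priority operations may rely on it being present (hence the `unwrap` plus the TODO above); generic callers have to branch, as the `recipient_account` changes further below make explicit. A hedged sketch of the consuming pattern:

    // Sketch: handling the now-optional recipient of a transaction.
    match tx.recipient_account() {
        Some(to) => println!("call to {to:?}"),
        // `None` is reserved for transactions without a `to` address
        // (e.g. EVM-style deployments once EVM equivalence is enabled).
        None => println!("no recipient (deployment-style transaction)"),
    }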
diff --git a/core/lib/types/src/l2/mod.rs b/core/lib/types/src/l2/mod.rs
index 5a527640752..036d2a7a036 100644
--- a/core/lib/types/src/l2/mod.rs
+++ b/core/lib/types/src/l2/mod.rs
@@ -153,7 +153,7 @@ pub struct L2Tx {
 impl L2Tx {
     #[allow(clippy::too_many_arguments)]
     pub fn new(
-        contract_address: Address,
+        contract_address: Option<Address>,
         calldata: Vec<u8>,
         nonce: Nonce,
         fee: Fee,
@@ -185,7 +185,7 @@ impl L2Tx {
     #[allow(clippy::too_many_arguments)]
     pub fn new_signed(
-        contract_address: Address,
+        contract_address: Option<Address>,
         calldata: Vec<u8>,
         nonce: Nonce,
         fee: Fee,
@@ -232,7 +232,7 @@ impl L2Tx {
     }
 
     /// Returns recipient account of the transaction.
-    pub fn recipient_account(&self) -> Address {
+    pub fn recipient_account(&self) -> Option<Address> {
         self.execute.contract_address
     }
 
@@ -324,7 +324,7 @@ impl From<L2Tx> for TransactionRequest {
         let mut base_tx_req = TransactionRequest {
             nonce: U256::from(tx.common_data.nonce.0),
             from: Some(tx.common_data.initiator_address),
-            to: Some(tx.recipient_account()),
+            to: tx.recipient_account(),
             value: tx.execute.value,
             gas_price: tx.common_data.fee.max_fee_per_gas,
             max_priority_fee_per_gas: None,
@@ -400,7 +400,7 @@ impl From<L2Tx> for api::Transaction {
             chain_id: U256::from(tx.common_data.extract_chain_id().unwrap_or_default()),
             nonce: U256::from(tx.common_data.nonce.0),
             from: Some(tx.common_data.initiator_address),
-            to: Some(tx.recipient_account()),
+            to: tx.recipient_account(),
             value: tx.execute.value,
             gas_price: Some(tx.common_data.fee.max_fee_per_gas),
             max_priority_fee_per_gas: Some(tx.common_data.fee.max_priority_fee_per_gas),
diff --git a/core/lib/types/src/lib.rs b/core/lib/types/src/lib.rs
index 402e16afd43..67661eb14ad 100644
--- a/core/lib/types/src/lib.rs
+++ b/core/lib/types/src/lib.rs
@@ -5,7 +5,7 @@
 
 #![allow(clippy::upper_case_acronyms, clippy::derive_partial_eq_without_eq)]
 
-use std::{fmt, fmt::Debug};
+use std::fmt;
 
 use anyhow::Context as _;
 use fee::encoding_len;
@@ -88,9 +88,16 @@ pub struct Transaction {
     pub raw_bytes: Option<Bytes>,
 }
 
-impl std::fmt::Debug for Transaction {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        f.debug_tuple("Transaction").field(&self.hash()).finish()
+impl fmt::Debug for Transaction {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        if let Some(hash) = self.hash_for_debugging() {
+            f.debug_tuple("Transaction").field(&hash).finish()
+        } else {
+            f.debug_struct("Transaction")
+                .field("initiator_account", &self.initiator_account())
+                .field("nonce", &self.nonce())
+                .finish()
+        }
     }
 }
 
@@ -104,7 +111,7 @@ impl Eq for Transaction {}
 
 impl Transaction {
     /// Returns recipient account of the transaction.
-    pub fn recipient_account(&self) -> Address {
+    pub fn recipient_account(&self) -> Option<Address> {
{ self.execute.contract_address } @@ -136,6 +143,15 @@ impl Transaction { } } + fn hash_for_debugging(&self) -> Option { + match &self.common_data { + ExecuteTransactionCommon::L1(data) => Some(data.hash()), + ExecuteTransactionCommon::L2(data) if data.input.is_some() => Some(data.hash()), + ExecuteTransactionCommon::L2(_) => None, + ExecuteTransactionCommon::ProtocolUpgrade(data) => Some(data.hash()), + } + } + /// Returns the account that initiated this transaction. pub fn initiator_account(&self) -> Address { match &self.common_data { @@ -253,7 +269,7 @@ impl TryFrom for abi::Transaction { tx: abi::L2CanonicalTransaction { tx_type: PRIORITY_OPERATION_L2_TX_TYPE.into(), from: address_to_u256(&data.sender), - to: address_to_u256(&tx.execute.contract_address), + to: address_to_u256(&tx.execute.contract_address.unwrap_or_default()), gas_limit: data.gas_limit, gas_per_pubdata_byte_limit: data.gas_per_pubdata_limit, max_fee_per_gas: data.max_fee_per_gas, @@ -284,7 +300,7 @@ impl TryFrom for abi::Transaction { tx: abi::L2CanonicalTransaction { tx_type: PROTOCOL_UPGRADE_TX_TYPE.into(), from: address_to_u256(&data.sender), - to: address_to_u256(&tx.execute.contract_address), + to: address_to_u256(&tx.execute.contract_address.unwrap_or_default()), gas_limit: data.gas_limit, gas_per_pubdata_byte_limit: data.gas_per_pubdata_limit, max_fee_per_gas: data.max_fee_per_gas, @@ -377,7 +393,7 @@ impl TryFrom for Transaction { unknown_type => anyhow::bail!("unknown tx type {unknown_type}"), }, execute: Execute { - contract_address: u256_to_account_address(&tx.to), + contract_address: Some(u256_to_account_address(&tx.to)), calldata: tx.data, factory_deps, value: tx.value, diff --git a/core/lib/types/src/snapshots.rs b/core/lib/types/src/snapshots.rs index a29e5a91bf1..156d1e4723d 100644 --- a/core/lib/types/src/snapshots.rs +++ b/core/lib/types/src/snapshots.rs @@ -270,11 +270,11 @@ where pub struct SnapshotRecoveryStatus { pub l1_batch_number: L1BatchNumber, pub l1_batch_root_hash: H256, - #[debug("{} (raw: {})", utils::display_timestamp(**l1_batch_timestamp), l1_batch_timestamp)] + #[debug("{} (raw: {})", utils::display_timestamp(*l1_batch_timestamp), l1_batch_timestamp)] pub l1_batch_timestamp: u64, pub l2_block_number: L2BlockNumber, pub l2_block_hash: H256, - #[debug("{} (raw: {})", utils::display_timestamp(**l2_block_timestamp), l2_block_timestamp)] + #[debug("{} (raw: {})", utils::display_timestamp(*l2_block_timestamp), l2_block_timestamp)] pub l2_block_timestamp: u64, pub protocol_version: ProtocolVersionId, #[debug( diff --git a/core/lib/types/src/transaction_request.rs b/core/lib/types/src/transaction_request.rs index c71e6e4206c..5f26b1d6a6a 100644 --- a/core/lib/types/src/transaction_request.rs +++ b/core/lib/types/src/transaction_request.rs @@ -108,8 +108,8 @@ impl CallRequestBuilder { } /// Set to address (None allowed for eth_estimateGas) - pub fn to(mut self, to: Address) -> Self { - self.call_request.to = Some(to); + pub fn to(mut self, to: Option
) -> Self { + self.call_request.to = to; self } @@ -817,10 +817,13 @@ impl L2Tx { let meta = value.eip712_meta.take().unwrap_or_default(); validate_factory_deps(&meta.factory_deps)?; + // TODO: Remove this check when evm equivalence gets enabled + if value.to.is_none() { + return Err(SerializationTransactionError::ToAddressIsNull); + } + let mut tx = L2Tx::new( - value - .to - .ok_or(SerializationTransactionError::ToAddressIsNull)?, + value.to, value.input.0.clone(), nonce, fee, diff --git a/core/lib/types/src/tx/execute.rs b/core/lib/types/src/tx/execute.rs index 03762040a6b..c133261bc23 100644 --- a/core/lib/types/src/tx/execute.rs +++ b/core/lib/types/src/tx/execute.rs @@ -15,7 +15,7 @@ use crate::{ethabi, Address, EIP712TypedStructure, StructBuilder, H256, U256}; #[derive(Serialize, Deserialize)] #[serde(rename_all = "camelCase")] struct ExecuteSerde { - contract_address: Address, + contract_address: Option
, #[serde(with = "ZeroPrefixHexSerde")] calldata: Vec, value: U256, @@ -25,7 +25,7 @@ struct ExecuteSerde { /// `Execute` transaction executes a previously deployed smart contract in the L2 rollup. #[derive(Clone, Default, PartialEq)] pub struct Execute { - pub contract_address: Address, + pub contract_address: Option
, pub calldata: Vec, pub value: U256, /// Factory dependencies: list of contract bytecodes associated with the deploy transaction. @@ -72,7 +72,9 @@ impl EIP712TypedStructure for Execute { const TYPE_NAME: &'static str = "Transaction"; fn build_structure(&self, builder: &mut BUILDER) { - builder.add_member("to", &U256::from(self.contract_address.as_bytes())); + if let Some(contract_address) = self.contract_address { + builder.add_member("to", &contract_address); + } builder.add_member("value", &self.value); builder.add_member("data", &self.calldata.as_slice()); // Factory deps are not included into the transaction signature, since they are parsed from the diff --git a/core/lib/vlog/src/lib.rs b/core/lib/vlog/src/lib.rs index 268fbd0b39e..598d17879b8 100644 --- a/core/lib/vlog/src/lib.rs +++ b/core/lib/vlog/src/lib.rs @@ -4,6 +4,7 @@ use std::time::Duration; use ::sentry::ClientInitGuard; +use anyhow::Context as _; use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt}; pub use crate::{logs::Logs, opentelemetry::OpenTelemetry, sentry::Sentry}; @@ -126,8 +127,9 @@ impl ObservabilityBuilder { self } - /// Initializes the observability subsystem. - pub fn build(self) -> ObservabilityGuard { + /// Tries to initialize the observability subsystem. Returns an error if it's already initialized. + /// This is mostly useful in tests. + pub fn try_build(self) -> anyhow::Result { let logs = self.logs.unwrap_or_default(); logs.install_panic_hook(); @@ -151,14 +153,20 @@ impl ObservabilityBuilder { .with(logs_layer) .with(otlp_tracing_layer) .with(otlp_logging_layer) - .init(); + .try_init() + .context("failed installing global tracer / logger")?; let sentry_guard = self.sentry.map(|sentry| sentry.install()); - ObservabilityGuard { + Ok(ObservabilityGuard { otlp_tracing_provider, otlp_logging_provider, sentry_guard, - } + }) + } + + /// Initializes the observability subsystem. 
+ pub fn build(self) -> ObservabilityGuard { + self.try_build().unwrap() } } diff --git a/core/lib/vm_executor/src/batch/factory.rs b/core/lib/vm_executor/src/batch/factory.rs index d6f7555b767..146f0bb4e5c 100644 --- a/core/lib/vm_executor/src/batch/factory.rs +++ b/core/lib/vm_executor/src/batch/factory.rs @@ -1,4 +1,4 @@ -use std::{marker::PhantomData, rc::Rc, sync::Arc, time::Duration}; +use std::{borrow::Cow, fmt, marker::PhantomData, rc::Rc, sync::Arc, time::Duration}; use anyhow::Context as _; use once_cell::sync::OnceCell; @@ -6,13 +6,16 @@ use tokio::sync::mpsc; use zksync_multivm::{ interface::{ executor::{BatchExecutor, BatchExecutorFactory}, - storage::{ReadStorage, StorageView, StorageViewStats}, - BatchTransactionExecutionResult, ExecutionResult, FinishedL1Batch, Halt, L1BatchEnv, - L2BlockEnv, SystemEnv, VmInterface, VmInterfaceHistoryEnabled, + storage::{ReadStorage, StoragePtr, StorageView, StorageViewStats}, + utils::DivergenceHandler, + BatchTransactionExecutionResult, BytecodeCompressionError, CompressedBytecodeInfo, + ExecutionResult, FinishedL1Batch, Halt, L1BatchEnv, L2BlockEnv, SystemEnv, VmFactory, + VmInterface, VmInterfaceHistoryEnabled, }, tracers::CallTracer, + vm_fast, vm_latest::HistoryEnabled, - MultiVMTracer, VmInstance, + FastVmInstance, LegacyVmInstance, MultiVMTracer, }; use zksync_types::{vm::FastVmMode, Transaction}; @@ -20,13 +23,43 @@ use super::{ executor::{Command, MainBatchExecutor}, metrics::{TxExecutionStage, BATCH_TIP_METRICS, EXECUTOR_METRICS, KEEPER_METRICS}, }; -use crate::shared::{InteractionType, STORAGE_METRICS}; +use crate::shared::{InteractionType, Sealed, STORAGE_METRICS}; + +/// Encapsulates a tracer used during batch processing. Currently supported tracers are `()` (no-op) and [`TraceCalls`]. +/// +/// All members of this trait are implementation details. +pub trait BatchTracer: fmt::Debug + 'static + Send + Sealed { + /// True if call tracing is enabled. Used by legacy VMs which enable / disable call tracing dynamically. + #[doc(hidden)] + const TRACE_CALLS: bool; + /// Tracer for the fast VM. + #[doc(hidden)] + type Fast: vm_fast::Tracer + Default + 'static; +} + +impl Sealed for () {} + +/// No-op implementation that doesn't trace anything. +impl BatchTracer for () { + const TRACE_CALLS: bool = false; + type Fast = (); +} + +/// [`BatchTracer`] implementation tracing calls (returned in [`BatchTransactionExecutionResult`]s). +#[derive(Debug)] +pub struct TraceCalls(()); + +impl Sealed for TraceCalls {} + +impl BatchTracer for TraceCalls { + const TRACE_CALLS: bool = true; + type Fast = (); // TODO: change once call tracing is implemented in fast VM +} /// The default implementation of [`BatchExecutorFactory`]. /// Creates real batch executors which maintain the VM (as opposed to the test factories which don't use the VM). #[derive(Debug, Clone)] -pub struct MainBatchExecutorFactory { - save_call_traces: bool, +pub struct MainBatchExecutorFactory { /// Whether batch executor would allow transactions with bytecode that cannot be compressed. /// For new blocks, bytecode compression is mandatory -- if bytecode compression is not supported, /// the transaction will be rejected. 
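The doc comment above describes two compression policies: when sealing new blocks, a compression failure rejects the transaction; in sandbox-style execution, the transaction is re-run without compression. A toy decision-table sketch (names are illustrative, not the real API):

```rust
// Minimal model of the `optional_bytecode_compression` policy.
enum Compression { Ok, Failed }

fn execute(optional_compression: bool, compression: Compression) -> Result<&'static str, &'static str> {
    match (compression, optional_compression) {
        (Compression::Ok, _) => Ok("executed with compressed bytecodes"),
        // Sandbox / gas estimation: re-run without compression instead of rejecting.
        (Compression::Failed, true) => Ok("re-executed without compression"),
        // Sealing new blocks: compression is mandatory, so the tx is rejected.
        (Compression::Failed, false) => Err("rejected: bytecodes cannot be published"),
    }
}

fn main() {
    assert!(execute(false, Compression::Failed).is_err());
    assert!(execute(true, Compression::Failed).is_ok());
}
```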
@@ -36,15 +69,18 @@ pub struct MainBatchExecutorFactory { optional_bytecode_compression: bool, fast_vm_mode: FastVmMode, observe_storage_metrics: bool, + divergence_handler: Option, + _tracer: PhantomData, } -impl MainBatchExecutorFactory { - pub fn new(save_call_traces: bool, optional_bytecode_compression: bool) -> Self { +impl MainBatchExecutorFactory { + pub fn new(optional_bytecode_compression: bool) -> Self { Self { - save_call_traces, optional_bytecode_compression, fast_vm_mode: FastVmMode::Old, observe_storage_metrics: false, + divergence_handler: None, + _tracer: PhantomData, } } @@ -64,9 +100,16 @@ impl MainBatchExecutorFactory { pub fn observe_storage_metrics(&mut self) { self.observe_storage_metrics = true; } + + pub fn set_divergence_handler(&mut self, handler: DivergenceHandler) { + tracing::info!("Set VM divergence handler"); + self.divergence_handler = Some(handler); + } } -impl BatchExecutorFactory for MainBatchExecutorFactory { +impl BatchExecutorFactory + for MainBatchExecutorFactory +{ fn init_batch( &mut self, storage: S, @@ -77,12 +120,13 @@ impl BatchExecutorFactory for MainBatchExecu // until a previous command is processed), capacity 1 is enough for the commands channel. let (commands_sender, commands_receiver) = mpsc::channel(1); let executor = CommandReceiver { - save_call_traces: self.save_call_traces, optional_bytecode_compression: self.optional_bytecode_compression, fast_vm_mode: self.fast_vm_mode, observe_storage_metrics: self.observe_storage_metrics, + divergence_handler: self.divergence_handler.clone(), commands: commands_receiver, _storage: PhantomData, + _tracer: PhantomData::, }; let handle = @@ -91,6 +135,103 @@ impl BatchExecutorFactory for MainBatchExecu } } +type BytecodeResult = Result, BytecodeCompressionError>; + +#[derive(Debug)] +enum BatchVm { + Legacy(LegacyVmInstance), + Fast(FastVmInstance), +} + +macro_rules! 
dispatch_batch_vm { + ($self:ident.$function:ident($($params:tt)*)) => { + match $self { + Self::Legacy(vm) => vm.$function($($params)*), + Self::Fast(vm) => vm.$function($($params)*), + } + }; +} + +impl BatchVm { + fn new( + l1_batch_env: L1BatchEnv, + system_env: SystemEnv, + storage_ptr: StoragePtr>, + mode: FastVmMode, + ) -> Self { + match mode { + FastVmMode::Old => { + Self::Legacy(LegacyVmInstance::new(l1_batch_env, system_env, storage_ptr)) + } + FastVmMode::New => { + Self::Fast(FastVmInstance::fast(l1_batch_env, system_env, storage_ptr)) + } + FastVmMode::Shadow => Self::Fast(FastVmInstance::shadowed( + l1_batch_env, + system_env, + storage_ptr, + )), + } + } + + fn start_new_l2_block(&mut self, l2_block: L2BlockEnv) { + dispatch_batch_vm!(self.start_new_l2_block(l2_block)); + } + + fn finish_batch(&mut self) -> FinishedL1Batch { + dispatch_batch_vm!(self.finish_batch()) + } + + fn make_snapshot(&mut self) { + dispatch_batch_vm!(self.make_snapshot()); + } + + fn rollback_to_the_latest_snapshot(&mut self) { + dispatch_batch_vm!(self.rollback_to_the_latest_snapshot()); + } + + fn pop_snapshot_no_rollback(&mut self) { + dispatch_batch_vm!(self.pop_snapshot_no_rollback()); + } + + fn inspect_transaction( + &mut self, + tx: Transaction, + with_compression: bool, + ) -> BatchTransactionExecutionResult { + let call_tracer_result = Arc::new(OnceCell::default()); + let legacy_tracer = if Tr::TRACE_CALLS { + vec![CallTracer::new(call_tracer_result.clone()).into_tracer_pointer()] + } else { + vec![] + }; + let mut legacy_tracer = legacy_tracer.into(); + + let (compression_result, tx_result) = match self { + Self::Legacy(vm) => vm.inspect_transaction_with_bytecode_compression( + &mut legacy_tracer, + tx, + with_compression, + ), + Self::Fast(vm) => { + let mut tracer = (legacy_tracer.into(), ::default()); + vm.inspect_transaction_with_bytecode_compression(&mut tracer, tx, with_compression) + } + }; + + let compressed_bytecodes = compression_result.map(Cow::into_owned); + let call_traces = Arc::try_unwrap(call_tracer_result) + .expect("failed extracting call traces") + .take() + .unwrap_or_default(); + BatchTransactionExecutionResult { + tx_result: Box::new(tx_result), + compressed_bytecodes, + call_traces, + } + } +} + /// Implementation of the "primary" (non-test) batch executor. /// Upon launch, it initializes the VM object with provided block context and properties, and keeps invoking the commands /// sent to it one by one until the batch is finished. @@ -98,16 +239,17 @@ impl BatchExecutorFactory for MainBatchExecu /// One `CommandReceiver` can execute exactly one batch, so once the batch is sealed, a new `CommandReceiver` object must /// be constructed. 
#[derive(Debug)] -struct CommandReceiver { - save_call_traces: bool, +struct CommandReceiver { optional_bytecode_compression: bool, fast_vm_mode: FastVmMode, observe_storage_metrics: bool, + divergence_handler: Option, commands: mpsc::Receiver, _storage: PhantomData, + _tracer: PhantomData, } -impl CommandReceiver { +impl CommandReceiver { pub(super) fn run( mut self, storage: S, @@ -117,7 +259,7 @@ impl CommandReceiver { tracing::info!("Starting executing L1 batch #{}", &l1_batch_params.number); let storage_view = StorageView::new(storage).to_rc_ptr(); - let mut vm = VmInstance::maybe_fast( + let mut vm = BatchVm::::new( l1_batch_params, system_env, storage_view.clone(), @@ -126,6 +268,12 @@ impl CommandReceiver { let mut batch_finished = false; let mut prev_storage_stats = StorageViewStats::default(); + if let BatchVm::Fast(FastVmInstance::Shadowed(shadowed)) = &mut vm { + if let Some(handler) = self.divergence_handler.take() { + shadowed.set_divergence_handler(handler); + } + } + while let Some(cmd) = self.commands.blocking_recv() { match cmd { Command::ExecuteTx(tx, resp) => { @@ -151,7 +299,7 @@ impl CommandReceiver { } } Command::StartNextL2Block(l2_block_env, resp) => { - self.start_next_l2_block(l2_block_env, &mut vm); + vm.start_new_l2_block(l2_block_env); if resp.send(()).is_err() { break; } @@ -187,7 +335,7 @@ impl CommandReceiver { fn execute_tx( &self, transaction: Transaction, - vm: &mut VmInstance, + vm: &mut BatchVm, ) -> anyhow::Result<(BatchTransactionExecutionResult, Duration)> { // Executing a next transaction means that a previous transaction was either rolled back (in which case its snapshot // was already removed), or that we build on top of it (in which case, it can be removed now). @@ -206,24 +354,13 @@ impl CommandReceiver { Ok((result, latency.observe())) } - fn rollback_last_tx(&self, vm: &mut VmInstance) { + fn rollback_last_tx(&self, vm: &mut BatchVm) { let latency = KEEPER_METRICS.tx_execution_time[&TxExecutionStage::TxRollback].start(); vm.rollback_to_the_latest_snapshot(); latency.observe(); } - fn start_next_l2_block( - &self, - l2_block_env: L2BlockEnv, - vm: &mut VmInstance, - ) { - vm.start_new_l2_block(l2_block_env); - } - - fn finish_batch( - &self, - vm: &mut VmInstance, - ) -> anyhow::Result { + fn finish_batch(&self, vm: &mut BatchVm) -> anyhow::Result { // The vm execution was paused right after the last transaction was executed. // There is some post-processing work that the VM needs to do before the block is fully processed. let result = vm.finish_batch(); @@ -242,7 +379,7 @@ impl CommandReceiver { fn execute_tx_in_vm_with_optional_compression( &self, tx: &Transaction, - vm: &mut VmInstance, + vm: &mut BatchVm, ) -> anyhow::Result { // Note, that the space where we can put the calldata for compressing transactions // is limited and the transactions do not pay for taking it. @@ -253,24 +390,12 @@ impl CommandReceiver { // it means that there is no sense in polluting the space of compressed bytecodes, // and so we re-execute the transaction, but without compression. - let call_tracer_result = Arc::new(OnceCell::default()); - let tracer = if self.save_call_traces { - vec![CallTracer::new(call_tracer_result.clone()).into_tracer_pointer()] - } else { - vec![] - }; - - if let (Ok(compressed_bytecodes), tx_result) = - vm.inspect_transaction_with_bytecode_compression(tracer.into(), tx.clone(), true) - { - let call_traces = Arc::try_unwrap(call_tracer_result) - .map_err(|_| anyhow::anyhow!("failed extracting call traces"))? 
- .take() - .unwrap_or_default(); + let res = vm.inspect_transaction(tx.clone(), true); + if let Ok(compressed_bytecodes) = res.compressed_bytecodes { return Ok(BatchTransactionExecutionResult { - tx_result: Box::new(tx_result), - compressed_bytecodes: compressed_bytecodes.into_owned(), - call_traces, + tx_result: res.tx_result, + compressed_bytecodes, + call_traces: res.call_traces, }); } @@ -279,29 +404,14 @@ impl CommandReceiver { vm.rollback_to_the_latest_snapshot(); vm.make_snapshot(); - let call_tracer_result = Arc::new(OnceCell::default()); - let tracer = if self.save_call_traces { - vec![CallTracer::new(call_tracer_result.clone()).into_tracer_pointer()] - } else { - vec![] - }; - - let (compression_result, tx_result) = - vm.inspect_transaction_with_bytecode_compression(tracer.into(), tx.clone(), false); - let compressed_bytecodes = compression_result - .context("compression failed when it wasn't applied")? - .into_owned(); - - // TODO implement tracer manager which will be responsible - // for collecting result from all tracers and save it to the database - let call_traces = Arc::try_unwrap(call_tracer_result) - .map_err(|_| anyhow::anyhow!("failed extracting call traces"))? - .take() - .unwrap_or_default(); + let res = vm.inspect_transaction(tx.clone(), false); + let compressed_bytecodes = res + .compressed_bytecodes + .context("compression failed when it wasn't applied")?; Ok(BatchTransactionExecutionResult { - tx_result: Box::new(tx_result), + tx_result: res.tx_result, compressed_bytecodes, - call_traces, + call_traces: res.call_traces, }) } @@ -310,34 +420,23 @@ impl CommandReceiver { fn execute_tx_in_vm( &self, tx: &Transaction, - vm: &mut VmInstance, + vm: &mut BatchVm, ) -> anyhow::Result { - let call_tracer_result = Arc::new(OnceCell::default()); - let tracer = if self.save_call_traces { - vec![CallTracer::new(call_tracer_result.clone()).into_tracer_pointer()] - } else { - vec![] - }; - - let (bytecodes_result, mut tx_result) = - vm.inspect_transaction_with_bytecode_compression(tracer.into(), tx.clone(), true); - if let Ok(compressed_bytecodes) = bytecodes_result { - let call_traces = Arc::try_unwrap(call_tracer_result) - .map_err(|_| anyhow::anyhow!("failed extracting call traces"))? - .take() - .unwrap_or_default(); + let res = vm.inspect_transaction(tx.clone(), true); + if let Ok(compressed_bytecodes) = res.compressed_bytecodes { Ok(BatchTransactionExecutionResult { - tx_result: Box::new(tx_result), - compressed_bytecodes: compressed_bytecodes.into_owned(), - call_traces, + tx_result: res.tx_result, + compressed_bytecodes, + call_traces: res.call_traces, }) } else { // Transaction failed to publish bytecodes, we reject it so initiator doesn't pay fee. + let mut tx_result = res.tx_result; tx_result.result = ExecutionResult::Halt { reason: Halt::FailedToPublishCompressedBytecodes, }; Ok(BatchTransactionExecutionResult { - tx_result: Box::new(tx_result), + tx_result, compressed_bytecodes: vec![], call_traces: vec![], }) diff --git a/core/lib/vm_executor/src/batch/mod.rs b/core/lib/vm_executor/src/batch/mod.rs index 2407d2daba2..82882e24c59 100644 --- a/core/lib/vm_executor/src/batch/mod.rs +++ b/core/lib/vm_executor/src/batch/mod.rs @@ -2,7 +2,10 @@ //! //! This implementation is used by various ZKsync components, like the state keeper and components based on the VM runner. 
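Stepping back, the `execute_tx_in_vm_with_optional_compression` flow refactored above reduces to: snapshot, try with compression, roll back on failure, retry without compression. A condensed sketch with toy stand-ins for `BatchVm` and its result type:

```rust
struct Vm { snapshots: u32 }

struct ExecResult { compressed: Result<(), ()> }

impl Vm {
    fn make_snapshot(&mut self) { self.snapshots += 1; }
    fn rollback(&mut self) { self.snapshots -= 1; }
    fn inspect(&mut self, with_compression: bool) -> ExecResult {
        // Toy behavior: compression always fails, the uncompressed retry succeeds.
        ExecResult { compressed: if with_compression { Err(()) } else { Ok(()) } }
    }
}

fn execute_with_optional_compression(vm: &mut Vm) -> ExecResult {
    vm.make_snapshot();
    let res = vm.inspect(true);
    if res.compressed.is_ok() {
        return res; // compressed bytecodes fit; keep this execution
    }
    // Compression failed: discard the polluted state and re-execute without it.
    vm.rollback();
    vm.make_snapshot();
    vm.inspect(false)
}

fn main() {
    let mut vm = Vm { snapshots: 0 };
    let res = execute_with_optional_compression(&mut vm);
    assert!(res.compressed.is_ok()); // succeeded on the uncompressed retry
    assert_eq!(vm.snapshots, 1); // exactly one live snapshot remains
}
```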
-pub use self::{executor::MainBatchExecutor, factory::MainBatchExecutorFactory}; +pub use self::{ + executor::MainBatchExecutor, + factory::{BatchTracer, MainBatchExecutorFactory, TraceCalls}, +}; mod executor; mod factory; diff --git a/core/node/api_server/src/execution_sandbox/apply.rs b/core/lib/vm_executor/src/oneshot/block.rs similarity index 56% rename from core/node/api_server/src/execution_sandbox/apply.rs rename to core/lib/vm_executor/src/oneshot/block.rs index 0fbf8abc3dd..8ba77305ad7 100644 --- a/core/node/api_server/src/execution_sandbox/apply.rs +++ b/core/lib/vm_executor/src/oneshot/block.rs @@ -1,89 +1,236 @@ -//! This module provides primitives focusing on the VM instantiation and execution for different use cases. -//! It is rather generic and low-level, so it's not supposed to be a part of public API. -//! -//! Instead, we expect people to write wrappers in the `execution_sandbox` module with a more high-level API -//! that would, in its turn, be used by the actual API method handlers. -//! -//! This module is intended to be blocking. - -use std::time::{Duration, Instant}; - -use anyhow::Context as _; -use tokio::runtime::Handle; +use anyhow::Context; use zksync_dal::{Connection, Core, CoreDal, DalError}; use zksync_multivm::{ - interface::{L1BatchEnv, L2BlockEnv, OneshotEnv, StoredL2BlockEnv, SystemEnv}, - utils::get_eth_call_gas_limit, + interface::{L1BatchEnv, L2BlockEnv, OneshotEnv, StoredL2BlockEnv, SystemEnv, TxExecutionMode}, vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, }; -use zksync_state::PostgresStorage; -use zksync_system_constants::{ - SYSTEM_CONTEXT_ADDRESS, SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, - SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, ZKPORTER_IS_AVAILABLE, -}; use zksync_types::{ api, block::{unpack_block_info, L2BlockHasher}, fee_model::BatchFeeInput, - AccountTreeId, L1BatchNumber, L2BlockNumber, ProtocolVersionId, StorageKey, H256, U256, + AccountTreeId, L1BatchNumber, L2BlockNumber, ProtocolVersionId, StorageKey, H256, + SYSTEM_CONTEXT_ADDRESS, SYSTEM_CONTEXT_CURRENT_L2_BLOCK_INFO_POSITION, + SYSTEM_CONTEXT_CURRENT_TX_ROLLING_HASH_POSITION, ZKPORTER_IS_AVAILABLE, }; use zksync_utils::{h256_to_u256, time::seconds_since_epoch}; -use super::{ - vm_metrics::{SandboxStage, SANDBOX_METRICS}, - BlockArgs, TxSetupArgs, -}; +use super::env::OneshotEnvParameters; -pub(super) async fn prepare_env_and_storage( - mut connection: Connection<'static, Core>, - setup_args: TxSetupArgs, - block_args: &BlockArgs, -) -> anyhow::Result<(OneshotEnv, PostgresStorage<'static>)> { - let initialization_stage = SANDBOX_METRICS.sandbox[&SandboxStage::Initialization].start(); +/// Block information necessary to execute a transaction / call. Unlike [`ResolvedBlockInfo`], this information is *partially* resolved, +/// which is beneficial for some data workflows. +#[derive(Debug, Clone, Copy)] +pub struct BlockInfo { + resolved_block_number: L2BlockNumber, + l1_batch_timestamp_s: Option, +} - let resolve_started_at = Instant::now(); - let resolved_block_info = block_args - .resolve_block_info(&mut connection) - .await - .with_context(|| format!("cannot resolve block numbers for {block_args:?}"))?; - let resolve_time = resolve_started_at.elapsed(); - // We don't want to emit too many logs. - if resolve_time > Duration::from_millis(10) { - tracing::debug!("Resolved block numbers (took {resolve_time:?})"); +impl BlockInfo { + /// Fetches information for a pending block. 
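A hypothetical call site for `BlockInfo` (whose constructors follow below), assuming a `zksync_dal` connection is available; it mirrors the intended flow from a block reference to fully resolved info:

```rust
// Sketch (as if written next to this module): go from "pending block" to a
// resolved view suitable for constructing a VM environment.
use zksync_dal::{Connection, Core};

async fn inspect_pending_block(conn: &mut Connection<'_, Core>) -> anyhow::Result<()> {
    let info = BlockInfo::pending(conn).await?; // partially resolved, cheap to obtain
    assert!(info.l1_batch_timestamp().is_none()); // pending => batch timestamp unknown
    let resolved = info.resolve(conn).await?; // pulls protocol version, hashes, etc.
    let _number = resolved.state_l2_block_number();
    Ok(())
}
```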
+ pub async fn pending(connection: &mut Connection<'_, Core>) -> anyhow::Result { + let resolved_block_number = connection + .blocks_web3_dal() + .resolve_block_id(api::BlockId::Number(api::BlockNumber::Pending)) + .await + .map_err(DalError::generalize)? + .context("pending block should always be present in Postgres")?; + Ok(Self { + resolved_block_number, + l1_batch_timestamp_s: None, + }) } - if block_args.resolves_to_latest_sealed_l2_block() { - setup_args - .caches - .schedule_values_update(resolved_block_info.state_l2_block_number); + /// Fetches information for an existing block. Will error if the block is not present in Postgres. + pub async fn for_existing_block( + connection: &mut Connection<'_, Core>, + number: L2BlockNumber, + ) -> anyhow::Result { + let l1_batch = connection + .storage_web3_dal() + .resolve_l1_batch_number_of_l2_block(number) + .await + .with_context(|| format!("failed resolving L1 batch number of L2 block #{number}"))?; + let l1_batch_timestamp = connection + .blocks_web3_dal() + .get_expected_l1_batch_timestamp(&l1_batch) + .await + .map_err(DalError::generalize)? + .context("missing timestamp for non-pending block")?; + Ok(Self { + resolved_block_number: number, + l1_batch_timestamp_s: Some(l1_batch_timestamp), + }) } - let (next_block, current_block) = load_l2_block_info( - &mut connection, - block_args.is_pending_l2_block(), - &resolved_block_info, - ) - .await?; + /// Returns L2 block number. + pub fn block_number(&self) -> L2BlockNumber { + self.resolved_block_number + } - let storage = PostgresStorage::new_async( - Handle::current(), - connection, - resolved_block_info.state_l2_block_number, - false, - ) - .await - .context("cannot create `PostgresStorage`")? - .with_caches(setup_args.caches.clone()); + /// Returns L1 batch timestamp in seconds, or `None` for a pending block. + pub fn l1_batch_timestamp(&self) -> Option { + self.l1_batch_timestamp_s + } - let (system, l1_batch) = prepare_env(setup_args, &resolved_block_info, next_block); + fn is_pending_l2_block(&self) -> bool { + self.l1_batch_timestamp_s.is_none() + } - let env = OneshotEnv { - system, - l1_batch, - current_block, - }; - initialization_stage.observe(); - Ok((env, storage)) + /// Loads historical fee input for this block. If the block is not present in the DB, returns an error. + pub async fn historical_fee_input( + &self, + connection: &mut Connection<'_, Core>, + ) -> anyhow::Result { + let header = connection + .blocks_dal() + .get_l2_block_header(self.resolved_block_number) + .await? + .context("resolved L2 block is not in storage")?; + Ok(header.batch_fee_input) + } + + /// Resolves this information into [`ResolvedBlockInfo`]. + pub async fn resolve( + &self, + connection: &mut Connection<'_, Core>, + ) -> anyhow::Result { + let (state_l2_block_number, vm_l1_batch_number, l1_batch_timestamp); + + let l2_block_header = if let Some(l1_batch_timestamp_s) = self.l1_batch_timestamp_s { + vm_l1_batch_number = connection + .storage_web3_dal() + .resolve_l1_batch_number_of_l2_block(self.resolved_block_number) + .await + .context("failed resolving L1 batch for L2 block")? + .expected_l1_batch(); + l1_batch_timestamp = l1_batch_timestamp_s; + state_l2_block_number = self.resolved_block_number; + + connection + .blocks_dal() + .get_l2_block_header(self.resolved_block_number) + .await? + .context("resolved L2 block disappeared from storage")? + } else { + vm_l1_batch_number = connection + .blocks_dal() + .get_sealed_l1_batch_number() + .await? 
+ .context("no L1 batches in storage")?; + let sealed_l2_block_header = connection + .blocks_dal() + .get_last_sealed_l2_block_header() + .await? + .context("no L2 blocks in storage")?; + + state_l2_block_number = sealed_l2_block_header.number; + // Timestamp of the next L1 batch must be greater than the timestamp of the last L2 block. + l1_batch_timestamp = seconds_since_epoch().max(sealed_l2_block_header.timestamp + 1); + sealed_l2_block_header + }; + + // Blocks without version specified are considered to be of `Version9`. + // TODO: remove `unwrap_or` when protocol version ID will be assigned for each block. + let protocol_version = l2_block_header + .protocol_version + .unwrap_or(ProtocolVersionId::last_potentially_undefined()); + + Ok(ResolvedBlockInfo { + state_l2_block_number, + state_l2_block_hash: l2_block_header.hash, + vm_l1_batch_number, + l1_batch_timestamp, + protocol_version, + is_pending: self.is_pending_l2_block(), + }) + } +} + +/// Resolved [`BlockInfo`] containing additional data from VM state. +#[derive(Debug)] +pub struct ResolvedBlockInfo { + state_l2_block_number: L2BlockNumber, + state_l2_block_hash: H256, + vm_l1_batch_number: L1BatchNumber, + l1_batch_timestamp: u64, + protocol_version: ProtocolVersionId, + is_pending: bool, +} + +impl ResolvedBlockInfo { + /// L2 block number (as stored in Postgres). This number may differ from `block.number` provided to the VM. + pub fn state_l2_block_number(&self) -> L2BlockNumber { + self.state_l2_block_number + } +} + +impl OneshotEnvParameters { + pub(super) async fn to_env_inner( + &self, + connection: &mut Connection<'_, Core>, + execution_mode: TxExecutionMode, + resolved_block_info: &ResolvedBlockInfo, + fee_input: BatchFeeInput, + enforced_base_fee: Option, + ) -> anyhow::Result { + let (next_block, current_block) = load_l2_block_info( + connection, + resolved_block_info.is_pending, + resolved_block_info, + ) + .await?; + + let (system, l1_batch) = self.prepare_env( + execution_mode, + resolved_block_info, + next_block, + fee_input, + enforced_base_fee, + ); + Ok(OneshotEnv { + system, + l1_batch, + current_block, + }) + } + + fn prepare_env( + &self, + execution_mode: TxExecutionMode, + resolved_block_info: &ResolvedBlockInfo, + next_block: L2BlockEnv, + fee_input: BatchFeeInput, + enforced_base_fee: Option, + ) -> (SystemEnv, L1BatchEnv) { + let &Self { + operator_account, + validation_computational_gas_limit, + chain_id, + .. 
+ } = self; + + let system_env = SystemEnv { + zk_porter_available: ZKPORTER_IS_AVAILABLE, + version: resolved_block_info.protocol_version, + base_system_smart_contracts: self + .base_system_contracts + .get_by_protocol_version(resolved_block_info.protocol_version) + .clone(), + bootloader_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, + execution_mode, + default_validation_computational_gas_limit: validation_computational_gas_limit, + chain_id, + }; + let l1_batch_env = L1BatchEnv { + previous_batch_hash: None, + number: resolved_block_info.vm_l1_batch_number, + timestamp: resolved_block_info.l1_batch_timestamp, + fee_input, + fee_account: *operator_account.address(), + enforced_base_fee, + first_l2_block: next_block, + }; + (system_env, l1_batch_env) + } } async fn load_l2_block_info( @@ -155,48 +302,6 @@ async fn load_l2_block_info( Ok((next_block, current_block)) } -fn prepare_env( - setup_args: TxSetupArgs, - resolved_block_info: &ResolvedBlockInfo, - next_block: L2BlockEnv, -) -> (SystemEnv, L1BatchEnv) { - let TxSetupArgs { - execution_mode, - operator_account, - fee_input, - base_system_contracts, - validation_computational_gas_limit, - chain_id, - enforced_base_fee, - .. - } = setup_args; - - // In case we are executing in a past block, we'll use the historical fee data. - let fee_input = resolved_block_info - .historical_fee_input - .unwrap_or(fee_input); - let system_env = SystemEnv { - zk_porter_available: ZKPORTER_IS_AVAILABLE, - version: resolved_block_info.protocol_version, - base_system_smart_contracts: base_system_contracts - .get_by_protocol_version(resolved_block_info.protocol_version), - bootloader_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, - execution_mode, - default_validation_computational_gas_limit: validation_computational_gas_limit, - chain_id, - }; - let l1_batch_env = L1BatchEnv { - previous_batch_hash: None, - number: resolved_block_info.vm_l1_batch_number, - timestamp: resolved_block_info.l1_batch_timestamp, - fee_input, - fee_account: *operator_account.address(), - enforced_base_fee, - first_l2_block: next_block, - }; - (system_env, l1_batch_env) -} - async fn read_stored_l2_block( connection: &mut Connection<'_, Core>, l2_block_number: L2BlockNumber, @@ -226,102 +331,3 @@ async fn read_stored_l2_block( txs_rolling_hash, }) } - -#[derive(Debug)] -pub(crate) struct ResolvedBlockInfo { - state_l2_block_number: L2BlockNumber, - state_l2_block_hash: H256, - vm_l1_batch_number: L1BatchNumber, - l1_batch_timestamp: u64, - pub(crate) protocol_version: ProtocolVersionId, - historical_fee_input: Option, -} - -impl BlockArgs { - fn is_pending_l2_block(&self) -> bool { - matches!( - self.block_id, - api::BlockId::Number(api::BlockNumber::Pending) - ) - } - - pub(crate) async fn default_eth_call_gas( - &self, - connection: &mut Connection<'_, Core>, - ) -> anyhow::Result { - let protocol_version = self - .resolve_block_info(connection) - .await - .context("failed to resolve block info")? - .protocol_version; - Ok(get_eth_call_gas_limit(protocol_version.into()).into()) - } - - async fn resolve_block_info( - &self, - connection: &mut Connection<'_, Core>, - ) -> anyhow::Result { - let (state_l2_block_number, vm_l1_batch_number, l1_batch_timestamp); - - let l2_block_header = if self.is_pending_l2_block() { - vm_l1_batch_number = connection - .blocks_dal() - .get_sealed_l1_batch_number() - .await? - .context("no L1 batches in storage")?; - let sealed_l2_block_header = connection - .blocks_dal() - .get_last_sealed_l2_block_header() - .await? 
- .context("no L2 blocks in storage")?; - - state_l2_block_number = sealed_l2_block_header.number; - // Timestamp of the next L1 batch must be greater than the timestamp of the last L2 block. - l1_batch_timestamp = seconds_since_epoch().max(sealed_l2_block_header.timestamp + 1); - sealed_l2_block_header - } else { - vm_l1_batch_number = connection - .storage_web3_dal() - .resolve_l1_batch_number_of_l2_block(self.resolved_block_number) - .await - .context("failed resolving L1 batch for L2 block")? - .expected_l1_batch(); - l1_batch_timestamp = self - .l1_batch_timestamp_s - .context("L1 batch timestamp is `None` for non-pending block args")?; - state_l2_block_number = self.resolved_block_number; - - connection - .blocks_dal() - .get_l2_block_header(self.resolved_block_number) - .await? - .context("resolved L2 block disappeared from storage")? - }; - - let historical_fee_input = if !self.resolves_to_latest_sealed_l2_block() { - let l2_block_header = connection - .blocks_dal() - .get_l2_block_header(self.resolved_block_number) - .await? - .context("resolved L2 block is not in storage")?; - Some(l2_block_header.batch_fee_input) - } else { - None - }; - - // Blocks without version specified are considered to be of `Version9`. - // TODO: remove `unwrap_or` when protocol version ID will be assigned for each block. - let protocol_version = l2_block_header - .protocol_version - .unwrap_or(ProtocolVersionId::last_potentially_undefined()); - - Ok(ResolvedBlockInfo { - state_l2_block_number, - state_l2_block_hash: l2_block_header.hash, - vm_l1_batch_number, - l1_batch_timestamp, - protocol_version, - historical_fee_input, - }) - } -} diff --git a/core/lib/vm_executor/src/oneshot/contracts.rs b/core/lib/vm_executor/src/oneshot/contracts.rs new file mode 100644 index 00000000000..3b3a65fe30b --- /dev/null +++ b/core/lib/vm_executor/src/oneshot/contracts.rs @@ -0,0 +1,91 @@ +use zksync_contracts::BaseSystemContracts; +use zksync_types::ProtocolVersionId; + +/// System contracts (bootloader and default account abstraction) for all supported VM versions. +#[derive(Debug, Clone)] +pub(super) struct MultiVMBaseSystemContracts { + /// Contracts to be used for pre-virtual-blocks protocol versions. + pre_virtual_blocks: BaseSystemContracts, + /// Contracts to be used for post-virtual-blocks protocol versions. + post_virtual_blocks: BaseSystemContracts, + /// Contracts to be used for protocol versions after virtual block upgrade fix. + post_virtual_blocks_finish_upgrade_fix: BaseSystemContracts, + /// Contracts to be used for post-boojum protocol versions. + post_boojum: BaseSystemContracts, + /// Contracts to be used after the allow-list removal upgrade + post_allowlist_removal: BaseSystemContracts, + /// Contracts to be used after the 1.4.1 upgrade + post_1_4_1: BaseSystemContracts, + /// Contracts to be used after the 1.4.2 upgrade + post_1_4_2: BaseSystemContracts, + /// Contracts to be used during the `v23` upgrade. This upgrade was done on an internal staging environment only. + vm_1_5_0_small_memory: BaseSystemContracts, + /// Contracts to be used after the 1.5.0 upgrade + vm_1_5_0_increased_memory: BaseSystemContracts, +} + +impl MultiVMBaseSystemContracts { + /// Gets contracts for a certain version. 
+ pub fn get_by_protocol_version(&self, version: ProtocolVersionId) -> &BaseSystemContracts { + match version { + ProtocolVersionId::Version0 + | ProtocolVersionId::Version1 + | ProtocolVersionId::Version2 + | ProtocolVersionId::Version3 + | ProtocolVersionId::Version4 + | ProtocolVersionId::Version5 + | ProtocolVersionId::Version6 + | ProtocolVersionId::Version7 + | ProtocolVersionId::Version8 + | ProtocolVersionId::Version9 + | ProtocolVersionId::Version10 + | ProtocolVersionId::Version11 + | ProtocolVersionId::Version12 => &self.pre_virtual_blocks, + ProtocolVersionId::Version13 => &self.post_virtual_blocks, + ProtocolVersionId::Version14 + | ProtocolVersionId::Version15 + | ProtocolVersionId::Version16 + | ProtocolVersionId::Version17 => &self.post_virtual_blocks_finish_upgrade_fix, + ProtocolVersionId::Version18 => &self.post_boojum, + ProtocolVersionId::Version19 => &self.post_allowlist_removal, + ProtocolVersionId::Version20 => &self.post_1_4_1, + ProtocolVersionId::Version21 | ProtocolVersionId::Version22 => &self.post_1_4_2, + ProtocolVersionId::Version23 => &self.vm_1_5_0_small_memory, + ProtocolVersionId::Version24 | ProtocolVersionId::Version25 => { + &self.vm_1_5_0_increased_memory + } + } + } + + pub(super) fn load_estimate_gas_blocking() -> Self { + Self { + pre_virtual_blocks: BaseSystemContracts::estimate_gas_pre_virtual_blocks(), + post_virtual_blocks: BaseSystemContracts::estimate_gas_post_virtual_blocks(), + post_virtual_blocks_finish_upgrade_fix: + BaseSystemContracts::estimate_gas_post_virtual_blocks_finish_upgrade_fix(), + post_boojum: BaseSystemContracts::estimate_gas_post_boojum(), + post_allowlist_removal: BaseSystemContracts::estimate_gas_post_allowlist_removal(), + post_1_4_1: BaseSystemContracts::estimate_gas_post_1_4_1(), + post_1_4_2: BaseSystemContracts::estimate_gas_post_1_4_2(), + vm_1_5_0_small_memory: BaseSystemContracts::estimate_gas_1_5_0_small_memory(), + vm_1_5_0_increased_memory: + BaseSystemContracts::estimate_gas_post_1_5_0_increased_memory(), + } + } + + pub(super) fn load_eth_call_blocking() -> Self { + Self { + pre_virtual_blocks: BaseSystemContracts::playground_pre_virtual_blocks(), + post_virtual_blocks: BaseSystemContracts::playground_post_virtual_blocks(), + post_virtual_blocks_finish_upgrade_fix: + BaseSystemContracts::playground_post_virtual_blocks_finish_upgrade_fix(), + post_boojum: BaseSystemContracts::playground_post_boojum(), + post_allowlist_removal: BaseSystemContracts::playground_post_allowlist_removal(), + post_1_4_1: BaseSystemContracts::playground_post_1_4_1(), + post_1_4_2: BaseSystemContracts::playground_post_1_4_2(), + vm_1_5_0_small_memory: BaseSystemContracts::playground_1_5_0_small_memory(), + vm_1_5_0_increased_memory: BaseSystemContracts::playground_post_1_5_0_increased_memory( + ), + } + } +} diff --git a/core/lib/vm_executor/src/oneshot/env.rs b/core/lib/vm_executor/src/oneshot/env.rs new file mode 100644 index 00000000000..51154d561ec --- /dev/null +++ b/core/lib/vm_executor/src/oneshot/env.rs @@ -0,0 +1,138 @@ +use std::marker::PhantomData; + +use anyhow::Context; +use zksync_dal::{Connection, Core}; +use zksync_multivm::interface::{OneshotEnv, TxExecutionMode}; +use zksync_types::{fee_model::BatchFeeInput, l2::L2Tx, AccountTreeId, L2ChainId}; + +use crate::oneshot::{contracts::MultiVMBaseSystemContracts, ResolvedBlockInfo}; + +/// Marker for [`OneshotEnvParameters`] used for gas estimation. 
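The marker types introduced below (`EstimateGas`, `CallOrExecute`) rely on the phantom-type pattern: a zero-sized type parameter statically separates two configurations of one struct at no runtime cost. A minimal self-contained version with illustrative names:

```rust
use std::marker::PhantomData;

struct ForEstimation;
struct ForCalls;

struct Params<T> {
    gas_limit: u32,
    _ty: PhantomData<T>,
}

// Constructors (and any other methods) can be exposed on one configuration only.
impl Params<ForEstimation> {
    fn new_for_estimation() -> Self {
        Params { gas_limit: u32::MAX, _ty: PhantomData }
    }
}

impl Params<ForCalls> {
    fn new_for_calls(gas_limit: u32) -> Self {
        Params { gas_limit, _ty: PhantomData }
    }
}

fn main() {
    let p = Params::<ForEstimation>::new_for_estimation();
    assert_eq!(p.gas_limit, u32::MAX);
    let q = Params::<ForCalls>::new_for_calls(1_000);
    assert_eq!(q.gas_limit, 1_000);
}
```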
+#[derive(Debug)]
+pub struct EstimateGas(());
+
+/// Marker for [`OneshotEnvParameters`] used for calls and/or transaction execution.
+#[derive(Debug)]
+pub struct CallOrExecute(());
+
+/// Oneshot environment parameters that are expected to be constant or rarely change during the program lifetime.
+/// These parameters can be used to create [a full environment](OneshotEnv) for transaction / call execution.
+///
+/// Notably, these parameters include base system contracts (bootloader and default account abstraction) for all supported
+/// VM versions.
+#[derive(Debug)]
+pub struct OneshotEnvParameters<T> {
+    pub(super) chain_id: L2ChainId,
+    pub(super) base_system_contracts: MultiVMBaseSystemContracts,
+    pub(super) operator_account: AccountTreeId,
+    pub(super) validation_computational_gas_limit: u32,
+    _ty: PhantomData<T>,
+}
+
+impl<T> OneshotEnvParameters<T> {
+    /// Returns gas limit for account validation of transactions.
+    pub fn validation_computational_gas_limit(&self) -> u32 {
+        self.validation_computational_gas_limit
+    }
+}
+
+impl OneshotEnvParameters<EstimateGas> {
+    /// Creates env parameters for gas estimation.
+    ///
+    /// System contracts (mainly, bootloader) for these params are tuned to provide accurate
+    /// execution metrics.
+    pub async fn for_gas_estimation(
+        chain_id: L2ChainId,
+        operator_account: AccountTreeId,
+    ) -> anyhow::Result<Self> {
+        Ok(Self {
+            chain_id,
+            base_system_contracts: tokio::task::spawn_blocking(
+                MultiVMBaseSystemContracts::load_estimate_gas_blocking,
+            )
+            .await
+            .context("failed loading system contracts for gas estimation")?,
+            operator_account,
+            validation_computational_gas_limit: u32::MAX,
+            _ty: PhantomData,
+        })
+    }
+
+    /// Prepares environment for gas estimation.
+    pub async fn to_env(
+        &self,
+        connection: &mut Connection<'_, Core>,
+        resolved_block_info: &ResolvedBlockInfo,
+        fee_input: BatchFeeInput,
+        base_fee: u64,
+    ) -> anyhow::Result<OneshotEnv> {
+        self.to_env_inner(
+            connection,
+            TxExecutionMode::EstimateFee,
+            resolved_block_info,
+            fee_input,
+            Some(base_fee),
+        )
+        .await
+    }
+}
+
+impl OneshotEnvParameters<CallOrExecute> {
+    /// Creates env parameters for transaction / call execution.
+    ///
+    /// System contracts (mainly, bootloader) for these params are tuned to provide a better UX
+    /// (e.g., revert messages).
+    pub async fn for_execution(
+        chain_id: L2ChainId,
+        operator_account: AccountTreeId,
+        validation_computational_gas_limit: u32,
+    ) -> anyhow::Result<Self> {
+        Ok(Self {
+            chain_id,
+            base_system_contracts: tokio::task::spawn_blocking(
+                MultiVMBaseSystemContracts::load_eth_call_blocking,
+            )
+            .await
+            .context("failed loading system contracts for calls")?,
+            operator_account,
+            validation_computational_gas_limit,
+            _ty: PhantomData,
+        })
+    }
+
+    /// Prepares environment for a call.
+    pub async fn to_call_env(
+        &self,
+        connection: &mut Connection<'_, Core>,
+        resolved_block_info: &ResolvedBlockInfo,
+        fee_input: BatchFeeInput,
+        enforced_base_fee: Option<u64>,
+    ) -> anyhow::Result<OneshotEnv> {
+        self.to_env_inner(
+            connection,
+            TxExecutionMode::EthCall,
+            resolved_block_info,
+            fee_input,
+            enforced_base_fee,
+        )
+        .await
+    }
+
+    /// Prepares environment for executing a provided transaction.
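An illustrative call site for these parameters before the execution variant below; the connection, resolved block info, and fee input are assumed to come from the caller:

```rust
// Sketch (as if written in a caller module): prepare a call environment from
// long-lived parameters plus per-request block and fee data.
use zksync_dal::{Connection, Core};
use zksync_multivm::interface::OneshotEnv;
use zksync_types::fee_model::BatchFeeInput;

async fn env_for_call(
    params: &OneshotEnvParameters<CallOrExecute>,
    conn: &mut Connection<'_, Core>,
    block: &ResolvedBlockInfo,
    fee_input: BatchFeeInput,
) -> anyhow::Result<OneshotEnv> {
    // `None` means no enforced base fee; it is derived from `fee_input` instead.
    params.to_call_env(conn, block, fee_input, None).await
}
```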
+ pub async fn to_execute_env( + &self, + connection: &mut Connection<'_, Core>, + resolved_block_info: &ResolvedBlockInfo, + fee_input: BatchFeeInput, + tx: &L2Tx, + ) -> anyhow::Result { + self.to_env_inner( + connection, + TxExecutionMode::VerifyExecute, + resolved_block_info, + fee_input, + Some(tx.common_data.fee.max_fee_per_gas.as_u64()), + ) + .await + } +} diff --git a/core/lib/vm_executor/src/oneshot/mod.rs b/core/lib/vm_executor/src/oneshot/mod.rs index 1838381d2a0..cb75f396b5d 100644 --- a/core/lib/vm_executor/src/oneshot/mod.rs +++ b/core/lib/vm_executor/src/oneshot/mod.rs @@ -1,4 +1,10 @@ //! Oneshot VM executor. +//! +//! # Overview +//! +//! The root type of this module is [`MainOneshotExecutor`], a "default" [`OneshotExecutor`] implementation. +//! In addition to it, the module provides [`OneshotEnvParameters`] and [`BlockInfo`] / [`ResolvedBlockInfo`], +//! which can be used to prepare environment for `MainOneshotExecutor` (i.e., a [`OneshotEnv`] instance). use std::{ sync::Arc, @@ -20,7 +26,7 @@ use zksync_multivm::{ utils::adjust_pubdata_price_for_tx, vm_latest::HistoryDisabled, zk_evm_latest::ethereum_types::U256, - MultiVMTracer, VmInstance, + LegacyVmInstance, MultiVMTracer, }; use zksync_types::{ block::pack_block_info, @@ -32,8 +38,15 @@ use zksync_types::{ }; use zksync_utils::{h256_to_u256, u256_to_h256}; -pub use self::mock::MockOneshotExecutor; +pub use self::{ + block::{BlockInfo, ResolvedBlockInfo}, + env::{CallOrExecute, EstimateGas, OneshotEnvParameters}, + mock::MockOneshotExecutor, +}; +mod block; +mod contracts; +mod env; mod metrics; mod mock; @@ -98,7 +111,7 @@ where let mut result = executor.apply(|vm, transaction| { let (compression_result, tx_result) = vm .inspect_transaction_with_bytecode_compression( - tracers.into(), + &mut tracers.into(), transaction, true, ); @@ -152,7 +165,7 @@ where ); let exec_result = executor.apply(|vm, transaction| { vm.push_transaction(transaction); - vm.inspect(tracers.into(), VmExecutionMode::OneTx) + vm.inspect(&mut tracers.into(), VmExecutionMode::OneTx) }); let validation_result = Arc::make_mut(&mut validation_result) .take() @@ -171,7 +184,7 @@ where #[derive(Debug)] struct VmSandbox { - vm: Box>, + vm: Box>, storage_view: StoragePtr>, transaction: Transaction, execution_latency_histogram: Option<&'static vise::Histogram>, @@ -199,7 +212,7 @@ impl VmSandbox { }; let storage_view = storage_view.to_rc_ptr(); - let vm = Box::new(VmInstance::new_with_specific_version( + let vm = Box::new(LegacyVmInstance::new_with_specific_version( env.l1_batch, env.system, storage_view.clone(), @@ -264,7 +277,7 @@ impl VmSandbox { pub(super) fn apply(mut self, apply_fn: F) -> T where - F: FnOnce(&mut VmInstance, Transaction) -> T, + F: FnOnce(&mut LegacyVmInstance, Transaction) -> T, { let tx_id = format!( "{:?}-{}", diff --git a/core/lib/vm_executor/src/shared.rs b/core/lib/vm_executor/src/shared.rs index 8ac4dce2e01..382fd50e23a 100644 --- a/core/lib/vm_executor/src/shared.rs +++ b/core/lib/vm_executor/src/shared.rs @@ -5,6 +5,9 @@ use std::time::Duration; use vise::{Buckets, EncodeLabelSet, EncodeLabelValue, Family, Histogram, Metrics}; use zksync_multivm::interface::storage::StorageViewStats; +/// Marker for sealed traits. Intentionally not exported from the crate. 
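For reference, the full sealed-trait idiom that this `Sealed` marker enables (it is what keeps `BatchTracer` earlier in this diff closed to external implementations); a minimal self-contained version:

```rust
// The public trait requires a supertrait that lives in a private module, so only
// this crate can add implementations.
mod private {
    pub trait Sealed {}
}

pub trait Tracer: private::Sealed {
    const TRACE_CALLS: bool;
}

pub struct NoopTracer;

impl private::Sealed for NoopTracer {}

impl Tracer for NoopTracer {
    const TRACE_CALLS: bool = false;
}
// Downstream crates cannot implement `Tracer`: `private::Sealed` is not
// nameable outside this crate, so the required supertrait bound is unsatisfiable.
```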
+pub trait Sealed {}
+
 #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)]
 #[metrics(label = "interaction", rename_all = "snake_case")]
 pub(crate) enum InteractionType {
diff --git a/core/lib/vm_executor/src/storage.rs b/core/lib/vm_executor/src/storage.rs
index e39748786a3..a2369820a5b 100644
--- a/core/lib/vm_executor/src/storage.rs
+++ b/core/lib/vm_executor/src/storage.rs
@@ -89,7 +89,15 @@ pub struct L1BatchParamsProvider {
 }
 
 impl L1BatchParamsProvider {
-    pub fn new() -> Self {
+    /// Creates a new provider.
+    pub async fn new(storage: &mut Connection<'_, Core>) -> anyhow::Result<Self> {
+        let mut this = Self::uninitialized();
+        this.initialize(storage).await?;
+        Ok(this)
+    }
+
+    /// Creates an uninitialized provider. Before use, it must be [`initialize`](Self::initialize())d.
+    pub fn uninitialized() -> Self {
         Self { snapshot: None }
     }
 
@@ -323,4 +331,34 @@ impl L1BatchParamsProvider {
             chain_id,
         ))
     }
+
+    /// Combines [`Self::load_first_l2_block_in_batch()`] and [`Self::load_l1_batch_params()`]. Returns `Ok(None)`
+    /// iff the requested batch doesn't have any persisted blocks.
+    ///
+    /// Prefer using this method unless you need to manipulate / inspect the first block in the batch.
+    pub async fn load_l1_batch_env(
+        &self,
+        storage: &mut Connection<'_, Core>,
+        number: L1BatchNumber,
+        validation_computational_gas_limit: u32,
+        chain_id: L2ChainId,
+    ) -> anyhow::Result<Option<(SystemEnv, L1BatchEnv)>> {
+        let first_l2_block = self
+            .load_first_l2_block_in_batch(storage, number)
+            .await
+            .with_context(|| format!("failed loading first L2 block for L1 batch #{number}"))?;
+        let Some(first_l2_block) = first_l2_block else {
+            return Ok(None);
+        };
+
+        self.load_l1_batch_params(
+            storage,
+            &first_l2_block,
+            validation_computational_gas_limit,
+            chain_id,
+        )
+        .await
+        .with_context(|| format!("failed loading params for L1 batch #{number}"))
+        .map(Some)
+    }
 }
diff --git a/core/lib/vm_interface/Cargo.toml b/core/lib/vm_interface/Cargo.toml
index 694576dca3b..8bff19ddc47 100644
--- a/core/lib/vm_interface/Cargo.toml
+++ b/core/lib/vm_interface/Cargo.toml
@@ -18,6 +18,7 @@ zksync_types.workspace = true
 anyhow.workspace = true
 async-trait.workspace = true
 hex.workspace = true
+pretty_assertions.workspace = true
 serde.workspace = true
 thiserror.workspace = true
 tracing.workspace = true
diff --git a/core/lib/vm_interface/src/lib.rs b/core/lib/vm_interface/src/lib.rs
index 2b30f82e0ce..645e3e7c856 100644
--- a/core/lib/vm_interface/src/lib.rs
+++ b/core/lib/vm_interface/src/lib.rs
@@ -37,10 +37,11 @@ pub use crate::{
         },
         tracer,
     },
-    vm::{VmFactory, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled},
+    vm::{VmFactory, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, VmTrackingContracts},
 };
 
 pub mod executor;
 pub mod storage;
 mod types;
+pub mod utils;
 mod vm;
diff --git a/core/lib/vm_interface/src/storage/snapshot.rs b/core/lib/vm_interface/src/storage/snapshot.rs
index a0175ff478a..78b57a31f13 100644
--- a/core/lib/vm_interface/src/storage/snapshot.rs
+++ b/core/lib/vm_interface/src/storage/snapshot.rs
@@ -12,7 +12,7 @@ use super::ReadStorage;
 /// In contrast, `StorageSnapshot` cannot be modified once created and is intended to represent a complete or almost complete snapshot
 /// for a particular VM execution. It can serve as a preloaded cache for a certain [`ReadStorage`] implementation
 /// that significantly reduces the number of storage accesses.
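The snapshot semantics described above become load-bearing in the next hunk, where `ReadStorage` is implemented directly for `StorageSnapshot` and unknown slots panic. A toy model of the "complete snapshot" contract:

```rust
use std::collections::HashMap;

struct Snapshot {
    // `None` = the slot is covered by the snapshot and is empty (an initial write target).
    storage: HashMap<u64, Option<u32>>,
}

impl Snapshot {
    fn read_value(&self, key: u64) -> u32 {
        let entry = self
            .storage
            .get(&key)
            .unwrap_or_else(|| panic!("unknown storage slot: {key}"));
        entry.unwrap_or_default()
    }
}

fn main() {
    let snapshot = Snapshot { storage: HashMap::from([(1, Some(42)), (2, None)]) };
    assert_eq!(snapshot.read_value(1), 42);
    assert_eq!(snapshot.read_value(2), 0); // known-empty slot reads as zero
    // snapshot.read_value(3) would panic: the slot is not covered by the snapshot.
}
```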
-#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] pub struct StorageSnapshot { // `Option` encompasses entire map value for more efficient serialization storage: HashMap>, @@ -60,6 +60,36 @@ impl StorageSnapshot { } } +/// When used as a storage, a snapshot is assumed to be *complete*; [`ReadStorage`] methods will panic when called +/// with storage slots not present in the snapshot. +impl ReadStorage for StorageSnapshot { + fn read_value(&mut self, key: &StorageKey) -> StorageValue { + let entry = self + .storage + .get(&key.hashed_key()) + .unwrap_or_else(|| panic!("attempted to read from unknown storage slot: {key:?}")); + entry.unwrap_or_default().0 + } + + fn is_write_initial(&mut self, key: &StorageKey) -> bool { + let entry = self.storage.get(&key.hashed_key()).unwrap_or_else(|| { + panic!("attempted to check initialness for unknown storage slot: {key:?}") + }); + entry.is_none() + } + + fn load_factory_dep(&mut self, hash: H256) -> Option> { + self.factory_deps.get(&hash).map(|bytes| bytes.0.clone()) + } + + fn get_enumeration_index(&mut self, key: &StorageKey) -> Option { + let entry = self.storage.get(&key.hashed_key()).unwrap_or_else(|| { + panic!("attempted to get enum index for unknown storage slot: {key:?}") + }); + entry.map(|(_, idx)| idx) + } +} + /// [`StorageSnapshot`] wrapper implementing [`ReadStorage`] trait. Created using [`with_fallback()`](StorageSnapshot::with_fallback()). /// /// # Why fallback? diff --git a/core/lib/vm_interface/src/types/outputs/execution_result.rs b/core/lib/vm_interface/src/types/outputs/execution_result.rs index 6f9c02f0b58..3e53aad85f1 100644 --- a/core/lib/vm_interface/src/types/outputs/execution_result.rs +++ b/core/lib/vm_interface/src/types/outputs/execution_result.rs @@ -300,16 +300,16 @@ impl Call { /// Mid-level transaction execution output returned by a [batch executor](crate::executor::BatchExecutor). #[derive(Debug, Clone)] -pub struct BatchTransactionExecutionResult { +pub struct BatchTransactionExecutionResult> { /// VM result. pub tx_result: Box, /// Compressed bytecodes used by the transaction. - pub compressed_bytecodes: Vec, + pub compressed_bytecodes: C, /// Call traces (if requested; otherwise, empty). pub call_traces: Vec, } -impl BatchTransactionExecutionResult { +impl BatchTransactionExecutionResult { pub fn was_halted(&self) -> bool { matches!(self.tx_result.result, ExecutionResult::Halt { .. }) } diff --git a/core/lib/vm_interface/src/types/outputs/statistic.rs b/core/lib/vm_interface/src/types/outputs/statistic.rs index 095547076d4..f8e3851c832 100644 --- a/core/lib/vm_interface/src/types/outputs/statistic.rs +++ b/core/lib/vm_interface/src/types/outputs/statistic.rs @@ -109,7 +109,8 @@ pub struct VmExecutionStatistics { pub circuit_statistic: CircuitStatistic, } -/// Oracle metrics of the VM. +/// Oracle metrics reported by legacy VMs. 
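The `BatchTransactionExecutionResult` change above uses a defaulted type parameter so existing call sites stay source-compatible while internal code can hold a not-yet-validated compression result. A minimal sketch of the technique (toy types for illustration):

```rust
struct Compressed;

struct ExecResult<C = Vec<Compressed>> {
    compressed_bytecodes: C,
}

// Methods that don't touch `compressed_bytecodes` can stay generic over `C`...
impl<C> ExecResult<C> {
    fn was_halted(&self) -> bool {
        false // placeholder; the real check inspects the VM result
    }
}

fn main() {
    // ...so both the defaulted and a custom instantiation work unchanged.
    let owned: ExecResult = ExecResult { compressed_bytecodes: vec![Compressed] };
    let fallible: ExecResult<Result<Vec<Compressed>, ()>> =
        ExecResult { compressed_bytecodes: Ok(vec![]) };
    assert!(!owned.was_halted() && !fallible.was_halted());
    assert_eq!(owned.compressed_bytecodes.len(), 1);
}
```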
+#[derive(Debug, Default)] pub struct VmMemoryMetrics { pub event_sink_inner: usize, pub event_sink_history: usize, diff --git a/core/lib/vm_interface/src/utils/dump.rs b/core/lib/vm_interface/src/utils/dump.rs new file mode 100644 index 00000000000..288c6445494 --- /dev/null +++ b/core/lib/vm_interface/src/utils/dump.rs @@ -0,0 +1,248 @@ +use std::collections::HashMap; + +use serde::{Deserialize, Serialize}; +use zksync_types::{block::L2BlockExecutionData, L1BatchNumber, L2BlockNumber, Transaction, H256}; + +use crate::{ + storage::{ReadStorage, StoragePtr, StorageSnapshot, StorageView}, + BytecodeCompressionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, SystemEnv, VmExecutionMode, + VmExecutionResultAndLogs, VmFactory, VmInterface, VmInterfaceExt, VmInterfaceHistoryEnabled, + VmTrackingContracts, +}; + +fn create_storage_snapshot( + storage: &StoragePtr>, + used_contract_hashes: Vec, +) -> StorageSnapshot { + let mut storage = storage.borrow_mut(); + let storage_cache = storage.cache(); + let mut storage_slots: HashMap<_, _> = storage_cache + .read_storage_keys() + .into_iter() + .map(|(key, value)| { + let enum_index = storage.get_enumeration_index(&key); + let value_and_index = enum_index.map(|idx| (value, idx)); + (key.hashed_key(), value_and_index) + }) + .collect(); + + // Normally, all writes are internally read in order to calculate their gas costs, so the code below + // is defensive programming. + for (key, _) in storage_cache.initial_writes() { + let hashed_key = key.hashed_key(); + if storage_slots.contains_key(&hashed_key) { + continue; + } + + let enum_index = storage.get_enumeration_index(&key); + let value_and_index = enum_index.map(|idx| (storage.read_value(&key), idx)); + storage_slots.insert(hashed_key, value_and_index); + } + + let factory_deps = used_contract_hashes + .into_iter() + .filter_map(|hash| Some((hash, storage.load_factory_dep(hash)?))) + .collect(); + + StorageSnapshot::new(storage_slots, factory_deps) +} + +/// VM dump allowing to re-run the VM on the same inputs. Can be (de)serialized. +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +pub struct VmDump { + pub l1_batch_env: L1BatchEnv, + pub system_env: SystemEnv, + pub l2_blocks: Vec, + pub storage: StorageSnapshot, +} + +impl VmDump { + pub fn l1_batch_number(&self) -> L1BatchNumber { + self.l1_batch_env.number + } + + /// Plays back this dump on the specified VM. + pub fn play_back(self) -> Vm + where + Vm: VmFactory>, + { + self.play_back_custom(Vm::new) + } + + /// Plays back this dump on a VM created using the provided closure. + #[doc(hidden)] // too low-level + pub fn play_back_custom( + self, + create_vm: impl FnOnce(L1BatchEnv, SystemEnv, StoragePtr>) -> Vm, + ) -> Vm { + let storage = StorageView::new(self.storage).to_rc_ptr(); + let mut vm = create_vm(self.l1_batch_env, self.system_env, storage); + + for (i, l2_block) in self.l2_blocks.into_iter().enumerate() { + if i > 0 { + // First block is already set. 
+ vm.start_new_l2_block(L2BlockEnv { + number: l2_block.number.0, + timestamp: l2_block.timestamp, + prev_block_hash: l2_block.prev_block_hash, + max_virtual_blocks_to_create: l2_block.virtual_blocks, + }); + } + + for tx in l2_block.txs { + let tx_hash = tx.hash(); + let (compression_result, _) = + vm.execute_transaction_with_bytecode_compression(tx, true); + if let Err(err) = compression_result { + panic!("Failed compressing bytecodes for transaction {tx_hash:?}: {err}"); + } + } + } + vm.finish_batch(); + vm + } +} + +#[derive(Debug, Clone, Copy)] +struct L2BlocksSnapshot { + block_count: usize, + tx_count_in_last_block: usize, +} + +/// VM wrapper that can create [`VmDump`]s during execution. +#[derive(Debug)] +pub(super) struct DumpingVm { + storage: StoragePtr>, + inner: Vm, + l1_batch_env: L1BatchEnv, + system_env: SystemEnv, + l2_blocks: Vec, + l2_blocks_snapshot: Option, +} + +impl DumpingVm { + fn last_block_mut(&mut self) -> &mut L2BlockExecutionData { + self.l2_blocks.last_mut().unwrap() + } + + fn record_transaction(&mut self, tx: Transaction) { + self.last_block_mut().txs.push(tx); + } + + pub fn dump_state(&self) -> VmDump { + VmDump { + l1_batch_env: self.l1_batch_env.clone(), + system_env: self.system_env.clone(), + l2_blocks: self.l2_blocks.clone(), + storage: create_storage_snapshot(&self.storage, self.inner.used_contract_hashes()), + } + } +} + +impl VmInterface for DumpingVm { + type TracerDispatcher = Vm::TracerDispatcher; + + fn push_transaction(&mut self, tx: Transaction) { + self.record_transaction(tx.clone()); + self.inner.push_transaction(tx); + } + + fn inspect( + &mut self, + dispatcher: &mut Self::TracerDispatcher, + execution_mode: VmExecutionMode, + ) -> VmExecutionResultAndLogs { + self.inner.inspect(dispatcher, execution_mode) + } + + fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) { + self.l2_blocks.push(L2BlockExecutionData { + number: L2BlockNumber(l2_block_env.number), + timestamp: l2_block_env.timestamp, + prev_block_hash: l2_block_env.prev_block_hash, + virtual_blocks: l2_block_env.max_virtual_blocks_to_create, + txs: vec![], + }); + self.inner.start_new_l2_block(l2_block_env); + } + + fn inspect_transaction_with_bytecode_compression( + &mut self, + tracer: &mut Self::TracerDispatcher, + tx: Transaction, + with_compression: bool, + ) -> (BytecodeCompressionResult, VmExecutionResultAndLogs) { + self.record_transaction(tx.clone()); + self.inner + .inspect_transaction_with_bytecode_compression(tracer, tx, with_compression) + } + + fn finish_batch(&mut self) -> FinishedL1Batch { + self.inner.finish_batch() + } +} + +impl VmInterfaceHistoryEnabled for DumpingVm +where + S: ReadStorage, + Vm: VmInterfaceHistoryEnabled + VmTrackingContracts, +{ + fn make_snapshot(&mut self) { + self.l2_blocks_snapshot = Some(L2BlocksSnapshot { + block_count: self.l2_blocks.len(), + tx_count_in_last_block: self.last_block_mut().txs.len(), + }); + self.inner.make_snapshot(); + } + + fn rollback_to_the_latest_snapshot(&mut self) { + self.inner.rollback_to_the_latest_snapshot(); + let snapshot = self + .l2_blocks_snapshot + .take() + .expect("rollback w/o snapshot"); + self.l2_blocks.truncate(snapshot.block_count); + assert_eq!( + self.l2_blocks.len(), + snapshot.block_count, + "L2 blocks were removed after creating a snapshot" + ); + self.last_block_mut() + .txs + .truncate(snapshot.tx_count_in_last_block); + } + + fn pop_snapshot_no_rollback(&mut self) { + self.inner.pop_snapshot_no_rollback(); + self.l2_blocks_snapshot = None; + } +} + +impl VmFactory> for 
DumpingVm +where + S: ReadStorage, + Vm: VmFactory> + VmTrackingContracts, +{ + fn new( + l1_batch_env: L1BatchEnv, + system_env: SystemEnv, + storage: StoragePtr>, + ) -> Self { + let inner = Vm::new(l1_batch_env.clone(), system_env.clone(), storage.clone()); + let first_block = L2BlockExecutionData { + number: L2BlockNumber(l1_batch_env.first_l2_block.number), + timestamp: l1_batch_env.first_l2_block.timestamp, + prev_block_hash: l1_batch_env.first_l2_block.prev_block_hash, + virtual_blocks: l1_batch_env.first_l2_block.max_virtual_blocks_to_create, + txs: vec![], + }; + Self { + l1_batch_env, + system_env, + l2_blocks: vec![first_block], + l2_blocks_snapshot: None, + storage, + inner, + } + } +} diff --git a/core/lib/vm_interface/src/utils/mod.rs b/core/lib/vm_interface/src/utils/mod.rs new file mode 100644 index 00000000000..80a51c7b144 --- /dev/null +++ b/core/lib/vm_interface/src/utils/mod.rs @@ -0,0 +1,9 @@ +//! Miscellaneous VM utils. + +pub use self::{ + dump::VmDump, + shadow::{DivergenceErrors, DivergenceHandler, ShadowVm}, +}; + +mod dump; +mod shadow; diff --git a/core/lib/vm_interface/src/utils/shadow.rs b/core/lib/vm_interface/src/utils/shadow.rs new file mode 100644 index 00000000000..92eb65a810f --- /dev/null +++ b/core/lib/vm_interface/src/utils/shadow.rs @@ -0,0 +1,491 @@ +use std::{ + cell::RefCell, + collections::{BTreeMap, BTreeSet}, + fmt, + sync::Arc, +}; + +use zksync_types::{StorageKey, StorageLog, StorageLogWithPreviousValue, Transaction}; + +use super::dump::{DumpingVm, VmDump}; +use crate::{ + storage::{ReadStorage, StoragePtr, StorageView}, + BytecodeCompressionResult, CurrentExecutionState, FinishedL1Batch, L1BatchEnv, L2BlockEnv, + SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmFactory, VmInterface, + VmInterfaceHistoryEnabled, VmTrackingContracts, +}; + +/// Handler for VM divergences. +#[derive(Clone)] +pub struct DivergenceHandler(Arc); + +impl fmt::Debug for DivergenceHandler { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + formatter + .debug_tuple("DivergenceHandler") + .field(&"_") + .finish() + } +} + +/// Default handler that panics. +impl Default for DivergenceHandler { + fn default() -> Self { + Self(Arc::new(|err, _| { + // There's no easy way to output the VM dump; it's too large to be logged. + panic!("{err}"); + })) + } +} + +impl DivergenceHandler { + /// Creates a new handler from the provided closure. + pub fn new(f: impl Fn(DivergenceErrors, VmDump) + Send + Sync + 'static) -> Self { + Self(Arc::new(f)) + } + + fn handle(&self, err: DivergenceErrors, dump: VmDump) { + self.0(err, dump); + } +} + +#[derive(Debug)] +struct VmWithReporting { + vm: Shadow, + divergence_handler: DivergenceHandler, +} + +impl VmWithReporting { + fn report(self, err: DivergenceErrors, dump: VmDump) { + tracing::error!("{err}"); + self.divergence_handler.handle(err, dump); + tracing::warn!( + "New VM is dropped; following VM actions will be executed only on the main VM" + ); + } +} + +/// Shadowed VM that executes 2 VMs for each operation and compares their outputs. +/// +/// If a divergence is detected, the VM state is dumped using [a pluggable handler](Self::set_dump_handler()), +/// after which the VM drops the shadowed VM (since it's assumed that its state can contain arbitrary garbage at this point). 
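+///
+/// # Examples
+///
+/// An illustrative sketch of wiring up a handler that persists dumps of diverging batches.
+/// `MainVm` and `NewVm` are placeholders for concrete `VmFactory` implementations, and the use
+/// of `serde_json` is an assumption of this example rather than a requirement of the API.
+///
+/// ```ignore
+/// let mut vm = ShadowVm::<_, MainVm, NewVm>::new(l1_batch_env, system_env, storage);
+/// vm.set_divergence_handler(DivergenceHandler::new(|err, dump| {
+///     // `VmDump` is (de)serializable, so it can be persisted and replayed later
+///     // via `VmDump::play_back()`.
+///     let serialized = serde_json::to_string(&dump).expect("failed serializing VM dump");
+///     std::fs::write("vm_dump.json", serialized).expect("failed writing VM dump");
+///     tracing::error!("VM divergence detected: {err}");
+/// }));
+/// ```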
+#[derive(Debug)] +pub struct ShadowVm { + main: DumpingVm, + shadow: RefCell>>, +} + +impl ShadowVm +where + S: ReadStorage, + Main: VmTrackingContracts, + Shadow: VmInterface, +{ + /// Sets the divergence handler to be used by this VM. + pub fn set_divergence_handler(&mut self, handler: DivergenceHandler) { + if let Some(shadow) = self.shadow.get_mut() { + shadow.divergence_handler = handler; + } + } + + /// Mutable ref is not necessary, but it automatically drops potential borrows. + fn report(&mut self, err: DivergenceErrors) { + self.report_shared(err); + } + + /// The caller is responsible for dropping any `shadow` borrows beforehand. + fn report_shared(&self, err: DivergenceErrors) { + self.shadow + .take() + .unwrap() + .report(err, self.main.dump_state()); + } + + /// Dumps the current VM state. + pub fn dump_state(&self) -> VmDump { + self.main.dump_state() + } +} + +impl ShadowVm +where + S: ReadStorage, + Main: VmFactory> + VmTrackingContracts, + Shadow: VmInterface, +{ + /// Creates a VM with a custom shadow storage. + pub fn with_custom_shadow( + batch_env: L1BatchEnv, + system_env: SystemEnv, + storage: StoragePtr>, + shadow_storage: StoragePtr, + ) -> Self + where + Shadow: VmFactory, + { + let main = DumpingVm::new(batch_env.clone(), system_env.clone(), storage.clone()); + let shadow = Shadow::new(batch_env.clone(), system_env.clone(), shadow_storage); + let shadow = VmWithReporting { + vm: shadow, + divergence_handler: DivergenceHandler::default(), + }; + Self { + main, + shadow: RefCell::new(Some(shadow)), + } + } +} + +impl VmFactory> for ShadowVm +where + S: ReadStorage, + Main: VmFactory> + VmTrackingContracts, + Shadow: VmFactory>, +{ + fn new( + batch_env: L1BatchEnv, + system_env: SystemEnv, + storage: StoragePtr>, + ) -> Self { + Self::with_custom_shadow(batch_env, system_env, storage.clone(), storage) + } +} + +/// **Important.** This doesn't properly handle tracers; they are not passed to the shadow VM! +impl VmInterface for ShadowVm +where + S: ReadStorage, + Main: VmTrackingContracts, + Shadow: VmInterface, +{ + type TracerDispatcher = ( +
<Main as VmInterface>::TracerDispatcher,
+        <Shadow as VmInterface>::TracerDispatcher,
+    );
+
+    fn push_transaction(&mut self, tx: Transaction) {
+        if let Some(shadow) = self.shadow.get_mut() {
+            shadow.vm.push_transaction(tx.clone());
+        }
+        self.main.push_transaction(tx);
+    }
+
+    fn inspect(
+        &mut self,
+        (main_tracer, shadow_tracer): &mut Self::TracerDispatcher,
+        execution_mode: VmExecutionMode,
+    ) -> VmExecutionResultAndLogs {
+        let main_result = self.main.inspect(main_tracer, execution_mode);
+        if let Some(shadow) = self.shadow.get_mut() {
+            let shadow_result = shadow.vm.inspect(shadow_tracer, execution_mode);
+            let mut errors = DivergenceErrors::new();
+            errors.check_results_match(&main_result, &shadow_result);
+
+            if let Err(err) = errors.into_result() {
+                let ctx = format!("executing VM with mode {execution_mode:?}");
+                self.report(err.context(ctx));
+            }
+        }
+        main_result
+    }
+
+    fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv) {
+        self.main.start_new_l2_block(l2_block_env);
+        if let Some(shadow) = self.shadow.get_mut() {
+            shadow.vm.start_new_l2_block(l2_block_env);
+        }
+    }
+
+    fn inspect_transaction_with_bytecode_compression(
+        &mut self,
+        (main_tracer, shadow_tracer): &mut Self::TracerDispatcher,
+        tx: Transaction,
+        with_compression: bool,
+    ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) {
+        let tx_repr = format!("{tx:?}"); // includes little data, so is OK to call proactively
+
+        let (main_bytecodes_result, main_tx_result) =
+            self.main.inspect_transaction_with_bytecode_compression(
+                main_tracer,
+                tx.clone(),
+                with_compression,
+            );
+        // Extend lifetime to `'static` so that the result isn't mutably borrowed from the main VM.
+        // Unfortunately, there's no way to express that this borrow is actually immutable,
+        // which would allow not extending the lifetime unless there's a divergence.
+ let main_bytecodes_result = + main_bytecodes_result.map(|bytecodes| bytecodes.into_owned().into()); + + if let Some(shadow) = self.shadow.get_mut() { + let shadow_result = shadow.vm.inspect_transaction_with_bytecode_compression( + shadow_tracer, + tx, + with_compression, + ); + let mut errors = DivergenceErrors::new(); + errors.check_results_match(&main_tx_result, &shadow_result.1); + if let Err(err) = errors.into_result() { + let ctx = format!( + "inspecting transaction {tx_repr}, with_compression={with_compression:?}" + ); + self.report(err.context(ctx)); + } + } + (main_bytecodes_result, main_tx_result) + } + + fn finish_batch(&mut self) -> FinishedL1Batch { + let main_batch = self.main.finish_batch(); + if let Some(shadow) = self.shadow.get_mut() { + let shadow_batch = shadow.vm.finish_batch(); + let mut errors = DivergenceErrors::new(); + errors.check_results_match( + &main_batch.block_tip_execution_result, + &shadow_batch.block_tip_execution_result, + ); + errors.check_final_states_match( + &main_batch.final_execution_state, + &shadow_batch.final_execution_state, + ); + errors.check_match( + "final_bootloader_memory", + &main_batch.final_bootloader_memory, + &shadow_batch.final_bootloader_memory, + ); + errors.check_match( + "pubdata_input", + &main_batch.pubdata_input, + &shadow_batch.pubdata_input, + ); + errors.check_match( + "state_diffs", + &main_batch.state_diffs, + &shadow_batch.state_diffs, + ); + + if let Err(err) = errors.into_result() { + self.report(err); + } + } + main_batch + } +} + +#[derive(Debug)] +pub struct DivergenceErrors { + divergences: Vec, + context: Option, +} + +impl fmt::Display for DivergenceErrors { + fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { + if let Some(context) = &self.context { + write!( + formatter, + "VM execution diverged: {context}: [{}]", + self.divergences.join(", ") + ) + } else { + write!( + formatter, + "VM execution diverged: [{}]", + self.divergences.join(", ") + ) + } + } +} + +impl DivergenceErrors { + fn new() -> Self { + Self { + divergences: vec![], + context: None, + } + } + + fn context(mut self, context: String) -> Self { + self.context = Some(context); + self + } + + fn check_results_match( + &mut self, + main_result: &VmExecutionResultAndLogs, + shadow_result: &VmExecutionResultAndLogs, + ) { + self.check_match("result", &main_result.result, &shadow_result.result); + self.check_match( + "logs.events", + &main_result.logs.events, + &shadow_result.logs.events, + ); + self.check_match( + "logs.system_l2_to_l1_logs", + &main_result.logs.system_l2_to_l1_logs, + &shadow_result.logs.system_l2_to_l1_logs, + ); + self.check_match( + "logs.user_l2_to_l1_logs", + &main_result.logs.user_l2_to_l1_logs, + &shadow_result.logs.user_l2_to_l1_logs, + ); + let main_logs = UniqueStorageLogs::new(&main_result.logs.storage_logs); + let shadow_logs = UniqueStorageLogs::new(&shadow_result.logs.storage_logs); + self.check_match("logs.storage_logs", &main_logs, &shadow_logs); + self.check_match("refunds", &main_result.refunds, &shadow_result.refunds); + self.check_match( + "statistics.circuit_statistic", + &main_result.statistics.circuit_statistic, + &shadow_result.statistics.circuit_statistic, + ); + self.check_match( + "statistics.pubdata_published", + &main_result.statistics.pubdata_published, + &shadow_result.statistics.pubdata_published, + ); + self.check_match( + "statistics.gas_remaining", + &main_result.statistics.gas_remaining, + &shadow_result.statistics.gas_remaining, + ); + self.check_match( + "statistics.gas_used", 
+            &main_result.statistics.gas_used,
+            &shadow_result.statistics.gas_used,
+        );
+        self.check_match(
+            "statistics.computational_gas_used",
+            &main_result.statistics.computational_gas_used,
+            &shadow_result.statistics.computational_gas_used,
+        );
+    }
+
+    fn check_match<T: fmt::Debug + PartialEq>(&mut self, context: &str, main: &T, shadow: &T) {
+        if main != shadow {
+            let comparison = pretty_assertions::Comparison::new(main, shadow);
+            let err = format!("`{context}` mismatch: {comparison}");
+            self.divergences.push(err);
+        }
+    }
+
+    fn check_final_states_match(
+        &mut self,
+        main: &CurrentExecutionState,
+        shadow: &CurrentExecutionState,
+    ) {
+        self.check_match("final_state.events", &main.events, &shadow.events);
+        self.check_match(
+            "final_state.user_l2_to_l1_logs",
+            &main.user_l2_to_l1_logs,
+            &shadow.user_l2_to_l1_logs,
+        );
+        self.check_match(
+            "final_state.system_logs",
+            &main.system_logs,
+            &shadow.system_logs,
+        );
+        self.check_match(
+            "final_state.storage_refunds",
+            &main.storage_refunds,
+            &shadow.storage_refunds,
+        );
+        self.check_match(
+            "final_state.pubdata_costs",
+            &main.pubdata_costs,
+            &shadow.pubdata_costs,
+        );
+        self.check_match(
+            "final_state.used_contract_hashes",
+            &main.used_contract_hashes.iter().collect::<BTreeSet<_>>(),
+            &shadow.used_contract_hashes.iter().collect::<BTreeSet<_>>(),
+        );
+
+        let main_deduplicated_logs = Self::gather_logs(&main.deduplicated_storage_logs);
+        let shadow_deduplicated_logs = Self::gather_logs(&shadow.deduplicated_storage_logs);
+        self.check_match(
+            "deduplicated_storage_logs",
+            &main_deduplicated_logs,
+            &shadow_deduplicated_logs,
+        );
+    }
+
+    fn gather_logs(logs: &[StorageLog]) -> BTreeMap<StorageKey, &StorageLog> {
+        logs.iter()
+            .filter(|log| log.is_write())
+            .map(|log| (log.key, log))
+            .collect()
+    }
+
+    fn into_result(self) -> Result<(), Self> {
+        if self.divergences.is_empty() {
+            Ok(())
+        } else {
+            Err(self)
+        }
+    }
+}
+
+// The new VM doesn't support read logs yet, doesn't order logs by access and deduplicates them
+// inside the VM, hence this auxiliary struct.
+#[derive(PartialEq)]
+struct UniqueStorageLogs(BTreeMap<StorageKey, StorageLogWithPreviousValue>);
+
+impl fmt::Debug for UniqueStorageLogs {
+    fn fmt(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let mut map = formatter.debug_map();
+        for log in self.0.values() {
+            map.entry(
+                &format!("{:?}:{:?}", log.log.key.address(), log.log.key.key()),
+                &format!("{:?} -> {:?}", log.previous_value, log.log.value),
+            );
+        }
+        map.finish()
+    }
+}
+
+impl UniqueStorageLogs {
+    fn new(logs: &[StorageLogWithPreviousValue]) -> Self {
+        let mut unique_logs = BTreeMap::<StorageKey, StorageLogWithPreviousValue>::new();
+        for log in logs {
+            if !log.log.is_write() {
+                continue;
+            }
+            if let Some(existing_log) = unique_logs.get_mut(&log.log.key) {
+                existing_log.log.value = log.log.value;
+            } else {
+                unique_logs.insert(log.log.key, *log);
+            }
+        }
+
+        // Remove no-op write logs (i.e., X -> X writes) produced by the old VM.
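+        // (Such a log appears, e.g., when a slot is written and later restored to its original
+        // value within the same batch.)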
+ unique_logs.retain(|_, log| log.previous_value != log.log.value); + Self(unique_logs) + } +} + +impl VmInterfaceHistoryEnabled for ShadowVm +where + S: ReadStorage, + Main: VmInterfaceHistoryEnabled + VmTrackingContracts, + Shadow: VmInterfaceHistoryEnabled, +{ + fn make_snapshot(&mut self) { + if let Some(shadow) = self.shadow.get_mut() { + shadow.vm.make_snapshot(); + } + self.main.make_snapshot(); + } + + fn rollback_to_the_latest_snapshot(&mut self) { + if let Some(shadow) = self.shadow.get_mut() { + shadow.vm.rollback_to_the_latest_snapshot(); + } + self.main.rollback_to_the_latest_snapshot(); + } + + fn pop_snapshot_no_rollback(&mut self) { + if let Some(shadow) = self.shadow.get_mut() { + shadow.vm.pop_snapshot_no_rollback(); + } + self.main.pop_snapshot_no_rollback(); + } +} diff --git a/core/lib/vm_interface/src/vm.rs b/core/lib/vm_interface/src/vm.rs index f70be52bd86..37e33a92b50 100644 --- a/core/lib/vm_interface/src/vm.rs +++ b/core/lib/vm_interface/src/vm.rs @@ -11,41 +11,39 @@ //! Generally speaking, in most cases, the tracer dispatcher is a wrapper around `Vec>`, //! where `VmTracer` is a trait implemented for a specific VM version. -use zksync_types::Transaction; +use zksync_types::{Transaction, H256}; use crate::{ storage::StoragePtr, BytecodeCompressionResult, FinishedL1Batch, L1BatchEnv, L2BlockEnv, - SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, VmMemoryMetrics, + SystemEnv, VmExecutionMode, VmExecutionResultAndLogs, }; pub trait VmInterface { + /// Lifetime is used to be able to define `Option<&mut _>` as a dispatcher. type TracerDispatcher: Default; /// Push transaction to bootloader memory. fn push_transaction(&mut self, tx: Transaction); - /// Execute next VM step (either next transaction or bootloader or the whole batch) + /// Executes the next VM step (either next transaction or bootloader or the whole batch) /// with custom tracers. fn inspect( &mut self, - dispatcher: Self::TracerDispatcher, + dispatcher: &mut Self::TracerDispatcher, execution_mode: VmExecutionMode, ) -> VmExecutionResultAndLogs; /// Start a new L2 block. fn start_new_l2_block(&mut self, l2_block_env: L2BlockEnv); - /// Execute transaction with optional bytecode compression using custom tracers. + /// Executes the provided transaction with optional bytecode compression using custom tracers. fn inspect_transaction_with_bytecode_compression( &mut self, - tracer: Self::TracerDispatcher, + tracer: &mut Self::TracerDispatcher, tx: Transaction, with_compression: bool, ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs); - /// Record VM memory metrics. - fn record_vm_memory_metrics(&self) -> VmMemoryMetrics; - /// Execute batch till the end and return the result, with final execution state /// and bootloader memory. fn finish_batch(&mut self) -> FinishedL1Batch; @@ -55,7 +53,7 @@ pub trait VmInterface { pub trait VmInterfaceExt: VmInterface { /// Executes the next VM step (either next transaction or bootloader or the whole batch). fn execute(&mut self, execution_mode: VmExecutionMode) -> VmExecutionResultAndLogs { - self.inspect(Self::TracerDispatcher::default(), execution_mode) + self.inspect(&mut ::default(), execution_mode) } /// Executes a transaction with optional bytecode compression. 
@@ -65,7 +63,7 @@ pub trait VmInterfaceExt: VmInterface { with_compression: bool, ) -> (BytecodeCompressionResult<'_>, VmExecutionResultAndLogs) { self.inspect_transaction_with_bytecode_compression( - Self::TracerDispatcher::default(), + &mut ::default(), tx, with_compression, ) @@ -103,3 +101,9 @@ pub trait VmInterfaceHistoryEnabled: VmInterface { /// (i.e., the VM must not panic in this case). fn pop_snapshot_no_rollback(&mut self); } + +/// VM that tracks decommitment of bytecodes during execution. This is required to create a [`VmDump`]. +pub trait VmTrackingContracts: VmInterface { + /// Returns hashes of all decommitted bytecodes. + fn used_contract_hashes(&self) -> Vec; +} diff --git a/core/lib/web3_decl/src/namespaces/debug.rs b/core/lib/web3_decl/src/namespaces/debug.rs index 1fbe3237104..8ca5622e95d 100644 --- a/core/lib/web3_decl/src/namespaces/debug.rs +++ b/core/lib/web3_decl/src/namespaces/debug.rs @@ -2,8 +2,7 @@ use jsonrpsee::core::RpcResult; use jsonrpsee::proc_macros::rpc; use zksync_types::{ - api::{BlockId, BlockNumber, DebugCall, ResultDebugCall, TracerConfig}, - debug_flat_call::DebugCallFlat, + api::{BlockId, BlockNumber, CallTracerBlockResult, CallTracerResult, TracerConfig}, transaction_request::CallRequest, }; @@ -26,21 +25,14 @@ pub trait DebugNamespace { &self, block: BlockNumber, options: Option, - ) -> RpcResult>; - - #[method(name = "traceBlockByNumber.callFlatTracer")] - async fn trace_block_by_number_flat( - &self, - block: BlockNumber, - options: Option, - ) -> RpcResult>; + ) -> RpcResult; #[method(name = "traceBlockByHash")] async fn trace_block_by_hash( &self, hash: H256, options: Option, - ) -> RpcResult>; + ) -> RpcResult; #[method(name = "traceCall")] async fn trace_call( @@ -48,12 +40,12 @@ pub trait DebugNamespace { request: CallRequest, block: Option, options: Option, - ) -> RpcResult; + ) -> RpcResult; #[method(name = "traceTransaction")] async fn trace_transaction( &self, tx_hash: H256, options: Option, - ) -> RpcResult>; + ) -> RpcResult>; } diff --git a/core/lib/zksync_core_leftovers/src/temp_config_store/mod.rs b/core/lib/zksync_core_leftovers/src/temp_config_store/mod.rs index 2d6af705f48..eb2170bcc84 100644 --- a/core/lib/zksync_core_leftovers/src/temp_config_store/mod.rs +++ b/core/lib/zksync_core_leftovers/src/temp_config_store/mod.rs @@ -26,15 +26,15 @@ use zksync_env_config::FromEnv; use zksync_protobuf::repr::ProtoRepr; use zksync_protobuf_config::proto::secrets::Secrets; -pub fn decode_yaml_repr(yaml: &str) -> anyhow::Result { - let d = serde_yaml::Deserializer::from_str(yaml); - let this: T = zksync_protobuf::serde::deserialize_proto_with_options(d, false)?; - this.read() -} - -pub fn read_yaml_repr(path_buf: PathBuf) -> anyhow::Result { - let yaml = std::fs::read_to_string(path_buf).context("failed reading YAML config")?; - decode_yaml_repr::(&yaml) +pub fn read_yaml_repr(path: &PathBuf) -> anyhow::Result { + (|| { + let yaml = std::fs::read_to_string(path)?; + zksync_protobuf::serde::Deserialize { + deny_unknown_fields: false, + } + .proto_repr_from_yaml::(&yaml) + })() + .with_context(|| format!("failed to read {}", path.display())) } // TODO (QIT-22): This structure is going to be removed when components will be responsible for their own configs. 
@@ -209,8 +209,7 @@ fn load_env_config() -> anyhow::Result { pub fn load_general_config(path: Option) -> anyhow::Result { match path { Some(path) => { - let yaml = std::fs::read_to_string(path).context("Failed to read general config")?; - decode_yaml_repr::(&yaml) + read_yaml_repr::(&path) } None => Ok(load_env_config() .context("general config from env")? @@ -221,8 +220,7 @@ pub fn load_general_config(path: Option) -> anyhow::Result) -> anyhow::Result { match path { Some(path) => { - let yaml = std::fs::read_to_string(path).context("Failed to read secrets")?; - let secrets = decode_yaml_repr::(&yaml).context("Failed to parse secrets")?; + let secrets = read_yaml_repr::(&path)?; Ok(secrets .database .context("failed to parse database secrets")?) diff --git a/core/node/api_server/Cargo.toml b/core/node/api_server/Cargo.toml index 040e2a94a11..d0723a9d23e 100644 --- a/core/node/api_server/Cargo.toml +++ b/core/node/api_server/Cargo.toml @@ -56,6 +56,7 @@ tower-http = { workspace = true, features = ["cors", "metrics"] } lru.workspace = true [dev-dependencies] +zk_evm_1_5_0.workspace = true zksync_node_genesis.workspace = true zksync_node_test_utils.workspace = true diff --git a/core/node/api_server/src/execution_sandbox/execute.rs b/core/node/api_server/src/execution_sandbox/execute.rs index d22d7de47d0..d974f2e9aa1 100644 --- a/core/node/api_server/src/execution_sandbox/execute.rs +++ b/core/node/api_server/src/execution_sandbox/execute.rs @@ -1,6 +1,10 @@ //! Implementation of "executing" methods, e.g. `eth_call`. +use std::time::{Duration, Instant}; + +use anyhow::Context as _; use async_trait::async_trait; +use tokio::runtime::Handle; use zksync_dal::{Connection, Core}; use zksync_multivm::interface::{ executor::{OneshotExecutor, TransactionValidator}, @@ -9,17 +13,71 @@ use zksync_multivm::interface::{ Call, OneshotEnv, OneshotTracingParams, OneshotTransactionExecutionResult, TransactionExecutionMetrics, TxExecutionArgs, VmExecutionResultAndLogs, }; -use zksync_types::{api::state_override::StateOverride, l2::L2Tx}; +use zksync_state::{PostgresStorage, PostgresStorageCaches}; +use zksync_types::{ + api::state_override::StateOverride, fee_model::BatchFeeInput, l2::L2Tx, Transaction, +}; use zksync_vm_executor::oneshot::{MainOneshotExecutor, MockOneshotExecutor}; use super::{ - apply, storage::StorageWithOverrides, vm_metrics, BlockArgs, TxSetupArgs, VmPermit, - SANDBOX_METRICS, + storage::StorageWithOverrides, + vm_metrics::{self, SandboxStage}, + BlockArgs, VmPermit, SANDBOX_METRICS, }; -use crate::execution_sandbox::vm_metrics::SandboxStage; +use crate::tx_sender::SandboxExecutorOptions; + +/// Action that can be executed by [`SandboxExecutor`]. +#[derive(Debug)] +pub(crate) enum SandboxAction { + /// Execute a transaction. + Execution { tx: L2Tx, fee_input: BatchFeeInput }, + /// Execute a call, possibly with tracing. + Call { + call: L2Tx, + fee_input: BatchFeeInput, + enforced_base_fee: Option, + tracing_params: OneshotTracingParams, + }, + /// Estimate gas for a transaction. + GasEstimation { + tx: Transaction, + fee_input: BatchFeeInput, + base_fee: u64, + }, +} + +impl SandboxAction { + fn factory_deps_count(&self) -> usize { + match self { + Self::Execution { tx, .. } | Self::Call { call: tx, .. } => { + tx.execute.factory_deps.len() + } + Self::GasEstimation { tx, .. } => tx.execute.factory_deps.len(), + } + } + fn into_parts(self) -> (TxExecutionArgs, OneshotTracingParams) { + match self { + Self::Execution { tx, .. 
} => ( + TxExecutionArgs::for_validation(tx), + OneshotTracingParams::default(), + ), + Self::GasEstimation { tx, .. } => ( + TxExecutionArgs::for_gas_estimate(tx), + OneshotTracingParams::default(), + ), + Self::Call { + call, + tracing_params, + .. + } => (TxExecutionArgs::for_eth_call(call), tracing_params), + } + } +} + +/// Output of [`SandboxExecutor::execute_in_sandbox()`]. #[derive(Debug, Clone)] -pub struct TransactionExecutionOutput { +pub(crate) struct SandboxExecutionOutput { /// Output of the VM. pub vm: VmExecutionResultAndLogs, /// Traced calls if requested. @@ -30,42 +88,64 @@ pub struct TransactionExecutionOutput { pub are_published_bytecodes_ok: bool, } -/// Executor of transactions. #[derive(Debug)] -pub enum TransactionExecutor { +enum SandboxExecutorEngine { Real(MainOneshotExecutor), - #[doc(hidden)] // Intended for tests only Mock(MockOneshotExecutor), } -impl TransactionExecutor { - pub fn real(missed_storage_invocation_limit: usize) -> Self { +/// Executor of transactions / calls used in the API server. +#[derive(Debug)] +pub(crate) struct SandboxExecutor { + engine: SandboxExecutorEngine, + pub(super) options: SandboxExecutorOptions, + storage_caches: Option, +} + +impl SandboxExecutor { + pub fn real( + options: SandboxExecutorOptions, + caches: PostgresStorageCaches, + missed_storage_invocation_limit: usize, + ) -> Self { let mut executor = MainOneshotExecutor::new(missed_storage_invocation_limit); executor .set_execution_latency_histogram(&SANDBOX_METRICS.sandbox[&SandboxStage::Execution]); - Self::Real(executor) + Self { + engine: SandboxExecutorEngine::Real(executor), + options, + storage_caches: Some(caches), + } + } + + pub(crate) async fn mock(executor: MockOneshotExecutor) -> Self { + Self { + engine: SandboxExecutorEngine::Mock(executor), + options: SandboxExecutorOptions::mock().await, + storage_caches: None, + } } /// This method assumes that (block with number `resolved_block_number` is present in DB) /// or (`block_id` is `pending` and block with number `resolved_block_number - 1` is present in DB) #[allow(clippy::too_many_arguments)] #[tracing::instrument(level = "debug", skip_all)] - pub async fn execute_tx_in_sandbox( + pub async fn execute_in_sandbox( &self, vm_permit: VmPermit, - setup_args: TxSetupArgs, - execution_args: TxExecutionArgs, connection: Connection<'static, Core>, - block_args: BlockArgs, + action: SandboxAction, + block_args: &BlockArgs, state_override: Option, - tracing_params: OneshotTracingParams, - ) -> anyhow::Result { - let total_factory_deps = execution_args.transaction.execute.factory_deps.len() as u16; - let (env, storage) = - apply::prepare_env_and_storage(connection, setup_args, &block_args).await?; + ) -> anyhow::Result { + let total_factory_deps = action.factory_deps_count() as u16; + let (env, storage) = self + .prepare_env_and_storage(connection, block_args, &action) + .await?; + let state_override = state_override.unwrap_or_default(); let storage = StorageWithOverrides::new(storage, &state_override); - + let (execution_args, tracing_params) = action.into_parts(); let result = self .inspect_transaction_with_bytecode_compression( storage, @@ -78,23 +158,88 @@ impl TransactionExecutor { let metrics = vm_metrics::collect_tx_execution_metrics(total_factory_deps, &result.tx_result); - Ok(TransactionExecutionOutput { + Ok(SandboxExecutionOutput { vm: *result.tx_result, call_traces: result.call_traces, metrics, are_published_bytecodes_ok: result.compression_result.is_ok(), }) } -} -impl From for TransactionExecutor { - fn 
from(executor: MockOneshotExecutor) -> Self { - Self::Mock(executor) + pub(super) async fn prepare_env_and_storage( + &self, + mut connection: Connection<'static, Core>, + block_args: &BlockArgs, + action: &SandboxAction, + ) -> anyhow::Result<(OneshotEnv, PostgresStorage<'static>)> { + let initialization_stage = SANDBOX_METRICS.sandbox[&SandboxStage::Initialization].start(); + let resolve_started_at = Instant::now(); + let resolve_time = resolve_started_at.elapsed(); + let resolved_block_info = block_args.inner.resolve(&mut connection).await?; + // We don't want to emit too many logs. + if resolve_time > Duration::from_millis(10) { + tracing::debug!("Resolved block numbers (took {resolve_time:?})"); + } + + let env = match action { + SandboxAction::Execution { fee_input, tx } => { + self.options + .eth_call + .to_execute_env(&mut connection, &resolved_block_info, *fee_input, tx) + .await? + } + &SandboxAction::Call { + fee_input, + enforced_base_fee, + .. + } => { + self.options + .eth_call + .to_call_env( + &mut connection, + &resolved_block_info, + fee_input, + enforced_base_fee, + ) + .await? + } + &SandboxAction::GasEstimation { + fee_input, + base_fee, + .. + } => { + self.options + .estimate_gas + .to_env(&mut connection, &resolved_block_info, fee_input, base_fee) + .await? + } + }; + + if block_args.resolves_to_latest_sealed_l2_block() { + if let Some(caches) = &self.storage_caches { + caches.schedule_values_update(resolved_block_info.state_l2_block_number()); + } + } + + let mut storage = PostgresStorage::new_async( + Handle::current(), + connection, + resolved_block_info.state_l2_block_number(), + false, + ) + .await + .context("cannot create `PostgresStorage`")?; + + if let Some(caches) = &self.storage_caches { + storage = storage.with_caches(caches.clone()); + } + initialization_stage.observe(); + Ok((env, storage)) } } #[async_trait] -impl OneshotExecutor for TransactionExecutor +impl OneshotExecutor for SandboxExecutor where S: ReadStorage + Send + 'static, { @@ -105,8 +250,8 @@ where args: TxExecutionArgs, tracing_params: OneshotTracingParams, ) -> anyhow::Result { - match self { - Self::Real(executor) => { + match &self.engine { + SandboxExecutorEngine::Real(executor) => { executor .inspect_transaction_with_bytecode_compression( storage, @@ -116,7 +261,7 @@ where ) .await } - Self::Mock(executor) => { + SandboxExecutorEngine::Mock(executor) => { executor .inspect_transaction_with_bytecode_compression( storage, @@ -131,7 +276,7 @@ where } #[async_trait] -impl TransactionValidator for TransactionExecutor +impl TransactionValidator for SandboxExecutor where S: ReadStorage + Send + 'static, { @@ -142,13 +287,13 @@ where tx: L2Tx, validation_params: ValidationParams, ) -> anyhow::Result> { - match self { - Self::Real(executor) => { + match &self.engine { + SandboxExecutorEngine::Real(executor) => { executor .validate_transaction(storage, env, tx, validation_params) .await } - Self::Mock(executor) => { + SandboxExecutorEngine::Mock(executor) => { executor .validate_transaction(storage, env, tx, validation_params) .await diff --git a/core/node/api_server/src/execution_sandbox/mod.rs b/core/node/api_server/src/execution_sandbox/mod.rs index 79c6123642c..36f10b8e9b0 100644 --- a/core/node/api_server/src/execution_sandbox/mod.rs +++ b/core/node/api_server/src/execution_sandbox/mod.rs @@ -6,23 +6,21 @@ use std::{ use anyhow::Context as _; use rand::{thread_rng, Rng}; use zksync_dal::{pruning_dal::PruningInfo, Connection, Core, CoreDal, DalError}; -use 
zksync_multivm::interface::TxExecutionMode; -use zksync_state::PostgresStorageCaches; +use zksync_multivm::utils::get_eth_call_gas_limit; use zksync_types::{ - api, fee_model::BatchFeeInput, AccountTreeId, Address, L1BatchNumber, L2BlockNumber, L2ChainId, + api, fee_model::BatchFeeInput, L1BatchNumber, L2BlockNumber, ProtocolVersionId, U256, }; +use zksync_vm_executor::oneshot::BlockInfo; -pub use self::execute::TransactionExecutor; // FIXME (PLA-1018): remove use self::vm_metrics::SandboxStage; pub(super) use self::{ error::SandboxExecutionError, + execute::{SandboxAction, SandboxExecutor}, validate::ValidationError, vm_metrics::{SubmitTxStage, SANDBOX_METRICS}, }; -use super::tx_sender::MultiVMBaseSystemContracts; // Note: keep the modules private, and instead re-export functions that make public interface. -mod apply; mod error; mod execute; mod storage; @@ -136,53 +134,6 @@ impl VmConcurrencyLimiter { } } -async fn get_pending_state( - connection: &mut Connection<'_, Core>, -) -> anyhow::Result<(api::BlockId, L2BlockNumber)> { - let block_id = api::BlockId::Number(api::BlockNumber::Pending); - let resolved_block_number = connection - .blocks_web3_dal() - .resolve_block_id(block_id) - .await - .map_err(DalError::generalize)? - .context("pending block should always be present in Postgres")?; - Ok((block_id, resolved_block_number)) -} - -/// Arguments for VM execution necessary to set up storage and environment. -#[derive(Debug, Clone)] -pub struct TxSetupArgs { - pub execution_mode: TxExecutionMode, - pub operator_account: AccountTreeId, - pub fee_input: BatchFeeInput, - pub base_system_contracts: MultiVMBaseSystemContracts, - pub caches: PostgresStorageCaches, - pub validation_computational_gas_limit: u32, - pub chain_id: L2ChainId, - pub whitelisted_tokens_for_aa: Vec
, - pub enforced_base_fee: Option, -} - -impl TxSetupArgs { - #[cfg(test)] - pub fn mock( - execution_mode: TxExecutionMode, - base_system_contracts: MultiVMBaseSystemContracts, - ) -> Self { - Self { - execution_mode, - operator_account: AccountTreeId::default(), - fee_input: BatchFeeInput::l1_pegged(55, 555), - base_system_contracts, - caches: PostgresStorageCaches::new(1, 1), - validation_computational_gas_limit: u32::MAX, - chain_id: L2ChainId::default(), - whitelisted_tokens_for_aa: vec![], - enforced_base_fee: None, - } - } -} - #[derive(Debug, Clone, Copy)] struct BlockStartInfoInner { info: PruningInfo, @@ -335,19 +286,17 @@ pub enum BlockArgsError { /// Information about a block provided to VM. #[derive(Debug, Clone, Copy)] -pub struct BlockArgs { +pub(crate) struct BlockArgs { + inner: BlockInfo, block_id: api::BlockId, - resolved_block_number: L2BlockNumber, - l1_batch_timestamp_s: Option, } impl BlockArgs { - pub(crate) async fn pending(connection: &mut Connection<'_, Core>) -> anyhow::Result { - let (block_id, resolved_block_number) = get_pending_state(connection).await?; + pub async fn pending(connection: &mut Connection<'_, Core>) -> anyhow::Result { + let inner = BlockInfo::pending(connection).await?; Ok(Self { - block_id, - resolved_block_number, - l1_batch_timestamp_s: None, + inner, + block_id: api::BlockId::Number(api::BlockNumber::Pending), }) } @@ -365,7 +314,7 @@ impl BlockArgs { .await?; if block_id == api::BlockId::Number(api::BlockNumber::Pending) { - return Ok(BlockArgs::pending(connection).await?); + return Ok(Self::pending(connection).await?); } let resolved_block_number = connection @@ -373,32 +322,25 @@ impl BlockArgs { .resolve_block_id(block_id) .await .map_err(DalError::generalize)?; - let Some(resolved_block_number) = resolved_block_number else { + let Some(block_number) = resolved_block_number else { return Err(BlockArgsError::Missing); }; - let l1_batch = connection - .storage_web3_dal() - .resolve_l1_batch_number_of_l2_block(resolved_block_number) - .await - .with_context(|| { - format!("failed resolving L1 batch number of L2 block #{resolved_block_number}") - })?; - let l1_batch_timestamp = connection - .blocks_web3_dal() - .get_expected_l1_batch_timestamp(&l1_batch) - .await - .map_err(DalError::generalize)? - .context("missing timestamp for non-pending block")?; Ok(Self { + inner: BlockInfo::for_existing_block(connection, block_number).await?, block_id, - resolved_block_number, - l1_batch_timestamp_s: Some(l1_batch_timestamp), }) } pub fn resolved_block_number(&self) -> L2BlockNumber { - self.resolved_block_number + self.inner.block_number() + } + + fn is_pending(&self) -> bool { + matches!( + self.block_id, + api::BlockId::Number(api::BlockNumber::Pending) + ) } pub fn resolves_to_latest_sealed_l2_block(&self) -> bool { @@ -409,4 +351,30 @@ impl BlockArgs { ) ) } + + pub async fn historical_fee_input( + &self, + connection: &mut Connection<'_, Core>, + ) -> anyhow::Result { + self.inner.historical_fee_input(connection).await + } + + pub async fn default_eth_call_gas( + &self, + connection: &mut Connection<'_, Core>, + ) -> anyhow::Result { + let protocol_version = if self.is_pending() { + connection.blocks_dal().pending_protocol_version().await? + } else { + let block_number = self.inner.block_number(); + connection + .blocks_dal() + .get_l2_block_header(block_number) + .await? + .with_context(|| format!("missing header for resolved block #{block_number}"))? 
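+                // Old block headers may lack a protocol version; fall back to the latest
+                // version that may be undefined in storage.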
+ .protocol_version + .unwrap_or_else(ProtocolVersionId::last_potentially_undefined) + }; + Ok(get_eth_call_gas_limit(protocol_version.into()).into()) + } } diff --git a/core/node/api_server/src/execution_sandbox/storage.rs b/core/node/api_server/src/execution_sandbox/storage.rs index 0d4c88d4a0a..bf775d48490 100644 --- a/core/node/api_server/src/execution_sandbox/storage.rs +++ b/core/node/api_server/src/execution_sandbox/storage.rs @@ -8,7 +8,7 @@ use std::{ use zksync_multivm::interface::storage::ReadStorage; use zksync_types::{ api::state_override::{OverrideState, StateOverride}, - get_code_key, get_nonce_key, + get_code_key, get_known_code_key, get_nonce_key, utils::{decompose_full_nonce, nonces_to_full_nonce, storage_key_for_eth_balance}, AccountTreeId, StorageKey, StorageValue, H256, }; @@ -56,6 +56,9 @@ impl StorageWithOverrides { let code_key = get_code_key(account); let code_hash = code.hash(); self.overridden_slots.insert(code_key, code_hash); + let known_code_key = get_known_code_key(&code_hash); + self.overridden_slots + .insert(known_code_key, H256::from_low_u64_be(1)); self.store_factory_dep(code_hash, code.clone().into_bytes()); } diff --git a/core/node/api_server/src/execution_sandbox/tests.rs b/core/node/api_server/src/execution_sandbox/tests.rs index 35103779a49..306018e1543 100644 --- a/core/node/api_server/src/execution_sandbox/tests.rs +++ b/core/node/api_server/src/execution_sandbox/tests.rs @@ -5,27 +5,22 @@ use std::collections::HashMap; use assert_matches::assert_matches; use test_casing::test_casing; use zksync_dal::ConnectionPool; -use zksync_multivm::{ - interface::{ - executor::{OneshotExecutor, TransactionValidator}, - tracer::ValidationError, - Halt, OneshotTracingParams, TxExecutionArgs, - }, - utils::derive_base_fee_and_gas_per_pubdata, -}; +use zksync_multivm::{interface::ExecutionResult, utils::derive_base_fee_and_gas_per_pubdata}; use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; use zksync_node_test_utils::{create_l2_block, prepare_recovery_snapshot}; +use zksync_state::PostgresStorageCaches; use zksync_types::{ api::state_override::{OverrideAccount, StateOverride}, fee::Fee, - l2::L2Tx, - transaction_request::PaymasterParams, - K256PrivateKey, Nonce, ProtocolVersionId, Transaction, U256, + fee_model::BatchFeeInput, + K256PrivateKey, ProtocolVersionId, Transaction, U256, }; -use zksync_vm_executor::oneshot::MainOneshotExecutor; -use super::{storage::StorageWithOverrides, *}; -use crate::tx_sender::ApiContracts; +use super::*; +use crate::{ + execution_sandbox::execute::SandboxExecutor, testonly::TestAccount, + tx_sender::SandboxExecutorOptions, +}; #[tokio::test] async fn creating_block_args() { @@ -46,8 +41,9 @@ async fn creating_block_args() { pending_block_args.block_id, api::BlockId::Number(api::BlockNumber::Pending) ); - assert_eq!(pending_block_args.resolved_block_number, L2BlockNumber(2)); - assert_eq!(pending_block_args.l1_batch_timestamp_s, None); + assert_eq!(pending_block_args.resolved_block_number(), L2BlockNumber(2)); + assert_eq!(pending_block_args.inner.l1_batch_timestamp(), None); + assert!(pending_block_args.is_pending()); let start_info = BlockStartInfo::new(&mut storage, Duration::MAX) .await @@ -66,9 +62,9 @@ async fn creating_block_args() { .await .unwrap(); assert_eq!(latest_block_args.block_id, latest_block); - assert_eq!(latest_block_args.resolved_block_number, L2BlockNumber(1)); + assert_eq!(latest_block_args.resolved_block_number(), L2BlockNumber(1)); assert_eq!( - latest_block_args.l1_batch_timestamp_s, + 
latest_block_args.inner.l1_batch_timestamp(), Some(l2_block.timestamp) ); @@ -77,8 +73,11 @@ async fn creating_block_args() { .await .unwrap(); assert_eq!(earliest_block_args.block_id, earliest_block); - assert_eq!(earliest_block_args.resolved_block_number, L2BlockNumber(0)); - assert_eq!(earliest_block_args.l1_batch_timestamp_s, Some(0)); + assert_eq!( + earliest_block_args.resolved_block_number(), + L2BlockNumber(0) + ); + assert_eq!(earliest_block_args.inner.l1_batch_timestamp(), Some(0)); let missing_block = api::BlockId::Number(100.into()); let err = BlockArgs::new(&mut storage, missing_block, &start_info) @@ -100,10 +99,10 @@ async fn creating_block_args_after_snapshot_recovery() { api::BlockId::Number(api::BlockNumber::Pending) ); assert_eq!( - pending_block_args.resolved_block_number, + pending_block_args.resolved_block_number(), snapshot_recovery.l2_block_number + 1 ); - assert_eq!(pending_block_args.l1_batch_timestamp_s, None); + assert!(pending_block_args.is_pending()); let start_info = BlockStartInfo::new(&mut storage, Duration::MAX) .await @@ -159,9 +158,9 @@ async fn creating_block_args_after_snapshot_recovery() { .await .unwrap(); assert_eq!(latest_block_args.block_id, latest_block); - assert_eq!(latest_block_args.resolved_block_number, l2_block.number); + assert_eq!(latest_block_args.resolved_block_number(), l2_block.number); assert_eq!( - latest_block_args.l1_batch_timestamp_s, + latest_block_args.inner.l1_batch_timestamp(), Some(l2_block.timestamp) ); @@ -203,50 +202,41 @@ async fn estimating_gas() { } async fn test_instantiating_vm(connection: Connection<'static, Core>, block_args: BlockArgs) { - let estimate_gas_contracts = ApiContracts::load_from_disk().await.unwrap().estimate_gas; - let mut setup_args = TxSetupArgs::mock(TxExecutionMode::EstimateFee, estimate_gas_contracts); - let (base_fee, gas_per_pubdata) = derive_base_fee_and_gas_per_pubdata( - setup_args.fee_input, - ProtocolVersionId::latest().into(), + let executor = SandboxExecutor::real( + SandboxExecutorOptions::mock().await, + PostgresStorageCaches::new(1, 1), + usize::MAX, ); - setup_args.enforced_base_fee = Some(base_fee); - let transaction = Transaction::from(create_transfer(base_fee, gas_per_pubdata)); - let execution_args = TxExecutionArgs::for_gas_estimate(transaction.clone()); - let (env, storage) = apply::prepare_env_and_storage(connection, setup_args, &block_args) - .await - .unwrap(); - let storage = StorageWithOverrides::new(storage, &StateOverride::default()); + let fee_input = BatchFeeInput::l1_pegged(55, 555); + let (base_fee, gas_per_pubdata) = + derive_base_fee_and_gas_per_pubdata(fee_input, ProtocolVersionId::latest().into()); + let tx = K256PrivateKey::random().create_transfer_with_fee( + 0.into(), + Fee { + gas_limit: 200_000.into(), + max_fee_per_gas: base_fee.into(), + max_priority_fee_per_gas: 0.into(), + gas_per_pubdata_limit: gas_per_pubdata.into(), + }, + ); + let tx = Transaction::from(tx); - let tracing_params = OneshotTracingParams::default(); - let output = MainOneshotExecutor::new(usize::MAX) - .inspect_transaction_with_bytecode_compression(storage, env, execution_args, tracing_params) + let (limiter, _) = VmConcurrencyLimiter::new(1); + let vm_permit = limiter.acquire().await.unwrap(); + let action = SandboxAction::GasEstimation { + fee_input, + base_fee, + tx, + }; + let output = executor + .execute_in_sandbox(vm_permit, connection, action, &block_args, None) .await .unwrap(); - output.compression_result.unwrap(); - let tx_result = *output.tx_result; - 
assert!(!tx_result.result.is_failed(), "{tx_result:#?}"); -} -fn create_transfer(fee_per_gas: u64, gas_per_pubdata: u64) -> L2Tx { - let fee = Fee { - gas_limit: 200_000.into(), - max_fee_per_gas: fee_per_gas.into(), - max_priority_fee_per_gas: 0_u64.into(), - gas_per_pubdata_limit: gas_per_pubdata.into(), - }; - L2Tx::new_signed( - Address::random(), - vec![], - Nonce(0), - fee, - U256::zero(), - L2ChainId::default(), - &K256PrivateKey::random(), - vec![], - PaymasterParams::default(), - ) - .unwrap() + assert!(output.are_published_bytecodes_ok); + let tx_result = output.vm; + assert!(!tx_result.result.is_failed(), "{tx_result:#?}"); } #[test_casing(2, [false, true])] @@ -260,47 +250,55 @@ async fn validating_transaction(set_balance: bool) { let block_args = BlockArgs::pending(&mut connection).await.unwrap(); - let call_contracts = ApiContracts::load_from_disk().await.unwrap().eth_call; - let mut setup_args = TxSetupArgs::mock(TxExecutionMode::VerifyExecute, call_contracts); - let (base_fee, gas_per_pubdata) = derive_base_fee_and_gas_per_pubdata( - setup_args.fee_input, - ProtocolVersionId::latest().into(), + let executor = SandboxExecutor::real( + SandboxExecutorOptions::mock().await, + PostgresStorageCaches::new(1, 1), + usize::MAX, ); - setup_args.enforced_base_fee = Some(base_fee); - let transaction = create_transfer(base_fee, gas_per_pubdata); - let validation_params = - validate::get_validation_params(&mut connection, &transaction, u32::MAX, &[]) - .await - .unwrap(); - let (env, storage) = apply::prepare_env_and_storage(connection, setup_args, &block_args) - .await - .unwrap(); + let fee_input = BatchFeeInput::l1_pegged(55, 555); + let (base_fee, gas_per_pubdata) = + derive_base_fee_and_gas_per_pubdata(fee_input, ProtocolVersionId::latest().into()); + let tx = K256PrivateKey::random().create_transfer_with_fee( + 0.into(), + Fee { + gas_limit: 200_000.into(), + max_fee_per_gas: base_fee.into(), + max_priority_fee_per_gas: 0.into(), + gas_per_pubdata_limit: gas_per_pubdata.into(), + }, + ); + + let (limiter, _) = VmConcurrencyLimiter::new(1); + let vm_permit = limiter.acquire().await.unwrap(); let state_override = if set_balance { let account_override = OverrideAccount { balance: Some(U256::from(1) << 128), ..OverrideAccount::default() }; - StateOverride::new(HashMap::from([( - transaction.initiator_account(), - account_override, - )])) + StateOverride::new(HashMap::from([(tx.initiator_account(), account_override)])) } else { StateOverride::default() }; - let storage = StorageWithOverrides::new(storage, &state_override); - let validation_result = MainOneshotExecutor::new(usize::MAX) - .validate_transaction(storage, env, transaction, validation_params) + let result = executor + .execute_in_sandbox( + vm_permit, + connection, + SandboxAction::Execution { tx, fee_input }, + &block_args, + Some(state_override), + ) .await .unwrap(); + + let result = result.vm.result; if set_balance { - validation_result.expect("validation failed"); + assert_matches!(result, ExecutionResult::Success { .. 
}); } else { assert_matches!( - validation_result.unwrap_err(), - ValidationError::FailedTx(Halt::ValidationFailed(reason)) - if reason.to_string().contains("Not enough balance") + result, + ExecutionResult::Halt { reason } if reason.to_string().contains("Not enough balance") ); } } diff --git a/core/node/api_server/src/execution_sandbox/validate.rs b/core/node/api_server/src/execution_sandbox/validate.rs index e9087e608ee..9a3c88f8bf0 100644 --- a/core/node/api_server/src/execution_sandbox/validate.rs +++ b/core/node/api_server/src/execution_sandbox/validate.rs @@ -8,16 +8,15 @@ use zksync_multivm::interface::{ tracer::{ValidationError as RawValidationError, ValidationParams}, }; use zksync_types::{ - api::state_override::StateOverride, l2::L2Tx, Address, TRUSTED_ADDRESS_SLOTS, - TRUSTED_TOKEN_SLOTS, + api::state_override::StateOverride, fee_model::BatchFeeInput, l2::L2Tx, Address, + TRUSTED_ADDRESS_SLOTS, TRUSTED_TOKEN_SLOTS, }; use super::{ - apply, - execute::TransactionExecutor, + execute::{SandboxAction, SandboxExecutor}, storage::StorageWithOverrides, vm_metrics::{SandboxStage, EXECUTION_METRICS, SANDBOX_METRICS}, - BlockArgs, TxSetupArgs, VmPermit, + BlockArgs, VmPermit, }; /// Validation error used by the sandbox. Besides validation errors returned by VM, it also includes an internal error @@ -30,29 +29,34 @@ pub(crate) enum ValidationError { Internal(#[from] anyhow::Error), } -impl TransactionExecutor { +impl SandboxExecutor { #[tracing::instrument(level = "debug", skip_all)] pub(crate) async fn validate_tx_in_sandbox( &self, - mut connection: Connection<'static, Core>, vm_permit: VmPermit, + mut connection: Connection<'static, Core>, tx: L2Tx, - setup_args: TxSetupArgs, block_args: BlockArgs, - computational_gas_limit: u32, + fee_input: BatchFeeInput, + whitelisted_tokens_for_aa: &[Address], ) -> Result<(), ValidationError> { let total_latency = SANDBOX_METRICS.sandbox[&SandboxStage::ValidateInSandbox].start(); let validation_params = get_validation_params( &mut connection, &tx, - computational_gas_limit, - &setup_args.whitelisted_tokens_for_aa, + self.options.eth_call.validation_computational_gas_limit(), + whitelisted_tokens_for_aa, ) .await .context("failed getting validation params")?; - let (env, storage) = - apply::prepare_env_and_storage(connection, setup_args, &block_args).await?; + let action = SandboxAction::Execution { fee_input, tx }; + let (env, storage) = self + .prepare_env_and_storage(connection, &block_args, &action) + .await?; + let SandboxAction::Execution { tx, .. } = action else { + unreachable!(); // by construction + }; let storage = StorageWithOverrides::new(storage, &StateOverride::default()); let stage_latency = SANDBOX_METRICS.sandbox[&SandboxStage::Validation].start(); diff --git a/core/node/api_server/src/execution_sandbox/vm_metrics.rs b/core/node/api_server/src/execution_sandbox/vm_metrics.rs index cbfe7e90bd0..613475b6ef9 100644 --- a/core/node/api_server/src/execution_sandbox/vm_metrics.rs +++ b/core/node/api_server/src/execution_sandbox/vm_metrics.rs @@ -89,8 +89,18 @@ pub(crate) struct SandboxMetrics { pub(super) sandbox_execution_permits: Histogram, #[metrics(buckets = Buckets::LATENCIES)] submit_tx: Family>, + + /// Number of iterations necessary to estimate gas for a transaction. #[metrics(buckets = Buckets::linear(0.0..=30.0, 3.0))] pub estimate_gas_binary_search_iterations: Histogram, + /// Relative difference between the unscaled final gas estimate and the optimized lower bound. 
Positive if the lower bound + /// is (as expected) lower than the final gas estimate. + #[metrics(buckets = Buckets::linear(-0.05..=0.15, 0.01))] + pub estimate_gas_lower_bound_relative_diff: Histogram, + /// Relative difference between the optimistic gas limit and the unscaled final gas estimate. Positive if the optimistic gas limit + /// is (as expected) greater than the final gas estimate. + #[metrics(buckets = Buckets::linear(-0.05..=0.15, 0.01))] + pub estimate_gas_optimistic_gas_limit_relative_diff: Histogram, } impl SandboxMetrics { diff --git a/core/node/api_server/src/lib.rs b/core/node/api_server/src/lib.rs index 00a3a5632ce..c735bd50447 100644 --- a/core/node/api_server/src/lib.rs +++ b/core/node/api_server/src/lib.rs @@ -4,5 +4,7 @@ mod utils; pub mod execution_sandbox; pub mod healthcheck; +#[cfg(test)] +mod testonly; pub mod tx_sender; pub mod web3; diff --git a/core/node/api_server/src/testonly.rs b/core/node/api_server/src/testonly.rs new file mode 100644 index 00000000000..8dc7915385a --- /dev/null +++ b/core/node/api_server/src/testonly.rs @@ -0,0 +1,514 @@ +//! Test utils shared among multiple modules. + +use std::{collections::HashMap, iter}; + +use zk_evm_1_5_0::zkevm_opcode_defs::decoding::{EncodingModeProduction, VmEncodingMode}; +use zksync_contracts::{ + eth_contract, get_loadnext_contract, load_contract, read_bytecode, + test_contracts::LoadnextContractExecutionParams, +}; +use zksync_dal::{Connection, Core, CoreDal}; +use zksync_multivm::utils::derive_base_fee_and_gas_per_pubdata; +use zksync_node_fee_model::BatchFeeModelInputProvider; +use zksync_system_constants::L2_BASE_TOKEN_ADDRESS; +use zksync_types::{ + api::state_override::{Bytecode, OverrideAccount, OverrideState, StateOverride}, + ethabi, + ethabi::Token, + fee::Fee, + fee_model::FeeParams, + get_code_key, get_known_code_key, + l2::L2Tx, + transaction_request::{CallRequest, PaymasterParams}, + utils::storage_key_for_eth_balance, + AccountTreeId, Address, K256PrivateKey, L2BlockNumber, L2ChainId, Nonce, ProtocolVersionId, + StorageKey, StorageLog, H256, U256, +}; +use zksync_utils::{address_to_u256, u256_to_h256}; + +const EXPENSIVE_CONTRACT_PATH: &str = + "etc/contracts-test-data/artifacts-zk/contracts/expensive/expensive.sol/Expensive.json"; +const PRECOMPILES_CONTRACT_PATH: &str = + "etc/contracts-test-data/artifacts-zk/contracts/precompiles/precompiles.sol/Precompiles.json"; +const COUNTER_CONTRACT_PATH: &str = + "etc/contracts-test-data/artifacts-zk/contracts/counter/counter.sol/Counter.json"; +const INFINITE_LOOP_CONTRACT_PATH: &str = + "etc/contracts-test-data/artifacts-zk/contracts/infinite/infinite.sol/InfiniteLoop.json"; +const MULTICALL3_CONTRACT_PATH: &str = + "contracts/l2-contracts/artifacts-zk/contracts/dev-contracts/Multicall3.sol/Multicall3.json"; + +/// Inflates the provided bytecode by appending the specified amount of NOP instructions at the end. 
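+/// (Used by [`StateBuilder::inflate_bytecode()`] below to make a contract's bytecode arbitrarily large in tests.)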
+fn inflate_bytecode(bytecode: &mut Vec, nop_count: usize) { + bytecode.extend( + iter::repeat(EncodingModeProduction::nop_encoding().to_be_bytes()) + .take(nop_count) + .flatten(), + ); +} + +fn default_fee() -> Fee { + let fee_input = ::default_batch_fee_input_scaled( + FeeParams::sensible_v1_default(), + 1.0, + 1.0, + ); + let (max_fee_per_gas, gas_per_pubdata_limit) = + derive_base_fee_and_gas_per_pubdata(fee_input, ProtocolVersionId::default().into()); + Fee { + gas_limit: 10_000_000.into(), + max_fee_per_gas: max_fee_per_gas.into(), + max_priority_fee_per_gas: 0_u64.into(), + gas_per_pubdata_limit: gas_per_pubdata_limit.into(), + } +} + +#[derive(Debug, Default)] +pub(crate) struct StateBuilder { + inner: HashMap, +} + +impl StateBuilder { + pub(crate) const LOAD_TEST_ADDRESS: Address = Address::repeat_byte(1); + pub(crate) const EXPENSIVE_CONTRACT_ADDRESS: Address = Address::repeat_byte(2); + pub(crate) const PRECOMPILES_CONTRACT_ADDRESS: Address = Address::repeat_byte(3); + const COUNTER_CONTRACT_ADDRESS: Address = Address::repeat_byte(4); + const INFINITE_LOOP_CONTRACT_ADDRESS: Address = Address::repeat_byte(5); + const MULTICALL3_ADDRESS: Address = Address::repeat_byte(6); + + pub fn with_contract(mut self, address: Address, bytecode: Vec) -> Self { + self.inner.insert( + address, + OverrideAccount { + code: Some(Bytecode::new(bytecode).unwrap()), + ..OverrideAccount::default() + }, + ); + self + } + + pub fn inflate_bytecode(mut self, address: Address, nop_count: usize) -> Self { + let account_override = self.inner.get_mut(&address).expect("no contract"); + let bytecode = account_override.code.take().expect("no code override"); + let mut bytecode = bytecode.into_bytes(); + inflate_bytecode(&mut bytecode, nop_count); + account_override.code = Some(Bytecode::new(bytecode).unwrap()); + self + } + + pub fn with_load_test_contract(mut self) -> Self { + // Set the array length in the load test contract to 100, so that reads don't fail. 
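+        // (The array length lives in slot 0, hence the single storage slot overridden below.)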
+ let state = HashMap::from([(H256::zero(), H256::from_low_u64_be(100))]); + self.inner.insert( + Self::LOAD_TEST_ADDRESS, + OverrideAccount { + code: Some(Bytecode::new(get_loadnext_contract().bytecode).unwrap()), + state: Some(OverrideState::State(state)), + ..OverrideAccount::default() + }, + ); + self + } + + pub fn with_balance(mut self, address: Address, balance: U256) -> Self { + self.inner.entry(address).or_default().balance = Some(balance); + self + } + + pub fn with_expensive_contract(self) -> Self { + self.with_contract( + Self::EXPENSIVE_CONTRACT_ADDRESS, + read_bytecode(EXPENSIVE_CONTRACT_PATH), + ) + } + + pub fn with_precompiles_contract(self) -> Self { + self.with_contract( + Self::PRECOMPILES_CONTRACT_ADDRESS, + read_bytecode(PRECOMPILES_CONTRACT_PATH), + ) + } + + pub fn with_counter_contract(self, initial_value: u64) -> Self { + let mut this = self.with_contract( + Self::COUNTER_CONTRACT_ADDRESS, + read_bytecode(COUNTER_CONTRACT_PATH), + ); + if initial_value != 0 { + let state = HashMap::from([(H256::zero(), H256::from_low_u64_be(initial_value))]); + this.inner + .get_mut(&Self::COUNTER_CONTRACT_ADDRESS) + .unwrap() + .state = Some(OverrideState::State(state)); + } + this + } + + pub fn with_infinite_loop_contract(self) -> Self { + self.with_contract( + Self::INFINITE_LOOP_CONTRACT_ADDRESS, + read_bytecode(INFINITE_LOOP_CONTRACT_PATH), + ) + } + + pub fn with_multicall3_contract(self) -> Self { + self.with_contract( + Self::MULTICALL3_ADDRESS, + read_bytecode(MULTICALL3_CONTRACT_PATH), + ) + } + + pub fn build(self) -> StateOverride { + StateOverride::new(self.inner) + } + + /// Applies these state overrides to Postgres storage, which is assumed to be empty (other than genesis data). + pub async fn apply(self, connection: &mut Connection<'_, Core>) { + let mut storage_logs = vec![]; + let mut factory_deps = HashMap::new(); + for (address, account) in self.inner { + if let Some(balance) = account.balance { + let balance_key = storage_key_for_eth_balance(&address); + storage_logs.push(StorageLog::new_write_log( + balance_key, + u256_to_h256(balance), + )); + } + if let Some(code) = account.code { + let code_hash = code.hash(); + storage_logs.extend([ + StorageLog::new_write_log(get_code_key(&address), code_hash), + StorageLog::new_write_log( + get_known_code_key(&code_hash), + H256::from_low_u64_be(1), + ), + ]); + factory_deps.insert(code_hash, code.into_bytes()); + } + if let Some(state) = account.state { + let state_slots = match state { + OverrideState::State(slots) | OverrideState::StateDiff(slots) => slots, + }; + let state_logs = state_slots.into_iter().map(|(key, value)| { + let key = StorageKey::new(AccountTreeId::new(address), key); + StorageLog::new_write_log(key, value) + }); + storage_logs.extend(state_logs); + } + } + + connection + .storage_logs_dal() + .append_storage_logs(L2BlockNumber(0), &storage_logs) + .await + .unwrap(); + connection + .factory_deps_dal() + .insert_factory_deps(L2BlockNumber(0), &factory_deps) + .await + .unwrap(); + } +} + +#[derive(Debug)] +pub(crate) struct Call3Value { + target: Address, + allow_failure: bool, + value: U256, + calldata: Vec, +} + +impl Call3Value { + pub fn allow_failure(mut self) -> Self { + self.allow_failure = true; + self + } + + fn to_token(&self) -> Token { + Token::Tuple(vec![ + Token::Address(self.target), + Token::Bool(self.allow_failure), + Token::Uint(self.value), + Token::Bytes(self.calldata.clone()), + ]) + } +} + +impl From for Call3Value { + fn from(req: CallRequest) -> Self { + Self { + target: 
+            allow_failure: false,
+            value: req.value.unwrap_or_default(),
+            calldata: req.data.unwrap_or_default().0,
+        }
+    }
+}
+
+impl From<L2Tx> for Call3Value {
+    fn from(tx: L2Tx) -> Self {
+        Self {
+            target: tx.recipient_account().unwrap(),
+            allow_failure: false,
+            value: tx.execute.value,
+            calldata: tx.execute.calldata,
+        }
+    }
+}
+
+#[derive(Debug)]
+pub(crate) struct Call3Result {
+    pub success: bool,
+    pub return_data: Vec<u8>,
+}
+
+impl Call3Result {
+    pub fn parse(raw: &[u8]) -> Vec<Self> {
+        let mut tokens = load_contract(MULTICALL3_CONTRACT_PATH)
+            .function("aggregate3Value")
+            .expect("no `aggregate3Value` function")
+            .decode_output(raw)
+            .expect("failed decoding `aggregate3Value` output");
+        assert_eq!(tokens.len(), 1, "Invalid output length");
+        let Token::Array(results) = tokens.pop().unwrap() else {
+            panic!("Invalid token type, expected an array");
+        };
+        results.into_iter().map(Self::parse_single).collect()
+    }
+
+    fn parse_single(token: Token) -> Self {
+        let Token::Tuple(mut tokens) = token else {
+            panic!("Invalid token type, expected a tuple");
+        };
+        assert_eq!(tokens.len(), 2);
+        let return_data = tokens.pop().unwrap().into_bytes().expect("expected bytes");
+        let success = tokens.pop().unwrap().into_bool().expect("expected bool");
+        Self {
+            success,
+            return_data,
+        }
+    }
+
+    pub fn as_u256(&self) -> U256 {
+        decode_u256_output(&self.return_data)
+    }
+}
+
+pub(crate) fn decode_u256_output(raw_output: &[u8]) -> U256 {
+    let mut tokens = ethabi::decode_whole(&[ethabi::ParamType::Uint(256)], raw_output)
+        .expect("unexpected return data");
+    assert_eq!(tokens.len(), 1);
+    tokens.pop().unwrap().into_uint().unwrap()
+}
+
+pub(crate) trait TestAccount {
+    fn create_transfer(&self, value: U256) -> L2Tx {
+        let fee = Fee {
+            gas_limit: 200_000.into(),
+            ..default_fee()
+        };
+        self.create_transfer_with_fee(value, fee)
+    }
+
+    fn query_base_token_balance(&self) -> CallRequest;
+
+    fn create_transfer_with_fee(&self, value: U256, fee: Fee) -> L2Tx;
+
+    fn create_load_test_tx(&self, params: LoadnextContractExecutionParams) -> L2Tx;
+
+    fn create_expensive_tx(&self, write_count: usize) -> L2Tx;
+
+    fn create_expensive_cleanup_tx(&self) -> L2Tx;
+
+    fn create_code_oracle_tx(&self, bytecode_hash: H256, expected_keccak_hash: H256) -> L2Tx;
+
+    fn create_counter_tx(&self, increment: U256, revert: bool) -> L2Tx;
+
+    fn query_counter_value(&self) -> CallRequest;
+
+    fn create_infinite_loop_tx(&self) -> L2Tx;
+
+    fn multicall_with_value(&self, value: U256, calls: &[Call3Value]) -> CallRequest;
+}
+
+impl TestAccount for K256PrivateKey {
+    fn create_transfer_with_fee(&self, value: U256, fee: Fee) -> L2Tx {
+        L2Tx::new_signed(
+            Some(Address::random()),
+            vec![],
+            Nonce(0),
+            fee,
+            value,
+            L2ChainId::default(),
+            self,
+            vec![],
+            PaymasterParams::default(),
+        )
+        .unwrap()
+    }
+
+    fn query_base_token_balance(&self) -> CallRequest {
+        let data = eth_contract()
+            .function("balanceOf")
+            .expect("No `balanceOf` function in contract")
+            .encode_input(&[Token::Uint(address_to_u256(&self.address()))])
+            .expect("failed encoding `balanceOf` function");
+        CallRequest {
+            from: Some(self.address()),
+            to: Some(L2_BASE_TOKEN_ADDRESS),
+            data: Some(data.into()),
+            ..CallRequest::default()
+        }
+    }
+
+    fn create_load_test_tx(&self, params: LoadnextContractExecutionParams) -> L2Tx {
+        L2Tx::new_signed(
+            Some(StateBuilder::LOAD_TEST_ADDRESS),
+            params.to_bytes(),
+            Nonce(0),
+            default_fee(),
+            0.into(),
+            L2ChainId::default(),
+            self,
+            if params.deploys > 0 {
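+                // Deploy-style load test runs spawn child contracts, so the loadnext factory
+                // deps presumably must ship with the transaction; runs without deploys omit them.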
get_loadnext_contract().factory_deps + } else { + vec![] + }, + PaymasterParams::default(), + ) + .unwrap() + } + + fn create_expensive_tx(&self, write_count: usize) -> L2Tx { + let calldata = load_contract(EXPENSIVE_CONTRACT_PATH) + .function("expensive") + .expect("no `expensive` function in contract") + .encode_input(&[Token::Uint(write_count.into())]) + .expect("failed encoding `expensive` function"); + L2Tx::new_signed( + Some(StateBuilder::EXPENSIVE_CONTRACT_ADDRESS), + calldata, + Nonce(0), + default_fee(), + 0.into(), + L2ChainId::default(), + self, + vec![], + PaymasterParams::default(), + ) + .unwrap() + } + + fn create_expensive_cleanup_tx(&self) -> L2Tx { + let calldata = load_contract(EXPENSIVE_CONTRACT_PATH) + .function("cleanUp") + .expect("no `cleanUp` function in contract") + .encode_input(&[]) + .expect("failed encoding `cleanUp` input"); + L2Tx::new_signed( + Some(StateBuilder::EXPENSIVE_CONTRACT_ADDRESS), + calldata, + Nonce(0), + default_fee(), + 0.into(), + L2ChainId::default(), + self, + vec![], + PaymasterParams::default(), + ) + .unwrap() + } + + fn create_code_oracle_tx(&self, bytecode_hash: H256, expected_keccak_hash: H256) -> L2Tx { + let calldata = load_contract(PRECOMPILES_CONTRACT_PATH) + .function("callCodeOracle") + .expect("no `callCodeOracle` function") + .encode_input(&[ + Token::FixedBytes(bytecode_hash.0.to_vec()), + Token::FixedBytes(expected_keccak_hash.0.to_vec()), + ]) + .expect("failed encoding `callCodeOracle` input"); + L2Tx::new_signed( + Some(StateBuilder::PRECOMPILES_CONTRACT_ADDRESS), + calldata, + Nonce(0), + default_fee(), + 0.into(), + L2ChainId::default(), + self, + vec![], + PaymasterParams::default(), + ) + .unwrap() + } + + fn create_counter_tx(&self, increment: U256, revert: bool) -> L2Tx { + let calldata = load_contract(COUNTER_CONTRACT_PATH) + .function("incrementWithRevert") + .expect("no `incrementWithRevert` function") + .encode_input(&[Token::Uint(increment), Token::Bool(revert)]) + .expect("failed encoding `incrementWithRevert` input"); + L2Tx::new_signed( + Some(StateBuilder::COUNTER_CONTRACT_ADDRESS), + calldata, + Nonce(0), + default_fee(), + 0.into(), + L2ChainId::default(), + self, + vec![], + PaymasterParams::default(), + ) + .unwrap() + } + + fn query_counter_value(&self) -> CallRequest { + let calldata = load_contract(COUNTER_CONTRACT_PATH) + .function("get") + .expect("no `get` function") + .encode_input(&[]) + .expect("failed encoding `get` input"); + CallRequest { + from: Some(self.address()), + to: Some(StateBuilder::COUNTER_CONTRACT_ADDRESS), + data: Some(calldata.into()), + ..CallRequest::default() + } + } + + fn create_infinite_loop_tx(&self) -> L2Tx { + let calldata = load_contract(INFINITE_LOOP_CONTRACT_PATH) + .function("infiniteLoop") + .expect("no `infiniteLoop` function") + .encode_input(&[]) + .expect("failed encoding `infiniteLoop` input"); + L2Tx::new_signed( + Some(StateBuilder::INFINITE_LOOP_CONTRACT_ADDRESS), + calldata, + Nonce(0), + default_fee(), + 0.into(), + L2ChainId::default(), + self, + vec![], + PaymasterParams::default(), + ) + .unwrap() + } + + fn multicall_with_value(&self, value: U256, calls: &[Call3Value]) -> CallRequest { + let call_tokens = calls.iter().map(Call3Value::to_token).collect(); + let calldata = load_contract(MULTICALL3_CONTRACT_PATH) + .function("aggregate3Value") + .expect("no `aggregate3Value` function") + .encode_input(&[Token::Array(call_tokens)]) + .expect("failed encoding `aggregate3Value` input"); + CallRequest { + from: Some(self.address()), + to: 
Some(StateBuilder::MULTICALL3_ADDRESS),
+            value: Some(value),
+            data: Some(calldata.into()),
+            ..CallRequest::default()
+        }
+    }
+}
diff --git a/core/node/api_server/src/tx_sender/gas_estimation.rs b/core/node/api_server/src/tx_sender/gas_estimation.rs
new file mode 100644
index 00000000000..f5e42875a3d
--- /dev/null
+++ b/core/node/api_server/src/tx_sender/gas_estimation.rs
@@ -0,0 +1,549 @@
+use std::{ops, time::Instant};
+
+use anyhow::Context;
+use zksync_dal::CoreDal;
+use zksync_multivm::{
+    interface::{TransactionExecutionMetrics, VmExecutionResultAndLogs},
+    utils::{
+        adjust_pubdata_price_for_tx, derive_base_fee_and_gas_per_pubdata, derive_overhead,
+        get_max_batch_gas_limit,
+    },
+};
+use zksync_system_constants::MAX_L2_TX_GAS_LIMIT;
+use zksync_types::{
+    api::state_override::StateOverride, fee::Fee, fee_model::BatchFeeInput, get_code_key,
+    ExecuteTransactionCommon, PackedEthSignature, ProtocolVersionId, Transaction, H256,
+};
+
+use super::{result::ApiCallResult, SubmitTxError, TxSender};
+use crate::execution_sandbox::{BlockArgs, SandboxAction, VmPermit, SANDBOX_METRICS};
+
+#[derive(Debug, Clone, Copy)]
+pub(crate) enum BinarySearchKind {
+    /// Full binary search.
+    Full,
+    /// Binary search with an optimized initial pivot.
+    Optimized,
+}
+
+impl BinarySearchKind {
+    pub(crate) fn new(optimize: bool) -> Self {
+        if optimize {
+            Self::Optimized
+        } else {
+            Self::Full
+        }
+    }
+}
+
+impl TxSender {
+    #[tracing::instrument(level = "debug", skip_all, fields(
+        initiator = ?tx.initiator_account(),
+        nonce = ?tx.nonce(),
+    ))]
+    pub async fn get_txs_fee_in_wei(
+        &self,
+        tx: Transaction,
+        estimated_fee_scale_factor: f64,
+        acceptable_overestimation: u64,
+        state_override: Option<StateOverride>,
+        kind: BinarySearchKind,
+    ) -> Result<Fee, SubmitTxError> {
+        let estimation_started_at = Instant::now();
+        let mut estimator = GasEstimator::new(self, tx, state_override).await?;
+        estimator.adjust_transaction_fee();
+
+        let initial_estimate = estimator.initialize().await?;
+        tracing::trace!(
+            "preparation took {:?}, starting binary search",
+            estimation_started_at.elapsed()
+        );
+
+        let optimized_lower_bound = initial_estimate.lower_gas_bound_without_overhead();
+        // Perform an initial search iteration with the pivot slightly greater than `gas_used` to account for the 63/64 rule for far calls etc.
+        // If the transaction succeeds, it will discard most of the search space at once.
+        let optimistic_gas_limit = initial_estimate.optimistic_gas_limit_without_overhead();
+
+        let (bounds, initial_pivot) = match kind {
+            BinarySearchKind::Full => {
+                let lower_bound = initial_estimate.gas_charged_for_pubdata;
+                let upper_bound = MAX_L2_TX_GAS_LIMIT + initial_estimate.gas_charged_for_pubdata;
+                (lower_bound..=upper_bound, None)
+            }
+            BinarySearchKind::Optimized => {
+                let lower_bound =
+                    optimized_lower_bound.unwrap_or(initial_estimate.gas_charged_for_pubdata);
+                let upper_bound = MAX_L2_TX_GAS_LIMIT + initial_estimate.gas_charged_for_pubdata;
+                let initial_pivot = optimistic_gas_limit.filter(|&gas| {
+                    // If `optimistic_gas_limit` is greater than the ordinary binary search pivot, there's no point in using it.
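+                    // (Illustrative numbers, not from the source:) with bounds 100_000..=80_000_000
+                    // the ordinary pivot is ~40M, so an optimistic limit of, say, 300_000 passes
+                    // this filter and a successful first step discards most of the range at once.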
+                    gas < (lower_bound + upper_bound) / 2
+                });
+                (lower_bound..=upper_bound, initial_pivot)
+            }
+        };
+
+        let (unscaled_gas_limit, iteration_count) =
+            Self::binary_search(&estimator, bounds, initial_pivot, acceptable_overestimation)
+                .await?;
+        // Metrics are intentionally reported regardless of the binary search mode, so that the collected stats can be used to adjust
+        // optimized binary search params (e.g., the initial pivot multiplier).
+        if let Some(lower_bound) = optimized_lower_bound {
+            let tx_overhead = estimator.tx_overhead(unscaled_gas_limit);
+            let diff = (unscaled_gas_limit as f64 - lower_bound as f64)
+                / (unscaled_gas_limit + tx_overhead) as f64;
+            SANDBOX_METRICS
+                .estimate_gas_lower_bound_relative_diff
+                .observe(diff);
+        }
+        if let Some(optimistic_gas_limit) = optimistic_gas_limit {
+            let tx_overhead = estimator.tx_overhead(unscaled_gas_limit);
+            let diff = (optimistic_gas_limit as f64 - unscaled_gas_limit as f64)
+                / (unscaled_gas_limit + tx_overhead) as f64;
+            SANDBOX_METRICS
+                .estimate_gas_optimistic_gas_limit_relative_diff
+                .observe(diff);
+        }
+        tracing::debug!(
+            optimized_lower_bound,
+            optimistic_gas_limit,
+            unscaled_gas_limit,
+            binary_search = ?kind,
+            iteration_count,
+            "Finished estimating gas limit for transaction"
+        );
+
+        let suggested_gas_limit = (unscaled_gas_limit as f64 * estimated_fee_scale_factor) as u64;
+        estimator
+            .finalize(suggested_gas_limit, estimated_fee_scale_factor)
+            .await
+    }
+
+    async fn binary_search(
+        estimator: &GasEstimator<'_>,
+        bounds: ops::RangeInclusive<u64>,
+        initial_pivot: Option<u64>,
+        acceptable_overestimation: u64,
+    ) -> Result<(u64, usize), SubmitTxError> {
+        let mut number_of_iterations = 0;
+        let mut lower_bound = *bounds.start();
+        let mut upper_bound = *bounds.end();
+
+        if let Some(pivot) = initial_pivot {
+            let iteration_started_at = Instant::now();
+            let (result, _) = estimator
+                .step(pivot)
+                .await
+                .context("estimate_gas step failed")?;
+            Self::adjust_search_bounds(&mut lower_bound, &mut upper_bound, pivot, &result);
+
+            tracing::trace!(
+                "iteration {number_of_iterations} took {:?}. lower_bound: {lower_bound}, upper_bound: {upper_bound}",
+                iteration_started_at.elapsed()
+            );
+            number_of_iterations += 1;
+        }
+
+        // We are using binary search to find the minimal value of `gas_limit` under which the transaction succeeds.
+        while lower_bound + acceptable_overestimation < upper_bound {
+            let mid = (lower_bound + upper_bound) / 2;
+            // There is no way to distinguish between errors due to running out of gas
+            // and normal execution errors, so we just hope that increasing the
+            // gas limit will make the transaction successful.
+            let iteration_started_at = Instant::now();
+            let (result, _) = estimator
+                .step(mid)
+                .await
+                .context("estimate_gas step failed")?;
+            Self::adjust_search_bounds(&mut lower_bound, &mut upper_bound, mid, &result);
+
+            tracing::trace!(
+                "iteration {number_of_iterations} took {:?}. lower_bound: {lower_bound}, upper_bound: {upper_bound}",
+                iteration_started_at.elapsed()
+            );
+            number_of_iterations += 1;
+        }
+        SANDBOX_METRICS
+            .estimate_gas_binary_search_iterations
+            .observe(number_of_iterations);
+        Ok((upper_bound, number_of_iterations))
+    }
+
+    async fn ensure_sufficient_balance(
+        &self,
+        tx: &Transaction,
+        state_override: Option<&StateOverride>,
+    ) -> Result<(), SubmitTxError> {
+        let hashed_key = get_code_key(&tx.initiator_account());
+        // If the default account does not have enough funds for transferring `tx.value`, without taking into account the fee,
+        // there is no point in estimating the fee.
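+        // E.g. (illustrative): estimating a 10 ETH transfer from a fresh EOA holding 1 ETH bails
+        // out here with `InsufficientFundsForTransfer`, skipping the binary search entirely.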
+        let account_code_hash = self
+            .acquire_replica_connection()
+            .await?
+            .storage_web3_dal()
+            .get_value(&hashed_key)
+            .await
+            .with_context(|| {
+                format!(
+                    "failed getting code hash for account {:?}",
+                    tx.initiator_account()
+                )
+            })?;
+
+        if !tx.is_l1() && account_code_hash == H256::zero() {
+            let balance = match state_override
+                .and_then(|overrides| overrides.get(&tx.initiator_account()))
+                .and_then(|account| account.balance)
+            {
+                Some(balance) => balance,
+                None => self.get_balance(&tx.initiator_account()).await?,
+            };
+
+            if tx.execute.value > balance {
+                tracing::info!(
+                    "fee estimation failed on validation step.
+                    account: {} does not have enough funds for transferring tx.value: {}.",
+                    tx.initiator_account(),
+                    tx.execute.value
+                );
+                return Err(SubmitTxError::InsufficientFundsForTransfer);
+            }
+        }
+        Ok(())
+    }
+
+    fn adjust_search_bounds(
+        lower_bound: &mut u64,
+        upper_bound: &mut u64,
+        pivot: u64,
+        result: &VmExecutionResultAndLogs,
+    ) {
+        // For now, we don't discern between "out of gas" and other failure reasons since it's difficult in the general case.
+        if result.result.is_failed() {
+            *lower_bound = pivot + 1;
+        } else {
+            *upper_bound = pivot;
+        }
+    }
+}
+
+/// Initial gas estimate with effectively infinite gas limit.
+#[derive(Debug)]
+pub(super) struct InitialGasEstimate {
+    /// Set to `None` if not estimated (e.g., for L1 transactions), or if the VM returned bogus refund stats.
+    pub total_gas_charged: Option<u64>,
+    /// Set to `None` if not estimated (e.g., for L1 transactions).
+    pub computational_gas_used: Option<u64>,
+    /// Operator-defined overhead for the estimated transaction. For recent VM versions, the overhead only depends
+    /// on the transaction encoding size.
+    pub operator_overhead: u64,
+    pub gas_charged_for_pubdata: u64,
+}
+
+impl InitialGasEstimate {
+    /// Returns the lower gas limit bound, i.e., a gas limit that is guaranteed to be lower than the minimum passing gas limit,
+    /// but is reasonably close to it.
+    ///
+    /// # Background
+    ///
+    /// Total gas charged for a transaction consists of:
+    ///
+    /// - Operator-set overhead (`self.operator_overhead`)
+    /// - Intrinsic bootloader overhead
+    /// - Gas used during validation / execution (`self.computational_gas_used`)
+    /// - Gas charged for pubdata at the end of execution (`self.gas_charged_for_pubdata`)
+    ///
+    /// We add `operator_overhead` manually to the binary search argument at each `step()` because it depends on the gas limit in the general case,
+    /// so the returned value corresponds to the other 3 terms.
+    ///
+    /// If the value cannot be computed, it is set to `None`.
+    pub fn lower_gas_bound_without_overhead(&self) -> Option<u64> {
+        // The two ways to compute the used gas (by `computational_gas_used` and by the charged gas) don't return identical values
+        // due to various factors:
+        //
+        // - `computational_gas_used` tracks gas usage for the entire VM execution, while the transaction initiator (or a paymaster) is only charged
+        //   for a part of it.
+        // - The bootloader is somewhat lenient in the case pubdata costs are approximately equal to the amount of gas left
+        //   (i.e., for some transaction types, such as base token transfers, there exists an entire range of gas limit values
+        //   which all lead to a successful execution with 0 refund).
+        //
+        // We use the lesser of these two estimates as the lower bound.
+        let mut total_gas_bound = self.computational_gas_used? + self.gas_charged_for_pubdata;
+        if let Some(gas_charged) = self.total_gas_charged {
+            total_gas_bound = total_gas_bound.min(gas_charged);
+        }
+        total_gas_bound.checked_sub(self.operator_overhead)
+    }
+
+    /// Returns a heuristically chosen gas limit without operator overhead that should be sufficient for most transactions.
+    /// This value is reasonably close to the lower gas limit bound, so that when used as the initial binary search pivot,
+    /// it will discard most of the search space in the average case.
+    pub fn optimistic_gas_limit_without_overhead(&self) -> Option<u64> {
+        let gas_charged_without_overhead = self
+            .total_gas_charged?
+            .checked_sub(self.operator_overhead)?;
+        // 21/20 is an empirical multiplier. It is higher than what empirically suffices for some common transactions;
+        // one can argue that using a 64/63 multiplier would be more accurate due to the 63/64 rule for far calls.
+        // However, far calls are not the only source of gas overhead in Era; another is decommit operations.
+        Some(gas_charged_without_overhead * 21 / 20)
+    }
+}
+
+/// Encapsulates the gas estimation process for a specific transaction.
+///
+/// Public for testing purposes.
+#[derive(Debug)]
+pub(super) struct GasEstimator<'a> {
+    sender: &'a TxSender,
+    transaction: Transaction,
+    state_override: Option<StateOverride>,
+    vm_permit: VmPermit,
+    fee_input: BatchFeeInput,
+    base_fee: u64,
+    gas_per_pubdata_byte: u64,
+    max_gas_limit: u64,
+    block_args: BlockArgs,
+    protocol_version: ProtocolVersionId,
+}
+
+impl<'a> GasEstimator<'a> {
+    pub(super) async fn new(
+        sender: &'a TxSender,
+        mut transaction: Transaction,
+        state_override: Option<StateOverride>,
+    ) -> Result<Self, SubmitTxError> {
+        let mut connection = sender.acquire_replica_connection().await?;
+        let block_args = BlockArgs::pending(&mut connection).await?;
+        let protocol_version = connection
+            .blocks_dal()
+            .pending_protocol_version()
+            .await
+            .context("failed getting pending protocol version")?;
+        drop(connection);
+
+        let max_gas_limit = get_max_batch_gas_limit(protocol_version.into());
+        let fee_input = adjust_pubdata_price_for_tx(
+            sender.scaled_batch_fee_input().await?,
+            transaction.gas_per_pubdata_byte_limit(),
+            // We do not have to adjust the params to the `gasPrice` of the transaction, since
+            // its gas price will be amended later on to suit the `fee_input`
+            None,
+            protocol_version.into(),
+        );
+        let (base_fee, gas_per_pubdata_byte) =
+            derive_base_fee_and_gas_per_pubdata(fee_input, protocol_version.into());
+
+        sender
+            .ensure_sufficient_balance(&transaction, state_override.as_ref())
+            .await?;
+
+        // For L2 transactions we need a properly formatted signature
+        if let ExecuteTransactionCommon::L2(l2_common_data) = &mut transaction.common_data {
+            if l2_common_data.signature.is_empty() {
+                l2_common_data.signature = PackedEthSignature::default().serialize_packed().into();
+            }
+        }
+
+        // Acquire the vm token for the whole duration of the binary search.
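+        // Presumably, holding a single permit for the entire search also avoids re-queueing
+        // behind other API requests between iterations, on top of capping VM concurrency.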
+        let vm_permit = sender.0.vm_concurrency_limiter.acquire().await;
+        let vm_permit = vm_permit.ok_or(SubmitTxError::ServerShuttingDown)?;
+
+        Ok(Self {
+            sender,
+            transaction,
+            state_override,
+            vm_permit,
+            fee_input,
+            base_fee,
+            gas_per_pubdata_byte,
+            max_gas_limit,
+            block_args,
+            protocol_version,
+        })
+    }
+
+    pub(super) fn adjust_transaction_fee(&mut self) {
+        match &mut self.transaction.common_data {
+            ExecuteTransactionCommon::L2(common_data) => {
+                common_data.fee.max_fee_per_gas = self.base_fee.into();
+                common_data.fee.max_priority_fee_per_gas = self.base_fee.into();
+            }
+            ExecuteTransactionCommon::L1(common_data) => {
+                common_data.max_fee_per_gas = self.base_fee.into();
+            }
+            ExecuteTransactionCommon::ProtocolUpgrade(common_data) => {
+                common_data.max_fee_per_gas = self.base_fee.into();
+            }
+        }
+    }
+
+    pub(super) async fn initialize(&self) -> Result<InitialGasEstimate, SubmitTxError> {
+        let operator_overhead = self.tx_overhead(self.max_gas_limit);
+
+        // When the pubdata cost grows very high, the total gas limit required may become very high as well. If
+        // we do binary search over any possible gas limit naively, we may end up with a very high number of iterations,
+        // which affects performance.
+        //
+        // To optimize for this case, we first calculate the amount of gas needed to cover for the pubdata. After that, we
+        // need to do a smaller binary search that is focused on computational gas limit only.
+        if self.transaction.is_l1() {
+            // For L1 transactions, the pubdata is priced in such a way that the maximal computational
+            // gas limit should be enough to cover for the pubdata as well, so no additional gas is provided there.
+            Ok(InitialGasEstimate {
+                total_gas_charged: None,
+                computational_gas_used: None,
+                operator_overhead,
+                gas_charged_for_pubdata: 0,
+            })
+        } else {
+            // For L2 transactions, we estimate the amount of gas needed to cover for the pubdata by creating a transaction with infinite gas limit,
+            // and checking how much pubdata it used.
+
+            let (result, _) = self
+                .unadjusted_step(self.max_gas_limit)
+                .await
+                .context("estimate_gas step failed")?;
+            // If the transaction has failed with such a large gas limit, we return an API error here right away,
+            // since the inferred gas bounds would be unreliable in this case.
+            result.check_api_call_result()?;
+
+            // It is assumed that there is no overflow here
+            let gas_charged_for_pubdata =
+                u64::from(result.statistics.pubdata_published) * self.gas_per_pubdata_byte;
+
+            let total_gas_charged = self.max_gas_limit.checked_sub(result.refunds.gas_refunded);
+            Ok(InitialGasEstimate {
+                total_gas_charged,
+                computational_gas_used: Some(result.statistics.computational_gas_used.into()),
+                operator_overhead,
+                gas_charged_for_pubdata,
+            })
+        }
+    }
+
+    /// Derives operator overhead for a transaction given its gas limit.
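+    ///
+    /// As noted on [`InitialGasEstimate`], for recent VM versions the overhead only depends on
+    /// the transaction encoding size; it is still recomputed per candidate limit because it
+    /// depends on the gas limit in the general case.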
+    fn tx_overhead(&self, tx_gas_limit: u64) -> u64 {
+        derive_overhead(
+            tx_gas_limit,
+            self.gas_per_pubdata_byte as u32,
+            self.transaction.encoding_len(),
+            self.transaction.tx_format() as u8,
+            self.protocol_version.into(),
+        )
+        .into()
+    }
+
+    async fn step(
+        &self,
+        tx_gas_limit: u64,
+    ) -> anyhow::Result<(VmExecutionResultAndLogs, TransactionExecutionMetrics)> {
+        let gas_limit_with_overhead = tx_gas_limit + self.tx_overhead(tx_gas_limit);
+        // We need to ensure that we never use a gas limit that is higher than the maximum allowed
+        let forced_gas_limit =
+            gas_limit_with_overhead.min(get_max_batch_gas_limit(self.protocol_version.into()));
+        self.unadjusted_step(forced_gas_limit).await
+    }
+
+    pub(super) async fn unadjusted_step(
+        &self,
+        forced_gas_limit: u64,
+    ) -> anyhow::Result<(VmExecutionResultAndLogs, TransactionExecutionMetrics)> {
+        let mut tx = self.transaction.clone();
+        match &mut tx.common_data {
+            ExecuteTransactionCommon::L1(l1_common_data) => {
+                l1_common_data.gas_limit = forced_gas_limit.into();
+                let required_funds =
+                    l1_common_data.gas_limit * l1_common_data.max_fee_per_gas + tx.execute.value;
+                l1_common_data.to_mint = required_funds;
+            }
+            ExecuteTransactionCommon::L2(l2_common_data) => {
+                l2_common_data.fee.gas_limit = forced_gas_limit.into();
+            }
+            ExecuteTransactionCommon::ProtocolUpgrade(common_data) => {
+                common_data.gas_limit = forced_gas_limit.into();
+                let required_funds =
+                    common_data.gas_limit * common_data.max_fee_per_gas + tx.execute.value;
+                common_data.to_mint = required_funds;
+            }
+        }
+
+        let action = SandboxAction::GasEstimation {
+            tx,
+            fee_input: self.fee_input,
+            base_fee: self.base_fee,
+        };
+        let connection = self.sender.acquire_replica_connection().await?;
+        let executor = &self.sender.0.executor;
+        let execution_output = executor
+            .execute_in_sandbox(
+                self.vm_permit.clone(),
+                connection,
+                action,
+                &self.block_args,
+                self.state_override.clone(),
+            )
+            .await?;
+        Ok((execution_output.vm, execution_output.metrics))
+    }
+
+    async fn finalize(
+        self,
+        suggested_gas_limit: u64,
+        estimated_fee_scale_factor: f64,
+    ) -> Result<Fee, SubmitTxError> {
+        let (result, tx_metrics) = self
+            .step(suggested_gas_limit)
+            .await
+            .context("final estimate_gas step failed")?;
+        result.into_api_call_result()?;
+        self.sender
+            .ensure_tx_executable(&self.transaction, &tx_metrics, false)?;
+
+        // Now, we need to calculate the final overhead for the transaction.
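+        // E.g. (illustrative numbers): an unscaled search result of 500_000 gas with a scale
+        // factor of 1.2 yields `suggested_gas_limit` = 600_000; with an overhead of 10_000,
+        // the returned fee carries `gas_limit` = 610_000.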
+ let overhead = derive_overhead( + suggested_gas_limit, + self.gas_per_pubdata_byte as u32, + self.transaction.encoding_len(), + self.transaction.tx_format() as u8, + self.protocol_version.into(), + ); + + let full_gas_limit = match suggested_gas_limit.overflowing_add(overhead.into()) { + (value, false) => { + if value > self.max_gas_limit { + return Err(SubmitTxError::ExecutionReverted( + "exceeds block gas limit".to_string(), + vec![], + )); + } + + value + } + (_, true) => { + return Err(SubmitTxError::ExecutionReverted( + "exceeds block gas limit".to_string(), + vec![], + )); + } + }; + + let gas_for_pubdata = u64::from(tx_metrics.pubdata_published) * self.gas_per_pubdata_byte; + let estimated_gas_for_pubdata = + (gas_for_pubdata as f64 * estimated_fee_scale_factor) as u64; + + tracing::debug!( + "gas for pubdata: {estimated_gas_for_pubdata}, computational gas: {comp_gas}, overhead gas: {overhead} \ + (with params base_fee: {base_fee}, gas_per_pubdata_byte: {gas_per_pubdata_byte}) \ + estimated_fee_scale_factor: {estimated_fee_scale_factor}", + comp_gas = suggested_gas_limit - estimated_gas_for_pubdata, + base_fee = self.base_fee, + gas_per_pubdata_byte = self.gas_per_pubdata_byte + ); + + Ok(Fee { + max_fee_per_gas: self.base_fee.into(), + max_priority_fee_per_gas: 0u32.into(), + gas_limit: full_gas_limit.into(), + gas_per_pubdata_limit: self.gas_per_pubdata_byte.into(), + }) + } +} diff --git a/core/node/api_server/src/tx_sender/mod.rs b/core/node/api_server/src/tx_sender/mod.rs index 44eaae2e3ee..ad8e38ef3cc 100644 --- a/core/node/api_server/src/tx_sender/mod.rs +++ b/core/node/api_server/src/tx_sender/mod.rs @@ -1,24 +1,16 @@ //! Helper module to submit transactions into the ZKsync Network. -use std::{sync::Arc, time::Instant}; +use std::sync::Arc; use anyhow::Context as _; use tokio::sync::RwLock; use zksync_config::configs::{api::Web3JsonRpcConfig, chain::StateKeeperConfig}; -use zksync_contracts::BaseSystemContracts; use zksync_dal::{ transactions_dal::L2TxSubmissionResult, Connection, ConnectionPool, Core, CoreDal, }; use zksync_multivm::{ - interface::{ - OneshotTracingParams, TransactionExecutionMetrics, TxExecutionArgs, TxExecutionMode, - VmExecutionResultAndLogs, - }, - utils::{ - adjust_pubdata_price_for_tx, derive_base_fee_and_gas_per_pubdata, derive_overhead, - get_max_batch_gas_limit, - }, - vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, + interface::{OneshotTracingParams, TransactionExecutionMetrics, VmExecutionResultAndLogs}, + utils::{derive_base_fee_and_gas_per_pubdata, get_max_batch_gas_limit}, }; use zksync_node_fee_model::{ApiFeeInputProvider, BatchFeeModelInputProvider}; use zksync_state::PostgresStorageCaches; @@ -28,28 +20,25 @@ use zksync_state_keeper::{ }; use zksync_types::{ api::state_override::StateOverride, - fee::Fee, fee_model::BatchFeeInput, - get_code_key, get_intrinsic_constants, + get_intrinsic_constants, l2::{error::TxCheckError::TxDuplication, L2Tx}, transaction_request::CallOverrides, utils::storage_key_for_eth_balance, - vm::VmVersion, - AccountTreeId, Address, ExecuteTransactionCommon, L2ChainId, Nonce, PackedEthSignature, - ProtocolVersionId, Transaction, H160, H256, MAX_L2_TX_GAS_LIMIT, MAX_NEW_FACTORY_DEPS, U256, + AccountTreeId, Address, L2ChainId, Nonce, ProtocolVersionId, Transaction, H160, H256, + MAX_NEW_FACTORY_DEPS, U256, }; use zksync_utils::h256_to_u256; +use zksync_vm_executor::oneshot::{CallOrExecute, EstimateGas, OneshotEnvParameters}; -pub(super) use self::result::SubmitTxError; -use 
self::{master_pool_sink::MasterPoolSink, tx_sink::TxSink}; -use crate::{ - execution_sandbox::{ - BlockArgs, SubmitTxStage, TransactionExecutor, TxSetupArgs, VmConcurrencyBarrier, - VmConcurrencyLimiter, VmPermit, SANDBOX_METRICS, - }, - tx_sender::result::ApiCallResult, +pub(super) use self::{gas_estimation::BinarySearchKind, result::SubmitTxError}; +use self::{master_pool_sink::MasterPoolSink, result::ApiCallResult, tx_sink::TxSink}; +use crate::execution_sandbox::{ + BlockArgs, SandboxAction, SandboxExecutor, SubmitTxStage, VmConcurrencyBarrier, + VmConcurrencyLimiter, SANDBOX_METRICS, }; +mod gas_estimation; pub mod master_pool_sink; pub mod proxy; mod result; @@ -80,133 +69,55 @@ pub async fn build_tx_sender( let batch_fee_input_provider = ApiFeeInputProvider::new(batch_fee_model_input_provider, replica_pool); - + let executor_options = SandboxExecutorOptions::new( + tx_sender_config.chain_id, + AccountTreeId::new(tx_sender_config.fee_account_addr), + tx_sender_config.validation_computational_gas_limit, + ) + .await?; let tx_sender = tx_sender_builder.build( Arc::new(batch_fee_input_provider), Arc::new(vm_concurrency_limiter), - ApiContracts::load_from_disk().await?, + executor_options, storage_caches, ); Ok((tx_sender, vm_barrier)) } -#[derive(Debug, Clone)] -pub struct MultiVMBaseSystemContracts { - /// Contracts to be used for pre-virtual-blocks protocol versions. - pub(crate) pre_virtual_blocks: BaseSystemContracts, - /// Contracts to be used for post-virtual-blocks protocol versions. - pub(crate) post_virtual_blocks: BaseSystemContracts, - /// Contracts to be used for protocol versions after virtual block upgrade fix. - pub(crate) post_virtual_blocks_finish_upgrade_fix: BaseSystemContracts, - /// Contracts to be used for post-boojum protocol versions. - pub(crate) post_boojum: BaseSystemContracts, - /// Contracts to be used after the allow-list removal upgrade - pub(crate) post_allowlist_removal: BaseSystemContracts, - /// Contracts to be used after the 1.4.1 upgrade - pub(crate) post_1_4_1: BaseSystemContracts, - /// Contracts to be used after the 1.4.2 upgrade - pub(crate) post_1_4_2: BaseSystemContracts, - /// Contracts to be used during the `v23` upgrade. This upgrade was done on an internal staging environment only. 
- pub(crate) vm_1_5_0_small_memory: BaseSystemContracts, - /// Contracts to be used after the 1.5.0 upgrade - pub(crate) vm_1_5_0_increased_memory: BaseSystemContracts, -} - -impl MultiVMBaseSystemContracts { - pub fn get_by_protocol_version(self, version: ProtocolVersionId) -> BaseSystemContracts { - match version { - ProtocolVersionId::Version0 - | ProtocolVersionId::Version1 - | ProtocolVersionId::Version2 - | ProtocolVersionId::Version3 - | ProtocolVersionId::Version4 - | ProtocolVersionId::Version5 - | ProtocolVersionId::Version6 - | ProtocolVersionId::Version7 - | ProtocolVersionId::Version8 - | ProtocolVersionId::Version9 - | ProtocolVersionId::Version10 - | ProtocolVersionId::Version11 - | ProtocolVersionId::Version12 => self.pre_virtual_blocks, - ProtocolVersionId::Version13 => self.post_virtual_blocks, - ProtocolVersionId::Version14 - | ProtocolVersionId::Version15 - | ProtocolVersionId::Version16 - | ProtocolVersionId::Version17 => self.post_virtual_blocks_finish_upgrade_fix, - ProtocolVersionId::Version18 => self.post_boojum, - ProtocolVersionId::Version19 => self.post_allowlist_removal, - ProtocolVersionId::Version20 => self.post_1_4_1, - ProtocolVersionId::Version21 | ProtocolVersionId::Version22 => self.post_1_4_2, - ProtocolVersionId::Version23 => self.vm_1_5_0_small_memory, - ProtocolVersionId::Version24 | ProtocolVersionId::Version25 => { - self.vm_1_5_0_increased_memory - } - } - } - - pub fn load_estimate_gas_blocking() -> Self { - Self { - pre_virtual_blocks: BaseSystemContracts::estimate_gas_pre_virtual_blocks(), - post_virtual_blocks: BaseSystemContracts::estimate_gas_post_virtual_blocks(), - post_virtual_blocks_finish_upgrade_fix: - BaseSystemContracts::estimate_gas_post_virtual_blocks_finish_upgrade_fix(), - post_boojum: BaseSystemContracts::estimate_gas_post_boojum(), - post_allowlist_removal: BaseSystemContracts::estimate_gas_post_allowlist_removal(), - post_1_4_1: BaseSystemContracts::estimate_gas_post_1_4_1(), - post_1_4_2: BaseSystemContracts::estimate_gas_post_1_4_2(), - vm_1_5_0_small_memory: BaseSystemContracts::estimate_gas_1_5_0_small_memory(), - vm_1_5_0_increased_memory: - BaseSystemContracts::estimate_gas_post_1_5_0_increased_memory(), - } - } - - pub fn load_eth_call_blocking() -> Self { - Self { - pre_virtual_blocks: BaseSystemContracts::playground_pre_virtual_blocks(), - post_virtual_blocks: BaseSystemContracts::playground_post_virtual_blocks(), - post_virtual_blocks_finish_upgrade_fix: - BaseSystemContracts::playground_post_virtual_blocks_finish_upgrade_fix(), - post_boojum: BaseSystemContracts::playground_post_boojum(), - post_allowlist_removal: BaseSystemContracts::playground_post_allowlist_removal(), - post_1_4_1: BaseSystemContracts::playground_post_1_4_1(), - post_1_4_2: BaseSystemContracts::playground_post_1_4_2(), - vm_1_5_0_small_memory: BaseSystemContracts::playground_1_5_0_small_memory(), - vm_1_5_0_increased_memory: BaseSystemContracts::playground_post_1_5_0_increased_memory( - ), - } - } -} - -/// Smart contracts to be used in the API sandbox requests, e.g. for estimating gas and -/// performing `eth_call` requests. -#[derive(Debug, Clone)] -pub struct ApiContracts { - /// Contracts to be used when estimating gas. - /// These contracts (mainly, bootloader) normally should be tuned to provide accurate - /// execution metrics. - pub(crate) estimate_gas: MultiVMBaseSystemContracts, - /// Contracts to be used when performing `eth_call` requests. 
-    /// These contracts (mainly, bootloader) normally should be tuned to provide better UX
-    /// experience (e.g. revert messages).
-    pub(crate) eth_call: MultiVMBaseSystemContracts,
+/// Oneshot executor options used by the API server sandbox.
+#[derive(Debug)]
+pub struct SandboxExecutorOptions {
+    /// Env parameters to be used when estimating gas.
+    pub(crate) estimate_gas: OneshotEnvParameters<EstimateGas>,
+    /// Env parameters to be used when performing `eth_call` requests.
+    pub(crate) eth_call: OneshotEnvParameters<CallOrExecute>,
 }
 
-impl ApiContracts {
+impl SandboxExecutorOptions {
     /// Loads the contracts from the local file system.
     /// This method is *currently* preferred to be used in all contexts,
     /// given that there is no way to fetch "playground" contracts from the main node.
-    pub async fn load_from_disk() -> anyhow::Result<Self> {
-        tokio::task::spawn_blocking(Self::load_from_disk_blocking)
-            .await
-            .context("loading `ApiContracts` panicked")
+    pub async fn new(
+        chain_id: L2ChainId,
+        operator_account: AccountTreeId,
+        validation_computational_gas_limit: u32,
+    ) -> anyhow::Result<Self> {
+        Ok(Self {
+            estimate_gas: OneshotEnvParameters::for_gas_estimation(chain_id, operator_account)
+                .await?,
+            eth_call: OneshotEnvParameters::for_execution(
+                chain_id,
+                operator_account,
+                validation_computational_gas_limit,
+            )
+            .await?,
+        })
     }
 
-    /// Blocking version of [`Self::load_from_disk()`].
-    pub fn load_from_disk_blocking() -> Self {
-        Self {
-            estimate_gas: MultiVMBaseSystemContracts::load_estimate_gas_blocking(),
-            eth_call: MultiVMBaseSystemContracts::load_eth_call_blocking(),
-        }
+    pub(crate) async fn mock() -> Self {
+        Self::new(L2ChainId::default(), AccountTreeId::default(), u32::MAX)
+            .await
+            .unwrap()
     }
 }
@@ -254,7 +165,7 @@ impl TxSenderBuilder {
         self,
         batch_fee_input_provider: Arc<dyn BatchFeeModelInputProvider>,
         vm_concurrency_limiter: Arc<VmConcurrencyLimiter>,
-        api_contracts: ApiContracts,
+        executor_options: SandboxExecutorOptions,
         storage_caches: PostgresStorageCaches,
     ) -> TxSender {
         // Use noop sealer if no sealer was explicitly provided.
@@ -267,18 +178,21 @@ impl TxSenderBuilder {
             .config
             .vm_execution_cache_misses_limit
             .unwrap_or(usize::MAX);
+        let executor = SandboxExecutor::real(
+            executor_options,
+            storage_caches,
+            missed_storage_invocation_limit,
+        );
         TxSender(Arc::new(TxSenderInner {
             sender_config: self.config,
             tx_sink: self.tx_sink,
             replica_connection_pool: self.replica_connection_pool,
             batch_fee_input_provider,
-            api_contracts,
             vm_concurrency_limiter,
-            storage_caches,
             whitelisted_tokens_for_aa_cache,
             sealer,
-            executor: TransactionExecutor::real(missed_storage_invocation_limit),
+            executor,
        }))
     }
 }
@@ -327,16 +241,13 @@ pub struct TxSenderInner {
     pub replica_connection_pool: ConnectionPool<Core>,
     // Used to keep track of gas prices for the fee ticker.
     pub batch_fee_input_provider: Arc<dyn BatchFeeModelInputProvider>,
-    pub(super) api_contracts: ApiContracts,
     /// Used to limit the amount of VMs that can be executed simultaneously.
     pub(super) vm_concurrency_limiter: Arc<VmConcurrencyLimiter>,
-    // Caches used in VM execution.
-    storage_caches: PostgresStorageCaches,
     // Cache for white-listed tokens.
     pub(super) whitelisted_tokens_for_aa_cache: Arc<RwLock<Vec<Address>>>,
     /// Batch sealer used to check whether transaction can be executed by the sequencer.
     pub(super) sealer: Arc<dyn ConditionalSealer>,
-    pub(super) executor: TransactionExecutor,
+    pub(super) executor: SandboxExecutor,
 }
 
 #[derive(Clone)]
@@ -353,10 +264,6 @@ impl TxSender {
         Arc::clone(&self.0.vm_concurrency_limiter)
     }
 
-    pub(crate) fn storage_caches(&self) -> PostgresStorageCaches {
-        self.0.storage_caches.clone()
-    }
-
     pub(crate) async fn read_whitelisted_tokens_for_aa_cache(&self) -> Vec<Address> {
         self.0.whitelisted_tokens_for_aa_cache.read().await.clone()
     }
@@ -383,8 +290,20 @@ impl TxSender {
         stage_latency.observe();
 
         let stage_latency = SANDBOX_METRICS.start_tx_submit_stage(tx_hash, SubmitTxStage::DryRun);
-        let setup_args = self.call_args(&tx, None).await?;
+        // **Important.** For the main node, this method acquires a DB connection inside `get_batch_fee_input()`.
+        // Thus, it must not be called if you're holding a DB connection already.
+        let fee_input = self
+            .0
+            .batch_fee_input_provider
+            .get_batch_fee_input()
+            .await
+            .context("cannot get batch fee input")?;
+
         let vm_permit = self.0.vm_concurrency_limiter.acquire().await;
+        let action = SandboxAction::Execution {
+            fee_input,
+            tx: tx.clone(),
+        };
         let vm_permit = vm_permit.ok_or(SubmitTxError::ServerShuttingDown)?;
         let mut connection = self.acquire_replica_connection().await?;
         let block_args = BlockArgs::pending(&mut connection).await?;
@@ -392,15 +311,7 @@ impl TxSender {
         let execution_output = self
             .0
             .executor
-            .execute_tx_in_sandbox(
-                vm_permit.clone(),
-                setup_args.clone(),
-                TxExecutionArgs::for_validation(tx.clone()),
-                connection,
-                block_args,
-                None,
-                OneshotTracingParams::default(),
-            )
+            .execute_in_sandbox(vm_permit.clone(), connection, action, &block_args, None)
             .await?;
         tracing::info!(
             "Submit tx {tx_hash:?} with execution metrics {:?}",
@@ -411,17 +322,16 @@ impl TxSender {
         let stage_latency =
             SANDBOX_METRICS.start_tx_submit_stage(tx_hash, SubmitTxStage::VerifyExecute);
         let connection = self.acquire_replica_connection().await?;
-        let computational_gas_limit = self.0.sender_config.validation_computational_gas_limit;
         let validation_result = self
             .0
            .executor
             .validate_tx_in_sandbox(
-                connection,
                 vm_permit,
+                connection,
                 tx.clone(),
-                setup_args,
                 block_args,
-                computational_gas_limit,
+                fee_input,
+                &self.read_whitelisted_tokens_for_aa_cache().await,
             )
             .await;
         stage_latency.observe();
@@ -473,43 +383,6 @@ impl TxSender {
         }
     }
 
-    /// **Important.** For the main node, this method acquires a DB connection inside `get_batch_fee_input()`.
-    /// Thus, you shouldn't call it if you're holding a DB connection already.
-    async fn call_args(
-        &self,
-        tx: &L2Tx,
-        call_overrides: Option<&CallOverrides>,
-    ) -> anyhow::Result<TxSetupArgs> {
-        let fee_input = self
-            .0
-            .batch_fee_input_provider
-            .get_batch_fee_input()
-            .await
-            .context("cannot get batch fee input")?;
-        Ok(TxSetupArgs {
-            execution_mode: if call_overrides.is_some() {
-                TxExecutionMode::EthCall
-            } else {
-                TxExecutionMode::VerifyExecute
-            },
-            operator_account: AccountTreeId::new(self.0.sender_config.fee_account_addr),
-            fee_input,
-            base_system_contracts: self.0.api_contracts.eth_call.clone(),
-            caches: self.storage_caches(),
-            validation_computational_gas_limit: self
-                .0
-                .sender_config
-                .validation_computational_gas_limit,
-            chain_id: self.0.sender_config.chain_id,
-            whitelisted_tokens_for_aa: self.read_whitelisted_tokens_for_aa_cache().await,
-            enforced_base_fee: if let Some(overrides) = call_overrides {
-                overrides.enforced_base_fee
-            } else {
-                Some(tx.common_data.fee.max_fee_per_gas.as_u64())
-            },
-        })
-    }
-
     async fn validate_tx(
         &self,
         tx: &L2Tx,
@@ -677,332 +550,6 @@ impl TxSender {
         Ok(h256_to_u256(balance))
     }
 
-    /// Given the gas_limit to be used for the body of the transaction,
-    /// returns the result for executing the transaction with such gas_limit
-    #[allow(clippy::too_many_arguments)]
-    async fn estimate_gas_step(
-        &self,
-        vm_permit: VmPermit,
-        mut tx: Transaction,
-        tx_gas_limit: u64,
-        gas_price_per_pubdata: u32,
-        fee_model_params: BatchFeeInput,
-        block_args: BlockArgs,
-        base_fee: u64,
-        vm_version: VmVersion,
-        state_override: Option<StateOverride>,
-    ) -> anyhow::Result<(VmExecutionResultAndLogs, TransactionExecutionMetrics)> {
-        let gas_limit_with_overhead = tx_gas_limit
-            + derive_overhead(
-                tx_gas_limit,
-                gas_price_per_pubdata,
-                tx.encoding_len(),
-                tx.tx_format() as u8,
-                vm_version,
-            ) as u64;
-        // We need to ensure that we never use a gas limit that is higher than the maximum allowed
-        let forced_gas_limit = gas_limit_with_overhead.min(get_max_batch_gas_limit(vm_version));
-
-        match &mut tx.common_data {
-            ExecuteTransactionCommon::L1(l1_common_data) => {
-                l1_common_data.gas_limit = forced_gas_limit.into();
-                let required_funds =
-                    l1_common_data.gas_limit * l1_common_data.max_fee_per_gas + tx.execute.value;
-                l1_common_data.to_mint = required_funds;
-            }
-            ExecuteTransactionCommon::L2(l2_common_data) => {
-                l2_common_data.fee.gas_limit = forced_gas_limit.into();
-            }
-            ExecuteTransactionCommon::ProtocolUpgrade(common_data) => {
-                common_data.gas_limit = forced_gas_limit.into();
-
-                let required_funds =
-                    common_data.gas_limit * common_data.max_fee_per_gas + tx.execute.value;
-
-                common_data.to_mint = required_funds;
-            }
-        }
-
-        let setup_args = self.args_for_gas_estimate(fee_model_params, base_fee).await;
-        let execution_args = TxExecutionArgs::for_gas_estimate(tx);
-        let connection = self.acquire_replica_connection().await?;
-        let execution_output = self
-            .0
-            .executor
-            .execute_tx_in_sandbox(
-                vm_permit,
-                setup_args,
-                execution_args,
-                connection,
-                block_args,
-                state_override,
-                OneshotTracingParams::default(),
-            )
-            .await?;
-        Ok((execution_output.vm, execution_output.metrics))
-    }
-
-    async fn args_for_gas_estimate(&self, fee_input: BatchFeeInput, base_fee: u64) -> TxSetupArgs {
-        let config = &self.0.sender_config;
-        TxSetupArgs {
-            execution_mode: TxExecutionMode::EstimateFee,
-            operator_account: AccountTreeId::new(config.fee_account_addr),
-            fee_input,
-            // We want to bypass the computation gas limit check for gas estimation
-            validation_computational_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT,
-            base_system_contracts: self.0.api_contracts.estimate_gas.clone(),
-            caches: self.storage_caches(),
-            chain_id: config.chain_id,
-            whitelisted_tokens_for_aa: self.read_whitelisted_tokens_for_aa_cache().await,
-            enforced_base_fee: Some(base_fee),
-        }
-    }
-
-    #[tracing::instrument(level = "debug", skip_all, fields(
-        initiator = ?tx.initiator_account(),
-        nonce = ?tx.nonce(),
-    ))]
-    pub async fn get_txs_fee_in_wei(
-        &self,
-        mut tx: Transaction,
-        estimated_fee_scale_factor: f64,
-        acceptable_overestimation: u64,
-        state_override: Option<StateOverride>,
-    ) -> Result<Fee, SubmitTxError> {
-        let estimation_started_at = Instant::now();
-
-        let mut connection = self.acquire_replica_connection().await?;
-        let block_args = BlockArgs::pending(&mut connection).await?;
-        let protocol_version = connection
-            .blocks_dal()
-            .pending_protocol_version()
-            .await
-            .context("failed getting pending protocol version")?;
-        let max_gas_limit = get_max_batch_gas_limit(protocol_version.into());
-        drop(connection);
-
-        let fee_input = adjust_pubdata_price_for_tx(
-            self.scaled_batch_fee_input().await?,
-            tx.gas_per_pubdata_byte_limit(),
-            // We do not have to adjust the params to the `gasPrice` of the transaction, since
-            // its gas price will be amended later on to suit the `fee_input`
-            None,
-            protocol_version.into(),
-        );
-
-        let (base_fee, gas_per_pubdata_byte) =
-            derive_base_fee_and_gas_per_pubdata(fee_input, protocol_version.into());
-        match &mut tx.common_data {
-            ExecuteTransactionCommon::L2(common_data) => {
-                common_data.fee.max_fee_per_gas = base_fee.into();
-                common_data.fee.max_priority_fee_per_gas = base_fee.into();
-            }
-            ExecuteTransactionCommon::L1(common_data) => {
-                common_data.max_fee_per_gas = base_fee.into();
-            }
-            ExecuteTransactionCommon::ProtocolUpgrade(common_data) => {
-                common_data.max_fee_per_gas = base_fee.into();
-            }
-        }
-
-        let hashed_key = get_code_key(&tx.initiator_account());
-        // If the default account does not have enough funds for transferring `tx.value`, without taking into account the fee,
-        // there is no sense to estimate the fee.
-        let account_code_hash = self
-            .acquire_replica_connection()
-            .await?
-            .storage_web3_dal()
-            .get_value(&hashed_key)
-            .await
-            .with_context(|| {
-                format!(
-                    "failed getting code hash for account {:?}",
-                    tx.initiator_account()
-                )
-            })?;
-
-        if !tx.is_l1() && account_code_hash == H256::zero() {
-            let balance = match state_override
-                .as_ref()
-                .and_then(|overrides| overrides.get(&tx.initiator_account()))
-                .and_then(|account| account.balance)
-            {
-                Some(balance) => balance,
-                None => self.get_balance(&tx.initiator_account()).await?,
-            };
-
-            if tx.execute.value > balance {
-                tracing::info!(
-                    "fee estimation failed on validation step.
-                    account: {} does not have enough funds for for transferring tx.value: {}.",
-                    tx.initiator_account(),
-                    tx.execute.value
-                );
-                return Err(SubmitTxError::InsufficientFundsForTransfer);
-            }
-        }
-
-        // For L2 transactions we need a properly formatted signature
-        if let ExecuteTransactionCommon::L2(l2_common_data) = &mut tx.common_data {
-            if l2_common_data.signature.is_empty() {
-                l2_common_data.signature = PackedEthSignature::default().serialize_packed().into();
-            }
-        }
-
-        // Acquire the vm token for the whole duration of the binary search.
-        let vm_permit = self.0.vm_concurrency_limiter.acquire().await;
-        let vm_permit = vm_permit.ok_or(SubmitTxError::ServerShuttingDown)?;
-
-        // When the pubdata cost grows very high, the total gas limit required may become very high as well.
If - // we do binary search over any possible gas limit naively, we may end up with a very high number of iterations, - // which affects performance. - // - // To optimize for this case, we first calculate the amount of gas needed to cover for the pubdata. After that, we - // need to do a smaller binary search that is focused on computational gas limit only. - let additional_gas_for_pubdata = if tx.is_l1() { - // For L1 transactions the pubdata priced in such a way that the maximal computational - // gas limit should be enough to cover for the pubdata as well, so no additional gas is provided there. - 0u64 - } else { - // For L2 transactions, we estimate the amount of gas needed to cover for the pubdata by creating a transaction with infinite gas limit. - // And getting how much pubdata it used. - - // In theory, if the transaction has failed with such large gas limit, we could have returned an API error here right away, - // but doing it later on keeps the code more lean. - let (result, _) = self - .estimate_gas_step( - vm_permit.clone(), - tx.clone(), - max_gas_limit, - gas_per_pubdata_byte as u32, - fee_input, - block_args, - base_fee, - protocol_version.into(), - state_override.clone(), - ) - .await - .context("estimate_gas step failed")?; - - // It is assumed that there is no overflow here - (result.statistics.pubdata_published as u64) * gas_per_pubdata_byte - }; - - // We are using binary search to find the minimal values of gas_limit under which - // the transaction succeeds - let mut lower_bound = 0; - let mut upper_bound = MAX_L2_TX_GAS_LIMIT; - tracing::trace!( - "preparation took {:?}, starting binary search", - estimation_started_at.elapsed() - ); - - let mut number_of_iterations = 0usize; - while lower_bound + acceptable_overestimation < upper_bound { - let mid = (lower_bound + upper_bound) / 2; - // There is no way to distinct between errors due to out of gas - // or normal execution errors, so we just hope that increasing the - // gas limit will make the transaction successful - let iteration_started_at = Instant::now(); - let try_gas_limit = additional_gas_for_pubdata + mid; - let (result, _) = self - .estimate_gas_step( - vm_permit.clone(), - tx.clone(), - try_gas_limit, - gas_per_pubdata_byte as u32, - fee_input, - block_args, - base_fee, - protocol_version.into(), - state_override.clone(), - ) - .await - .context("estimate_gas step failed")?; - - if result.result.is_failed() { - lower_bound = mid + 1; - } else { - upper_bound = mid; - } - - tracing::trace!( - "iteration {number_of_iterations} took {:?}. lower_bound: {lower_bound}, upper_bound: {upper_bound}", - iteration_started_at.elapsed() - ); - number_of_iterations += 1; - } - SANDBOX_METRICS - .estimate_gas_binary_search_iterations - .observe(number_of_iterations); - - let suggested_gas_limit = - ((upper_bound + additional_gas_for_pubdata) as f64 * estimated_fee_scale_factor) as u64; - let (result, tx_metrics) = self - .estimate_gas_step( - vm_permit, - tx.clone(), - suggested_gas_limit, - gas_per_pubdata_byte as u32, - fee_input, - block_args, - base_fee, - protocol_version.into(), - state_override, - ) - .await - .context("final estimate_gas step failed")?; - - result.into_api_call_result()?; - self.ensure_tx_executable(&tx, &tx_metrics, false)?; - - // Now, we need to calculate the final overhead for the transaction. 
-        let overhead = derive_overhead(
-            suggested_gas_limit,
-            gas_per_pubdata_byte as u32,
-            tx.encoding_len(),
-            tx.tx_format() as u8,
-            protocol_version.into(),
-        ) as u64;
-
-        let full_gas_limit = match suggested_gas_limit.overflowing_add(overhead) {
-            (value, false) => {
-                if value > max_gas_limit {
-                    return Err(SubmitTxError::ExecutionReverted(
-                        "exceeds block gas limit".to_string(),
-                        vec![],
-                    ));
-                }
-
-                value
-            }
-            (_, true) => {
-                return Err(SubmitTxError::ExecutionReverted(
-                    "exceeds block gas limit".to_string(),
-                    vec![],
-                ));
-            }
-        };
-
-        let gas_for_pubdata = (tx_metrics.pubdata_published as u64) * gas_per_pubdata_byte;
-        let estimated_gas_for_pubdata =
-            (gas_for_pubdata as f64 * estimated_fee_scale_factor) as u64;
-
-        tracing::debug!(
-            "gas for pubdata: {estimated_gas_for_pubdata}, computational gas: {}, overhead gas: {overhead} \
-             (with params base_fee: {base_fee}, gas_per_pubdata_byte: {gas_per_pubdata_byte}) \
-             estimated_fee_scale_factor: {estimated_fee_scale_factor}",
-            suggested_gas_limit - estimated_gas_for_pubdata,
-        );
-
-        Ok(Fee {
-            max_fee_per_gas: base_fee.into(),
-            max_priority_fee_per_gas: 0u32.into(),
-            gas_limit: full_gas_limit.into(),
-            gas_per_pubdata_limit: gas_per_pubdata_byte.into(),
-        })
-    }
-
     // For now, both L1 gas price and pubdata price are scaled with the same coefficient
     async fn scaled_batch_fee_input(&self) -> anyhow::Result<BatchFeeInput> {
         self.0
@@ -1014,30 +561,41 @@ impl TxSender {
             .await
     }
 
-    pub async fn eth_call(
+    pub(crate) async fn eth_call(
         &self,
         block_args: BlockArgs,
         call_overrides: CallOverrides,
-        tx: L2Tx,
+        call: L2Tx,
         state_override: Option<StateOverride>,
     ) -> Result<Vec<u8>, SubmitTxError> {
         let vm_permit = self.0.vm_concurrency_limiter.acquire().await;
         let vm_permit = vm_permit.ok_or(SubmitTxError::ServerShuttingDown)?;
 
-        let setup_args = self.call_args(&tx, Some(&call_overrides)).await?;
-        let connection = self.acquire_replica_connection().await?;
+        let mut connection;
+        let fee_input = if block_args.resolves_to_latest_sealed_l2_block() {
+            let fee_input = self
+                .0
+                .batch_fee_input_provider
+                .get_batch_fee_input()
+                .await?;
+            // It is important to acquire a connection after calling the provider; see the comment above.
+            connection = self.acquire_replica_connection().await?;
+            fee_input
+        } else {
+            connection = self.acquire_replica_connection().await?;
+            block_args.historical_fee_input(&mut connection).await?
+        };
+
+        let action = SandboxAction::Call {
+            call,
+            fee_input,
+            enforced_base_fee: call_overrides.enforced_base_fee,
+            tracing_params: OneshotTracingParams::default(),
+        };
         let result = self
             .0
             .executor
-            .execute_tx_in_sandbox(
-                vm_permit,
-                setup_args,
-                TxExecutionArgs::for_eth_call(tx),
-                connection,
-                block_args,
-                state_override,
-                OneshotTracingParams::default(),
-            )
+            .execute_in_sandbox(vm_permit, connection, action, &block_args, state_override)
             .await?;
         result.vm.into_api_call_result()
     }
diff --git a/core/node/api_server/src/tx_sender/result.rs b/core/node/api_server/src/tx_sender/result.rs
index f4bda54efc6..a49313f0dd6 100644
--- a/core/node/api_server/src/tx_sender/result.rs
+++ b/core/node/api_server/src/tx_sender/result.rs
@@ -158,11 +158,27 @@ impl From<SandboxExecutionError> for SubmitTxError {
     }
 }
 
-pub(crate) trait ApiCallResult {
+pub(crate) trait ApiCallResult: Sized {
+    fn check_api_call_result(&self) -> Result<(), SubmitTxError>;
+
     fn into_api_call_result(self) -> Result<Vec<u8>, SubmitTxError>;
 }
 
 impl ApiCallResult for VmExecutionResultAndLogs {
+    fn check_api_call_result(&self) -> Result<(), SubmitTxError> {
+        match &self.result {
+            ExecutionResult::Success { .. } => Ok(()),
+            ExecutionResult::Revert { output } => Err(SubmitTxError::ExecutionReverted(
+                output.to_user_friendly_string(),
+                output.encoded_data(),
+            )),
+            ExecutionResult::Halt { reason } => {
+                let output: SandboxExecutionError = reason.clone().into();
+                Err(output.into())
+            }
+        }
+    }
+
     fn into_api_call_result(self) -> Result<Vec<u8>, SubmitTxError> {
         match self.result {
             ExecutionResult::Success { output } => Ok(output),
diff --git a/core/node/api_server/src/tx_sender/tests/call.rs b/core/node/api_server/src/tx_sender/tests/call.rs
new file mode 100644
index 00000000000..bdddb8e3895
--- /dev/null
+++ b/core/node/api_server/src/tx_sender/tests/call.rs
@@ -0,0 +1,253 @@
+//! Tests for `eth_call`.
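+//!
+//! Most cases appear to build synthetic pre-state via `StateBuilder` / `StateOverride` rather
+//! than persisting contracts to Postgres, and are routed through the `test_call` helper below.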
+
+use std::collections::HashMap;
+
+use assert_matches::assert_matches;
+use zksync_multivm::interface::ExecutionResult;
+use zksync_node_test_utils::create_l2_transaction;
+use zksync_types::{
+    api::state_override::OverrideAccount, transaction_request::CallRequest, K256PrivateKey,
+};
+
+use super::*;
+use crate::testonly::{decode_u256_output, Call3Result, Call3Value, StateBuilder, TestAccount};
+
+#[tokio::test]
+async fn eth_call_requires_single_connection() {
+    let pool = ConnectionPool::<Core>::constrained_test_pool(1).await;
+    let mut storage = pool.connection().await.unwrap();
+    let genesis_params = GenesisParams::mock();
+    insert_genesis_batch(&mut storage, &genesis_params)
+        .await
+        .unwrap();
+    let block_args = BlockArgs::pending(&mut storage).await.unwrap();
+    drop(storage);
+
+    let tx = create_l2_transaction(10, 100);
+    let tx_hash = tx.hash();
+
+    let mut tx_executor = MockOneshotExecutor::default();
+    tx_executor.set_call_responses(move |received_tx, _| {
+        assert_eq!(received_tx.hash(), tx_hash);
+        ExecutionResult::Success {
+            output: b"success!".to_vec(),
+        }
+    });
+    let tx_executor = SandboxExecutor::mock(tx_executor).await;
+    let (tx_sender, _) = create_test_tx_sender(
+        pool.clone(),
+        genesis_params.config().l2_chain_id,
+        tx_executor,
+    )
+    .await;
+    let call_overrides = CallOverrides {
+        enforced_base_fee: None,
+    };
+    let output = tx_sender
+        .eth_call(block_args, call_overrides, tx, None)
+        .await
+        .unwrap();
+    assert_eq!(output, b"success!");
+}
+
+async fn test_call(
+    tx_sender: &TxSender,
+    state_override: StateOverride,
+    mut call: CallRequest,
+) -> Result<Vec<u8>, SubmitTxError> {
+    call.gas = call.gas.max(Some(10_000_000.into()));
+    let call = L2Tx::from_request(call.into(), usize::MAX).unwrap();
+
+    let mut storage = tx_sender
+        .0
+        .replica_connection_pool
+        .connection()
+        .await
+        .unwrap();
+    let block_args = BlockArgs::pending(&mut storage).await.unwrap();
+    drop(storage);
+    let call_overrides = CallOverrides {
+        enforced_base_fee: None,
+    };
+
+    tx_sender
+        .eth_call(block_args, call_overrides, call, Some(state_override))
+        .await
+}
+
+#[tokio::test]
+async fn eth_call_with_balance() {
+    let alice = K256PrivateKey::random();
+    let initial_balance = 123_456_789.into();
+    let account_overrides = OverrideAccount {
+        balance: Some(initial_balance),
+        ..OverrideAccount::default()
+    };
+    let state_override = StateOverride::new(HashMap::from([(alice.address(), account_overrides)]));
+
+    let pool = ConnectionPool::<Core>::constrained_test_pool(1).await;
+    let tx_sender = create_real_tx_sender(pool).await;
+    let call = alice.query_base_token_balance();
+    let output = test_call(&tx_sender, state_override, call).await.unwrap();
+    assert_eq!(decode_u256_output(&output), initial_balance);
+}
+
+#[tokio::test]
+async fn eth_call_with_transfer() {
+    let alice = K256PrivateKey::random();
+    let transfer_value = 1_000_000_000.into();
+    let initial_balance = transfer_value * 5 / 3;
+    let state_override = StateBuilder::default()
+        .with_multicall3_contract()
+        .with_balance(alice.address(), initial_balance)
+        .build();
+
+    let transfer = alice.create_transfer(transfer_value);
+    let multicall = alice.multicall_with_value(
+        transfer_value,
+        &[transfer.into(), alice.query_base_token_balance().into()],
+    );
+
+    let pool = ConnectionPool::<Core>::constrained_test_pool(1).await;
+    let tx_sender = create_real_tx_sender(pool).await;
+    let output = test_call(&tx_sender, state_override, multicall)
+        .await
+        .unwrap();
+    let call_results = Call3Result::parse(&output);
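+    // Two calls were batched above (the transfer and the balance query), hence two results.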
assert_eq!(call_results.len(), 2); + assert!( + call_results[0].success && call_results[1].success, + "{call_results:?}" + ); + assert!(call_results[0].return_data.is_empty(), "{call_results:?}"); + + let balance = call_results[1].as_u256(); + // The bootloader doesn't compute gas refunds in the call mode, so the equality is exact + assert_eq!(balance, initial_balance - transfer_value); +} + +#[tokio::test] +async fn eth_call_with_counter() { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default().with_counter_contract(42).build(); + + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let output = test_call( + &tx_sender, + state_override.clone(), + alice.query_counter_value(), + ) + .await + .unwrap(); + assert_eq!(decode_u256_output(&output), 42.into()); + + let tx_as_call = alice.create_counter_tx(3.into(), false).into(); + let output = test_call(&tx_sender, state_override.clone(), tx_as_call) + .await + .unwrap(); + assert_eq!(decode_u256_output(&output), 45.into()); + + let tx_as_call = alice.create_counter_tx(3.into(), true).into(); + let err = test_call(&tx_sender, state_override, tx_as_call) + .await + .unwrap_err(); + assert_matches!( + err, + SubmitTxError::ExecutionReverted(msg, _) if msg.contains("This method always reverts") + ); +} + +#[tokio::test] +async fn eth_call_with_counter_transactions() { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default() + .with_multicall3_contract() + .with_counter_contract(0) + .build(); + + let multicall = alice.multicall_with_value( + 0.into(), + &[ + alice.create_counter_tx(1.into(), false).into(), + Call3Value::from(alice.create_counter_tx(2.into(), true)).allow_failure(), + alice.query_counter_value().into(), + alice.create_counter_tx(3.into(), false).into(), + ], + ); + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let output = test_call(&tx_sender, state_override, multicall) + .await + .unwrap(); + let call_results = Call3Result::parse(&output); + + assert_eq!( + call_results + .iter() + .map(|result| result.success) + .collect::>(), + [true, false, true, true] + ); + let counter_values: Vec<_> = call_results + .iter() + .filter_map(|result| { + if !result.success { + return None; + } + Some(decode_u256_output(&result.return_data).as_u32()) + }) + .collect(); + assert_eq!(counter_values, [1, 1, 4]); +} + +#[tokio::test] +async fn eth_call_out_of_gas() { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default() + .with_infinite_loop_contract() + .build(); + + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let tx_as_call = alice.create_infinite_loop_tx().into(); + let err = test_call(&tx_sender, state_override, tx_as_call) + .await + .unwrap_err(); + assert_matches!(err, SubmitTxError::ExecutionReverted(..)); +} + +#[tokio::test] +async fn eth_call_with_load_test_transactions() { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default().with_load_test_contract().build(); + + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + + // Deploys (naturally) don't work for calls, hence a separate set of test cases. 
+ let load_test_cases_for_call = [ + LoadnextContractExecutionParams { + deploys: 0, + ..LoadnextContractExecutionParams::default() + }, + LoadnextContractExecutionParams { + deploys: 0, + recursive_calls: 20, + ..LoadnextContractExecutionParams::default() + }, + LoadnextContractExecutionParams { + reads: 100, + writes: 100, + ..LoadnextContractExecutionParams::empty() + }, + ]; + + for tx_params in load_test_cases_for_call { + println!("Executing {tx_params:?}"); + let tx_as_call = alice.create_load_test_tx(tx_params).into(); + test_call(&tx_sender, state_override.clone(), tx_as_call) + .await + .unwrap(); + } +} diff --git a/core/node/api_server/src/tx_sender/tests/gas_estimation.rs b/core/node/api_server/src/tx_sender/tests/gas_estimation.rs new file mode 100644 index 00000000000..086313a8562 --- /dev/null +++ b/core/node/api_server/src/tx_sender/tests/gas_estimation.rs @@ -0,0 +1,466 @@ +//! Tests for gas estimation (mostly with the real oneshot VM executor). + +use std::collections::HashMap; + +use assert_matches::assert_matches; +use test_casing::{test_casing, Product}; +use zksync_system_constants::CODE_ORACLE_ADDRESS; +use zksync_types::{ + api::state_override::{OverrideAccount, OverrideState}, + web3::keccak256, + K256PrivateKey, +}; +use zksync_utils::bytecode::hash_bytecode; + +use super::*; +use crate::{ + testonly::{StateBuilder, TestAccount}, + tx_sender::gas_estimation::GasEstimator, +}; + +/// Initial pivot multiplier empirically sufficient for most tx types. +const DEFAULT_MULTIPLIER: f64 = 64.0 / 63.0; + +#[tokio::test] +async fn initial_gas_estimation_is_somewhat_accurate() { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + + let alice = K256PrivateKey::random(); + let transfer_value = U256::from(1_000_000_000); + let account_overrides = OverrideAccount { + balance: Some(transfer_value * 2), + ..OverrideAccount::default() + }; + let state_override = StateOverride::new(HashMap::from([(alice.address(), account_overrides)])); + let tx = alice.create_transfer(transfer_value); + + let mut estimator = GasEstimator::new(&tx_sender, tx.into(), Some(state_override)) + .await + .unwrap(); + estimator.adjust_transaction_fee(); + let initial_estimate = estimator.initialize().await.unwrap(); + assert!(initial_estimate.gas_charged_for_pubdata > 0); + assert!(initial_estimate.operator_overhead > 0); + let total_gas_charged = initial_estimate.total_gas_charged.unwrap(); + assert!( + total_gas_charged + > initial_estimate.gas_charged_for_pubdata + initial_estimate.operator_overhead, + "{initial_estimate:?}" + ); + + // Check that a transaction fails if supplied with the lower bound. + let lower_bound = initial_estimate.lower_gas_bound_without_overhead().unwrap() + + initial_estimate.operator_overhead; + assert!(lower_bound < total_gas_charged, "{initial_estimate:?}"); + let (vm_result, _) = estimator.unadjusted_step(lower_bound).await.unwrap(); + assert!(vm_result.result.is_failed(), "{:?}", vm_result.result); + + // A slightly larger limit should work. 
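+    // The 64/63 factor matches `DEFAULT_MULTIPLIER` above: under the 63/64 rule,
+    // each nested call may forward at most 63/64 of the remaining gas, so the
+    // top-level limit needs a small amount of padding.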
+ let initial_pivot = total_gas_charged * 64 / 63; + let (vm_result, _) = estimator.unadjusted_step(initial_pivot).await.unwrap(); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); +} + +#[test_casing(5, LOAD_TEST_CASES)] +#[tokio::test] +async fn initial_estimate_for_load_test_transaction(tx_params: LoadnextContractExecutionParams) { + let alice = K256PrivateKey::random(); + // Set the array length in the load test contract to 100, so that reads don't fail. + let state_override = StateBuilder::default().with_load_test_contract().build(); + let tx = alice.create_load_test_tx(tx_params); + + test_initial_estimate(state_override, tx, DEFAULT_MULTIPLIER).await; +} + +#[test_casing(2, [false, true])] +#[tokio::test] +async fn initial_estimate_for_deep_recursion(with_reads: bool) { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default().with_load_test_contract().build(); + + // Reads are chosen because they represent the worst case. Reads don't influence the amount of pubdata; + // i.e., they don't make it easier to execute a transaction because of additional gas reserved for pubdata. + // OTOH, reads still increase the amount of computational gas used on each nested call. + // + // Initial pivot multipliers below are the smallest ones with 0.1 precision. `DEFAULT_MULTIPLIER` works for smaller + // recursion depths because the transaction emits enough pubdata to cover gas deductions due to the 63/64 rule. + let depths_and_multipliers: &[_] = if with_reads { + &[(25, DEFAULT_MULTIPLIER), (50, 1.2), (75, 1.4), (100, 1.7)] + } else { + &[ + (50, DEFAULT_MULTIPLIER), + (75, 1.2), + (100, 1.4), + (125, 1.7), + (150, 2.1), + ] + }; + for &(recursion_depth, multiplier) in depths_and_multipliers { + println!("Testing recursion depth {recursion_depth}"); + let tx = alice.create_load_test_tx(LoadnextContractExecutionParams { + recursive_calls: recursion_depth, + reads: if with_reads { 10 } else { 0 }, + ..LoadnextContractExecutionParams::empty() + }); + test_initial_estimate(state_override.clone(), tx, multiplier).await; + } +} + +#[tokio::test] +async fn initial_estimate_for_deep_recursion_with_large_bytecode() { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default() + .with_load_test_contract() + .inflate_bytecode(StateBuilder::LOAD_TEST_ADDRESS, 50_000) + .build(); + let tx = alice.create_load_test_tx(LoadnextContractExecutionParams { + recursive_calls: 100, + ..LoadnextContractExecutionParams::empty() + }); + + test_initial_estimate(state_override, tx, 1.35).await; +} + +/// Tests the lower bound and initial pivot extracted from the initial estimate (one with effectively infinite gas amount). +/// Returns the VM result for a VM run with the initial pivot. +async fn test_initial_estimate( + state_override: StateOverride, + tx: L2Tx, + initial_pivot_multiplier: f64, +) -> VmExecutionResultAndLogs { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let mut estimator = GasEstimator::new(&tx_sender, tx.into(), Some(state_override)) + .await + .unwrap(); + estimator.adjust_transaction_fee(); + let initial_estimate = estimator.initialize().await.unwrap(); + + let lower_bound = initial_estimate.lower_gas_bound_without_overhead().unwrap() + + initial_estimate.operator_overhead; + let (vm_result, _) = estimator.unadjusted_step(lower_bound).await.unwrap(); + assert!(vm_result.result.is_failed(), "{:?}", vm_result.result); + + // A slightly larger limit should work. 
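+    // Here, the pivot is scaled by the caller-supplied multiplier, which individual
+    // tests tune per workload instead of the fixed 64/63 factor.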
+    let initial_pivot =
+        (initial_estimate.total_gas_charged.unwrap() as f64 * initial_pivot_multiplier) as u64;
+    let (vm_result, _) = estimator.unadjusted_step(initial_pivot).await.unwrap();
+    assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result);
+    vm_result
+}
+
+async fn test_initial_estimate_error(state_override: StateOverride, tx: L2Tx) -> SubmitTxError {
+    let pool = ConnectionPool::<Core>::constrained_test_pool(1).await;
+    let tx_sender = create_real_tx_sender(pool).await;
+    let mut estimator = GasEstimator::new(&tx_sender, tx.into(), Some(state_override))
+        .await
+        .unwrap();
+    estimator.adjust_transaction_fee();
+    estimator.initialize().await.unwrap_err()
+}
+
+/// Estimates both a transaction with initial writes and the subsequent cleanup transaction.
+#[test_casing(4, [10, 50, 200, 1_000])]
+#[tokio::test]
+async fn initial_estimate_for_expensive_contract(write_count: usize) {
+    let alice = K256PrivateKey::random();
+    let mut state_override = StateBuilder::default().with_expensive_contract().build();
+    let tx = alice.create_expensive_tx(write_count);
+
+    let vm_result = test_initial_estimate(state_override.clone(), tx, DEFAULT_MULTIPLIER).await;
+
+    let contract_logs = vm_result.logs.storage_logs.into_iter().filter_map(|log| {
+        (*log.log.key.address() == StateBuilder::EXPENSIVE_CONTRACT_ADDRESS)
+            .then_some((*log.log.key.key(), log.log.value))
+    });
+    let contract_logs: HashMap<_, _> = contract_logs.collect();
+    assert!(contract_logs.len() >= write_count, "{contract_logs:?}");
+
+    state_override
+        .get_mut(&StateBuilder::EXPENSIVE_CONTRACT_ADDRESS)
+        .unwrap()
+        .state = Some(OverrideState::StateDiff(contract_logs));
+    let tx = alice.create_expensive_cleanup_tx();
+    test_initial_estimate(state_override, tx, DEFAULT_MULTIPLIER).await;
+}
+
+#[tokio::test]
+async fn initial_estimate_for_code_oracle_tx() {
+    let alice = K256PrivateKey::random();
+    // Add another contract that is never executed, but has a large bytecode.
+    let huge_contract_address = Address::repeat_byte(23);
+    let huge_contract_bytecode = vec![0_u8; 10_001 * 32];
+    let huge_contract_bytecode_hash = hash_bytecode(&huge_contract_bytecode);
+    let huge_contract_keccak_hash = H256(keccak256(&huge_contract_bytecode));
+
+    let state_override = StateBuilder::default()
+        .with_precompiles_contract()
+        .with_contract(huge_contract_address, huge_contract_bytecode)
+        .build();
+
+    let contract_override = state_override
+        .get(&StateBuilder::PRECOMPILES_CONTRACT_ADDRESS)
+        .unwrap();
+    let contract_bytecode = contract_override.code.as_ref().unwrap();
+    let contract_bytecode_hash = contract_bytecode.hash();
+    let contract_keccak_hash = H256(keccak256(contract_bytecode.as_ref()));
+
+    // Test contracts that are already decommitted when requested from the precompiles test contract.
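+    // Both bytecodes below are decommitted while executing the transaction itself,
+    // so requesting them from the code oracle again should be comparatively cheap.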
+    let genesis_params = GenesisParams::mock();
+    let code_oracle_bytecode = genesis_params
+        .system_contracts()
+        .iter()
+        .find_map(|contract| {
+            (*contract.account_id.address() == CODE_ORACLE_ADDRESS).then_some(&contract.bytecode)
+        })
+        .expect("no code oracle");
+    let code_oracle_bytecode_hash = hash_bytecode(code_oracle_bytecode);
+    let code_oracle_keccak_hash = H256(keccak256(code_oracle_bytecode));
+
+    let warm_bytecode_hashes = [
+        (code_oracle_bytecode_hash, code_oracle_keccak_hash),
+        (contract_bytecode_hash, contract_keccak_hash),
+    ];
+    let mut decommitter_stats = 0.0;
+    for (hash, keccak_hash) in warm_bytecode_hashes {
+        println!("Testing bytecode: {hash:?}");
+        let tx = alice.create_code_oracle_tx(hash, keccak_hash);
+        let vm_result = test_initial_estimate(state_override.clone(), tx, DEFAULT_MULTIPLIER).await;
+        let stats = &vm_result.statistics.circuit_statistic;
+        decommitter_stats = stats.code_decommitter.max(decommitter_stats);
+    }
+    assert!(decommitter_stats > 0.0);
+
+    println!("Testing large bytecode");
+    let tx = alice.create_code_oracle_tx(huge_contract_bytecode_hash, huge_contract_keccak_hash);
+    let vm_result = test_initial_estimate(state_override, tx, 1.05).await;
+    // Sanity check: the transaction should spend significantly more on decommitment compared to the previous ones.
+    let new_decommitter_stats = vm_result.statistics.circuit_statistic.code_decommitter;
+    assert!(
+        new_decommitter_stats > decommitter_stats * 1.5,
+        "old={decommitter_stats}, new={new_decommitter_stats}"
+    );
+}
+
+#[tokio::test]
+async fn initial_estimate_with_large_free_bytecode() {
+    let alice = K256PrivateKey::random();
+    let state_override = StateBuilder::default()
+        .with_precompiles_contract()
+        .inflate_bytecode(StateBuilder::PRECOMPILES_CONTRACT_ADDRESS, 50_000)
+        .build();
+    let contract_override = state_override
+        .get(&StateBuilder::PRECOMPILES_CONTRACT_ADDRESS)
+        .unwrap();
+    let contract_bytecode = contract_override.code.as_ref().unwrap();
+    let contract_bytecode_hash = contract_bytecode.hash();
+    let contract_keccak_hash = H256(keccak256(contract_bytecode.as_ref()));
+
+    // Ask the test contract to decommit itself. This should refund the decommit costs, but it will be charged at first.
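+    // The 1.05 pivot multiplier used below leaves headroom for that upfront charge.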
+ let tx = alice.create_code_oracle_tx(contract_bytecode_hash, contract_keccak_hash); + test_initial_estimate(state_override, tx, 1.05).await; +} + +#[tokio::test] +async fn revert_during_initial_estimate() { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default().with_counter_contract(0).build(); + + let tx = alice.create_counter_tx(1.into(), true); + let err = test_initial_estimate_error(state_override, tx).await; + let SubmitTxError::ExecutionReverted(err, _) = err else { + panic!("Unexpected error: {err:?}"); + }; + assert_eq!(err, "This method always reverts"); +} + +#[tokio::test] +async fn out_of_gas_during_initial_estimate() { + let alice = K256PrivateKey::random(); + let state_override = StateBuilder::default() + .with_infinite_loop_contract() + .build(); + + let tx = alice.create_infinite_loop_tx(); + let err = test_initial_estimate_error(state_override, tx).await; + // Unfortunately, we don't provide human-readable out-of-gas errors at the time + assert_matches!(err, SubmitTxError::ExecutionReverted(msg, _) if msg.is_empty()); +} + +#[tokio::test] +async fn insufficient_funds_error_for_transfer() { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + + let alice = K256PrivateKey::random(); + let tx = alice.create_transfer(1_000_000_000.into()); + let fee_scale_factor = 1.0; + // Without overrides, the transaction should fail because of insufficient balance. + let err = tx_sender + .get_txs_fee_in_wei( + tx.clone().into(), + fee_scale_factor, + 1_000, + None, + BinarySearchKind::Full, + ) + .await + .unwrap_err(); + assert_matches!(err, SubmitTxError::InsufficientFundsForTransfer); +} + +async fn test_estimating_gas( + state_override: StateOverride, + tx: L2Tx, + acceptable_overestimation: u64, +) { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + + let fee_scale_factor = 1.0; + let fee = tx_sender + .get_txs_fee_in_wei( + tx.clone().into(), + fee_scale_factor, + acceptable_overestimation, + Some(state_override.clone()), + BinarySearchKind::Full, + ) + .await + .unwrap(); + // Sanity-check gas limit + let gas_limit_after_full_search = u64::try_from(fee.gas_limit).unwrap(); + assert!( + (10_000..10_000_000).contains(&gas_limit_after_full_search), + "{fee:?}" + ); + + let fee = tx_sender + .get_txs_fee_in_wei( + tx.into(), + fee_scale_factor, + acceptable_overestimation, + Some(state_override.clone()), + BinarySearchKind::Optimized, + ) + .await + .unwrap(); + let gas_limit_after_optimized_search = u64::try_from(fee.gas_limit).unwrap(); + + let diff = gas_limit_after_full_search.abs_diff(gas_limit_after_optimized_search); + assert!( + diff <= acceptable_overestimation, + "full={gas_limit_after_full_search}, optimized={gas_limit_after_optimized_search}" + ); +} + +#[test_casing(3, [0, 100, 1_000])] +#[tokio::test] +async fn estimating_gas_for_transfer(acceptable_overestimation: u64) { + let alice = K256PrivateKey::random(); + let transfer_value = 1_000_000_000.into(); + let account_overrides = OverrideAccount { + balance: Some(transfer_value * 2), + ..OverrideAccount::default() + }; + let state_override = StateOverride::new(HashMap::from([(alice.address(), account_overrides)])); + let tx = alice.create_transfer(transfer_value); + + test_estimating_gas(state_override, tx, acceptable_overestimation).await; +} + +#[test_casing(10, Product((LOAD_TEST_CASES, [0, 100])))] +#[tokio::test] +async fn 
estimating_gas_for_load_test_tx(
+    tx_params: LoadnextContractExecutionParams,
+    acceptable_overestimation: u64,
+) {
+    let alice = K256PrivateKey::random();
+    let state_override = StateBuilder::default().with_load_test_contract().build();
+    let tx = alice.create_load_test_tx(tx_params);
+
+    test_estimating_gas(state_override, tx, acceptable_overestimation).await;
+}
+
+#[test_casing(4, [10, 50, 100, 200])]
+#[tokio::test]
+async fn estimating_gas_for_expensive_txs(write_count: usize) {
+    let alice = K256PrivateKey::random();
+    let state_override = StateBuilder::default().with_expensive_contract().build();
+    let tx = alice.create_expensive_tx(write_count);
+
+    test_estimating_gas(state_override, tx, 0).await;
+}
+
+#[tokio::test]
+async fn estimating_gas_for_code_oracle_tx() {
+    let alice = K256PrivateKey::random();
+    // Add another contract that is never executed, but has a large bytecode.
+    let huge_contract_address = Address::repeat_byte(23);
+    let huge_contract_bytecode = vec![0_u8; 10_001 * 32];
+    let huge_contract_bytecode_hash = hash_bytecode(&huge_contract_bytecode);
+    let huge_contract_keccak_hash = H256(keccak256(&huge_contract_bytecode));
+
+    let state_override = StateBuilder::default()
+        .with_precompiles_contract()
+        .with_contract(huge_contract_address, huge_contract_bytecode)
+        .build();
+    let tx = alice.create_code_oracle_tx(huge_contract_bytecode_hash, huge_contract_keccak_hash);
+
+    test_estimating_gas(state_override, tx, 0).await;
+}
+
+#[tokio::test]
+async fn estimating_gas_for_reverting_tx() {
+    let alice = K256PrivateKey::random();
+    let state_override = StateBuilder::default().with_counter_contract(0).build();
+
+    let tx = alice.create_counter_tx(1.into(), true);
+    let pool = ConnectionPool::<Core>::constrained_test_pool(1).await;
+    let tx_sender = create_real_tx_sender(pool).await;
+
+    let fee_scale_factor = 1.0;
+    let acceptable_overestimation = 0;
+    for binary_search_kind in [BinarySearchKind::Full, BinarySearchKind::Optimized] {
+        let err = tx_sender
+            .get_txs_fee_in_wei(
+                tx.clone().into(),
+                fee_scale_factor,
+                acceptable_overestimation,
+                Some(state_override.clone()),
+                binary_search_kind,
+            )
+            .await
+            .unwrap_err();
+        assert_matches!(err, SubmitTxError::ExecutionReverted(..));
+    }
+}
+
+#[tokio::test]
+async fn estimating_gas_for_infinite_loop_tx() {
+    let alice = K256PrivateKey::random();
+    let state_override = StateBuilder::default()
+        .with_infinite_loop_contract()
+        .build();
+
+    let tx = alice.create_infinite_loop_tx();
+    let pool = ConnectionPool::<Core>::constrained_test_pool(1).await;
+    let tx_sender = create_real_tx_sender(pool).await;
+
+    let fee_scale_factor = 1.0;
+    let acceptable_overestimation = 0;
+    for binary_search_kind in [BinarySearchKind::Full, BinarySearchKind::Optimized] {
+        let err = tx_sender
+            .get_txs_fee_in_wei(
+                tx.clone().into(),
+                fee_scale_factor,
+                acceptable_overestimation,
+                Some(state_override.clone()),
+                binary_search_kind,
+            )
+            .await
+            .unwrap_err();
+        assert_matches!(err, SubmitTxError::ExecutionReverted(msg, _) if msg.is_empty());
+    }
+}
diff --git a/core/node/api_server/src/tx_sender/tests.rs b/core/node/api_server/src/tx_sender/tests/mod.rs
similarity index 50%
rename from core/node/api_server/src/tx_sender/tests.rs
rename to core/node/api_server/src/tx_sender/tests/mod.rs
index 0ac3eb0b4f3..3d48e320abc 100644
--- a/core/node/api_server/src/tx_sender/tests.rs
+++ b/core/node/api_server/src/tx_sender/tests/mod.rs
@@ -1,18 +1,43 @@
 //! Tests for the transaction sender.
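+//!
+//! Test cases are split into submodules: `call`, `gas_estimation` and `send_tx`;
+//! shared fixtures (`LOAD_TEST_CASES`, `create_real_tx_sender`) live in this module.
+//!
+//! Most tests share the same setup shape (a sketch, using the helpers defined below):
+//!
+//! ```ignore
+//! let pool = ConnectionPool::<Core>::constrained_test_pool(1).await;
+//! let tx_sender = create_real_tx_sender(pool).await;
+//! // ...drive `eth_call`, gas estimation, or `submit_tx` against state overrides.
+//! ```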
-use std::time::Duration; - -use assert_matches::assert_matches; -use zksync_multivm::interface::ExecutionResult; -use zksync_node_fee_model::MockBatchFeeParamsProvider; +use test_casing::TestCases; +use zksync_contracts::test_contracts::LoadnextContractExecutionParams; use zksync_node_genesis::{insert_genesis_batch, GenesisParams}; -use zksync_node_test_utils::{create_l2_block, create_l2_transaction, prepare_recovery_snapshot}; -use zksync_types::{api, get_nonce_key, L1BatchNumber, L2BlockNumber, StorageLog}; -use zksync_utils::u256_to_h256; +use zksync_node_test_utils::{create_l2_block, prepare_recovery_snapshot}; +use zksync_types::{get_nonce_key, L1BatchNumber, L2BlockNumber, StorageLog}; use zksync_vm_executor::oneshot::MockOneshotExecutor; use super::*; -use crate::{execution_sandbox::BlockStartInfo, web3::testonly::create_test_tx_sender}; +use crate::web3::testonly::create_test_tx_sender; + +mod call; +mod gas_estimation; +mod send_tx; + +const LOAD_TEST_CASES: TestCases = test_casing::cases! {[ + LoadnextContractExecutionParams::default(), + // No storage modification + LoadnextContractExecutionParams { + writes: 0, + events: 0, + ..LoadnextContractExecutionParams::default() + }, + // Moderately deep recursion (very deep recursion is tested separately) + LoadnextContractExecutionParams { + recursive_calls: 10, + ..LoadnextContractExecutionParams::default() + }, + // No deploys + LoadnextContractExecutionParams { + deploys: 0, + ..LoadnextContractExecutionParams::default() + }, + // Lots of deploys + LoadnextContractExecutionParams { + deploys: 10, + ..LoadnextContractExecutionParams::default() + }, +]}; #[tokio::test] async fn getting_nonce_for_account() { @@ -32,7 +57,8 @@ async fn getting_nonce_for_account() { .await .unwrap(); - let tx_executor = MockOneshotExecutor::default().into(); + let tx_executor = MockOneshotExecutor::default(); + let tx_executor = SandboxExecutor::mock(tx_executor).await; let (tx_sender, _) = create_test_tx_sender(pool.clone(), l2_chain_id, tx_executor).await; let nonce = tx_sender.get_expected_nonce(test_address).await.unwrap(); @@ -82,7 +108,8 @@ async fn getting_nonce_for_account_after_snapshot_recovery() { .await; let l2_chain_id = L2ChainId::default(); - let tx_executor = MockOneshotExecutor::default().into(); + let tx_executor = MockOneshotExecutor::default(); + let tx_executor = SandboxExecutor::mock(tx_executor).await; let (tx_sender, _) = create_test_tx_sender(pool.clone(), l2_chain_id, tx_executor).await; storage @@ -109,94 +136,26 @@ async fn getting_nonce_for_account_after_snapshot_recovery() { assert_eq!(nonce, Nonce(0)); } -#[tokio::test] -async fn submitting_tx_requires_one_connection() { - let pool = ConnectionPool::::constrained_test_pool(1).await; - let mut storage = pool.connection().await.unwrap(); - insert_genesis_batch(&mut storage, &GenesisParams::mock()) - .await - .unwrap(); - - let l2_chain_id = L2ChainId::default(); - let fee_input = MockBatchFeeParamsProvider::default() - .get_batch_fee_input_scaled(1.0, 1.0) - .await - .unwrap(); - let (base_fee, gas_per_pubdata) = - derive_base_fee_and_gas_per_pubdata(fee_input, ProtocolVersionId::latest().into()); - let tx = create_l2_transaction(base_fee, gas_per_pubdata); - let tx_hash = tx.hash(); - - // Manually set sufficient balance for the tx initiator. 
- let balance_key = storage_key_for_eth_balance(&tx.initiator_account()); - let storage_log = StorageLog::new_write_log(balance_key, u256_to_h256(U256::one() << 64)); - storage - .storage_logs_dal() - .append_storage_logs(L2BlockNumber(0), &[storage_log]) - .await - .unwrap(); - drop(storage); - - let mut tx_executor = MockOneshotExecutor::default(); - tx_executor.set_tx_responses(move |received_tx, _| { - assert_eq!(received_tx.hash(), tx_hash); - ExecutionResult::Success { output: vec![] } - }); - let tx_executor = tx_executor.into(); - let (tx_sender, _) = create_test_tx_sender(pool.clone(), l2_chain_id, tx_executor).await; - - let submission_result = tx_sender.submit_tx(tx).await.unwrap(); - assert_matches!(submission_result.0, L2TxSubmissionResult::Added); - - let mut storage = pool.connection().await.unwrap(); - storage - .transactions_web3_dal() - .get_transaction_by_hash(tx_hash, l2_chain_id) - .await - .unwrap() - .expect("transaction is not persisted"); -} - -#[tokio::test] -async fn eth_call_requires_single_connection() { - let pool = ConnectionPool::::constrained_test_pool(1).await; +async fn create_real_tx_sender(pool: ConnectionPool) -> TxSender { let mut storage = pool.connection().await.unwrap(); let genesis_params = GenesisParams::mock(); insert_genesis_batch(&mut storage, &genesis_params) .await .unwrap(); - let start_info = BlockStartInfo::new(&mut storage, Duration::MAX) - .await - .unwrap(); - let block_id = api::BlockId::Number(api::BlockNumber::Latest); - let block_args = BlockArgs::new(&mut storage, block_id, &start_info) - .await - .unwrap(); drop(storage); - let tx = create_l2_transaction(10, 100); - let tx_hash = tx.hash(); - - let mut tx_executor = MockOneshotExecutor::default(); - tx_executor.set_call_responses(move |received_tx, _| { - assert_eq!(received_tx.hash(), tx_hash); - ExecutionResult::Success { - output: b"success!".to_vec(), - } - }); - let tx_executor = tx_executor.into(); - let (tx_sender, _) = create_test_tx_sender( - pool.clone(), - genesis_params.config().l2_chain_id, - tx_executor, + let genesis_config = genesis_params.config(); + let executor_options = SandboxExecutorOptions::new( + genesis_config.l2_chain_id, + AccountTreeId::new(genesis_config.fee_account), + u32::MAX, ) - .await; - let call_overrides = CallOverrides { - enforced_base_fee: None, - }; - let output = tx_sender - .eth_call(block_args, call_overrides, tx, None) + .await + .unwrap(); + + let pg_caches = PostgresStorageCaches::new(1, 1); + let tx_executor = SandboxExecutor::real(executor_options, pg_caches, usize::MAX); + create_test_tx_sender(pool, genesis_params.config().l2_chain_id, tx_executor) .await - .unwrap(); - assert_eq!(output, b"success!"); + .0 } diff --git a/core/node/api_server/src/tx_sender/tests/send_tx.rs b/core/node/api_server/src/tx_sender/tests/send_tx.rs new file mode 100644 index 00000000000..678b88dab94 --- /dev/null +++ b/core/node/api_server/src/tx_sender/tests/send_tx.rs @@ -0,0 +1,293 @@ +//! Tests for sending raw transactions. 
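+//!
+//! Covers nonce and fee validation errors, plus end-to-end submission of transfers
+//! and of load-test, reverting, and out-of-gas transactions via the real executor.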
+ +use assert_matches::assert_matches; +use test_casing::test_casing; +use zksync_multivm::interface::ExecutionResult; +use zksync_node_fee_model::MockBatchFeeParamsProvider; +use zksync_node_test_utils::create_l2_transaction; +use zksync_types::K256PrivateKey; + +use super::*; +use crate::testonly::{StateBuilder, TestAccount}; + +#[tokio::test] +async fn submitting_tx_requires_one_connection() { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let mut storage = pool.connection().await.unwrap(); + insert_genesis_batch(&mut storage, &GenesisParams::mock()) + .await + .unwrap(); + + let l2_chain_id = L2ChainId::default(); + let fee_input = MockBatchFeeParamsProvider::default() + .get_batch_fee_input_scaled(1.0, 1.0) + .await + .unwrap(); + let (base_fee, gas_per_pubdata) = + derive_base_fee_and_gas_per_pubdata(fee_input, ProtocolVersionId::latest().into()); + let tx = create_l2_transaction(base_fee, gas_per_pubdata); + let tx_hash = tx.hash(); + + // Manually set sufficient balance for the tx initiator. + StateBuilder::default() + .with_balance(tx.initiator_account(), u64::MAX.into()) + .apply(&mut storage) + .await; + drop(storage); + + let mut tx_executor = MockOneshotExecutor::default(); + tx_executor.set_tx_responses(move |received_tx, _| { + assert_eq!(received_tx.hash(), tx_hash); + ExecutionResult::Success { output: vec![] } + }); + let tx_executor = SandboxExecutor::mock(tx_executor).await; + let (tx_sender, _) = create_test_tx_sender(pool.clone(), l2_chain_id, tx_executor).await; + + let submission_result = tx_sender.submit_tx(tx).await.unwrap(); + assert_matches!(submission_result.0, L2TxSubmissionResult::Added); + + let mut storage = pool.connection().await.unwrap(); + storage + .transactions_web3_dal() + .get_transaction_by_hash(tx_hash, l2_chain_id) + .await + .unwrap() + .expect("transaction is not persisted"); +} + +#[tokio::test] +async fn nonce_validation_errors() { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let mut storage = pool.connection().await.unwrap(); + insert_genesis_batch(&mut storage, &GenesisParams::mock()) + .await + .unwrap(); + drop(storage); + + let l2_chain_id = L2ChainId::default(); + let tx_executor = SandboxExecutor::mock(MockOneshotExecutor::default()).await; + let (tx_sender, _) = create_test_tx_sender(pool.clone(), l2_chain_id, tx_executor).await; + let mut tx = create_l2_transaction(55, 555); + + tx_sender.validate_account_nonce(&tx).await.unwrap(); + // There should be some leeway with the nonce validation. 
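+    // A nonce slightly ahead of the expected one (here, 1 vs. the expected 0) is
+    // accepted; a far-future nonce is rejected as too high below.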
+ tx.common_data.nonce = Nonce(1); + tx_sender.validate_account_nonce(&tx).await.unwrap(); + + tx.common_data.nonce = Nonce(10_000); + let err = tx_sender.validate_account_nonce(&tx).await.unwrap_err(); + assert_matches!( + err, + SubmitTxError::NonceIsTooHigh(from, _, actual) if actual == 10_000 && from == 0 + ); + + let mut storage = pool.connection().await.unwrap(); + let nonce_key = get_nonce_key(&tx.initiator_account()); + let nonce_log = StorageLog::new_write_log(nonce_key, H256::from_low_u64_be(42)); + storage + .storage_logs_dal() + .append_storage_logs(L2BlockNumber(0), &[nonce_log]) + .await + .unwrap(); + drop(storage); + + let err = tx_sender.validate_account_nonce(&tx).await.unwrap_err(); + assert_matches!( + err, + SubmitTxError::NonceIsTooHigh(from, _, actual) if actual == 10_000 && from == 42 + ); + + tx.common_data.nonce = Nonce(5); + let err = tx_sender.validate_account_nonce(&tx).await.unwrap_err(); + assert_matches!( + err, + SubmitTxError::NonceIsTooLow(from, _, actual) if actual == 5 && from == 42 + ); +} + +#[tokio::test] +async fn fee_validation_errors() { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let mut storage = pool.connection().await.unwrap(); + insert_genesis_batch(&mut storage, &GenesisParams::mock()) + .await + .unwrap(); + + let l2_chain_id = L2ChainId::default(); + let tx_executor = SandboxExecutor::mock(MockOneshotExecutor::default()).await; + let (tx_sender, _) = create_test_tx_sender(pool.clone(), l2_chain_id, tx_executor).await; + let fee_input = MockBatchFeeParamsProvider::default() + .get_batch_fee_input_scaled(1.0, 1.0) + .await + .unwrap(); + let (base_fee, gas_per_pubdata) = + derive_base_fee_and_gas_per_pubdata(fee_input, ProtocolVersionId::latest().into()); + let tx = create_l2_transaction(base_fee, gas_per_pubdata); + + StateBuilder::default() + .with_balance(tx.initiator_account(), u64::MAX.into()) + .apply(&mut storage) + .await; + drop(storage); + + // Sanity check: validation should succeed with reasonable fee params. + tx_sender + .validate_tx(&tx, ProtocolVersionId::latest()) + .await + .unwrap(); + + { + let mut tx = tx.clone(); + tx.common_data.fee.gas_limit = 100.into(); + let err = tx_sender + .validate_tx(&tx, ProtocolVersionId::latest()) + .await + .unwrap_err(); + assert_matches!(err, SubmitTxError::IntrinsicGas); + } + { + let mut tx = tx.clone(); + tx.common_data.fee.gas_limit = u64::MAX.into(); + let err = tx_sender + .validate_tx(&tx, ProtocolVersionId::latest()) + .await + .unwrap_err(); + assert_matches!(err, SubmitTxError::GasLimitIsTooBig); + } + { + let mut tx = tx.clone(); + tx.common_data.fee.max_fee_per_gas = 1.into(); + let err = tx_sender + .validate_tx(&tx, ProtocolVersionId::latest()) + .await + .unwrap_err(); + assert_matches!(err, SubmitTxError::MaxFeePerGasTooLow); + } + { + let mut tx = tx.clone(); + tx.common_data.fee.max_priority_fee_per_gas = tx.common_data.fee.max_fee_per_gas * 2; + let err = tx_sender + .validate_tx(&tx, ProtocolVersionId::latest()) + .await + .unwrap_err(); + assert_matches!(err, SubmitTxError::MaxPriorityFeeGreaterThanMaxFee); + } +} + +#[tokio::test] +async fn sending_transfer() { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let alice = K256PrivateKey::random(); + + // Manually set sufficient balance for the tx initiator. 
+ let mut storage = tx_sender.acquire_replica_connection().await.unwrap(); + StateBuilder::default() + .with_balance(alice.address(), u64::MAX.into()) + .apply(&mut storage) + .await; + drop(storage); + + let transfer = alice.create_transfer(1_000_000_000.into()); + let (sub_result, vm_result) = tx_sender.submit_tx(transfer).await.unwrap(); + assert_matches!(sub_result, L2TxSubmissionResult::Added); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); +} + +#[tokio::test] +async fn sending_transfer_with_insufficient_balance() { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let alice = K256PrivateKey::random(); + let transfer_value = 1_000_000_000.into(); + + let transfer = alice.create_transfer(transfer_value); + let err = tx_sender.submit_tx(transfer).await.unwrap_err(); + assert_matches!( + err, + SubmitTxError::NotEnoughBalanceForFeeValue(balance, _, value) if balance.is_zero() + && value == transfer_value + ); +} + +#[tokio::test] +async fn sending_transfer_with_incorrect_signature() { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let alice = K256PrivateKey::random(); + let transfer_value = 1_000_000_000.into(); + + let mut storage = tx_sender.acquire_replica_connection().await.unwrap(); + StateBuilder::default() + .with_balance(alice.address(), u64::MAX.into()) + .apply(&mut storage) + .await; + drop(storage); + + let mut transfer = alice.create_transfer(transfer_value); + transfer.execute.value = transfer_value / 2; // This should invalidate tx signature + let err = tx_sender.submit_tx(transfer).await.unwrap_err(); + assert_matches!(err, SubmitTxError::ValidationFailed(_)); +} + +#[test_casing(5, LOAD_TEST_CASES)] +#[tokio::test] +async fn sending_load_test_transaction(tx_params: LoadnextContractExecutionParams) { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let alice = K256PrivateKey::random(); + + let mut storage = tx_sender.acquire_replica_connection().await.unwrap(); + StateBuilder::default() + .with_load_test_contract() + .with_balance(alice.address(), u64::MAX.into()) + .apply(&mut storage) + .await; + drop(storage); + + let tx = alice.create_load_test_tx(tx_params); + let (sub_result, vm_result) = tx_sender.submit_tx(tx).await.unwrap(); + assert_matches!(sub_result, L2TxSubmissionResult::Added); + assert!(!vm_result.result.is_failed(), "{:?}", vm_result.result); +} + +#[tokio::test] +async fn sending_reverting_transaction() { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let alice = K256PrivateKey::random(); + + let mut storage = tx_sender.acquire_replica_connection().await.unwrap(); + StateBuilder::default() + .with_counter_contract(0) + .with_balance(alice.address(), u64::MAX.into()) + .apply(&mut storage) + .await; + drop(storage); + + let tx = alice.create_counter_tx(1.into(), true); + let (_, vm_result) = tx_sender.submit_tx(tx).await.unwrap(); + assert_matches!( + vm_result.result, + ExecutionResult::Revert { output } if output.to_string().contains("This method always reverts") + ); +} + +#[tokio::test] +async fn sending_transaction_out_of_gas() { + let pool = ConnectionPool::::constrained_test_pool(1).await; + let tx_sender = create_real_tx_sender(pool).await; + let alice = K256PrivateKey::random(); + + let mut storage = 
tx_sender.acquire_replica_connection().await.unwrap(); + StateBuilder::default() + .with_infinite_loop_contract() + .with_balance(alice.address(), u64::MAX.into()) + .apply(&mut storage) + .await; + drop(storage); + + let tx = alice.create_infinite_loop_tx(); + let (_, vm_result) = tx_sender.submit_tx(tx).await.unwrap(); + assert_matches!(vm_result.result, ExecutionResult::Revert { .. }); +} diff --git a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/debug.rs b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/debug.rs index 726beae2cc9..50981a2b284 100644 --- a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/debug.rs +++ b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/debug.rs @@ -1,6 +1,5 @@ use zksync_types::{ - api::{BlockId, BlockNumber, DebugCall, ResultDebugCall, TracerConfig}, - debug_flat_call::DebugCallFlat, + api::{BlockId, BlockNumber, CallTracerBlockResult, CallTracerResult, TracerConfig}, transaction_request::CallRequest, H256, }; @@ -17,27 +16,17 @@ impl DebugNamespaceServer for DebugNamespace { &self, block: BlockNumber, options: Option, - ) -> RpcResult> { + ) -> RpcResult { self.debug_trace_block_impl(BlockId::Number(block), options) .await .map_err(|err| self.current_method().map_err(err)) } - async fn trace_block_by_number_flat( - &self, - block: BlockNumber, - options: Option, - ) -> RpcResult> { - self.debug_trace_block_flat_impl(BlockId::Number(block), options) - .await - .map_err(|err| self.current_method().map_err(err)) - } - async fn trace_block_by_hash( &self, hash: H256, options: Option, - ) -> RpcResult> { + ) -> RpcResult { self.debug_trace_block_impl(BlockId::Hash(hash), options) .await .map_err(|err| self.current_method().map_err(err)) @@ -48,7 +37,7 @@ impl DebugNamespaceServer for DebugNamespace { request: CallRequest, block: Option, options: Option, - ) -> RpcResult { + ) -> RpcResult { self.debug_trace_call_impl(request, block, options) .await .map_err(|err| self.current_method().map_err(err)) @@ -58,7 +47,7 @@ impl DebugNamespaceServer for DebugNamespace { &self, tx_hash: H256, options: Option, - ) -> RpcResult> { + ) -> RpcResult> { self.debug_trace_transaction_impl(tx_hash, options) .await .map_err(|err| self.current_method().map_err(err)) diff --git a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/zks.rs b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/zks.rs index f83eb37ad96..31c8f15bb1e 100644 --- a/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/zks.rs +++ b/core/node/api_server/src/web3/backend_jsonrpsee/namespaces/zks.rs @@ -55,7 +55,7 @@ impl ZksNamespaceServer for ZksNamespace { } async fn get_bridge_contracts(&self) -> RpcResult { - Ok(self.get_bridge_contracts_impl()) + Ok(self.get_bridge_contracts_impl().await) } async fn l1_chain_id(&self) -> RpcResult { diff --git a/core/node/api_server/src/web3/mod.rs b/core/node/api_server/src/web3/mod.rs index bad1b493a5f..620e9185078 100644 --- a/core/node/api_server/src/web3/mod.rs +++ b/core/node/api_server/src/web3/mod.rs @@ -47,6 +47,7 @@ use self::{ use crate::{ execution_sandbox::{BlockStartInfo, VmConcurrencyBarrier}, tx_sender::TxSender, + web3::state::BridgeAddressesHandle, }; pub mod backend_jsonrpsee; @@ -143,7 +144,6 @@ struct OptionalApiParams { #[derive(Debug)] pub struct ApiServer { pool: ConnectionPool, - updaters_pool: ConnectionPool, health_updater: Arc, config: InternalApiConfig, transport: ApiTransport, @@ -153,18 +153,21 @@ pub struct ApiServer { namespaces: Vec, method_tracer: Arc, optional: 
OptionalApiParams, + bridge_addresses_handle: BridgeAddressesHandle, + sealed_l2_block_handle: SealedL2BlockNumber, } #[derive(Debug)] pub struct ApiBuilder { pool: ConnectionPool, - updaters_pool: ConnectionPool, config: InternalApiConfig, polling_interval: Duration, pruning_info_refresh_interval: Duration, // Mandatory params that must be set using builder methods. transport: Option, tx_sender: Option, + bridge_addresses_handle: Option, + sealed_l2_block_handle: Option, // Optional params that may or may not be set using builder methods. We treat `namespaces` // specially because we want to output a warning if they are not set. namespaces: Option>, @@ -178,13 +181,14 @@ impl ApiBuilder { pub fn jsonrpsee_backend(config: InternalApiConfig, pool: ConnectionPool) -> Self { Self { - updaters_pool: pool.clone(), pool, config, polling_interval: Self::DEFAULT_POLLING_INTERVAL, pruning_info_refresh_interval: Self::DEFAULT_PRUNING_INFO_REFRESH_INTERVAL, transport: None, tx_sender: None, + bridge_addresses_handle: None, + sealed_l2_block_handle: None, namespaces: None, method_tracer: Arc::new(MethodTracer::default()), optional: OptionalApiParams::default(), @@ -201,15 +205,6 @@ impl ApiBuilder { self } - /// Configures a dedicated DB pool to be used for updating different information, - /// such as last mined block number or account nonces. This pool is used to execute - /// in a background task. If not called, the main pool will be used. If the API server is under high load, - /// it may make sense to supply a single-connection pool to reduce pool contention with the API methods. - pub fn with_updaters_pool(mut self, pool: ConnectionPool) -> Self { - self.updaters_pool = pool; - self - } - pub fn with_tx_sender(mut self, tx_sender: TxSender) -> Self { self.tx_sender = Some(tx_sender); self @@ -285,6 +280,22 @@ impl ApiBuilder { self } + pub fn with_sealed_l2_block_handle( + mut self, + sealed_l2_block_handle: SealedL2BlockNumber, + ) -> Self { + self.sealed_l2_block_handle = Some(sealed_l2_block_handle); + self + } + + pub fn with_bridge_addresses_handle( + mut self, + bridge_addresses_handle: BridgeAddressesHandle, + ) -> Self { + self.bridge_addresses_handle = Some(bridge_addresses_handle); + self + } + // Intended for tests only. 
#[doc(hidden)] fn with_pub_sub_events(mut self, sender: mpsc::UnboundedSender) -> Self { @@ -312,7 +323,6 @@ impl ApiBuilder { Ok(ApiServer { pool: self.pool, health_updater: Arc::new(health_updater), - updaters_pool: self.updaters_pool, config: self.config, transport, tx_sender: self.tx_sender.context("Transaction sender not set")?, @@ -326,6 +336,12 @@ impl ApiBuilder { }), method_tracer: self.method_tracer, optional: self.optional, + sealed_l2_block_handle: self + .sealed_l2_block_handle + .context("Sealed l2 block handle not set")?, + bridge_addresses_handle: self + .bridge_addresses_handle + .context("Bridge addresses handle not set")?, }) } } @@ -335,11 +351,8 @@ impl ApiServer { self.health_updater.subscribe() } - async fn build_rpc_state( - self, - last_sealed_l2_block: SealedL2BlockNumber, - ) -> anyhow::Result { - let mut storage = self.updaters_pool.connection_tagged("api").await?; + async fn build_rpc_state(self) -> anyhow::Result { + let mut storage = self.pool.connection_tagged("api").await?; let start_info = BlockStartInfo::new(&mut storage, self.pruning_info_refresh_interval).await?; drop(storage); @@ -363,7 +376,8 @@ impl ApiServer { api_config: self.config, start_info, mempool_cache: self.optional.mempool_cache, - last_sealed_l2_block, + last_sealed_l2_block: self.sealed_l2_block_handle, + bridge_addresses_handle: self.bridge_addresses_handle, tree_api: self.optional.tree_api, }) } @@ -371,11 +385,10 @@ impl ApiServer { async fn build_rpc_module( self, pub_sub: Option, - last_sealed_l2_block: SealedL2BlockNumber, ) -> anyhow::Result> { let namespaces = self.namespaces.clone(); let zksync_network_id = self.config.l2_chain_id; - let rpc_state = self.build_rpc_state(last_sealed_l2_block).await?; + let rpc_state = self.build_rpc_state().await?; // Collect all the methods into a single RPC module. let mut rpc = RpcModule::new(()); @@ -473,21 +486,9 @@ impl ApiServer { self, stop_receiver: watch::Receiver, ) -> anyhow::Result { - // Chosen to be significantly smaller than the interval between L2 blocks, but larger than - // the latency of getting the latest sealed L2 block number from Postgres. If the API server - // processes enough requests, information about the latest sealed L2 block will be updated - // by reporting block difference metrics, so the actual update lag would be much smaller than this value. - const SEALED_L2_BLOCK_UPDATE_INTERVAL: Duration = Duration::from_millis(25); - let transport = self.transport; + let mut tasks = vec![]; - let (last_sealed_l2_block, sealed_l2_block_update_task) = SealedL2BlockNumber::new( - self.updaters_pool.clone(), - SEALED_L2_BLOCK_UPDATE_INTERVAL, - stop_receiver.clone(), - ); - - let mut tasks = vec![tokio::spawn(sealed_l2_block_update_task)]; let pub_sub = if matches!(transport, ApiTransport::WebSocket(_)) && self.namespaces.contains(&Namespace::Pubsub) { @@ -510,12 +511,8 @@ impl ApiServer { // framework it'll no longer be needed. 
let health_check = self.health_updater.subscribe(); let (local_addr_sender, local_addr) = oneshot::channel(); - let server_task = tokio::spawn(self.run_jsonrpsee_server( - stop_receiver, - pub_sub, - last_sealed_l2_block, - local_addr_sender, - )); + let server_task = + tokio::spawn(self.run_jsonrpsee_server(stop_receiver, pub_sub, local_addr_sender)); tasks.push(server_task); Ok(ApiServerHandles { @@ -584,7 +581,6 @@ impl ApiServer { self, mut stop_receiver: watch::Receiver, pub_sub: Option, - last_sealed_l2_block: SealedL2BlockNumber, local_addr_sender: oneshot::Sender, ) -> anyhow::Result<()> { let transport = self.transport; @@ -640,7 +636,7 @@ impl ApiServer { tracing::info!("Enabled extended call tracing for {transport_str} API server; this might negatively affect performance"); } - let rpc = self.build_rpc_module(pub_sub, last_sealed_l2_block).await?; + let rpc = self.build_rpc_module(pub_sub).await?; let registered_method_names = Arc::new(rpc.method_names().collect::>()); tracing::debug!( "Built RPC module for {transport_str} server with {} methods: {registered_method_names:?}", diff --git a/core/node/api_server/src/web3/namespaces/debug.rs b/core/node/api_server/src/web3/namespaces/debug.rs index ad00f6a878b..71560e4ddb8 100644 --- a/core/node/api_server/src/web3/namespaces/debug.rs +++ b/core/node/api_server/src/web3/namespaces/debug.rs @@ -1,62 +1,67 @@ use anyhow::Context as _; use zksync_dal::{CoreDal, DalError}; -use zksync_multivm::{ - interface::{ - Call, CallType, ExecutionResult, OneshotTracingParams, TxExecutionArgs, TxExecutionMode, - }, - vm_latest::constants::BATCH_COMPUTATIONAL_GAS_LIMIT, -}; +use zksync_multivm::interface::{Call, CallType, ExecutionResult, OneshotTracingParams}; use zksync_system_constants::MAX_ENCODED_TX_SIZE; use zksync_types::{ - api::{BlockId, BlockNumber, DebugCall, DebugCallType, ResultDebugCall, TracerConfig}, - debug_flat_call::{flatten_debug_calls, DebugCallFlat}, - fee_model::BatchFeeInput, + api::{ + BlockId, BlockNumber, CallTracerBlockResult, CallTracerResult, DebugCall, DebugCallType, + ResultDebugCall, SupportedTracers, TracerConfig, + }, + debug_flat_call::{Action, CallResult, DebugCallFlat}, l2::L2Tx, transaction_request::CallRequest, - web3, AccountTreeId, H256, U256, + web3, H256, U256, }; use zksync_web3_decl::error::Web3Error; use crate::{ - execution_sandbox::TxSetupArgs, - tx_sender::{ApiContracts, TxSenderConfig}, + execution_sandbox::SandboxAction, web3::{backend_jsonrpsee::MethodTracer, state::RpcState}, }; #[derive(Debug, Clone)] pub(crate) struct DebugNamespace { - batch_fee_input: BatchFeeInput, state: RpcState, - api_contracts: ApiContracts, } impl DebugNamespace { pub async fn new(state: RpcState) -> anyhow::Result { - let api_contracts = ApiContracts::load_from_disk().await?; - let fee_input_provider = &state.tx_sender.0.batch_fee_input_provider; - let batch_fee_input = fee_input_provider - .get_batch_fee_input_scaled( - state.api_config.estimate_gas_scale_factor, - state.api_config.estimate_gas_scale_factor, - ) - .await - .context("cannot get batch fee input")?; - - Ok(Self { - // For now, the same scaling is used for both the L1 gas price and the pubdata price - batch_fee_input, - state, - api_contracts, - }) + Ok(Self { state }) } - pub(crate) fn map_call(call: Call, only_top_call: bool) -> DebugCall { + pub(crate) fn map_call( + call: Call, + index: usize, + transaction_hash: H256, + tracer_option: TracerConfig, + ) -> CallTracerResult { + match tracer_option.tracer { + SupportedTracers::CallTracer => 
CallTracerResult::CallTrace(Self::map_default_call(
+                call,
+                tracer_option.tracer_config.only_top_call,
+            )),
+            SupportedTracers::FlatCallTracer => {
+                let mut calls = vec![];
+                let mut traces = vec![index];
+                Self::flatten_call(
+                    call,
+                    &mut calls,
+                    &mut traces,
+                    tracer_option.tracer_config.only_top_call,
+                    index,
+                    transaction_hash,
+                );
+                CallTracerResult::FlatCallTrace(calls)
+            }
+        }
+    }
+
+    pub(crate) fn map_default_call(call: Call, only_top_call: bool) -> DebugCall {
         let calls = if only_top_call {
             vec![]
         } else {
             call.calls
                 .into_iter()
-                .map(|call| Self::map_call(call, false))
+                .map(|call| Self::map_default_call(call, false))
                 .collect()
         };
         let debug_type = match call.r#type {
@@ -79,8 +84,61 @@ impl DebugNamespace {
         }
     }
 
-    fn sender_config(&self) -> &TxSenderConfig {
-        &self.state.tx_sender.0.sender_config
+    fn flatten_call(
+        call: Call,
+        calls: &mut Vec<DebugCallFlat>,
+        trace_address: &mut Vec<usize>,
+        only_top_call: bool,
+        transaction_position: usize,
+        transaction_hash: H256,
+    ) {
+        let subtraces = call.calls.len();
+        let debug_type = match call.r#type {
+            CallType::Call(_) => DebugCallType::Call,
+            CallType::Create => DebugCallType::Create,
+            CallType::NearCall => unreachable!("We have to filter out near calls beforehand"),
+        };
+
+        let result = if call.error.is_none() {
+            Some(CallResult {
+                output: web3::Bytes::from(call.output),
+                gas_used: U256::from(call.gas_used),
+            })
+        } else {
+            None
+        };
+
+        calls.push(DebugCallFlat {
+            action: Action {
+                call_type: debug_type,
+                from: call.from,
+                to: call.to,
+                gas: U256::from(call.gas),
+                value: call.value,
+                input: web3::Bytes::from(call.input),
+            },
+            result,
+            subtraces,
+            trace_address: trace_address.clone(), // Clone the current trace address
+            transaction_position,
+            transaction_hash,
+            r#type: DebugCallType::Call,
+        });
+
+        if !only_top_call {
+            for (number, call) in call.calls.into_iter().enumerate() {
+                trace_address.push(number);
+                Self::flatten_call(
+                    call,
+                    calls,
+                    trace_address,
+                    false,
+                    transaction_position,
+                    transaction_hash,
+                );
+                trace_address.pop();
+            }
+        }
     }
 
     pub(crate) fn current_method(&self) -> &MethodTracer {
@@ -91,16 +149,13 @@ impl DebugNamespace {
         &self,
         block_id: BlockId,
         options: Option<TracerConfig>,
-    ) -> Result<Vec<ResultDebugCall>, Web3Error> {
+    ) -> Result<CallTracerBlockResult, Web3Error> {
         self.current_method().set_block_id(block_id);
         if matches!(block_id, BlockId::Number(BlockNumber::Pending)) {
             // See `EthNamespace::get_block_impl()` for an explanation why this check is needed.
- return Ok(vec![]); + return Ok(CallTracerBlockResult::CallTrace(vec![])); } - let only_top_call = options - .map(|options| options.tracer_config.only_top_call) - .unwrap_or(false); let mut connection = self.state.acquire_connection().await?; let block_number = self.state.resolve_block(&mut connection, block_id).await?; self.current_method() @@ -111,41 +166,55 @@ impl DebugNamespace { .get_traces_for_l2_block(block_number) .await .map_err(DalError::generalize)?; - let call_trace = call_traces - .into_iter() - .map(|call_trace| { - let result = Self::map_call(call_trace, only_top_call); - ResultDebugCall { result } - }) - .collect(); - Ok(call_trace) - } - pub async fn debug_trace_block_flat_impl( - &self, - block_id: BlockId, - options: Option, - ) -> Result, Web3Error> { - let call_trace = self.debug_trace_block_impl(block_id, options).await?; - let call_trace_flat = flatten_debug_calls(call_trace); - Ok(call_trace_flat) + let options = options.unwrap_or_default(); + let result = match options.tracer { + SupportedTracers::CallTracer => CallTracerBlockResult::CallTrace( + call_traces + .into_iter() + .map(|(call, _, _)| ResultDebugCall { + result: Self::map_default_call(call, options.tracer_config.only_top_call), + }) + .collect(), + ), + SupportedTracers::FlatCallTracer => { + let mut flat_calls = vec![]; + for (call, tx_hash, tx_index) in call_traces { + let mut traces = vec![tx_index]; + Self::flatten_call( + call, + &mut flat_calls, + &mut traces, + options.tracer_config.only_top_call, + tx_index, + tx_hash, + ); + } + CallTracerBlockResult::FlatCallTrace(flat_calls) + } + }; + Ok(result) } pub async fn debug_trace_transaction_impl( &self, tx_hash: H256, options: Option, - ) -> Result, Web3Error> { - let only_top_call = options - .map(|options| options.tracer_config.only_top_call) - .unwrap_or(false); + ) -> Result, Web3Error> { let mut connection = self.state.acquire_connection().await?; let call_trace = connection .transactions_dal() .get_call_trace(tx_hash) .await .map_err(DalError::generalize)?; - Ok(call_trace.map(|call_trace| Self::map_call(call_trace, only_top_call))) + Ok(call_trace.map(|(call_trace, index_in_block)| { + Self::map_call( + call_trace, + index_in_block, + tx_hash, + options.unwrap_or_default(), + ) + })) } pub async fn debug_trace_call_impl( @@ -153,13 +222,11 @@ impl DebugNamespace { mut request: CallRequest, block_id: Option, options: Option, - ) -> Result { + ) -> Result { let block_id = block_id.unwrap_or(BlockId::Number(BlockNumber::Pending)); self.current_method().set_block_id(block_id); - let only_top_call = options - .map(|options| options.tracer_config.only_top_call) - .unwrap_or(false); + let options = options.unwrap_or_default(); let mut connection = self.state.acquire_connection().await?; let block_args = self @@ -174,12 +241,26 @@ impl DebugNamespace { if request.gas.is_none() { request.gas = Some(block_args.default_eth_call_gas(&mut connection).await?); } - drop(connection); + + let fee_input = if block_args.resolves_to_latest_sealed_l2_block() { + // It is important to drop a DB connection before calling the provider, since it acquires a connection internally + // on the main node. + drop(connection); + let scale_factor = self.state.api_config.estimate_gas_scale_factor; + let fee_input_provider = &self.state.tx_sender.0.batch_fee_input_provider; + // For now, the same scaling is used for both the L1 gas price and the pubdata price + fee_input_provider + .get_batch_fee_input_scaled(scale_factor, scale_factor) + .await? 
+ } else { + let fee_input = block_args.historical_fee_input(&mut connection).await?; + drop(connection); + fee_input + }; let call_overrides = request.get_call_overrides()?; - let tx = L2Tx::from_request(request.into(), MAX_ENCODED_TX_SIZE)?; + let call = L2Tx::from_request(request.into(), MAX_ENCODED_TX_SIZE)?; - let setup_args = self.call_args(call_overrides.enforced_base_fee).await; let vm_permit = self .state .tx_sender @@ -190,20 +271,23 @@ impl DebugNamespace { // We don't need properly trace if we only need top call let tracing_params = OneshotTracingParams { - trace_calls: !only_top_call, + trace_calls: !options.tracer_config.only_top_call, }; let connection = self.state.acquire_connection().await?; let executor = &self.state.tx_sender.0.executor; let result = executor - .execute_tx_in_sandbox( + .execute_in_sandbox( vm_permit, - setup_args, - TxExecutionArgs::for_eth_call(tx.clone()), connection, - block_args, + SandboxAction::Call { + call: call.clone(), + fee_input, + enforced_base_fee: call_overrides.enforced_base_fee, + tracing_params, + }, + &block_args, None, - tracing_params, ) .await?; @@ -217,35 +301,17 @@ impl DebugNamespace { )) } }; - + // It's a call request, it's safe to keep it zero + let hash = H256::zero(); let call = Call::new_high_level( - tx.common_data.fee.gas_limit.as_u64(), + call.common_data.fee.gas_limit.as_u64(), result.vm.statistics.gas_used, - tx.execute.value, - tx.execute.calldata, + call.execute.value, + call.execute.calldata, output, revert_reason, result.call_traces, ); - Ok(Self::map_call(call, false)) - } - - async fn call_args(&self, enforced_base_fee: Option) -> TxSetupArgs { - let sender_config = self.sender_config(); - TxSetupArgs { - execution_mode: TxExecutionMode::EthCall, - operator_account: AccountTreeId::default(), - fee_input: self.batch_fee_input, - base_system_contracts: self.api_contracts.eth_call.clone(), - caches: self.state.tx_sender.storage_caches().clone(), - validation_computational_gas_limit: BATCH_COMPUTATIONAL_GAS_LIMIT, - chain_id: sender_config.chain_id, - whitelisted_tokens_for_aa: self - .state - .tx_sender - .read_whitelisted_tokens_for_aa_cache() - .await, - enforced_base_fee, - } + Ok(Self::map_call(call, 0, hash, options)) } } diff --git a/core/node/api_server/src/web3/namespaces/en.rs b/core/node/api_server/src/web3/namespaces/en.rs index 26f4aa2b0b5..a412c064fac 100644 --- a/core/node/api_server/src/web3/namespaces/en.rs +++ b/core/node/api_server/src/web3/namespaces/en.rs @@ -34,7 +34,9 @@ impl EnNamespace { return Ok(None); }; Ok(Some(en::ConsensusGlobalConfig( - zksync_protobuf::serde::serialize(&cfg, serde_json::value::Serializer).unwrap(), + zksync_protobuf::serde::Serialize + .proto_fmt(&cfg, serde_json::value::Serializer) + .unwrap(), ))) } @@ -49,7 +51,9 @@ impl EnNamespace { return Ok(None); }; Ok(Some(en::ConsensusGenesis( - zksync_protobuf::serde::serialize(&cfg.genesis, serde_json::value::Serializer).unwrap(), + zksync_protobuf::serde::Serialize + .proto_fmt(&cfg.genesis, serde_json::value::Serializer) + .unwrap(), ))) } @@ -76,7 +80,9 @@ impl EnNamespace { return Ok(None); }; Ok(Some(en::AttestationStatus( - zksync_protobuf::serde::serialize(&status, serde_json::value::Serializer).unwrap(), + zksync_protobuf::serde::Serialize + .proto_fmt(&status, serde_json::value::Serializer) + .unwrap(), ))) } diff --git a/core/node/api_server/src/web3/namespaces/eth.rs b/core/node/api_server/src/web3/namespaces/eth.rs index fda5ff6f06b..1d60d839e4e 100644 --- a/core/node/api_server/src/web3/namespaces/eth.rs +++ 
b/core/node/api_server/src/web3/namespaces/eth.rs @@ -19,6 +19,7 @@ use zksync_web3_decl::{ }; use crate::{ + tx_sender::BinarySearchKind, utils::open_readonly_transaction, web3::{backend_jsonrpsee::MethodTracer, metrics::API_METRICS, state::RpcState, TypedFilter}, }; @@ -129,6 +130,7 @@ impl EthNamespace { let scale_factor = self.state.api_config.estimate_gas_scale_factor; let acceptable_overestimation = self.state.api_config.estimate_gas_acceptable_overestimation; + let search_kind = BinarySearchKind::new(self.state.api_config.estimate_gas_optimize_search); let fee = self .state @@ -138,6 +140,7 @@ impl EthNamespace { scale_factor, acceptable_overestimation as u64, state_override, + search_kind, ) .await?; Ok(fee.gas_limit) @@ -690,15 +693,16 @@ impl EthNamespace { base_fee_per_gas.len() ]); + // `base_fee_per_gas` for next L2 block cannot be calculated, appending last fee as a placeholder. + base_fee_per_gas.push(*base_fee_per_gas.last().unwrap()); + // We do not support EIP-4844, but per API specification we should return 0 for pre EIP-4844 blocks. let base_fee_per_blob_gas = vec![U256::zero(); base_fee_per_gas.len()]; - let blob_gas_used_ratio = vec![0.0; base_fee_per_gas.len()]; + let blob_gas_used_ratio = vec![0.0; gas_used_ratio.len()]; - // `base_fee_per_gas` for next L2 block cannot be calculated, appending last fee as a placeholder. - base_fee_per_gas.push(*base_fee_per_gas.last().unwrap()); Ok(FeeHistory { inner: web3::FeeHistory { - oldest_block: zksync_types::web3::BlockNumber::Number(oldest_block.into()), + oldest_block: web3::BlockNumber::Number(oldest_block.into()), base_fee_per_gas, gas_used_ratio, reward, diff --git a/core/node/api_server/src/web3/namespaces/zks.rs b/core/node/api_server/src/web3/namespaces/zks.rs index 4f88eb17e23..2192f11eb14 100644 --- a/core/node/api_server/src/web3/namespaces/zks.rs +++ b/core/node/api_server/src/web3/namespaces/zks.rs @@ -30,6 +30,7 @@ use zksync_web3_decl::{ }; use crate::{ + tx_sender::BinarySearchKind, utils::open_readonly_transaction, web3::{backend_jsonrpsee::MethodTracer, metrics::API_METRICS, RpcState}, }; @@ -104,6 +105,7 @@ impl ZksNamespace { let scale_factor = self.state.api_config.estimate_gas_scale_factor; let acceptable_overestimation = self.state.api_config.estimate_gas_acceptable_overestimation; + let search_kind = BinarySearchKind::new(self.state.api_config.estimate_gas_optimize_search); Ok(self .state @@ -113,6 +115,7 @@ impl ZksNamespace { scale_factor, acceptable_overestimation as u64, state_override, + search_kind, ) .await?) 
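+ // (The `search_kind` argument above comes from the new `estimate_gas_optimize_search` config flag; `BinarySearchKind` presumably selects between an optimized and an exhaustive binary search for the gas limit, an assumed semantics for the type re-exported from `crate::tx_sender` rather than something this diff confirms.)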
} @@ -129,8 +132,8 @@ impl ZksNamespace { self.state.api_config.l2_testnet_paymaster_addr } - pub fn get_bridge_contracts_impl(&self) -> BridgeAddresses { - self.state.api_config.bridge_addresses.clone() + pub async fn get_bridge_contracts_impl(&self) -> BridgeAddresses { + self.state.bridge_addresses_handle.read().await } pub fn l1_chain_id_impl(&self) -> U64 { diff --git a/core/node/api_server/src/web3/state.rs b/core/node/api_server/src/web3/state.rs index 5c8b47dabeb..723661ab908 100644 --- a/core/node/api_server/src/web3/state.rs +++ b/core/node/api_server/src/web3/state.rs @@ -4,13 +4,13 @@ use std::{ atomic::{AtomicU32, Ordering}, Arc, }, - time::{Duration, Instant}, + time::Instant, }; use anyhow::Context as _; use futures::TryFutureExt; use lru::LruCache; -use tokio::sync::{watch, Mutex}; +use tokio::sync::{Mutex, RwLock}; use vise::GaugeGuard; use zksync_config::{ configs::{api::Web3JsonRpcConfig, ContractsConfig}, @@ -20,8 +20,9 @@ use zksync_dal::{Connection, ConnectionPool, Core, CoreDal, DalError}; use zksync_metadata_calculator::api_server::TreeApiClient; use zksync_node_sync::SyncState; use zksync_types::{ - api, commitment::L1BatchCommitmentMode, l2::L2Tx, transaction_request::CallRequest, Address, - L1BatchNumber, L1ChainId, L2BlockNumber, L2ChainId, H256, U256, U64, + api, api::BridgeAddresses, commitment::L1BatchCommitmentMode, l2::L2Tx, + transaction_request::CallRequest, Address, L1BatchNumber, L1ChainId, L2BlockNumber, L2ChainId, + H256, U256, U64, }; use zksync_web3_decl::{error::Web3Error, types::Filter}; @@ -90,7 +91,7 @@ impl BlockStartInfo { } /// Configuration values for the API. -/// This structure is detached from `ZkSyncConfig`, since different node types (main, external, etc) +/// This structure is detached from `ZkSyncConfig`, since different node types (main, external, etc.) /// may require different configuration layouts. /// The intention is to only keep the actually used information here. #[derive(Debug, Clone)] @@ -101,6 +102,7 @@ pub struct InternalApiConfig { pub max_tx_size: usize, pub estimate_gas_scale_factor: f64, pub estimate_gas_acceptable_overestimation: u32, + pub estimate_gas_optimize_search: bool, pub bridge_addresses: api::BridgeAddresses, pub bridgehub_proxy_addr: Option
, pub state_transition_proxy_addr: Option
, @@ -128,6 +130,7 @@ impl InternalApiConfig { estimate_gas_scale_factor: web3_config.estimate_gas_scale_factor, estimate_gas_acceptable_overestimation: web3_config .estimate_gas_acceptable_overestimation, + estimate_gas_optimize_search: web3_config.estimate_gas_optimize_search, bridge_addresses: api::BridgeAddresses { l1_erc20_default_bridge: contracts_config.l1_erc20_bridge_proxy_addr, l2_erc20_default_bridge: contracts_config.l2_erc20_bridge_addr, @@ -171,51 +174,16 @@ impl InternalApiConfig { /// Thread-safe updatable information about the last sealed L2 block number. /// /// The information may be temporarily outdated and thus should only be used where this is OK -/// (e.g., for metrics reporting). The value is updated by [`Self::diff()`] and [`Self::diff_with_block_args()`] -/// and on an interval specified when creating an instance. -#[derive(Debug, Clone)] -pub(crate) struct SealedL2BlockNumber(Arc); +/// (e.g., for metrics reporting). The value is updated by [`Self::diff()`] and [`Self::diff_with_block_args()`]. +#[derive(Debug, Clone, Default)] +pub struct SealedL2BlockNumber(Arc); impl SealedL2BlockNumber { - /// Creates a handle to the last sealed L2 block number together with a task that will update - /// it on a schedule. - pub fn new( - connection_pool: ConnectionPool, - update_interval: Duration, - stop_receiver: watch::Receiver, - ) -> (Self, impl Future>) { - let this = Self(Arc::default()); - let number_updater = this.clone(); - - let update_task = async move { - loop { - if *stop_receiver.borrow() { - tracing::debug!("Stopping latest sealed L2 block updates"); - return Ok(()); - } - - let mut connection = connection_pool.connection_tagged("api").await.unwrap(); - let Some(last_sealed_l2_block) = - connection.blocks_dal().get_sealed_l2_block_number().await? - else { - tokio::time::sleep(update_interval).await; - continue; - }; - drop(connection); - - number_updater.update(last_sealed_l2_block); - tokio::time::sleep(update_interval).await; - } - }; - - (this, update_task) - } - /// Potentially updates the last sealed L2 block number by comparing it to the provided /// sealed L2 block number (not necessarily the last one). /// /// Returns the last sealed L2 block number after the update. - fn update(&self, maybe_newer_l2_block_number: L2BlockNumber) -> L2BlockNumber { + pub fn update(&self, maybe_newer_l2_block_number: L2BlockNumber) -> L2BlockNumber { let prev_value = self .0 .fetch_max(maybe_newer_l2_block_number.0, Ordering::Relaxed); @@ -229,7 +197,7 @@ impl SealedL2BlockNumber { /// Returns the difference between the latest L2 block number and the resolved L2 block number /// from `block_args`. - pub fn diff_with_block_args(&self, block_args: &BlockArgs) -> u32 { + pub(crate) fn diff_with_block_args(&self, block_args: &BlockArgs) -> u32 { // We compute the difference in any case, since it may update the stored value. let diff = self.diff(block_args.resolved_block_number()); @@ -241,6 +209,23 @@ impl SealedL2BlockNumber { } } +#[derive(Debug, Clone)] +pub struct BridgeAddressesHandle(Arc>); + +impl BridgeAddressesHandle { + pub fn new(bridge_addresses: BridgeAddresses) -> Self { + Self(Arc::new(RwLock::new(bridge_addresses))) + } + + pub async fn update(&self, bridge_addresses: BridgeAddresses) { + *self.0.write().await = bridge_addresses; + } + + pub async fn read(&self) -> BridgeAddresses { + self.0.read().await.clone() + } +} + /// Holder for the data required for the API to be functional. 
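+/// Bridge addresses are kept behind the [`BridgeAddressesHandle`] introduced above, so `zks_getBridgeContracts` responses can reflect runtime updates to the handle without rebuilding this state.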
#[derive(Debug, Clone)] pub(crate) struct RpcState { @@ -256,6 +241,7 @@ pub(crate) struct RpcState { pub(super) start_info: BlockStartInfo, pub(super) mempool_cache: Option, pub(super) last_sealed_l2_block: SealedL2BlockNumber, + pub(super) bridge_addresses_handle: BridgeAddressesHandle, } impl RpcState { diff --git a/core/node/api_server/src/web3/testonly.rs b/core/node/api_server/src/web3/testonly.rs index a77498d4341..3b05e235c6d 100644 --- a/core/node/api_server/src/web3/testonly.rs +++ b/core/node/api_server/src/web3/testonly.rs @@ -2,56 +2,26 @@ use std::{pin::Pin, time::Instant}; -use async_trait::async_trait; use tokio::sync::watch; use zksync_config::configs::{api::Web3JsonRpcConfig, chain::StateKeeperConfig, wallets::Wallets}; use zksync_dal::ConnectionPool; use zksync_health_check::CheckHealth; -use zksync_node_fee_model::{BatchFeeModelInputProvider, MockBatchFeeParamsProvider}; +use zksync_node_fee_model::MockBatchFeeParamsProvider; use zksync_state::PostgresStorageCaches; use zksync_state_keeper::seal_criteria::NoopSealer; -use zksync_types::{ - fee_model::{BatchFeeInput, FeeParams}, - L2ChainId, -}; +use zksync_types::L2ChainId; use zksync_vm_executor::oneshot::MockOneshotExecutor; use super::{metrics::ApiTransportLabel, *}; -use crate::{execution_sandbox::TransactionExecutor, tx_sender::TxSenderConfig}; +use crate::{execution_sandbox::SandboxExecutor, tx_sender::TxSenderConfig}; const TEST_TIMEOUT: Duration = Duration::from_secs(90); const POLL_INTERVAL: Duration = Duration::from_millis(50); -/// Same as [`MockBatchFeeParamsProvider`], but also artificially acquires a Postgres connection on each call -/// (same as the real node implementation). -#[derive(Debug)] -struct MockApiBatchFeeParamsProvider { - inner: MockBatchFeeParamsProvider, - pool: ConnectionPool, -} - -#[async_trait] -impl BatchFeeModelInputProvider for MockApiBatchFeeParamsProvider { - async fn get_batch_fee_input_scaled( - &self, - l1_gas_price_scale_factor: f64, - l1_pubdata_price_scale_factor: f64, - ) -> anyhow::Result { - let _connection = self.pool.connection().await?; - self.inner - .get_batch_fee_input_scaled(l1_gas_price_scale_factor, l1_pubdata_price_scale_factor) - .await - } - - fn get_fee_model_params(&self) -> FeeParams { - self.inner.get_fee_model_params() - } -} - pub(crate) async fn create_test_tx_sender( pool: ConnectionPool, l2_chain_id: L2ChainId, - tx_executor: TransactionExecutor, + tx_executor: SandboxExecutor, ) -> (TxSender, VmConcurrencyBarrier) { let web3_config = Web3JsonRpcConfig::for_tests(); let state_keeper_config = StateKeeperConfig::for_tests(); @@ -64,10 +34,7 @@ pub(crate) async fn create_test_tx_sender( ); let storage_caches = PostgresStorageCaches::new(1, 1); - let batch_fee_model_input_provider = Arc::new(MockApiBatchFeeParamsProvider { - inner: MockBatchFeeParamsProvider::default(), - pool: pool.clone(), - }); + let batch_fee_model_input_provider = Arc::::default(); let (mut tx_sender, vm_barrier) = crate::tx_sender::build_tx_sender( &tx_sender_config, &web3_config, @@ -130,42 +97,72 @@ impl ApiServerHandles { } } -pub async fn spawn_http_server( - api_config: InternalApiConfig, +/// Builder for test server instances. 
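+/// +/// Typical usage mirrors the call sites below (a sketch, not a normative API): +/// +/// ```ignore +/// let handles = TestServerBuilder::new(pool.clone(), api_config) +///     .with_tx_executor(tx_executor) +///     .with_method_tracer(method_tracer) +///     .build_http(stop_receiver) +///     .await; +/// ```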
+#[derive(Debug)] +pub struct TestServerBuilder { pool: ConnectionPool, + api_config: InternalApiConfig, tx_executor: MockOneshotExecutor, method_tracer: Arc, - stop_receiver: watch::Receiver, -) -> ApiServerHandles { - spawn_server( - ApiTransportLabel::Http, - api_config, - pool, - None, - tx_executor, - method_tracer, - stop_receiver, - ) - .await - .0 } -pub async fn spawn_ws_server( - api_config: InternalApiConfig, - pool: ConnectionPool, - stop_receiver: watch::Receiver, - websocket_requests_per_minute_limit: Option, -) -> (ApiServerHandles, mpsc::UnboundedReceiver) { - spawn_server( - ApiTransportLabel::Ws, - api_config, - pool, - websocket_requests_per_minute_limit, - MockOneshotExecutor::default(), - Arc::default(), - stop_receiver, - ) - .await +impl TestServerBuilder { + /// Creates a new builder. + pub fn new(pool: ConnectionPool, api_config: InternalApiConfig) -> Self { + Self { + api_config, + pool, + tx_executor: MockOneshotExecutor::default(), + method_tracer: Arc::default(), + } + } + + /// Sets a transaction / call executor for this builder. + #[must_use] + pub fn with_tx_executor(mut self, tx_executor: MockOneshotExecutor) -> Self { + self.tx_executor = tx_executor; + self + } + + /// Sets an RPC method tracer for this builder. + #[must_use] + pub fn with_method_tracer(mut self, tracer: Arc) -> Self { + self.method_tracer = tracer; + self + } + + /// Builds an HTTP server. + pub async fn build_http(self, stop_receiver: watch::Receiver) -> ApiServerHandles { + spawn_server( + ApiTransportLabel::Http, + self.api_config, + self.pool, + None, + self.tx_executor, + self.method_tracer, + stop_receiver, + ) + .await + .0 + } + + /// Builds a WS server. + pub async fn build_ws( + self, + websocket_requests_per_minute_limit: Option, + stop_receiver: watch::Receiver, + ) -> (ApiServerHandles, mpsc::UnboundedReceiver) { + spawn_server( + ApiTransportLabel::Ws, + self.api_config, + self.pool, + websocket_requests_per_minute_limit, + self.tx_executor, + self.method_tracer, + stop_receiver, + ) + .await + } } async fn spawn_server( @@ -177,12 +174,15 @@ async fn spawn_server( method_tracer: Arc, stop_receiver: watch::Receiver, ) -> (ApiServerHandles, mpsc::UnboundedReceiver) { + let tx_executor = SandboxExecutor::mock(tx_executor).await; let (tx_sender, vm_barrier) = - create_test_tx_sender(pool.clone(), api_config.l2_chain_id, tx_executor.into()).await; + create_test_tx_sender(pool.clone(), api_config.l2_chain_id, tx_executor).await; let (pub_sub_events_sender, pub_sub_events_receiver) = mpsc::unbounded_channel(); let mut namespaces = Namespace::DEFAULT.to_vec(); - namespaces.extend([Namespace::Debug, Namespace::Snapshots]); + namespaces.extend([Namespace::Debug, Namespace::Snapshots, Namespace::Unstable]); + let sealed_l2_block_handle = SealedL2BlockNumber::default(); + let bridge_addresses_handle = BridgeAddressesHandle::new(api_config.bridge_addresses.clone()); let server_builder = match transport { ApiTransportLabel::Http => ApiBuilder::jsonrpsee_backend(api_config, pool).http(0), @@ -204,6 +204,8 @@ async fn spawn_server( .with_pub_sub_events(pub_sub_events_sender) .with_method_tracer(method_tracer) .enable_api_namespaces(namespaces) + .with_sealed_l2_block_handle(sealed_l2_block_handle) + .with_bridge_addresses_handle(bridge_addresses_handle) .build() .expect("Unable to build API server") .run(stop_receiver) diff --git a/core/node/api_server/src/web3/tests/debug.rs b/core/node/api_server/src/web3/tests/debug.rs index 76496b42cad..4f021b777ae 100644 --- 
a/core/node/api_server/src/web3/tests/debug.rs +++ b/core/node/api_server/src/web3/tests/debug.rs @@ -1,7 +1,10 @@ //! Tests for the `debug` Web3 namespace. use zksync_multivm::interface::{Call, TransactionExecutionResult}; -use zksync_types::BOOTLOADER_ADDRESS; +use zksync_types::{ + api::{CallTracerConfig, SupportedTracers, TracerConfig}, + BOOTLOADER_ADDRESS, +}; use zksync_web3_decl::{ client::{DynClient, L2}, namespaces::DebugNamespaceClient, @@ -58,18 +61,19 @@ impl HttpTest for TraceBlockTest { let block_traces = match block_id { api::BlockId::Number(number) => client.trace_block_by_number(number, None).await?, api::BlockId::Hash(hash) => client.trace_block_by_hash(hash, None).await?, - }; + } + .unwrap_default(); assert_eq!(block_traces.len(), tx_results.len()); // equals the number of transactions in the block for (trace, tx_result) in block_traces.iter().zip(&tx_results) { - let api::ResultDebugCall { result } = trace; + let result = &trace.result; assert_eq!(result.from, Address::zero()); assert_eq!(result.to, BOOTLOADER_ADDRESS); assert_eq!(result.gas, tx_result.transaction.gas_limit()); let expected_calls: Vec<_> = tx_result .call_traces .iter() - .map(|call| DebugNamespace::map_call(call.clone(), false)) + .map(|call| DebugNamespace::map_default_call(call.clone(), false)) .collect(); assert_eq!(result.calls, expected_calls); } @@ -122,7 +126,18 @@ impl HttpTest for TraceBlockFlatTest { for block_id in block_ids { if let api::BlockId::Number(number) = block_id { - let block_traces = client.trace_block_by_number_flat(number, None).await?; + let block_traces = client + .trace_block_by_number( + number, + Some(TracerConfig { + tracer: SupportedTracers::FlatCallTracer, + tracer_config: CallTracerConfig { + only_top_call: false, + }, + }), + ) + .await? + .unwrap_flat(); // A transaction with 2 nested calls converts into 3 flattened calls. // Also, in this test all txs have the same number of nested calls @@ -133,10 +148,10 @@ impl HttpTest for TraceBlockFlatTest { // First tx has 2 nested calls, thus 2 sub-traces assert_eq!(block_traces[0].subtraces, 2); - assert_eq!(block_traces[0].traceaddress, [0]); + assert_eq!(block_traces[0].trace_address, [0]); // The second flat call (the first nested call) does not have nested calls assert_eq!(block_traces[1].subtraces, 0); - assert_eq!(block_traces[1].traceaddress, [0, 0]); + assert_eq!(block_traces[1].trace_address, [0, 0]); let top_level_call_indexes = [0, 3, 6]; let top_level_traces = top_level_call_indexes @@ -157,7 +172,15 @@ impl HttpTest for TraceBlockFlatTest { let missing_block_number = api::BlockNumber::from(*self.0 + 100); let error = client - .trace_block_by_number_flat(missing_block_number, None) + .trace_block_by_number( + missing_block_number, + Some(TracerConfig { + tracer: SupportedTracers::FlatCallTracer, + tracer_config: CallTracerConfig { + only_top_call: false, + }, + }), + ) .await .unwrap_err(); if let ClientError::Call(error) = error { @@ -198,13 +221,14 @@ impl HttpTest for TraceTransactionTest { let expected_calls: Vec<_> = tx_results[0] .call_traces .iter() - .map(|call| DebugNamespace::map_call(call.clone(), false)) + .map(|call| DebugNamespace::map_default_call(call.clone(), false)) .collect(); let result = client .trace_transaction(tx_results[0].hash, None) .await? - .context("no transaction traces")?; + .context("no transaction traces")? 
+ .unwrap_default(); assert_eq!(result.from, Address::zero()); assert_eq!(result.to, BOOTLOADER_ADDRESS); assert_eq!(result.gas, tx_results[0].transaction.gas_limit()); diff --git a/core/node/api_server/src/web3/tests/mod.rs b/core/node/api_server/src/web3/tests/mod.rs index 635620e9c52..632e263c653 100644 --- a/core/node/api_server/src/web3/tests/mod.rs +++ b/core/node/api_server/src/web3/tests/mod.rs @@ -58,11 +58,12 @@ use zksync_web3_decl::{ }; use super::*; -use crate::web3::testonly::{spawn_http_server, spawn_ws_server}; +use crate::web3::testonly::TestServerBuilder; mod debug; mod filters; mod snapshots; +mod unstable; mod vm; mod ws; @@ -244,14 +245,11 @@ async fn test_http_server(test: impl HttpTest) { let genesis = GenesisConfig::for_tests(); let mut api_config = InternalApiConfig::new(&web3_config, &contracts_config, &genesis); api_config.filters_disabled = test.filters_disabled(); - let mut server_handles = spawn_http_server( - api_config, - pool.clone(), - test.transaction_executor(), - test.method_tracer(), - stop_receiver, - ) - .await; + let mut server_handles = TestServerBuilder::new(pool.clone(), api_config) + .with_tx_executor(test.transaction_executor()) + .with_method_tracer(test.method_tracer()) + .build_http(stop_receiver) + .await; let local_addr = server_handles.wait_until_ready().await; let client = Client::http(format!("http://{local_addr}/").parse().unwrap()) @@ -305,6 +303,17 @@ async fn store_l2_block( number: L2BlockNumber, transaction_results: &[TransactionExecutionResult], ) -> anyhow::Result { + let header = create_l2_block(number.0); + store_custom_l2_block(storage, &header, transaction_results).await?; + Ok(header) +} + +async fn store_custom_l2_block( + storage: &mut Connection<'_, Core>, + header: &L2BlockHeader, + transaction_results: &[TransactionExecutionResult], +) -> anyhow::Result<()> { + let number = header.number; for result in transaction_results { let l2_tx = result.transaction.clone().try_into().unwrap(); let tx_submission_result = storage @@ -327,19 +336,18 @@ async fn store_l2_block( .append_storage_logs(number, &[l2_block_log]) .await?; - let new_l2_block = create_l2_block(number.0); - storage.blocks_dal().insert_l2_block(&new_l2_block).await?; + storage.blocks_dal().insert_l2_block(header).await?; storage .transactions_dal() .mark_txs_as_executed_in_l2_block( - new_l2_block.number, + number, transaction_results, 1.into(), ProtocolVersionId::latest(), false, ) .await?; - Ok(new_l2_block) + Ok(()) } async fn seal_l1_batch( diff --git a/core/node/api_server/src/web3/tests/unstable.rs b/core/node/api_server/src/web3/tests/unstable.rs new file mode 100644 index 00000000000..1d425f8b951 --- /dev/null +++ b/core/node/api_server/src/web3/tests/unstable.rs @@ -0,0 +1,69 @@ +//! Tests for the `unstable` Web3 namespace. 
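+//! +//! Currently this covers TEE proof retrieval via `UnstableNamespaceClient::tee_proofs`, exercising the TEE attestation and proof DALs end to end.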
+ +use zksync_types::tee_types::TeeType; +use zksync_web3_decl::namespaces::UnstableNamespaceClient; + +use super::*; + +#[derive(Debug)] +struct GetTeeProofsTest {} + +impl GetTeeProofsTest { + fn new() -> Self { + Self {} + } +} + +#[async_trait] +impl HttpTest for GetTeeProofsTest { + async fn test( + &self, + client: &DynClient, + pool: &ConnectionPool, + ) -> anyhow::Result<()> { + let batch_no = L1BatchNumber(1337); + let tee_type = TeeType::Sgx; + let proof = client.tee_proofs(batch_no, Some(tee_type)).await?; + + assert!(proof.is_empty()); + + let mut storage = pool.connection().await.unwrap(); + storage + .tee_verifier_input_producer_dal() + .create_tee_verifier_input_producer_job(batch_no) + .await?; + + let pubkey = vec![0xDE, 0xAD, 0xBE, 0xEF]; + let attestation = vec![0xC0, 0xFF, 0xEE]; + let mut tee_proof_generation_dal = storage.tee_proof_generation_dal(); + tee_proof_generation_dal + .save_attestation(&pubkey, &attestation) + .await?; + tee_proof_generation_dal + .insert_tee_proof_generation_job(batch_no, tee_type) + .await?; + + let signature = vec![0, 1, 2, 3, 4]; + let proof_vec = vec![5, 6, 7, 8, 9]; + tee_proof_generation_dal + .save_proof_artifacts_metadata(batch_no, tee_type, &pubkey, &signature, &proof_vec) + .await?; + + let proofs = client.tee_proofs(batch_no, Some(tee_type)).await?; + assert!(proofs.len() == 1); + let proof = &proofs[0]; + assert!(proof.l1_batch_number == batch_no); + assert!(proof.tee_type == Some(tee_type)); + assert!(proof.pubkey.as_ref() == Some(&pubkey)); + assert!(proof.signature.as_ref() == Some(&signature)); + assert!(proof.proof.as_ref() == Some(&proof_vec)); + assert!(proof.attestation.as_ref() == Some(&attestation)); + + Ok(()) + } +} + +#[tokio::test] +async fn get_tee_proofs() { + test_http_server(GetTeeProofsTest::new()).await; +} diff --git a/core/node/api_server/src/web3/tests/vm.rs b/core/node/api_server/src/web3/tests/vm.rs index d8d1a2c7768..d8086c6c6ad 100644 --- a/core/node/api_server/src/web3/tests/vm.rs +++ b/core/node/api_server/src/web3/tests/vm.rs @@ -1,14 +1,25 @@ //! Tests for the VM-instantiating methods (e.g., `eth_call`). 
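+//! +//! The `ExpectedFeeInput` helper introduced below records the batch fee input the mock executor should observe for a targeted block, letting these tests assert that call handlers pick up historical fee inputs instead of the live provider's values.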
-use std::sync::atomic::{AtomicU32, Ordering}; +use std::{ + str, + sync::{ + atomic::{AtomicU32, Ordering}, + Mutex, + }, +}; use api::state_override::{OverrideAccount, StateOverride}; use zksync_multivm::interface::{ ExecutionResult, VmExecutionLogs, VmExecutionResultAndLogs, VmRevertReason, }; +use zksync_node_fee_model::BatchFeeModelInputProvider; use zksync_types::{ - api::ApiStorageLog, get_intrinsic_constants, transaction_request::CallRequest, K256PrivateKey, - L2ChainId, PackedEthSignature, StorageLogKind, StorageLogWithPreviousValue, U256, + api::ApiStorageLog, + fee_model::{BatchFeeInput, FeeParams}, + get_intrinsic_constants, + transaction_request::CallRequest, + K256PrivateKey, L2ChainId, PackedEthSignature, StorageLogKind, StorageLogWithPreviousValue, + U256, }; use zksync_utils::u256_to_h256; use zksync_vm_executor::oneshot::MockOneshotExecutor; @@ -16,8 +27,52 @@ use zksync_web3_decl::namespaces::DebugNamespaceClient; use super::*; -#[derive(Debug)] -struct CallTest; +#[derive(Debug, Clone)] +struct ExpectedFeeInput(Arc>); + +impl Default for ExpectedFeeInput { + fn default() -> Self { + let this = Self(Arc::default()); + this.expect_default(1.0); // works for transaction execution and calls + this + } +} + +impl ExpectedFeeInput { + fn expect_for_block(&self, number: api::BlockNumber, scale: f64) { + *self.0.lock().unwrap() = match number { + api::BlockNumber::Number(number) => create_l2_block(number.as_u32()).batch_fee_input, + _ => ::default_batch_fee_input_scaled( + FeeParams::sensible_v1_default(), + scale, + scale, + ), + }; + } + + fn expect_default(&self, scale: f64) { + self.expect_for_block(api::BlockNumber::Pending, scale); + } + + #[allow(dead_code)] + fn expect_custom(&self, expected: BatchFeeInput) { + *self.0.lock().unwrap() = expected; + } + + fn assert_eq(&self, actual: BatchFeeInput) { + let expected = *self.0.lock().unwrap(); + // We do relaxed comparisons to deal with the fact that the fee input provider may convert inputs to pubdata independent form. 
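+        // (`into_pubdata_independent()` flattens either representation into explicit fair L2 gas price / fair pubdata price / L1 gas price values, so inputs differing only in representation compare equal here.)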
+ assert_eq!( + actual.into_pubdata_independent(), + expected.into_pubdata_independent() + ); + } +} + +#[derive(Debug, Default)] +struct CallTest { + fee_input: ExpectedFeeInput, +} impl CallTest { fn call_request(data: &[u8]) -> CallRequest { @@ -31,15 +86,27 @@ impl CallTest { } } - fn create_executor(latest_block: L2BlockNumber) -> MockOneshotExecutor { + fn create_executor( + latest_block: L2BlockNumber, + expected_fee_input: ExpectedFeeInput, + ) -> MockOneshotExecutor { let mut tx_executor = MockOneshotExecutor::default(); tx_executor.set_call_responses(move |tx, env| { + expected_fee_input.assert_eq(env.l1_batch.fee_input); + let expected_block_number = match tx.execute.calldata() { - b"pending" => latest_block + 1, - b"latest" => latest_block, + b"pending" => latest_block.0 + 1, + b"latest" => latest_block.0, + block if block.starts_with(b"block=") => str::from_utf8(block) + .expect("non-UTF8 calldata") + .strip_prefix("block=") + .unwrap() + .trim() + .parse() + .expect("invalid block number"), data => panic!("Unexpected calldata: {data:?}"), }; - assert_eq!(env.l1_batch.first_l2_block.number, expected_block_number.0); + assert_eq!(env.l1_batch.first_l2_block.number, expected_block_number); ExecutionResult::Success { output: b"output".to_vec(), @@ -52,7 +119,7 @@ impl CallTest { #[async_trait] impl HttpTest for CallTest { fn transaction_executor(&self) -> MockOneshotExecutor { - Self::create_executor(L2BlockNumber(1)) + Self::create_executor(L2BlockNumber(1), self.fee_input.clone()) } async fn test( @@ -63,7 +130,6 @@ impl HttpTest for CallTest { // Store an additional L2 block because L2 block #0 has some special processing making it work incorrectly. let mut connection = pool.connection().await?; store_l2_block(&mut connection, L2BlockNumber(1), &[]).await?; - drop(connection); let call_result = client .call(Self::call_request(b"pending"), None, None) @@ -73,9 +139,10 @@ impl HttpTest for CallTest { let valid_block_numbers_and_calldata = [ (api::BlockNumber::Pending, b"pending" as &[_]), (api::BlockNumber::Latest, b"latest"), - (0.into(), b"latest"), + (1.into(), b"latest"), ]; for (number, calldata) in valid_block_numbers_and_calldata { + self.fee_input.expect_for_block(number, 1.0); let number = api::BlockIdVariant::BlockNumber(number); let call_result = client .call(Self::call_request(calldata), Some(number), None) @@ -95,17 +162,35 @@ impl HttpTest for CallTest { panic!("Unexpected error: {error:?}"); } + // Check that the method handler fetches fee inputs for recent blocks. To do that, we create a new block + // with a large fee input; it should be loaded by `ApiFeeInputProvider` and override the input provided by the wrapped mock provider. 
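+        // Block #2 below is stored with a deliberately scaled-up fee input; the subsequent `block=3` call must then observe exactly that stored input.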
+ let mut block_header = create_l2_block(2); + block_header.batch_fee_input = + ::default_batch_fee_input_scaled( + FeeParams::sensible_v1_default(), + 2.5, + 2.5, + ); + store_custom_l2_block(&mut connection, &block_header, &[]).await?; + // Fee input is not scaled further as per `ApiFeeInputProvider` implementation + self.fee_input.expect_custom(block_header.batch_fee_input); + let call_request = CallTest::call_request(b"block=3"); + let call_result = client.call(call_request.clone(), None, None).await?; + assert_eq!(call_result.0, b"output"); + Ok(()) } } #[tokio::test] async fn call_method_basics() { - test_http_server(CallTest).await; + test_http_server(CallTest::default()).await; } -#[derive(Debug)] -struct CallTestAfterSnapshotRecovery; +#[derive(Debug, Default)] +struct CallTestAfterSnapshotRecovery { + fee_input: ExpectedFeeInput, +} #[async_trait] impl HttpTest for CallTestAfterSnapshotRecovery { @@ -115,7 +200,7 @@ impl HttpTest for CallTestAfterSnapshotRecovery { fn transaction_executor(&self) -> MockOneshotExecutor { let first_local_l2_block = StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1; - CallTest::create_executor(first_local_l2_block) + CallTest::create_executor(first_local_l2_block, self.fee_input.clone()) } async fn test( @@ -150,6 +235,7 @@ impl HttpTest for CallTestAfterSnapshotRecovery { let first_l2_block_numbers = [api::BlockNumber::Latest, first_local_l2_block.0.into()]; for number in first_l2_block_numbers { + self.fee_input.expect_for_block(number, 1.0); let number = api::BlockIdVariant::BlockNumber(number); let call_result = client .call(CallTest::call_request(b"latest"), Some(number), None) @@ -162,7 +248,7 @@ impl HttpTest for CallTestAfterSnapshotRecovery { #[tokio::test] async fn call_method_after_snapshot_recovery() { - test_http_server(CallTestAfterSnapshotRecovery).await; + test_http_server(CallTestAfterSnapshotRecovery::default()).await; } #[derive(Debug)] @@ -390,10 +476,14 @@ async fn send_raw_transaction_with_detailed_output() { test_http_server(SendTransactionWithDetailedOutputTest).await; } -#[derive(Debug)] -struct TraceCallTest; +#[derive(Debug, Default)] +struct TraceCallTest { + fee_input: ExpectedFeeInput, +} impl TraceCallTest { + const FEE_SCALE: f64 = 1.2; // set in the tx sender config + fn assert_debug_call(call_request: &CallRequest, call_result: &api::DebugCall) { assert_eq!(call_result.from, Address::zero()); assert_eq!(call_result.gas, call_request.gas.unwrap()); @@ -413,7 +503,7 @@ impl TraceCallTest { #[async_trait] impl HttpTest for TraceCallTest { fn transaction_executor(&self) -> MockOneshotExecutor { - CallTest::create_executor(L2BlockNumber(1)) + CallTest::create_executor(L2BlockNumber(1), self.fee_input.clone()) } async fn test( @@ -424,27 +514,33 @@ impl HttpTest for TraceCallTest { // Store an additional L2 block because L2 block #0 has some special processing making it work incorrectly. let mut connection = pool.connection().await?; store_l2_block(&mut connection, L2BlockNumber(1), &[]).await?; - drop(connection); + self.fee_input.expect_default(Self::FEE_SCALE); let call_request = CallTest::call_request(b"pending"); - let call_result = client.trace_call(call_request.clone(), None, None).await?; + let call_result = client + .trace_call(call_request.clone(), None, None) + .await? 
+ .unwrap_default(); Self::assert_debug_call(&call_request, &call_result); let pending_block_number = api::BlockId::Number(api::BlockNumber::Pending); let call_result = client .trace_call(call_request.clone(), Some(pending_block_number), None) - .await?; + .await? + .unwrap_default(); Self::assert_debug_call(&call_request, &call_result); let latest_block_numbers = [api::BlockNumber::Latest, 1.into()]; let call_request = CallTest::call_request(b"latest"); for number in latest_block_numbers { + self.fee_input.expect_for_block(number, Self::FEE_SCALE); let call_result = client .trace_call( call_request.clone(), Some(api::BlockId::Number(number)), None, ) - .await?; + .await? + .unwrap_default(); Self::assert_debug_call(&call_request, &call_result); } @@ -463,17 +559,35 @@ impl HttpTest for TraceCallTest { panic!("Unexpected error: {error:?}"); } + // Check that the method handler fetches fee inputs for recent blocks. To do that, we create a new block + // with a large fee input; it should be loaded by `ApiFeeInputProvider` and override the input provided by the wrapped mock provider. + let mut block_header = create_l2_block(2); + block_header.batch_fee_input = + ::default_batch_fee_input_scaled( + FeeParams::sensible_v1_default(), + 3.0, + 3.0, + ); + store_custom_l2_block(&mut connection, &block_header, &[]).await?; + // Fee input is not scaled further as per `ApiFeeInputProvider` implementation + self.fee_input.expect_custom(block_header.batch_fee_input); + let call_request = CallTest::call_request(b"block=3"); + let call_result = client.trace_call(call_request.clone(), None, None).await?; + Self::assert_debug_call(&call_request, &call_result.unwrap_default()); + Ok(()) } } #[tokio::test] async fn trace_call_basics() { - test_http_server(TraceCallTest).await; + test_http_server(TraceCallTest::default()).await; } -#[derive(Debug)] -struct TraceCallTestAfterSnapshotRecovery; +#[derive(Debug, Default)] +struct TraceCallTestAfterSnapshotRecovery { + fee_input: ExpectedFeeInput, +} #[async_trait] impl HttpTest for TraceCallTestAfterSnapshotRecovery { @@ -483,7 +597,7 @@ impl HttpTest for TraceCallTestAfterSnapshotRecovery { fn transaction_executor(&self) -> MockOneshotExecutor { let number = StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1; - CallTest::create_executor(number) + CallTest::create_executor(number, self.fee_input.clone()) } async fn test( @@ -491,13 +605,18 @@ impl HttpTest for TraceCallTestAfterSnapshotRecovery { client: &DynClient, _pool: &ConnectionPool, ) -> anyhow::Result<()> { + self.fee_input.expect_default(TraceCallTest::FEE_SCALE); let call_request = CallTest::call_request(b"pending"); - let call_result = client.trace_call(call_request.clone(), None, None).await?; + let call_result = client + .trace_call(call_request.clone(), None, None) + .await? + .unwrap_default(); TraceCallTest::assert_debug_call(&call_request, &call_result); let pending_block_number = api::BlockId::Number(api::BlockNumber::Pending); let call_result = client .trace_call(call_request.clone(), Some(pending_block_number), None) - .await?; + .await? 
+ .unwrap_default(); TraceCallTest::assert_debug_call(&call_request, &call_result); let first_local_l2_block = StorageInitialization::SNAPSHOT_RECOVERY_BLOCK + 1; @@ -514,10 +633,13 @@ impl HttpTest for TraceCallTestAfterSnapshotRecovery { let call_request = CallTest::call_request(b"latest"); let first_l2_block_numbers = [api::BlockNumber::Latest, first_local_l2_block.0.into()]; for number in first_l2_block_numbers { + self.fee_input + .expect_for_block(number, TraceCallTest::FEE_SCALE); let number = api::BlockId::Number(number); let call_result = client .trace_call(call_request.clone(), Some(number), None) - .await?; + .await? + .unwrap_default(); TraceCallTest::assert_debug_call(&call_request, &call_result); } Ok(()) @@ -526,7 +648,7 @@ impl HttpTest for TraceCallTestAfterSnapshotRecovery { #[tokio::test] async fn trace_call_after_snapshot_recovery() { - test_http_server(TraceCallTestAfterSnapshotRecovery).await; + test_http_server(TraceCallTestAfterSnapshotRecovery::default()).await; } #[derive(Debug)] diff --git a/core/node/api_server/src/web3/tests/ws.rs b/core/node/api_server/src/web3/tests/ws.rs index 39f991aba04..28b2e2beb55 100644 --- a/core/node/api_server/src/web3/tests/ws.rs +++ b/core/node/api_server/src/web3/tests/ws.rs @@ -177,13 +177,9 @@ async fn test_ws_server(test: impl WsTest) { drop(storage); let (stop_sender, stop_receiver) = watch::channel(false); - let (mut server_handles, pub_sub_events) = spawn_ws_server( - api_config, - pool.clone(), - stop_receiver, - test.websocket_requests_per_minute_limit(), - ) - .await; + let (mut server_handles, pub_sub_events) = TestServerBuilder::new(pool.clone(), api_config) + .build_ws(test.websocket_requests_per_minute_limit(), stop_receiver) + .await; let local_addr = server_handles.wait_until_ready().await; let client = Client::ws(format!("ws://{local_addr}").parse().unwrap()) diff --git a/core/node/base_token_adjuster/src/base_token_ratio_persister.rs b/core/node/base_token_adjuster/src/base_token_ratio_persister.rs index 220f100e5dc..785c9c4dfd7 100644 --- a/core/node/base_token_adjuster/src/base_token_ratio_persister.rs +++ b/core/node/base_token_adjuster/src/base_token_ratio_persister.rs @@ -90,6 +90,9 @@ impl BaseTokenRatioPersister { result: OperationResult::Success, }] .observe(start_time.elapsed()); + METRICS + .ratio + .set((ratio.numerator.get() as f64) / (ratio.denominator.get() as f64)); return Ok(ratio); } Err(err) => { diff --git a/core/node/base_token_adjuster/src/metrics.rs b/core/node/base_token_adjuster/src/metrics.rs index d84e4da0c0c..17a48c1b5c3 100644 --- a/core/node/base_token_adjuster/src/metrics.rs +++ b/core/node/base_token_adjuster/src/metrics.rs @@ -18,6 +18,7 @@ pub(crate) struct OperationResultLabels { #[metrics(prefix = "base_token_adjuster")] pub(crate) struct BaseTokenAdjusterMetrics { pub l1_gas_used: Gauge, + pub ratio: Gauge, #[metrics(buckets = Buckets::LATENCIES)] pub external_price_api_latency: Family>, #[metrics(buckets = Buckets::LATENCIES)] diff --git a/core/node/consensus/Cargo.toml b/core/node/consensus/Cargo.toml index 707bd957d81..fdcc9089e33 100644 --- a/core/node/consensus/Cargo.toml +++ b/core/node/consensus/Cargo.toml @@ -32,8 +32,8 @@ zksync_system_constants.workspace = true zksync_types.workspace = true zksync_utils.workspace = true zksync_web3_decl.workspace = true -zksync_node_api_server.workspace = true zksync_state.workspace = true +zksync_vm_executor.workspace = true zksync_vm_interface.workspace = true anyhow.workspace = true async-trait.workspace = true @@ -41,7 +41,6 @@ 
secrecy.workspace = true tempfile.workspace = true thiserror.workspace = true tracing.workspace = true -hex.workspace = true tokio.workspace = true semver.workspace = true diff --git a/core/node/consensus/src/config.rs b/core/node/consensus/src/config.rs index 22f8fc01192..3584d533f66 100644 --- a/core/node/consensus/src/config.rs +++ b/core/node/consensus/src/config.rs @@ -1,5 +1,5 @@ //! Configuration utilities for the consensus component. -use std::collections::HashMap; +use std::collections::{BTreeMap, HashMap}; use anyhow::Context as _; use secrecy::{ExposeSecret as _, Secret}; @@ -10,6 +10,7 @@ use zksync_config::{ }; use zksync_consensus_crypto::{Text, TextFmt}; use zksync_consensus_executor as executor; +use zksync_consensus_network as network; use zksync_consensus_roles::{attester, node, validator}; use zksync_dal::consensus_dal; use zksync_types::ethabi; @@ -44,6 +45,7 @@ pub(super) struct GenesisSpec { pub(super) attesters: Option, pub(super) leader_selection: validator::LeaderSelectionMode, pub(super) registry_address: Option, + pub(super) seed_peers: BTreeMap, } impl GenesisSpec { @@ -55,6 +57,7 @@ impl GenesisSpec { attesters: cfg.genesis.attesters.clone(), leader_selection: cfg.genesis.leader_selection.clone(), registry_address: cfg.registry_address, + seed_peers: cfg.seed_peers.clone(), } } @@ -98,6 +101,19 @@ impl GenesisSpec { Some(attester::Committee::new(attesters).context("attesters")?) }, registry_address: x.registry_address, + seed_peers: x + .seed_peers + .iter() + .map(|(key, addr)| { + anyhow::Ok(( + Text::new(&key.0) + .decode::() + .context("key")?, + net::Host(addr.0.clone()), + )) + }) + .collect::>() + .context("seed_peers")?, }) } } @@ -109,9 +125,18 @@ pub(super) fn node_key(secrets: &ConsensusSecrets) -> anyhow::Result, ) -> anyhow::Result { - let mut gossip_static_outbound = HashMap::new(); + // Always connect to seed peers. + // Once we implement dynamic peer discovery, + // we won't establish a persistent connection to seed peers + // but rather just ask them for more peers. + let mut gossip_static_outbound: HashMap<_, _> = global_config + .seed_peers + .iter() + .map(|(k, v)| (k.clone(), v.clone())) + .collect(); { let mut append = |key: &NodePublicKey, addr: &Host| { gossip_static_outbound.insert( @@ -133,6 +158,12 @@ pub(super) fn executor( refresh: time::Duration::ZERO, }; + let debug_page = cfg.debug_page_addr.map(|addr| network::debug_page::Config { + addr, + credentials: None, + tls: None, + }); + Ok(executor::Config { build_version, server_addr: cfg.server_addr, @@ -152,8 +183,7 @@ pub(super) fn executor( .context("gossip_static_inbound")?, gossip_static_outbound, rpc, - // TODO: Add to configuration - debug_page: None, + debug_page, batch_poll_interval: time::Duration::seconds(1), }) } diff --git a/core/node/consensus/src/en.rs b/core/node/consensus/src/en.rs index a52393c0f48..e4be8d9d687 100644 --- a/core/node/consensus/src/en.rs +++ b/core/node/consensus/src/en.rs @@ -125,7 +125,7 @@ impl EN { )); let executor = executor::Executor { - config: config::executor(&cfg, &secrets, build_version)?, + config: config::executor(&cfg, &secrets, &global_config, build_version)?, block_store, batch_store, validator: config::validator_key(&secrets) @@ -282,7 +282,11 @@ impl EN { match ctx.wait(self.client.consensus_global_config()).await? 
{ Ok(cfg) => { let cfg = cfg.context("main node is not running consensus component")?; - return Ok(zksync_protobuf::serde::deserialize(&cfg.0).context("deserialize()")?); + return Ok(zksync_protobuf::serde::Deserialize { + deny_unknown_fields: false, + } + .proto_fmt(&cfg.0) + .context("deserialize()")?); } // For non-whitelisted methods, proxyd returns HTTP 403 with MethodNotFound in the body. // For some stupid reason ClientError doesn't expose HTTP error codes. @@ -302,8 +306,13 @@ impl EN { .context("consensus_genesis()")? .context("main node is not running consensus component")?; Ok(consensus_dal::GlobalConfig { - genesis: zksync_protobuf::serde::deserialize(&genesis.0).context("deserialize()")?, + genesis: zksync_protobuf::serde::Deserialize { + deny_unknown_fields: false, + } + .proto_fmt(&genesis.0) + .context("deserialize()")?, registry_address: None, + seed_peers: [].into(), }) } @@ -317,7 +326,11 @@ impl EN { .await? .context("attestation_status()")? .context("main node is not running consensus component")?; - Ok(zksync_protobuf::serde::deserialize(&status.0).context("deserialize()")?) + Ok(zksync_protobuf::serde::Deserialize { + deny_unknown_fields: false, + } + .proto_fmt(&status.0) + .context("deserialize()")?) } /// Fetches (with retries) the given block from the main node. diff --git a/core/node/consensus/src/mn.rs b/core/node/consensus/src/mn.rs index 4d428346ebe..f80bfe58954 100644 --- a/core/node/consensus/src/mn.rs +++ b/core/node/consensus/src/mn.rs @@ -76,12 +76,12 @@ pub async fn run_main_node( s.spawn_bg(run_attestation_controller( ctx, &pool, - global_config, + global_config.clone(), attestation.clone(), )); let executor = executor::Executor { - config: config::executor(&cfg, &secrets, None)?, + config: config::executor(&cfg, &secrets, &global_config, None)?, block_store, batch_store, validator: Some(executor::Validator { diff --git a/core/node/consensus/src/registry/testonly.rs b/core/node/consensus/src/registry/testonly.rs index a0c55a557fe..07a87e3b676 100644 --- a/core/node/consensus/src/registry/testonly.rs +++ b/core/node/consensus/src/registry/testonly.rs @@ -13,7 +13,7 @@ pub(crate) fn make_tx( ) -> Transaction { account.get_l2_tx_for_execute( Execute { - contract_address: *address, + contract_address: Some(*address), calldata: call.calldata().unwrap(), value: U256::zero(), factory_deps: vec![], diff --git a/core/node/consensus/src/storage/connection.rs b/core/node/consensus/src/storage/connection.rs index 512b37e81a1..0f9d7c8527f 100644 --- a/core/node/consensus/src/storage/connection.rs +++ b/core/node/consensus/src/storage/connection.rs @@ -5,10 +5,12 @@ use zksync_consensus_roles::{attester, attester::BatchNumber, validator}; use zksync_consensus_storage::{self as storage, BatchStoreState}; use zksync_dal::{consensus_dal, consensus_dal::Payload, Core, CoreDal, DalError}; use zksync_l1_contract_interface::i_executor::structures::StoredBatchInfo; -use zksync_node_api_server::execution_sandbox::{BlockArgs, BlockStartInfo}; use zksync_node_sync::{fetcher::IoCursorExt as _, ActionQueueSender, SyncState}; use zksync_state_keeper::io::common::IoCursor; -use zksync_types::{api, commitment::L1BatchWithMetadata, L1BatchNumber}; +use zksync_types::{ + commitment::L1BatchWithMetadata, fee_model::BatchFeeInput, L1BatchNumber, L2BlockNumber, +}; +use zksync_vm_executor::oneshot::{BlockInfo, ResolvedBlockInfo}; use super::{InsertCertificateError, PayloadQueue}; use crate::config; @@ -317,6 +319,7 @@ impl<'a> Connection<'a> { } .with_hash(), registry_address:
spec.registry_address, + seed_peers: spec.seed_peers.clone(), }; txn.try_update_global_config(ctx, &new) @@ -469,27 +472,30 @@ impl<'a> Connection<'a> { } /// Constructs `BlockArgs` for the last block of the batch. - pub async fn vm_block_args( + pub async fn vm_block_info( &mut self, ctx: &ctx::Ctx, batch: attester::BatchNumber, - ) -> ctx::Result { + ) -> ctx::Result<(ResolvedBlockInfo, BatchFeeInput)> { let (_, block) = self .get_l2_block_range_of_l1_batch(ctx, batch) .await .wrap("get_l2_block_range_of_l1_batch()")? .context("batch not sealed")?; - let block = api::BlockId::Number(api::BlockNumber::Number(block.0.into())); - let start_info = ctx - .wait(BlockStartInfo::new( - &mut self.0, - /*max_cache_age=*/ std::time::Duration::from_secs(10), - )) + // `unwrap()` is safe: the block range is returned as `L2BlockNumber`s + let block = L2BlockNumber(u32::try_from(block.0).unwrap()); + let block_info = ctx + .wait(BlockInfo::for_existing_block(&mut self.0, block)) .await? - .context("BlockStartInfo::new()")?; - Ok(ctx - .wait(BlockArgs::new(&mut self.0, block, &start_info)) + .context("BlockInfo")?; + let resolved_block_info = ctx + .wait(block_info.resolve(&mut self.0)) + .await? + .context("resolve()")?; + let fee_input = ctx + .wait(block_info.historical_fee_input(&mut self.0)) .await? - .context("BlockArgs::new")?) + .context("historical_fee_input()")?; + Ok((resolved_block_info, fee_input)) } } diff --git a/core/node/consensus/src/testonly.rs b/core/node/consensus/src/testonly.rs index 241998f2692..04a2dfbc083 100644 --- a/core/node/consensus/src/testonly.rs +++ b/core/node/consensus/src/testonly.rs @@ -20,7 +20,7 @@ use zksync_l1_contract_interface::i_executor::structures::StoredBatchInfo; use zksync_metadata_calculator::{ LazyAsyncTreeReader, MetadataCalculator, MetadataCalculatorConfig, }; -use zksync_node_api_server::web3::{state::InternalApiConfig, testonly::spawn_http_server}; +use zksync_node_api_server::web3::{state::InternalApiConfig, testonly::TestServerBuilder}; use zksync_node_genesis::GenesisParams; use zksync_node_sync::{ fetcher::{FetchedTransaction, IoCursorExt as _}, @@ -91,11 +91,8 @@ impl ConfigSet { } } -pub(super) fn new_configs( - rng: &mut impl Rng, - setup: &Setup, - gossip_peers: usize, -) -> Vec { +pub(super) fn new_configs(rng: &mut impl Rng, setup: &Setup, seed_peers: usize) -> Vec { + let net_cfgs = network::testonly::new_configs(rng, setup, 0); let genesis_spec = config::GenesisSpec { chain_id: setup.genesis.chain_id.0.try_into().unwrap(), protocol_version: config::ProtocolVersion(setup.genesis.protocol_version.0), @@ -117,8 +114,17 @@ pub(super) fn new_configs( .collect(), leader: config::ValidatorPublicKey(setup.validator_keys[0].public().encode()), registry_address: None, + seed_peers: net_cfgs[..seed_peers] + .iter() + .map(|c| { + ( + config::NodePublicKey(c.gossip.key.public().encode()), + config::Host(c.public_addr.0.clone()), + ) + }) + .collect(), }; - network::testonly::new_configs(rng, setup, gossip_peers) + net_cfgs .into_iter() .enumerate() .map(|(i, net)| ConfigSet { @@ -172,6 +178,7 @@ fn make_config( // genesis generator for zksync-era tests. 
genesis_spec, rpc: None, + debug_page_addr: None, } } @@ -615,7 +622,7 @@ impl StateKeeperRunner { }); s.spawn_bg({ - let executor_factory = MainBatchExecutorFactory::new(false, false); + let executor_factory = MainBatchExecutorFactory::<()>::new(false); let stop_recv = stop_recv.clone(); async { ZkSyncStateKeeper::new( @@ -640,14 +647,9 @@ impl StateKeeperRunner { &configs::contracts::ContractsConfig::for_tests(), &configs::GenesisConfig::for_tests(), ); - let mut server = spawn_http_server( - cfg, - self.pool.0.clone(), - Default::default(), - Arc::default(), - stop_recv, - ) - .await; + let mut server = TestServerBuilder::new(self.pool.0.clone(), cfg) + .build_http(stop_recv) + .await; if let Ok(addr) = ctx.wait(server.wait_until_ready()).await { self.addr.send_replace(Some(addr)); tracing::info!("API server ready!"); @@ -725,14 +727,9 @@ impl StateKeeperRunner { &configs::contracts::ContractsConfig::for_tests(), &configs::GenesisConfig::for_tests(), ); - let mut server = spawn_http_server( - cfg, - self.pool.0.clone(), - Default::default(), - Arc::default(), - stop_recv, - ) - .await; + let mut server = TestServerBuilder::new(self.pool.0.clone(), cfg) + .build_http(stop_recv) + .await; if let Ok(addr) = ctx.wait(server.wait_until_ready()).await { self.addr.send_replace(Some(addr)); tracing::info!("API server ready!"); diff --git a/core/node/consensus/src/tests/attestation.rs b/core/node/consensus/src/tests/attestation.rs index abd35508c7f..35d849ae616 100644 --- a/core/node/consensus/src/tests/attestation.rs +++ b/core/node/consensus/src/tests/attestation.rs @@ -47,6 +47,7 @@ async fn test_attestation_status_api(version: ProtocolVersionId) { &consensus_dal::GlobalConfig { genesis: setup.genesis.clone(), registry_address: None, + seed_peers: [].into(), }, ) .await @@ -62,8 +63,11 @@ async fn test_attestation_status_api(version: ProtocolVersionId) { .wait(api.attestation_status()) .await?? 
.context("no attestation_status")?; - let s: consensus_dal::AttestationStatus = - zksync_protobuf::serde::deserialize(&s.0).context("deserialize()")?; + let s: consensus_dal::AttestationStatus = zksync_protobuf::serde::Deserialize { + deny_unknown_fields: true, + } + .proto_fmt(&s.0) + .context("deserialize()")?; anyhow::ensure!(s.genesis == setup.genesis.hash(), "genesis hash mismatch"); Ok(s) }; diff --git a/core/node/consensus/src/tests/mod.rs b/core/node/consensus/src/tests/mod.rs index 91f01f865a2..52abe3c810c 100644 --- a/core/node/consensus/src/tests/mod.rs +++ b/core/node/consensus/src/tests/mod.rs @@ -1,9 +1,12 @@ use anyhow::Context as _; +use rand::Rng as _; use test_casing::{test_casing, Product}; use tracing::Instrument as _; -use zksync_concurrency::{ctx, error::Wrap, scope}; +use zksync_concurrency::{ctx, error::Wrap as _, scope}; +use zksync_config::configs::consensus as config; +use zksync_consensus_crypto::TextFmt as _; use zksync_consensus_roles::{ - validator, + node, validator, validator::testonly::{Setup, SetupSpec}, }; use zksync_consensus_storage::BlockStore; @@ -49,6 +52,7 @@ async fn test_validator_block_store(version: ProtocolVersionId) { &consensus_dal::GlobalConfig { genesis: setup.genesis.clone(), registry_address: None, + seed_peers: [].into(), }, ) .await @@ -241,6 +245,80 @@ async fn test_nodes_from_various_snapshots(version: ProtocolVersionId) { .unwrap(); } +#[test_casing(4, Product((FROM_SNAPSHOT,VERSIONS)))] +#[tokio::test] +async fn test_config_change(from_snapshot: bool, version: ProtocolVersionId) { + zksync_concurrency::testonly::abort_on_panic(); + let ctx = &ctx::test_root(&ctx::AffineClock::new(10.)); + let rng = &mut ctx.rng(); + let setup = Setup::new(rng, 1); + let mut validator_cfg = testonly::new_configs(rng, &setup, 0)[0].clone(); + let node_cfg = validator_cfg.new_fullnode(rng); + let account = &mut Account::random(); + + let validator_pool = ConnectionPool::test(from_snapshot, version).await; + let node_pool = ConnectionPool::test(from_snapshot, version).await; + + for i in 0..3 { + tracing::info!("iteration {i}"); + tracing::info!("apply an arbitrary change to configuration"); + validator_cfg + .config + .genesis_spec + .as_mut() + .unwrap() + .seed_peers + .insert( + config::NodePublicKey(rng.gen::().encode()), + config::Host("43.54.22.88:7634".to_string()), + ); + scope::run!(ctx, |ctx, s| async { + let (mut validator, runner) = + testonly::StateKeeper::new(ctx, validator_pool.clone()).await?; + s.spawn_bg(async { + runner + .run(ctx) + .instrument(tracing::info_span!("validator")) + .await + .context("validator") + }); + tracing::info!("Run validator."); + s.spawn_bg(run_main_node( + ctx, + validator_cfg.config.clone(), + validator_cfg.secrets.clone(), + validator_pool.clone(), + )); + + // Wait for the main node to start and to update the global config. + validator.seal_batch().await; + validator_pool + .wait_for_block_certificate(ctx, validator.last_block()) + .await?; + + tracing::info!("Run node."); + let (node, runner) = testonly::StateKeeper::new(ctx, node_pool.clone()).await?; + s.spawn_bg(runner.run(ctx).instrument(tracing::info_span!("node"))); + s.spawn_bg(node.run_consensus(ctx, validator.connect(ctx).await?, node_cfg.clone())); + + validator.push_random_blocks(rng, account, 5).await; + let want_last = validator.last_block(); + let want = validator_pool + .wait_for_block_certificates_and_verify(ctx, want_last) + .await?; + assert_eq!( + want, + node_pool + .wait_for_block_certificates_and_verify(ctx, want_last) + .await? 
+ ); + Ok(()) + }) + .await + .unwrap(); + } +} + // Test running a validator node and a couple of full nodes. // Validator is producing signed blocks and fetchers are expected to fetch // them directly or indirectly. diff --git a/core/node/consensus/src/vm.rs b/core/node/consensus/src/vm.rs index 11b6b5c67e3..149e6b3ccb0 100644 --- a/core/node/consensus/src/vm.rs +++ b/core/node/consensus/src/vm.rs @@ -1,17 +1,13 @@ use anyhow::Context as _; -use zksync_concurrency::{ctx, error::Wrap as _, scope}; +use tokio::runtime::Handle; +use zksync_concurrency::{ctx, error::Wrap as _}; use zksync_consensus_roles::attester; -use zksync_node_api_server::{ - execution_sandbox::{TransactionExecutor, TxSetupArgs, VmConcurrencyLimiter}, - tx_sender::MultiVMBaseSystemContracts, -}; -use zksync_state::PostgresStorageCaches; +use zksync_state::PostgresStorage; use zksync_system_constants::DEFAULT_L2_TX_GAS_PER_PUBDATA_BYTE; -use zksync_types::{ - ethabi, fee::Fee, fee_model::BatchFeeInput, l2::L2Tx, AccountTreeId, L2ChainId, Nonce, U256, -}; +use zksync_types::{ethabi, fee::Fee, l2::L2Tx, AccountTreeId, L2ChainId, Nonce, U256}; +use zksync_vm_executor::oneshot::{CallOrExecute, MainOneshotExecutor, OneshotEnvParameters}; use zksync_vm_interface::{ - ExecutionResult, OneshotTracingParams, TxExecutionArgs, TxExecutionMode, + executor::OneshotExecutor, ExecutionResult, OneshotTracingParams, TxExecutionArgs, }; use crate::{abi, storage::ConnectionPool}; @@ -20,8 +16,8 @@ use crate::{abi, storage::ConnectionPool}; #[derive(Debug)] pub(crate) struct VM { pool: ConnectionPool, - setup_args: TxSetupArgs, - limiter: VmConcurrencyLimiter, + options: OneshotEnvParameters, + executor: MainOneshotExecutor, } impl VM { @@ -29,25 +25,18 @@ impl VM { pub async fn new(pool: ConnectionPool) -> Self { Self { pool, - setup_args: TxSetupArgs { - execution_mode: TxExecutionMode::EthCall, - operator_account: AccountTreeId::default(), - fee_input: BatchFeeInput::sensible_l1_pegged_default(), - base_system_contracts: scope::wait_blocking( - MultiVMBaseSystemContracts::load_eth_call_blocking, - ) - .await, - caches: PostgresStorageCaches::new(1, 1), - validation_computational_gas_limit: u32::MAX, - chain_id: L2ChainId::default(), - whitelisted_tokens_for_aa: vec![], - enforced_base_fee: None, - }, - limiter: VmConcurrencyLimiter::new(1).0, + // L2 chain ID and fee account don't seem to matter for calls, hence the use of default values. 
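+            // `u32::MAX` carries over the removed `validation_computational_gas_limit: u32::MAX`, i.e. validation gas is effectively unlimited for these read-only calls.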
+ options: OneshotEnvParameters::for_execution( + L2ChainId::default(), + AccountTreeId::default(), + u32::MAX, + ) + .await + .expect("OneshotExecutorOptions"), + executor: MainOneshotExecutor::new(usize::MAX), } } - // FIXME (PLA-1018): switch to oneshot executor pub async fn call( &self, ctx: &ctx::Ctx, @@ -56,7 +45,7 @@ impl VM { call: abi::Call, ) -> ctx::Result { let tx = L2Tx::new( - *address, + Some(*address), call.calldata().context("call.calldata()")?, Nonce(0), Fee { @@ -70,25 +59,39 @@ impl VM { vec![], Default::default(), ); - let permit = ctx.wait(self.limiter.acquire()).await?.unwrap(); + let mut conn = self.pool.connection(ctx).await.wrap("connection()")?; - let args = conn - .vm_block_args(ctx, batch) + let (block_info, fee_input) = conn + .vm_block_info(ctx, batch) .await - .wrap("vm_block_args()")?; - let output = ctx - .wait(TransactionExecutor::real(usize::MAX).execute_tx_in_sandbox( - permit, - self.setup_args.clone(), - TxExecutionArgs::for_eth_call(tx.clone()), + .wrap("vm_block_info()")?; + let env = ctx + .wait( + self.options + .to_call_env(&mut conn.0, &block_info, fee_input, None), + ) + .await? + .context("to_env()")?; + let storage = ctx + .wait(PostgresStorage::new_async( + Handle::current(), conn.0, - args, - None, + block_info.state_l2_block_number(), + false, + )) + .await? + .context("PostgresStorage")?; + + let output = ctx + .wait(self.executor.inspect_transaction_with_bytecode_compression( + storage, + env, + TxExecutionArgs::for_eth_call(tx), OneshotTracingParams::default(), )) .await? .context("execute_tx_in_sandbox()")?; - match output.vm.result { + match output.tx_result.result { ExecutionResult::Success { output } => { Ok(call.decode_outputs(&output).context("decode_output()")?) } diff --git a/core/node/da_clients/src/avail/client.rs b/core/node/da_clients/src/avail/client.rs index 021906d73a0..7718691bf18 100644 --- a/core/node/da_clients/src/avail/client.rs +++ b/core/node/da_clients/src/avail/client.rs @@ -2,7 +2,8 @@ use std::{fmt::Debug, sync::Arc}; use async_trait::async_trait; use jsonrpsee::ws_client::WsClientBuilder; -use zksync_config::AvailConfig; +use subxt_signer::ExposeSecret; +use zksync_config::configs::da_client::avail::{AvailConfig, AvailSecrets}; use zksync_da_client::{ types::{DAError, DispatchResponse, InclusionData}, DataAvailabilityClient, @@ -18,8 +19,11 @@ pub struct AvailClient { } impl AvailClient { - pub async fn new(config: AvailConfig) -> anyhow::Result { - let sdk_client = RawAvailClient::new(config.app_id, config.seed.clone()).await?; + pub async fn new(config: AvailConfig, secrets: AvailSecrets) -> anyhow::Result { + let seed_phrase = secrets + .seed_phrase + .ok_or_else(|| anyhow::anyhow!("seed phrase"))?; + let sdk_client = RawAvailClient::new(config.app_id, seed_phrase.0.expose_secret()).await?; Ok(Self { config, diff --git a/core/node/da_clients/src/avail/sdk.rs b/core/node/da_clients/src/avail/sdk.rs index 5e67540fcc6..002422109d0 100644 --- a/core/node/da_clients/src/avail/sdk.rs +++ b/core/node/da_clients/src/avail/sdk.rs @@ -40,7 +40,7 @@ struct BoundedVec<_0>(pub Vec<_0>); impl RawAvailClient { pub(crate) const MAX_BLOB_SIZE: usize = 512 * 1024; // 512kb - pub(crate) async fn new(app_id: u32, seed: String) -> anyhow::Result { + pub(crate) async fn new(app_id: u32, seed: &str) -> anyhow::Result { let mnemonic = Mnemonic::parse(seed)?; let keypair = Keypair::from_phrase(&mnemonic, None)?; diff --git a/core/node/eth_sender/src/aggregator.rs b/core/node/eth_sender/src/aggregator.rs index 
1e0bd315b9d..4045e9ca3d8 100644 --- a/core/node/eth_sender/src/aggregator.rs +++ b/core/node/eth_sender/src/aggregator.rs @@ -526,22 +526,5 @@ pub async fn load_wrapped_fri_proofs_for_range( } } - // We also check file with deprecated name if patch 0 is allowed. - // TODO: remove in the next release. - let is_patch_0_present = allowed_versions.iter().any(|v| v.patch.0 == 0); - if is_patch_0_present { - match blob_store - .get_by_encoded_key(format!("l1_batch_proof_{l1_batch_number}.bin")) - .await - { - Ok(proof) => return Some(proof), - Err(ObjectStoreError::KeyNotFound(_)) => (), // do nothing, proof is not ready yet - Err(err) => panic!( - "Failed to load proof for batch {}: {}", - l1_batch_number.0, err - ), - } - } - None } diff --git a/core/node/eth_sender/src/eth_fees_oracle.rs b/core/node/eth_sender/src/eth_fees_oracle.rs index 271a33d49c3..ebd1568edb6 100644 --- a/core/node/eth_sender/src/eth_fees_oracle.rs +++ b/core/node/eth_sender/src/eth_fees_oracle.rs @@ -23,7 +23,7 @@ pub(crate) trait EthFeesOracle: 'static + Sync + Send + fmt::Debug { fn calculate_fees( &self, previous_sent_tx: &Option, - time_in_mempool: u32, + time_in_mempool_in_l1_blocks: u32, operator_type: OperatorType, ) -> Result; } @@ -32,16 +32,28 @@ pub(crate) trait EthFeesOracle: 'static + Sync + Send + fmt::Debug { pub(crate) struct GasAdjusterFeesOracle { pub gas_adjuster: Arc, pub max_acceptable_priority_fee_in_gwei: u64, + pub time_in_mempool_in_l1_blocks_cap: u32, } impl GasAdjusterFeesOracle { + fn assert_fee_is_not_zero(&self, value: u64, fee_type: &'static str) { + if value == 0 { + panic!( + "L1 RPC incorrectly reported {fee_type} prices, either it doesn't return them at \ + all or returns 0's, eth-sender cannot continue without proper {fee_type} prices!" + ); + } + } fn calculate_fees_with_blob_sidecar( &self, previous_sent_tx: &Option, ) -> Result { let base_fee_per_gas = self.gas_adjuster.get_blob_tx_base_fee(); + self.assert_fee_is_not_zero(base_fee_per_gas, "base"); let priority_fee_per_gas = self.gas_adjuster.get_blob_tx_priority_fee(); - let blob_base_fee_per_gas = Some(self.gas_adjuster.get_blob_tx_blob_base_fee()); + let blob_base_fee_per_gas = self.gas_adjuster.get_blob_tx_blob_base_fee(); + self.assert_fee_is_not_zero(blob_base_fee_per_gas, "blob"); + let blob_base_fee_per_gas = Some(blob_base_fee_per_gas); if let Some(previous_sent_tx) = previous_sent_tx { // for blob transactions on re-sending need to double all gas prices @@ -69,9 +81,17 @@ impl GasAdjusterFeesOracle { fn calculate_fees_no_blob_sidecar( &self, previous_sent_tx: &Option, - time_in_mempool: u32, + time_in_mempool_in_l1_blocks: u32, ) -> Result { - let mut base_fee_per_gas = self.gas_adjuster.get_base_fee(time_in_mempool); + // we cap it to not allow nearly infinite values when a tx is stuck for a long time + let capped_time_in_mempool_in_l1_blocks = min( + time_in_mempool_in_l1_blocks, + self.time_in_mempool_in_l1_blocks_cap, + ); + let mut base_fee_per_gas = self + .gas_adjuster + .get_base_fee(capped_time_in_mempool_in_l1_blocks); + self.assert_fee_is_not_zero(base_fee_per_gas, "base"); if let Some(previous_sent_tx) = previous_sent_tx { self.verify_base_fee_not_too_low_on_resend( previous_sent_tx.id, @@ -148,14 +168,14 @@ impl EthFeesOracle for GasAdjusterFeesOracle { fn calculate_fees( &self, previous_sent_tx: &Option, - time_in_mempool: u32, + time_in_mempool_in_l1_blocks: u32, operator_type: OperatorType, ) -> Result { let has_blob_sidecar = operator_type == OperatorType::Blob; if has_blob_sidecar { 
self.calculate_fees_with_blob_sidecar(previous_sent_tx) } else { - self.calculate_fees_no_blob_sidecar(previous_sent_tx, time_in_mempool) + self.calculate_fees_no_blob_sidecar(previous_sent_tx, time_in_mempool_in_l1_blocks) } } } diff --git a/core/node/eth_sender/src/eth_tx_manager.rs b/core/node/eth_sender/src/eth_tx_manager.rs index 0d78ab71c62..7de91a3b773 100644 --- a/core/node/eth_sender/src/eth_tx_manager.rs +++ b/core/node/eth_sender/src/eth_tx_manager.rs @@ -48,6 +48,7 @@ impl EthTxManager { let fees_oracle = GasAdjusterFeesOracle { gas_adjuster, max_acceptable_priority_fee_in_gwei: config.max_acceptable_priority_fee_in_gwei, + time_in_mempool_in_l1_blocks_cap: config.time_in_mempool_in_l1_blocks_cap, }; let l1_interface = Box::new(RealL1Interface { ethereum_gateway, @@ -111,7 +112,7 @@ impl EthTxManager { &mut self, storage: &mut Connection<'_, Core>, tx: &EthTx, - time_in_mempool: u32, + time_in_mempool_in_l1_blocks: u32, current_block: L1BlockNumber, ) -> Result { let previous_sent_tx = storage @@ -127,7 +128,7 @@ impl EthTxManager { pubdata_price: _, } = self.fees_oracle.calculate_fees( &previous_sent_tx, - time_in_mempool, + time_in_mempool_in_l1_blocks, self.operator_type(tx), )?; @@ -601,13 +602,18 @@ impl EthTxManager { .await? { // New gas price depends on the time this tx spent in mempool. - let time_in_mempool = l1_block_numbers.latest.0 - sent_at_block; + let time_in_mempool_in_l1_blocks = l1_block_numbers.latest.0 - sent_at_block; // We don't want to return early in case resend does not succeed - // the error is logged anyway, but early returns will prevent // sending new operations. let _ = self - .send_eth_tx(storage, &tx, time_in_mempool, l1_block_numbers.latest) + .send_eth_tx( + storage, + &tx, + time_in_mempool_in_l1_blocks, + l1_block_numbers.latest, + ) .await?; } Ok(()) diff --git a/core/node/eth_sender/src/tester.rs b/core/node/eth_sender/src/tester.rs index c6d993a9c97..9be1384daae 100644 --- a/core/node/eth_sender/src/tester.rs +++ b/core/node/eth_sender/src/tester.rs @@ -153,7 +153,7 @@ impl EthSenderTester { .into_iter() .map(|base_fee_per_gas| BaseFees { base_fee_per_gas, - base_fee_per_blob_gas: 0.into(), + base_fee_per_blob_gas: 1.into(), l2_pubdata_price: 0.into(), }) .collect(); @@ -161,8 +161,8 @@ impl EthSenderTester { let gateway = MockSettlementLayer::builder() .with_fee_history( std::iter::repeat_with(|| BaseFees { - base_fee_per_gas: 0, - base_fee_per_blob_gas: 0.into(), + base_fee_per_gas: 1, + base_fee_per_blob_gas: 1.into(), l2_pubdata_price: 0.into(), }) .take(Self::WAIT_CONFIRMATIONS as usize) @@ -181,8 +181,8 @@ impl EthSenderTester { let l2_gateway: MockSettlementLayer = MockSettlementLayer::builder() .with_fee_history( std::iter::repeat_with(|| BaseFees { - base_fee_per_gas: 0, - base_fee_per_blob_gas: 0.into(), + base_fee_per_gas: 1, + base_fee_per_blob_gas: 1.into(), l2_pubdata_price: 0.into(), }) .take(Self::WAIT_CONFIRMATIONS as usize) @@ -201,8 +201,8 @@ impl EthSenderTester { let gateway_blobs = MockSettlementLayer::builder() .with_fee_history( std::iter::repeat_with(|| BaseFees { - base_fee_per_gas: 0, - base_fee_per_blob_gas: 0.into(), + base_fee_per_gas: 1, + base_fee_per_blob_gas: 1.into(), l2_pubdata_price: 0.into(), }) .take(Self::WAIT_CONFIRMATIONS as usize) diff --git a/core/node/eth_sender/src/tests.rs b/core/node/eth_sender/src/tests.rs index 1912e246e7e..148022bc774 100644 --- a/core/node/eth_sender/src/tests.rs +++ b/core/node/eth_sender/src/tests.rs @@ -82,7 +82,7 @@ pub(crate) fn default_l1_batch_metadata() -> 
L1BatchMetadata { events_queue_commitment: Some(H256::zero()), bootloader_initial_content_commitment: Some(H256::zero()), state_diffs_compressed: vec![], - da_blob_id: Some(vec![]) + da_blob_id: Some(vec![]), } } diff --git a/core/node/eth_watch/Cargo.toml b/core/node/eth_watch/Cargo.toml index a3d6325f4a2..985649c35da 100644 --- a/core/node/eth_watch/Cargo.toml +++ b/core/node/eth_watch/Cargo.toml @@ -28,3 +28,4 @@ async-recursion.workspace = true [dev-dependencies] zksync_concurrency.workspace = true +test-log.workspace = true diff --git a/core/node/eth_watch/src/client.rs b/core/node/eth_watch/src/client.rs index 237c8e5bc2e..ac5fc86c6e9 100644 --- a/core/node/eth_watch/src/client.rs +++ b/core/node/eth_watch/src/client.rs @@ -1,7 +1,9 @@ use std::fmt; use anyhow::Context; -use zksync_contracts::{state_transition_manager_contract, verifier_contract}; +use zksync_contracts::{ + getters_facet_contract, state_transition_manager_contract, verifier_contract, +}; use zksync_eth_client::{ clients::{DynClient, L1}, CallFunctionArgs, ClientError, ContractCallError, EnrichedClientError, EnrichedClientResult, @@ -10,7 +12,7 @@ use zksync_eth_client::{ use zksync_types::{ ethabi::Contract, web3::{BlockId, BlockNumber, FilterBuilder, Log}, - Address, H256, + Address, SLChainId, H256, U256, }; /// L1 client functionality used by [`EthWatch`](crate::EthWatch) and constituent event processors. @@ -21,10 +23,14 @@ pub trait EthClient: 'static + fmt::Debug + Send + Sync { &self, from: BlockNumber, to: BlockNumber, + topic1: H256, + topic2: Option, retries_left: usize, ) -> EnrichedClientResult>; /// Returns finalized L1 block number. async fn finalized_block_number(&self) -> EnrichedClientResult; + + async fn get_total_priority_txs(&self) -> Result; /// Returns scheduler verification key hash by verifier address. async fn scheduler_vk_hash(&self, verifier_address: Address) -> Result; @@ -33,20 +39,21 @@ pub trait EthClient: 'static + fmt::Debug + Send + Sync { &self, packed_version: H256, ) -> EnrichedClientResult>>; - /// Sets list of topics to return events for. - fn set_topics(&mut self, topics: Vec); + + async fn chain_id(&self) -> EnrichedClientResult; } pub const RETRY_LIMIT: usize = 5; const TOO_MANY_RESULTS_INFURA: &str = "query returned more than"; const TOO_MANY_RESULTS_ALCHEMY: &str = "response size exceeded"; -const TOO_MANY_RESULTS_RETH: &str = "query exceeds max block range"; +const TOO_MANY_RESULTS_RETH: &str = "length limit exceeded"; +const TOO_BIG_RANGE_RETH: &str = "query exceeds max block range"; +const TOO_MANY_RESULTS_CHAINSTACK: &str = "range limit exceeded"; /// Implementation of [`EthClient`] based on HTTP JSON-RPC (encapsulated via [`EthInterface`]). -#[derive(Debug)] +#[derive(Debug, Clone)] pub struct EthHttpQueryClient { client: Box>, - topics: Vec, diamond_proxy_addr: Address, governance_address: Address, new_upgrade_cut_data_signature: H256, @@ -54,6 +61,7 @@ pub struct EthHttpQueryClient { state_transition_manager_address: Option
<Address>, chain_admin_address: Option<Address>
, verifier_contract_abi: Contract, + getters_facet_contract_abi: Contract, confirmations_for_eth_event: Option, } @@ -73,7 +81,6 @@ impl EthHttpQueryClient { ); Self { client: client.for_component("watch"), - topics: Vec::new(), diamond_proxy_addr, state_transition_manager_address, chain_admin_address, @@ -84,6 +91,7 @@ impl EthHttpQueryClient { .unwrap() .signature(), verifier_contract_abi: verifier_contract(), + getters_facet_contract_abi: getters_facet_contract(), confirmations_for_eth_event, } } @@ -142,6 +150,8 @@ impl EthHttpQueryClient { if err_message.contains(TOO_MANY_RESULTS_INFURA) || err_message.contains(TOO_MANY_RESULTS_ALCHEMY) || err_message.contains(TOO_MANY_RESULTS_RETH) + || err_message.contains(TOO_BIG_RANGE_RETH) + || err_message.contains(TOO_MANY_RESULTS_CHAINSTACK) { // get the numeric block ids let from_number = match from { @@ -249,13 +259,15 @@ impl EthClient for EthHttpQueryClient { &self, from: BlockNumber, to: BlockNumber, + topic1: H256, + topic2: Option, retries_left: usize, ) -> EnrichedClientResult> { self.get_events_inner( from, to, - Some(self.topics.clone()), - None, + Some(vec![topic1]), + topic2.map(|topic2| vec![topic2]), Some(self.get_default_address_list()), retries_left, ) @@ -283,7 +295,15 @@ impl EthClient for EthHttpQueryClient { } } - fn set_topics(&mut self, topics: Vec) { - self.topics = topics; + async fn get_total_priority_txs(&self) -> Result { + CallFunctionArgs::new("getTotalPriorityTxs", ()) + .for_contract(self.diamond_proxy_addr, &self.getters_facet_contract_abi) + .call(&self.client) + .await + .map(|x: U256| x.try_into().unwrap()) + } + + async fn chain_id(&self) -> EnrichedClientResult { + Ok(self.client.fetch_chain_id().await?) } } diff --git a/core/node/eth_watch/src/event_processors/decentralized_upgrades.rs b/core/node/eth_watch/src/event_processors/decentralized_upgrades.rs index dff10662e98..aa43e7239f8 100644 --- a/core/node/eth_watch/src/event_processors/decentralized_upgrades.rs +++ b/core/node/eth_watch/src/event_processors/decentralized_upgrades.rs @@ -1,5 +1,5 @@ use anyhow::Context as _; -use zksync_dal::{Connection, Core, CoreDal, DalError}; +use zksync_dal::{eth_watcher_dal::EventType, Connection, Core, CoreDal, DalError}; use zksync_types::{ ethabi::Contract, protocol_version::ProtocolSemanticVersion, web3::Log, ProtocolUpgrade, H256, U256, @@ -7,7 +7,7 @@ use zksync_types::{ use crate::{ client::EthClient, - event_processors::{EventProcessor, EventProcessorError}, + event_processors::{EventProcessor, EventProcessorError, EventsSource}, metrics::{PollStage, METRICS}, }; @@ -40,18 +40,18 @@ impl EventProcessor for DecentralizedUpgradesEventProcessor { async fn process_events( &mut self, storage: &mut Connection<'_, Core>, - client: &dyn EthClient, + sl_client: &dyn EthClient, events: Vec, - ) -> Result<(), EventProcessorError> { + ) -> Result { let mut upgrades = Vec::new(); - for event in events { + for event in &events { let version = event.topics.get(1).copied().context("missing topic 1")?; let timestamp: u64 = U256::from_big_endian(&event.data.0) .try_into() .ok() .context("upgrade timestamp is too big")?; - let diamond_cut = client + let diamond_cut = sl_client .diamond_cut_by_version(version) .await? .context("missing upgrade data on STM")?; @@ -62,7 +62,7 @@ impl EventProcessor for DecentralizedUpgradesEventProcessor { }; // Scheduler VK is not present in proposal event. It is hard coded in verifier contract. 
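
Taken together, the `EthClient` changes in this file turn the interface from a stateful, pre-configured log fetcher into a stateless one, and add the two calls the reworked watcher needs. A sketch of the resulting trait is below; the type parameters (`Vec<Log>`, `u64`, and the error types) are reconstructed from the implementations, since the rendered diff drops generics.

```rust
#[async_trait::async_trait]
pub trait EthClient: 'static + fmt::Debug + Send + Sync {
    /// Topics are now per-call arguments; the mutable `set_topics()` is gone,
    /// which also lets the client derive `Clone`.
    async fn get_events(
        &self,
        from: BlockNumber,
        to: BlockNumber,
        topic1: H256,
        topic2: Option<H256>,
        retries_left: usize,
    ) -> EnrichedClientResult<Vec<Log>>;

    /// Finalized block number, as before.
    async fn finalized_block_number(&self) -> EnrichedClientResult<u64>;

    /// Reads `getTotalPriorityTxs` from the diamond proxy's getters facet;
    /// used to gate priority-op insertion (see the priority-ops processor).
    async fn get_total_priority_txs(&self) -> Result<u64, ContractCallError>;

    /// Chain ID, so the watcher can key its persisted progress per chain.
    async fn chain_id(&self) -> EnrichedClientResult<SLChainId>;

    // scheduler_vk_hash() and diamond_cut_by_version() keep their shape.
}
```
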
let scheduler_vk_hash = if let Some(address) = upgrade.verifier_address { - Some(client.scheduler_vk_hash(address).await?) + Some(sl_client.scheduler_vk_hash(address).await?) } else { None }; @@ -75,7 +75,7 @@ impl EventProcessor for DecentralizedUpgradesEventProcessor { .collect(); let Some((last_upgrade, _)) = new_upgrades.last() else { - return Ok(()); + return Ok(events.len()); }; let versions: Vec<_> = new_upgrades .iter() @@ -125,10 +125,18 @@ impl EventProcessor for DecentralizedUpgradesEventProcessor { stage_latency.observe(); self.last_seen_protocol_version = last_version; - Ok(()) + Ok(events.len()) } fn relevant_topic(&self) -> H256 { self.update_upgrade_timestamp_signature } + + fn event_source(&self) -> EventsSource { + EventsSource::SL + } + + fn event_type(&self) -> EventType { + EventType::ProtocolUpgrades + } } diff --git a/core/node/eth_watch/src/event_processors/governance_upgrades.rs b/core/node/eth_watch/src/event_processors/governance_upgrades.rs deleted file mode 100644 index 72f5c411892..00000000000 --- a/core/node/eth_watch/src/event_processors/governance_upgrades.rs +++ /dev/null @@ -1,142 +0,0 @@ -use anyhow::Context as _; -use zksync_dal::{Connection, Core, CoreDal, DalError}; -use zksync_types::{ - ethabi::Contract, protocol_upgrade::GovernanceOperation, - protocol_version::ProtocolSemanticVersion, web3::Log, Address, ProtocolUpgrade, H256, -}; - -use crate::{ - client::EthClient, - event_processors::{EventProcessor, EventProcessorError}, - metrics::{PollStage, METRICS}, -}; - -/// Listens to operation events coming from the governance contract and saves new protocol upgrade proposals to the database. -#[derive(Debug)] -pub struct GovernanceUpgradesEventProcessor { - // ZKsync diamond proxy - target_contract_address: Address, - /// Last protocol version seen. Used to skip events for already known upgrade proposals. - last_seen_protocol_version: ProtocolSemanticVersion, - upgrade_proposal_signature: H256, -} - -impl GovernanceUpgradesEventProcessor { - pub fn new( - target_contract_address: Address, - last_seen_protocol_version: ProtocolSemanticVersion, - governance_contract: &Contract, - ) -> Self { - Self { - target_contract_address, - last_seen_protocol_version, - upgrade_proposal_signature: governance_contract - .event("TransparentOperationScheduled") - .context("TransparentOperationScheduled event is missing in ABI") - .unwrap() - .signature(), - } - } -} - -#[async_trait::async_trait] -impl EventProcessor for GovernanceUpgradesEventProcessor { - async fn process_events( - &mut self, - storage: &mut Connection<'_, Core>, - client: &dyn EthClient, - events: Vec, - ) -> Result<(), EventProcessorError> { - let mut upgrades = Vec::new(); - for event in events { - assert_eq!(event.topics[0], self.upgrade_proposal_signature); // guaranteed by the watcher - - let governance_operation = GovernanceOperation::try_from(event) - .map_err(|err| EventProcessorError::log_parse(err, "governance operation"))?; - // Some calls can target other contracts than Diamond proxy, skip them. - for call in governance_operation - .calls - .into_iter() - .filter(|call| call.target == self.target_contract_address) - { - // We might not get an upgrade operation here, but something else instead - // (e.g. `acceptGovernor` call), so if parsing doesn't work, just skip the call. 
- let Ok(upgrade) = ProtocolUpgrade::try_from(call) else { - tracing::warn!( - "Failed to parse governance operation call as protocol upgrade, skipping" - ); - continue; - }; - // Scheduler VK is not present in proposal event. It is hard coded in verifier contract. - let scheduler_vk_hash = if let Some(address) = upgrade.verifier_address { - Some(client.scheduler_vk_hash(address).await?) - } else { - None - }; - upgrades.push((upgrade, scheduler_vk_hash)); - } - } - - let new_upgrades: Vec<_> = upgrades - .into_iter() - .skip_while(|(v, _)| v.version <= self.last_seen_protocol_version) - .collect(); - - let Some((last_upgrade, _)) = new_upgrades.last() else { - return Ok(()); - }; - let versions: Vec<_> = new_upgrades - .iter() - .map(|(u, _)| u.version.to_string()) - .collect(); - tracing::debug!("Received upgrades with versions: {versions:?}"); - - let last_version = last_upgrade.version; - let stage_latency = METRICS.poll_eth_node[&PollStage::PersistUpgrades].start(); - for (upgrade, scheduler_vk_hash) in new_upgrades { - let latest_semantic_version = storage - .protocol_versions_dal() - .latest_semantic_version() - .await - .map_err(DalError::generalize)? - .context("expected some version to be present in DB")?; - - if upgrade.version > latest_semantic_version { - let latest_version = storage - .protocol_versions_dal() - .get_protocol_version_with_latest_patch(latest_semantic_version.minor) - .await - .map_err(DalError::generalize)? - .with_context(|| { - format!( - "expected minor version {} to be present in DB", - latest_semantic_version.minor as u16 - ) - })?; - - let new_version = latest_version.apply_upgrade(upgrade, scheduler_vk_hash); - if new_version.version.minor == latest_semantic_version.minor { - // Only verification parameters may change if only patch is bumped. - assert_eq!( - new_version.base_system_contracts_hashes, - latest_version.base_system_contracts_hashes - ); - assert!(new_version.tx.is_none()); - } - storage - .protocol_versions_dal() - .save_protocol_version_with_tx(&new_version) - .await - .map_err(DalError::generalize)?; - } - } - stage_latency.observe(); - - self.last_seen_protocol_version = last_version; - Ok(()) - } - - fn relevant_topic(&self) -> H256 { - self.upgrade_proposal_signature - } -} diff --git a/core/node/eth_watch/src/event_processors/mod.rs b/core/node/eth_watch/src/event_processors/mod.rs index 43ae259305a..f145181b0cf 100644 --- a/core/node/eth_watch/src/event_processors/mod.rs +++ b/core/node/eth_watch/src/event_processors/mod.rs @@ -1,18 +1,17 @@ use std::fmt; -use zksync_dal::{Connection, Core}; +use zksync_dal::{eth_watcher_dal::EventType, Connection, Core}; use zksync_eth_client::{ContractCallError, EnrichedClientError}; use zksync_types::{web3::Log, H256}; pub(crate) use self::{ decentralized_upgrades::DecentralizedUpgradesEventProcessor, - governance_upgrades::GovernanceUpgradesEventProcessor, priority_ops::PriorityOpsEventProcessor, + priority_ops::PriorityOpsEventProcessor, }; use crate::client::EthClient; mod decentralized_upgrades; -mod governance_upgrades; -mod priority_ops; +pub mod priority_ops; /// Errors issued by an [`EventProcessor`]. 
#[derive(Debug, thiserror::Error)] @@ -32,6 +31,12 @@ pub(super) enum EventProcessorError { Internal(#[from] anyhow::Error), } +#[derive(Debug)] +pub(super) enum EventsSource { + L1, + SL, +} + impl EventProcessorError { pub fn log_parse(source: impl Into, log_kind: &'static str) -> Self { Self::LogParse { @@ -46,13 +51,18 @@ impl EventProcessorError { #[async_trait::async_trait] pub(super) trait EventProcessor: 'static + fmt::Debug + Send + Sync { /// Processes given events. All events are guaranteed to match [`Self::relevant_topic()`]. + /// Returns number of processed events, this result is used to update last processed block. async fn process_events( &mut self, storage: &mut Connection<'_, Core>, - client: &dyn EthClient, + sl_client: &dyn EthClient, events: Vec, - ) -> Result<(), EventProcessorError>; + ) -> Result; /// Relevant topic which defines what events to be processed fn relevant_topic(&self) -> H256; + + fn event_source(&self) -> EventsSource; + + fn event_type(&self) -> EventType; } diff --git a/core/node/eth_watch/src/event_processors/priority_ops.rs b/core/node/eth_watch/src/event_processors/priority_ops.rs index 2783637fdb9..051c076850e 100644 --- a/core/node/eth_watch/src/event_processors/priority_ops.rs +++ b/core/node/eth_watch/src/event_processors/priority_ops.rs @@ -2,13 +2,13 @@ use std::convert::TryFrom; use anyhow::Context; use zksync_contracts::hyperchain_contract; -use zksync_dal::{Connection, Core, CoreDal, DalError}; +use zksync_dal::{eth_watcher_dal::EventType, Connection, Core, CoreDal, DalError}; use zksync_shared_metrics::{TxStage, APP_METRICS}; use zksync_types::{l1::L1Tx, web3::Log, PriorityOpId, H256}; use crate::{ client::EthClient, - event_processors::{EventProcessor, EventProcessorError}, + event_processors::{EventProcessor, EventProcessorError, EventsSource}, metrics::{PollStage, METRICS}, }; @@ -36,10 +36,11 @@ impl EventProcessor for PriorityOpsEventProcessor { async fn process_events( &mut self, storage: &mut Connection<'_, Core>, - _client: &dyn EthClient, + sl_client: &dyn EthClient, events: Vec, - ) -> Result<(), EventProcessorError> { + ) -> Result { let mut priority_ops = Vec::new(); + let events_count = events.len(); for event in events { assert_eq!(event.topics[0], self.new_priority_request_signature); // guaranteed by the watcher let tx = L1Tx::try_from(event) @@ -48,7 +49,7 @@ impl EventProcessor for PriorityOpsEventProcessor { } if priority_ops.is_empty() { - return Ok(()); + return Ok(events_count); } let first = &priority_ops[0]; @@ -70,33 +71,49 @@ impl EventProcessor for PriorityOpsEventProcessor { .into_iter() .skip_while(|tx| tx.serial_id() < self.next_expected_priority_id) .collect(); - let (Some(first_new), Some(last_new)) = (new_ops.first(), new_ops.last()) else { - return Ok(()); + let skipped_ops = events_count - new_ops.len(); + let Some(first_new) = new_ops.first() else { + return Ok(events_count); }; assert_eq!( first_new.serial_id(), self.next_expected_priority_id, "priority transaction serial id mismatch" ); - let next_expected_priority_id = last_new.serial_id().next(); let stage_latency = METRICS.poll_eth_node[&PollStage::PersistL1Txs].start(); APP_METRICS.processed_txs[&TxStage::added_to_mempool()].inc(); APP_METRICS.processed_l1_txs[&TxStage::added_to_mempool()].inc(); - for new_op in new_ops { - let eth_block = new_op.eth_block(); + let processed_priority_transactions = sl_client.get_total_priority_txs().await?; + let ops_to_insert: Vec<&L1Tx> = new_ops + .iter() + .take_while(|op| processed_priority_transactions > 
op.serial_id().0) + .collect(); + + for new_op in &ops_to_insert { storage .transactions_dal() - .insert_transaction_l1(&new_op, eth_block) + .insert_transaction_l1(new_op, new_op.eth_block()) .await .map_err(DalError::generalize)?; } stage_latency.observe(); - self.next_expected_priority_id = next_expected_priority_id; - Ok(()) + if let Some(last_op) = ops_to_insert.last() { + self.next_expected_priority_id = last_op.serial_id().next(); + } + + Ok(skipped_ops + ops_to_insert.len()) } fn relevant_topic(&self) -> H256 { self.new_priority_request_signature } + + fn event_source(&self) -> EventsSource { + EventsSource::L1 + } + + fn event_type(&self) -> EventType { + EventType::PriorityTransactions + } } diff --git a/core/node/eth_watch/src/lib.rs b/core/node/eth_watch/src/lib.rs index e964d63bb19..a832733b355 100644 --- a/core/node/eth_watch/src/lib.rs +++ b/core/node/eth_watch/src/lib.rs @@ -6,23 +6,20 @@ use std::time::Duration; use anyhow::Context as _; use tokio::sync::watch; -use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; +use zksync_dal::{Connection, ConnectionPool, Core, CoreDal, DalError}; use zksync_system_constants::PRIORITY_EXPIRATION; use zksync_types::{ ethabi::Contract, protocol_version::ProtocolSemanticVersion, - web3::BlockNumber as Web3BlockNumber, Address, PriorityOpId, + web3::BlockNumber as Web3BlockNumber, PriorityOpId, }; pub use self::client::EthHttpQueryClient; use self::{ client::{EthClient, RETRY_LIMIT}, - event_processors::{ - EventProcessor, EventProcessorError, GovernanceUpgradesEventProcessor, - PriorityOpsEventProcessor, - }, - metrics::{PollStage, METRICS}, + event_processors::{EventProcessor, EventProcessorError, PriorityOpsEventProcessor}, + metrics::METRICS, }; -use crate::event_processors::DecentralizedUpgradesEventProcessor; +use crate::event_processors::{DecentralizedUpgradesEventProcessor, EventsSource}; mod client; mod event_processors; @@ -34,70 +31,53 @@ mod tests; struct EthWatchState { last_seen_protocol_version: ProtocolSemanticVersion, next_expected_priority_id: PriorityOpId, - last_processed_ethereum_block: u64, } /// Ethereum watcher component. 
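
The key behavioral change in the priority-ops processor: new ops are no longer inserted unconditionally. Insertion is capped by the `getTotalPriorityTxs` counter reported by the settlement layer, and the returned count (`skipped_ops + ops_to_insert.len()`) keeps the watcher's cursor from advancing past anything not yet persisted. The gating rule, isolated as a standalone sketch (`ops_ready_to_insert` is a hypothetical helper, not in the diff):

```rust
fn ops_ready_to_insert(new_ops: &[L1Tx], processed_on_sl: u64) -> Vec<&L1Tx> {
    new_ops
        .iter()
        // An op with serial id `n` becomes insertable only once the
        // settlement layer reports more than `n` processed priority
        // transactions; later loop iterations pick up the rest as the
        // counter grows.
        .take_while(|op| processed_on_sl > op.serial_id().0)
        .collect()
}
```
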
#[derive(Debug)] pub struct EthWatch { - client: Box, + l1_client: Box, + sl_client: Box, poll_interval: Duration, event_processors: Vec>, - last_processed_ethereum_block: u64, pool: ConnectionPool, } impl EthWatch { pub async fn new( - diamond_proxy_addr: Address, - governance_contract: &Contract, chain_admin_contract: &Contract, - mut client: Box, + l1_client: Box, + sl_client: Box, pool: ConnectionPool, poll_interval: Duration, ) -> anyhow::Result { let mut storage = pool.connection_tagged("eth_watch").await?; - let state = Self::initialize_state(&*client, &mut storage).await?; + let state = Self::initialize_state(&mut storage).await?; tracing::info!("initialized state: {state:?}"); drop(storage); let priority_ops_processor = PriorityOpsEventProcessor::new(state.next_expected_priority_id)?; - let governance_upgrades_processor = GovernanceUpgradesEventProcessor::new( - diamond_proxy_addr, - state.last_seen_protocol_version, - governance_contract, - ); let decentralized_upgrades_processor = DecentralizedUpgradesEventProcessor::new( state.last_seen_protocol_version, chain_admin_contract, ); let event_processors: Vec> = vec![ Box::new(priority_ops_processor), - Box::new(governance_upgrades_processor), Box::new(decentralized_upgrades_processor), ]; - let topics = event_processors - .iter() - .map(|processor| processor.relevant_topic()) - .collect(); - client.set_topics(topics); - Ok(Self { - client, + l1_client, + sl_client, poll_interval, event_processors, - last_processed_ethereum_block: state.last_processed_ethereum_block, pool, }) } #[tracing::instrument(name = "EthWatch::initialize_state", skip_all)] - async fn initialize_state( - client: &dyn EthClient, - storage: &mut Connection<'_, Core>, - ) -> anyhow::Result { + async fn initialize_state(storage: &mut Connection<'_, Core>) -> anyhow::Result { let next_expected_priority_id: PriorityOpId = storage .transactions_dal() .last_priority_id() @@ -110,26 +90,9 @@ impl EthWatch { .await? .context("expected at least one (genesis) version to be present in DB")?; - let last_processed_ethereum_block = match storage - .transactions_dal() - .get_last_processed_l1_block() - .await? - { - // There are some priority ops processed - start from the last processed eth block - // but subtract 1 in case the server stopped mid-block. - Some(block) => block.0.saturating_sub(1).into(), - // There are no priority ops processed - to be safe, scan the last 50k blocks. - None => client - .finalized_block_number() - .await - .context("cannot get current Ethereum block")? - .saturating_sub(PRIORITY_EXPIRATION), - }; - Ok(EthWatchState { next_expected_priority_id, last_seen_protocol_version, - last_processed_ethereum_block, }) } @@ -155,10 +118,6 @@ impl EthWatch { // This is an error because otherwise we could potentially miss a priority operation // thus entering priority mode, which is not desired. tracing::error!("Failed to process new blocks: {err}"); - self.last_processed_ethereum_block = - Self::initialize_state(&*self.client, &mut storage) - .await? 
- .last_processed_ethereum_block; } } } @@ -172,34 +131,64 @@ impl EthWatch { &mut self, storage: &mut Connection<'_, Core>, ) -> Result<(), EventProcessorError> { - let stage_latency = METRICS.poll_eth_node[&PollStage::Request].start(); - let to_block = self.client.finalized_block_number().await?; - if to_block <= self.last_processed_ethereum_block { - return Ok(()); - } - - let events = self - .client - .get_events( - Web3BlockNumber::Number(self.last_processed_ethereum_block.into()), - Web3BlockNumber::Number(to_block.into()), - RETRY_LIMIT, - ) - .await?; - stage_latency.observe(); - for processor in &mut self.event_processors { - let relevant_topic = processor.relevant_topic(); - let processor_events = events - .iter() - .filter(|event| event.topics.first() == Some(&relevant_topic)) - .cloned() - .collect(); - processor - .process_events(storage, &*self.client, processor_events) + let client = match processor.event_source() { + EventsSource::L1 => self.l1_client.as_ref(), + EventsSource::SL => self.sl_client.as_ref(), + }; + let chain_id = client.chain_id().await?; + let finalized_block = client.finalized_block_number().await?; + + let from_block = storage + .processed_events_dal() + .get_or_set_next_block_to_process( + processor.event_type(), + chain_id, + finalized_block.saturating_sub(PRIORITY_EXPIRATION), + ) + .await + .map_err(DalError::generalize)?; + + // There are no new blocks so there is nothing to be done + if from_block > finalized_block { + continue; + } + let processor_events = client + .get_events( + Web3BlockNumber::Number(from_block.into()), + Web3BlockNumber::Number(finalized_block.into()), + processor.relevant_topic(), + None, + RETRY_LIMIT, + ) + .await?; + let processed_events_count = processor + .process_events(storage, &*self.sl_client, processor_events.clone()) .await?; + + let next_block_to_process = if processed_events_count == processor_events.len() { + finalized_block + 1 + } else if processed_events_count == 0 { + //nothing was processed + from_block + } else { + processor_events[processed_events_count - 1] + .block_number + .expect("Event block number is missing") + .try_into() + .unwrap() + }; + + storage + .processed_events_dal() + .update_next_block_to_process( + processor.event_type(), + chain_id, + next_block_to_process, + ) + .await + .map_err(DalError::generalize)?; } - self.last_processed_ethereum_block = to_block; Ok(()) } } diff --git a/core/node/eth_watch/src/metrics.rs b/core/node/eth_watch/src/metrics.rs index a3684cc6e72..a942d4a6e61 100644 --- a/core/node/eth_watch/src/metrics.rs +++ b/core/node/eth_watch/src/metrics.rs @@ -7,7 +7,6 @@ use vise::{Buckets, Counter, EncodeLabelSet, EncodeLabelValue, Family, Histogram #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EncodeLabelValue, EncodeLabelSet)] #[metrics(label = "stage", rename_all = "snake_case")] pub(super) enum PollStage { - Request, PersistL1Txs, PersistUpgrades, } diff --git a/core/node/eth_watch/src/tests.rs b/core/node/eth_watch/src/tests.rs index 7ae3b5494e9..feb9eff35b5 100644 --- a/core/node/eth_watch/src/tests.rs +++ b/core/node/eth_watch/src/tests.rs @@ -1,37 +1,48 @@ use std::{collections::HashMap, convert::TryInto, sync::Arc}; use tokio::sync::RwLock; -use zksync_contracts::{chain_admin_contract, governance_contract, hyperchain_contract}; +use zksync_contracts::{ + chain_admin_contract, hyperchain_contract, state_transition_manager_contract, +}; use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; use zksync_eth_client::{ContractCallError, EnrichedClientResult}; 
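
`loop_iteration` no longer keeps `last_processed_ethereum_block` in memory: each processor gets its own cursor, persisted per `(event_type, chain_id)` via `processed_events_dal`, so a restart resumes exactly where processing stopped. The advancement rule can be isolated as follows (hypothetical free function; in the diff this logic is inline):

```rust
use zksync_types::web3::Log;

fn next_block_to_process(
    from_block: u64,
    finalized_block: u64,
    events: &[Log],
    processed: usize,
) -> u64 {
    if processed == events.len() {
        // Everything in [from_block, finalized_block] was handled.
        finalized_block + 1
    } else if processed == 0 {
        // Nothing was handled; retry the same range next iteration.
        from_block
    } else {
        // Partial progress: resume at the block of the last processed event
        // (that block is re-scanned; processors skip what they already saw).
        events[processed - 1]
            .block_number
            .expect("Event block number is missing")
            .as_u64()
    }
}
```
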
use zksync_types::{ - abi, ethabi, - ethabi::{Hash, Token}, + abi, + abi::ProposedUpgrade, + ethabi, + ethabi::Token, l1::{L1Tx, OpProcessingType, PriorityQueueType}, protocol_upgrade::{ProtocolUpgradeTx, ProtocolUpgradeTxCommonData}, protocol_version::ProtocolSemanticVersion, - web3::{BlockNumber, Log}, + web3::{contract::Tokenizable, BlockNumber, Log}, Address, Execute, L1TxCommonData, PriorityOpId, ProtocolUpgrade, ProtocolVersion, - ProtocolVersionId, Transaction, H256, U256, + ProtocolVersionId, SLChainId, Transaction, H160, H256, U256, U64, }; -use crate::{client::EthClient, EthWatch}; +use crate::{ + client::{EthClient, RETRY_LIMIT}, + EthWatch, +}; #[derive(Debug)] struct FakeEthClientData { transactions: HashMap>, diamond_upgrades: HashMap>, - governance_upgrades: HashMap>, + upgrade_timestamp: HashMap>, last_finalized_block_number: u64, + chain_id: SLChainId, + processed_priority_transactions_count: u64, } impl FakeEthClientData { - fn new() -> Self { + fn new(chain_id: SLChainId) -> Self { Self { transactions: Default::default(), diamond_upgrades: Default::default(), - governance_upgrades: Default::default(), + upgrade_timestamp: Default::default(), last_finalized_block_number: 0, + chain_id, + processed_priority_transactions_count: 0, } } @@ -42,21 +53,30 @@ impl FakeEthClientData { .entry(eth_block.0 as u64) .or_default() .push(tx_into_log(transaction.clone())); + self.processed_priority_transactions_count += 1; } } - fn add_governance_upgrades(&mut self, upgrades: &[(ProtocolUpgrade, u64)]) { + fn add_upgrade_timestamp(&mut self, upgrades: &[(ProtocolUpgrade, u64)]) { for (upgrade, eth_block) in upgrades { - self.governance_upgrades + self.upgrade_timestamp + .entry(*eth_block) + .or_default() + .push(upgrade_timestamp_log(*eth_block)); + self.diamond_upgrades .entry(*eth_block) .or_default() - .push(upgrade_into_governor_log(upgrade.clone(), *eth_block)); + .push(diamond_upgrade_log(upgrade.clone(), *eth_block)); } } fn set_last_finalized_block_number(&mut self, number: u64) { self.last_finalized_block_number = number; } + + fn set_processed_priority_transactions_count(&mut self, number: u64) { + self.processed_priority_transactions_count = number; + } } #[derive(Debug, Clone)] @@ -65,9 +85,9 @@ struct MockEthClient { } impl MockEthClient { - fn new() -> Self { + fn new(chain_id: SLChainId) -> Self { Self { - inner: Arc::new(RwLock::new(FakeEthClientData::new())), + inner: Arc::new(RwLock::new(FakeEthClientData::new(chain_id))), } } @@ -75,8 +95,8 @@ impl MockEthClient { self.inner.write().await.add_transactions(transactions); } - async fn add_governance_upgrades(&mut self, upgrades: &[(ProtocolUpgrade, u64)]) { - self.inner.write().await.add_governance_upgrades(upgrades); + async fn add_upgrade_timestamp(&mut self, upgrades: &[(ProtocolUpgrade, u64)]) { + self.inner.write().await.add_upgrade_timestamp(upgrades); } async fn set_last_finalized_block_number(&mut self, number: u64) { @@ -86,6 +106,13 @@ impl MockEthClient { .set_last_finalized_block_number(number); } + async fn set_processed_priority_transactions_count(&mut self, number: u64) { + self.inner + .write() + .await + .set_processed_priority_transactions_count(number) + } + async fn block_to_number(&self, block: BlockNumber) -> u64 { match block { BlockNumber::Earliest => 0, @@ -104,6 +131,8 @@ impl EthClient for MockEthClient { &self, from: BlockNumber, to: BlockNumber, + topic1: H256, + topic2: Option, _retries_left: usize, ) -> EnrichedClientResult> { let from = self.block_to_number(from).await; @@ -116,15 +145,19 
@@ impl EthClient for MockEthClient { if let Some(ops) = self.inner.read().await.diamond_upgrades.get(&number) { logs.extend_from_slice(ops); } - if let Some(ops) = self.inner.read().await.governance_upgrades.get(&number) { + if let Some(ops) = self.inner.read().await.upgrade_timestamp.get(&number) { logs.extend_from_slice(ops); } } - Ok(logs) + Ok(logs + .into_iter() + .filter(|log| { + log.topics.first() == Some(&topic1) + && (topic2.is_none() || log.topics.get(1) == topic2.as_ref()) + }) + .collect()) } - fn set_topics(&mut self, _topics: Vec) {} - async fn scheduler_vk_hash( &self, _verifier_address: Address, @@ -138,16 +171,58 @@ impl EthClient for MockEthClient { async fn diamond_cut_by_version( &self, - _packed_version: H256, + packed_version: H256, ) -> EnrichedClientResult>> { - unimplemented!() + let from_block = *self + .inner + .read() + .await + .diamond_upgrades + .keys() + .min() + .unwrap_or(&0); + let to_block = *self + .inner + .read() + .await + .diamond_upgrades + .keys() + .max() + .unwrap_or(&0); + + let logs = self + .get_events( + U64::from(from_block).into(), + U64::from(to_block).into(), + state_transition_manager_contract() + .event("NewUpgradeCutData") + .unwrap() + .signature(), + Some(packed_version), + RETRY_LIMIT, + ) + .await?; + + Ok(logs.into_iter().next().map(|log| log.data.0)) + } + + async fn get_total_priority_txs(&self) -> Result { + Ok(self + .inner + .read() + .await + .processed_priority_transactions_count) + } + + async fn chain_id(&self) -> EnrichedClientResult { + Ok(self.inner.read().await.chain_id) } } fn build_l1_tx(serial_id: u64, eth_block: u64) -> L1Tx { let tx = L1Tx { execute: Execute { - contract_address: Address::repeat_byte(0x11), + contract_address: Some(Address::repeat_byte(0x11)), calldata: vec![1, 2, 3], factory_deps: vec![], value: U256::zero(), @@ -178,7 +253,7 @@ fn build_l1_tx(serial_id: u64, eth_block: u64) -> L1Tx { fn build_upgrade_tx(id: ProtocolVersionId, eth_block: u64) -> ProtocolUpgradeTx { let tx = ProtocolUpgradeTx { execute: Execute { - contract_address: Address::repeat_byte(0x11), + contract_address: Some(Address::repeat_byte(0x11)), calldata: vec![1, 2, 3], factory_deps: vec![], value: U256::zero(), @@ -203,27 +278,47 @@ fn build_upgrade_tx(id: ProtocolVersionId, eth_block: u64) -> ProtocolUpgradeTx .unwrap() } -async fn create_test_watcher(connection_pool: ConnectionPool) -> (EthWatch, MockEthClient) { - let client = MockEthClient::new(); +async fn create_test_watcher( + connection_pool: ConnectionPool, + is_gateway: bool, +) -> (EthWatch, MockEthClient, MockEthClient) { + let l1_client = MockEthClient::new(SLChainId(42)); + let sl_client = if is_gateway { + MockEthClient::new(SLChainId(123)) + } else { + l1_client.clone() + }; let watcher = EthWatch::new( - Address::default(), - &governance_contract(), &chain_admin_contract(), - Box::new(client.clone()), + Box::new(l1_client.clone()), + Box::new(sl_client.clone()), connection_pool, std::time::Duration::from_nanos(1), ) .await .unwrap(); - (watcher, client) + (watcher, l1_client, sl_client) } -#[tokio::test] +async fn create_l1_test_watcher( + connection_pool: ConnectionPool, +) -> (EthWatch, MockEthClient) { + let (watcher, l1_client, _) = create_test_watcher(connection_pool, false).await; + (watcher, l1_client) +} + +async fn create_gateway_test_watcher( + connection_pool: ConnectionPool, +) -> (EthWatch, MockEthClient, MockEthClient) { + create_test_watcher(connection_pool, true).await +} + +#[test_log::test(tokio::test)] async fn 
test_normal_operation_l1_txs() { let connection_pool = ConnectionPool::::test_pool().await; setup_db(&connection_pool).await; - let (mut watcher, mut client) = create_test_watcher(connection_pool.clone()).await; + let (mut watcher, mut client) = create_l1_test_watcher(connection_pool.clone()).await; let mut storage = connection_pool.connection().await.unwrap(); client @@ -258,15 +353,15 @@ async fn test_normal_operation_l1_txs() { assert_eq!(db_tx.common_data.serial_id.0, 2); } -#[tokio::test] -async fn test_gap_in_governance_upgrades() { +#[test_log::test(tokio::test)] +async fn test_gap_in_upgrade_timestamp() { let connection_pool = ConnectionPool::::test_pool().await; setup_db(&connection_pool).await; - let (mut watcher, mut client) = create_test_watcher(connection_pool.clone()).await; + let (mut watcher, mut client) = create_l1_test_watcher(connection_pool.clone()).await; let mut storage = connection_pool.connection().await.unwrap(); client - .add_governance_upgrades(&[( + .add_upgrade_timestamp(&[( ProtocolUpgrade { version: ProtocolSemanticVersion { minor: ProtocolVersionId::next(), @@ -291,18 +386,17 @@ async fn test_gap_in_governance_upgrades() { assert_eq!(db_versions[1].minor, next_version); } -#[tokio::test] -async fn test_normal_operation_governance_upgrades() { +#[test_log::test(tokio::test)] +async fn test_normal_operation_upgrade_timestamp() { zksync_concurrency::testonly::abort_on_panic(); let connection_pool = ConnectionPool::::test_pool().await; setup_db(&connection_pool).await; - let mut client = MockEthClient::new(); + let mut client = MockEthClient::new(SLChainId(42)); let mut watcher = EthWatch::new( - Address::default(), - &governance_contract(), &chain_admin_contract(), Box::new(client.clone()), + Box::new(client.clone()), connection_pool.clone(), std::time::Duration::from_nanos(1), ) @@ -311,7 +405,7 @@ async fn test_normal_operation_governance_upgrades() { let mut storage = connection_pool.connection().await.unwrap(); client - .add_governance_upgrades(&[ + .add_upgrade_timestamp(&[ ( ProtocolUpgrade { tx: None, @@ -375,12 +469,12 @@ async fn test_normal_operation_governance_upgrades() { assert_eq!(tx.common_data.upgrade_id, ProtocolVersionId::next()); } -#[tokio::test] +#[test_log::test(tokio::test)] #[should_panic] async fn test_gap_in_single_batch() { let connection_pool = ConnectionPool::::test_pool().await; setup_db(&connection_pool).await; - let (mut watcher, mut client) = create_test_watcher(connection_pool.clone()).await; + let (mut watcher, mut client) = create_l1_test_watcher(connection_pool.clone()).await; let mut storage = connection_pool.connection().await.unwrap(); client @@ -396,12 +490,12 @@ async fn test_gap_in_single_batch() { watcher.loop_iteration(&mut storage).await.unwrap(); } -#[tokio::test] +#[test_log::test(tokio::test)] #[should_panic] async fn test_gap_between_batches() { let connection_pool = ConnectionPool::::test_pool().await; setup_db(&connection_pool).await; - let (mut watcher, mut client) = create_test_watcher(connection_pool.clone()).await; + let (mut watcher, mut client) = create_l1_test_watcher(connection_pool.clone()).await; let mut storage = connection_pool.connection().await.unwrap(); client @@ -424,12 +518,12 @@ async fn test_gap_between_batches() { watcher.loop_iteration(&mut storage).await.unwrap(); } -#[tokio::test] +#[test_log::test(tokio::test)] async fn test_overlapping_batches() { zksync_concurrency::testonly::abort_on_panic(); let connection_pool = ConnectionPool::::test_pool().await; setup_db(&connection_pool).await; 
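
Construction in these tests mirrors the new production wiring: `EthWatch::new` takes the L1 and settlement-layer clients separately, and state initialization no longer needs a client at all, since cursors live in the DB. Condensed from the helpers above (chain ID values are the ones these tests use):

```rust
// For a plain L1 setup the same mock serves both roles; a gateway setup gets
// a settlement-layer client with its own chain ID.
let l1_client = MockEthClient::new(SLChainId(42));
let sl_client = if is_gateway {
    MockEthClient::new(SLChainId(123))
} else {
    l1_client.clone()
};
let watcher = EthWatch::new(
    &chain_admin_contract(),
    Box::new(l1_client.clone()),
    Box::new(sl_client.clone()),
    connection_pool,
    std::time::Duration::from_nanos(1),
)
.await
.unwrap();
```
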
- let (mut watcher, mut client) = create_test_watcher(connection_pool.clone()).await; + let (mut watcher, mut client) = create_l1_test_watcher(connection_pool.clone()).await; let mut storage = connection_pool.connection().await.unwrap(); client @@ -467,6 +561,52 @@ async fn test_overlapping_batches() { assert_eq!(tx.common_data.serial_id.0, 4); } +#[test_log::test(tokio::test)] +async fn test_transactions_get_gradually_processed_by_gateway() { + zksync_concurrency::testonly::abort_on_panic(); + let connection_pool = ConnectionPool::::test_pool().await; + setup_db(&connection_pool).await; + let (mut watcher, mut l1_client, mut gateway_client) = + create_gateway_test_watcher(connection_pool.clone()).await; + + let mut storage = connection_pool.connection().await.unwrap(); + l1_client + .add_transactions(&[ + build_l1_tx(0, 10), + build_l1_tx(1, 14), + build_l1_tx(2, 14), + build_l1_tx(3, 20), + build_l1_tx(4, 22), + ]) + .await; + l1_client.set_last_finalized_block_number(15).await; + gateway_client + .set_processed_priority_transactions_count(2) + .await; + watcher.loop_iteration(&mut storage).await.unwrap(); + + let db_txs = get_all_db_txs(&mut storage).await; + assert_eq!(db_txs.len(), 2); + + l1_client.set_last_finalized_block_number(25).await; + gateway_client + .set_processed_priority_transactions_count(4) + .await; + watcher.loop_iteration(&mut storage).await.unwrap(); + + let db_txs = get_all_db_txs(&mut storage).await; + assert_eq!(db_txs.len(), 4); + let mut db_txs: Vec = db_txs + .into_iter() + .map(|tx| tx.try_into().unwrap()) + .collect(); + db_txs.sort_by_key(|tx| tx.common_data.serial_id); + let tx = db_txs[2].clone(); + assert_eq!(tx.common_data.serial_id.0, 2); + let tx = db_txs[3].clone(); + assert_eq!(tx.common_data.serial_id.0, 3); +} + async fn get_all_db_txs(storage: &mut Connection<'_, Core>) -> Vec { storage.transactions_dal().reset_mempool().await.unwrap(); storage @@ -518,37 +658,69 @@ fn tx_into_log(tx: L1Tx) -> Log { } } -fn upgrade_into_governor_log(upgrade: ProtocolUpgrade, eth_block: u64) -> Log { - let diamond_cut = upgrade_into_diamond_cut(upgrade); +fn init_calldata(protocol_upgrade: ProtocolUpgrade) -> Vec { + let upgrade_token = upgrade_into_diamond_cut(protocol_upgrade); + + let encoded_params = ethabi::encode(&[upgrade_token]); + let execute_upgrade_selector = hyperchain_contract() .function("executeUpgrade") .unwrap() .short_signature(); - let diamond_upgrade_calldata = execute_upgrade_selector - .iter() - .copied() - .chain(ethabi::encode(&[diamond_cut])) - .collect(); - let governance_call = Token::Tuple(vec![ - Token::Address(Default::default()), - Token::Uint(U256::default()), - Token::Bytes(diamond_upgrade_calldata), - ]); - let governance_operation = Token::Tuple(vec![ - Token::Array(vec![governance_call]), - Token::FixedBytes(vec![0u8; 32]), - Token::FixedBytes(vec![0u8; 32]), - ]); - let final_data = ethabi::encode(&[Token::FixedBytes(vec![0u8; 32]), governance_operation]); + + // Concatenate the function selector with the encoded parameters + let mut calldata = Vec::with_capacity(4 + encoded_params.len()); + calldata.extend_from_slice(&execute_upgrade_selector); + calldata.extend_from_slice(&encoded_params); + + calldata +} + +fn diamond_upgrade_log(upgrade: ProtocolUpgrade, eth_block: u64) -> Log { + // struct DiamondCutData { + // FacetCut[] facetCuts; + // address initAddress; + // bytes initCalldata; + // } + let final_data = ethabi::encode(&[Token::Tuple(vec![ + Token::Array(vec![]), + Token::Address(H160::zero()), + 
Token::Bytes(init_calldata(upgrade.clone())), + ])]); + tracing::info!("{:?}", Token::Bytes(init_calldata(upgrade))); + + Log { + address: Address::repeat_byte(0x1), + topics: vec![ + state_transition_manager_contract() + .event("NewUpgradeCutData") + .unwrap() + .signature(), + H256::from_low_u64_be(eth_block), + ], + data: final_data.into(), + block_hash: Some(H256::repeat_byte(0x11)), + block_number: Some(eth_block.into()), + transaction_hash: Some(H256::random()), + transaction_index: Some(0u64.into()), + log_index: Some(0u64.into()), + transaction_log_index: Some(0u64.into()), + log_type: None, + removed: None, + block_timestamp: None, + } +} +fn upgrade_timestamp_log(eth_block: u64) -> Log { + let final_data = ethabi::encode(&[U256::from(12345).into_token()]); Log { address: Address::repeat_byte(0x1), topics: vec![ - governance_contract() - .event("TransparentOperationScheduled") - .expect("TransparentOperationScheduled event is missing in abi") + chain_admin_contract() + .event("UpdateUpgradeTimestamp") + .expect("UpdateUpgradeTimestamp event is missing in ABI") .signature(), - Default::default(), + H256::from_low_u64_be(eth_block), ], data: final_data.into(), block_hash: Some(H256::repeat_byte(0x11)), @@ -577,7 +749,7 @@ fn upgrade_into_diamond_cut(upgrade: ProtocolUpgrade) -> Token { else { unreachable!() }; - let upgrade_token = abi::ProposedUpgrade { + ProposedUpgrade { l2_protocol_upgrade_tx: tx, factory_deps, bootloader_hash: upgrade.bootloader_code_hash.unwrap_or_default().into(), @@ -589,17 +761,7 @@ fn upgrade_into_diamond_cut(upgrade: ProtocolUpgrade) -> Token { upgrade_timestamp: upgrade.timestamp.into(), new_protocol_version: upgrade.version.pack(), } - .encode(); - Token::Tuple(vec![ - Token::Array(vec![]), - Token::Address(Default::default()), - Token::Bytes( - vec![0u8; 4] - .into_iter() - .chain(ethabi::encode(&[upgrade_token])) - .collect(), - ), - ]) + .encode() } async fn setup_db(connection_pool: &ConnectionPool) { diff --git a/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs b/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs index e6842b92fdb..27cdc7f5d5e 100644 --- a/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs +++ b/core/node/fee_model/src/l1_gas_price/gas_adjuster/mod.rs @@ -317,14 +317,14 @@ impl TxParamsProvider for GasAdjuster { // smooth out base_fee increases in general. // In other words, in order to pay less fees, we are ready to wait longer. // But the longer we wait, the more we are ready to pay. - fn get_base_fee(&self, time_in_mempool: u32) -> u64 { + fn get_base_fee(&self, time_in_mempool_in_l1_blocks: u32) -> u64 { let a = self.config.pricing_formula_parameter_a; let b = self.config.pricing_formula_parameter_b; // Currently we use an exponential formula. 
// The alternative is a linear one: - // `let scale_factor = a + b * time_in_mempool as f64;` - let scale_factor = a * b.powf(time_in_mempool as f64); + // `let scale_factor = a + b * time_in_mempool_in_l1_blocks as f64;` + let scale_factor = a * b.powf(time_in_mempool_in_l1_blocks as f64); let median = self.base_fee_statistics.median(); METRICS.median_base_fee_per_gas.set(median); let new_fee = median as f64 * scale_factor; diff --git a/core/node/fee_model/src/l1_gas_price/gas_adjuster/tests.rs b/core/node/fee_model/src/l1_gas_price/gas_adjuster/tests.rs index 2643e4b3c42..47023203de0 100644 --- a/core/node/fee_model/src/l1_gas_price/gas_adjuster/tests.rs +++ b/core/node/fee_model/src/l1_gas_price/gas_adjuster/tests.rs @@ -152,7 +152,7 @@ async fn kept_updated_l2(commitment_mode: L1BatchCommitmentMode) { .zip(TEST_PUBDATA_PRICES) .map(|(block, pubdata)| BaseFees { base_fee_per_gas: block, - base_fee_per_blob_gas: 0.into(), + base_fee_per_blob_gas: 1.into(), l2_pubdata_price: pubdata.into(), }) .collect(); diff --git a/core/node/fee_model/src/l1_gas_price/mod.rs b/core/node/fee_model/src/l1_gas_price/mod.rs index 2a5d63089ca..e23bccf27ee 100644 --- a/core/node/fee_model/src/l1_gas_price/mod.rs +++ b/core/node/fee_model/src/l1_gas_price/mod.rs @@ -16,7 +16,7 @@ mod main_node_fetcher; /// This trait, as a bound, should only be used in components that actually sign and send transactions. pub trait TxParamsProvider: fmt::Debug + 'static + Send + Sync { /// Returns the recommended `max_fee_per_gas` value (EIP1559). - fn get_base_fee(&self, time_in_mempool: u32) -> u64; + fn get_base_fee(&self, time_in_mempool_in_l1_blocks: u32) -> u64; /// Returns the recommended `max_priority_fee_per_gas` value (EIP1559). fn get_priority_fee(&self) -> u64; diff --git a/core/node/fee_model/src/lib.rs b/core/node/fee_model/src/lib.rs index 217ed71e38c..fe4f6a27ce2 100644 --- a/core/node/fee_model/src/lib.rs +++ b/core/node/fee_model/src/lib.rs @@ -34,8 +34,27 @@ pub trait BatchFeeModelInputProvider: fmt::Debug + 'static + Send + Sync { l1_pubdata_price_scale_factor: f64, ) -> anyhow::Result { let params = self.get_fee_model_params(); + Ok( + ::default_batch_fee_input_scaled( + params, + l1_gas_price_scale_factor, + l1_pubdata_price_scale_factor, + ), + ) + } + + /// Returns the fee model parameters using the denomination of the base token used (WEI for ETH). + fn get_fee_model_params(&self) -> FeeParams; +} - Ok(match params { +impl dyn BatchFeeModelInputProvider { + /// Provides the default implementation of `get_batch_fee_input_scaled()` given [`FeeParams`]. + pub fn default_batch_fee_input_scaled( + params: FeeParams, + l1_gas_price_scale_factor: f64, + l1_pubdata_price_scale_factor: f64, + ) -> BatchFeeInput { + match params { FeeParams::V1(params) => BatchFeeInput::L1Pegged(compute_batch_fee_model_input_v1( params, l1_gas_price_scale_factor, @@ -47,14 +66,9 @@ pub trait BatchFeeModelInputProvider: fmt::Debug + 'static + Send + Sync { l1_pubdata_price_scale_factor, )), ), - }) + } } - /// Returns the fee model parameters using the denomination of the base token used (WEI for ETH). - fn get_fee_model_params(&self) -> FeeParams; -} - -impl dyn BatchFeeModelInputProvider { /// Returns the batch fee input as-is, i.e. without any scaling for the L1 gas and pubdata prices. 
pub async fn get_batch_fee_input(&self) -> anyhow::Result { self.get_batch_fee_input_scaled(1.0, 1.0).await diff --git a/core/node/node_framework/src/implementations/layers/da_clients/avail.rs b/core/node/node_framework/src/implementations/layers/da_clients/avail.rs index 7c3d82b6d25..06f5dbb72eb 100644 --- a/core/node/node_framework/src/implementations/layers/da_clients/avail.rs +++ b/core/node/node_framework/src/implementations/layers/da_clients/avail.rs @@ -1,4 +1,4 @@ -use zksync_config::AvailConfig; +use zksync_config::{configs::da_client::avail::AvailSecrets, AvailConfig}; use zksync_da_client::DataAvailabilityClient; use zksync_da_clients::avail::AvailClient; @@ -11,11 +11,12 @@ use crate::{ #[derive(Debug)] pub struct AvailWiringLayer { config: AvailConfig, + secrets: AvailSecrets, } impl AvailWiringLayer { - pub fn new(config: AvailConfig) -> Self { - Self { config } + pub fn new(config: AvailConfig, secrets: AvailSecrets) -> Self { + Self { config, secrets } } } @@ -36,7 +37,7 @@ impl WiringLayer for AvailWiringLayer { async fn wire(self, _input: Self::Input) -> Result { let client: Box = - Box::new(AvailClient::new(self.config).await?); + Box::new(AvailClient::new(self.config, self.secrets).await?); Ok(Self::Output { client: DAClientResource(client), diff --git a/core/node/node_framework/src/implementations/layers/eth_watch.rs b/core/node/node_framework/src/implementations/layers/eth_watch.rs index 53eeb1c5280..e19828d85cc 100644 --- a/core/node/node_framework/src/implementations/layers/eth_watch.rs +++ b/core/node/node_framework/src/implementations/layers/eth_watch.rs @@ -1,5 +1,5 @@ use zksync_config::{ContractsConfig, EthWatchConfig}; -use zksync_contracts::{chain_admin_contract, governance_contract}; +use zksync_contracts::chain_admin_contract; use zksync_eth_watch::{EthHttpQueryClient, EthWatch}; use crate::{ @@ -71,9 +71,8 @@ impl WiringLayer for EthWatchLayer { ); let eth_watch = EthWatch::new( - self.contracts_config.diamond_proxy_addr, - &governance_contract(), &chain_admin_contract(), + Box::new(eth_client.clone()), Box::new(eth_client), main_pool, self.eth_watch_config.poll_interval(), diff --git a/core/node/node_framework/src/implementations/layers/state_keeper/main_batch_executor.rs b/core/node/node_framework/src/implementations/layers/state_keeper/main_batch_executor.rs index f369db2bbf0..920b4fab1d0 100644 --- a/core/node/node_framework/src/implementations/layers/state_keeper/main_batch_executor.rs +++ b/core/node/node_framework/src/implementations/layers/state_keeper/main_batch_executor.rs @@ -1,5 +1,5 @@ use zksync_types::vm::FastVmMode; -use zksync_vm_executor::batch::MainBatchExecutorFactory; +use zksync_vm_executor::batch::{BatchTracer, MainBatchExecutorFactory, TraceCalls}; use crate::{ implementations::resources::state_keeper::BatchExecutorResource, @@ -27,6 +27,12 @@ impl MainBatchExecutorLayer { self.fast_vm_mode = mode; self } + + fn create_executor(&self) -> BatchExecutorResource { + let mut executor = MainBatchExecutorFactory::::new(self.optional_bytecode_compression); + executor.set_fast_vm_mode(self.fast_vm_mode); + executor.into() + } } #[async_trait::async_trait] @@ -39,11 +45,10 @@ impl WiringLayer for MainBatchExecutorLayer { } async fn wire(self, (): Self::Input) -> Result { - let mut executor = MainBatchExecutorFactory::new( - self.save_call_traces, - self.optional_bytecode_compression, - ); - executor.set_fast_vm_mode(self.fast_vm_mode); - Ok(executor.into()) + Ok(if self.save_call_traces { + self.create_executor::() + } else { + 
self.create_executor::<()>() + }) } } diff --git a/core/node/node_framework/src/implementations/layers/vm_runner/bwip.rs b/core/node/node_framework/src/implementations/layers/vm_runner/bwip.rs index 858692d3c85..35c26d909c9 100644 --- a/core/node/node_framework/src/implementations/layers/vm_runner/bwip.rs +++ b/core/node/node_framework/src/implementations/layers/vm_runner/bwip.rs @@ -76,7 +76,7 @@ impl WiringLayer for BasicWitnessInputProducerLayer { let connection_pool = master_pool.get_custom(self.config.window_size + 2).await?; // We don't get the executor from the context because it would contain state keeper-specific settings. - let batch_executor = MainBatchExecutorFactory::new(false, false); + let batch_executor = MainBatchExecutorFactory::<()>::new(false); let (basic_witness_input_producer, tasks) = BasicWitnessInputProducer::new( connection_pool, diff --git a/core/node/node_framework/src/implementations/layers/vm_runner/playground.rs b/core/node/node_framework/src/implementations/layers/vm_runner/playground.rs index ee1be98319b..e4eb8b38a69 100644 --- a/core/node/node_framework/src/implementations/layers/vm_runner/playground.rs +++ b/core/node/node_framework/src/implementations/layers/vm_runner/playground.rs @@ -13,6 +13,7 @@ use zksync_vm_runner::{ use crate::{ implementations::resources::{ healthcheck::AppHealthCheckResource, + object_store::ObjectStoreResource, pools::{PoolResource, ReplicaPool}, }, StopReceiver, Task, TaskId, WiringError, WiringLayer, @@ -38,6 +39,7 @@ impl VmPlaygroundLayer { pub struct Input { // We use a replica pool because VM playground doesn't write anything to the DB by design. pub replica_pool: PoolResource, + pub dumps_object_store: Option, #[context(default)] pub app_health: AppHealthCheckResource, } @@ -65,6 +67,7 @@ impl WiringLayer for VmPlaygroundLayer { async fn wire(self, input: Self::Input) -> Result { let Input { replica_pool, + dumps_object_store, app_health, } = input; @@ -95,6 +98,7 @@ impl WiringLayer for VmPlaygroundLayer { }; let (playground, tasks) = VmPlayground::new( connection_pool, + dumps_object_store.map(|resource| resource.0), self.config.fast_vm_mode, storage, self.zksync_network_id, diff --git a/core/node/node_framework/src/implementations/layers/web3_api/server/bridge_addresses.rs b/core/node/node_framework/src/implementations/layers/web3_api/server/bridge_addresses.rs new file mode 100644 index 00000000000..4ba8098c839 --- /dev/null +++ b/core/node/node_framework/src/implementations/layers/web3_api/server/bridge_addresses.rs @@ -0,0 +1,48 @@ +use std::time::Duration; + +use zksync_node_api_server::web3::state::BridgeAddressesHandle; +use zksync_web3_decl::{ + client::{DynClient, L2}, + namespaces::ZksNamespaceClient, +}; + +use crate::{StopReceiver, Task, TaskId}; + +#[derive(Debug)] +pub struct BridgeAddressesUpdaterTask { + pub bridge_address_updater: BridgeAddressesHandle, + pub main_node_client: Box>, + pub update_interval: Option, +} + +#[async_trait::async_trait] +impl Task for BridgeAddressesUpdaterTask { + fn id(&self) -> TaskId { + "bridge_addresses_updater_task".into() + } + + async fn run(self: Box, mut stop_receiver: StopReceiver) -> anyhow::Result<()> { + const DEFAULT_INTERVAL: Duration = Duration::from_secs(30); + + let update_interval = self.update_interval.unwrap_or(DEFAULT_INTERVAL); + while !*stop_receiver.0.borrow_and_update() { + match self.main_node_client.get_bridge_contracts().await { + Ok(bridge_addresses) => { + self.bridge_address_updater.update(bridge_addresses).await; + } + Err(err) => { + 
+                    tracing::error!("Failed to query `get_bridge_contracts`, error: {err:?}");
+                }
+            }
+
+            if tokio::time::timeout(update_interval, stop_receiver.0.changed())
+                .await
+                .is_ok()
+            {
+                break;
+            }
+        }
+
+        Ok(())
+    }
+}
diff --git a/core/node/node_framework/src/implementations/layers/web3_api/server.rs b/core/node/node_framework/src/implementations/layers/web3_api/server/mod.rs
similarity index 81%
rename from core/node/node_framework/src/implementations/layers/web3_api/server.rs
rename to core/node/node_framework/src/implementations/layers/web3_api/server/mod.rs
index 0a39ae747c7..390d321647c 100644
--- a/core/node/node_framework/src/implementations/layers/web3_api/server.rs
+++ b/core/node/node_framework/src/implementations/layers/web3_api/server/mod.rs
@@ -3,15 +3,24 @@ use std::{num::NonZeroU32, time::Duration};
 use tokio::{sync::oneshot, task::JoinHandle};
 use zksync_circuit_breaker::replication_lag::ReplicationLagChecker;
 use zksync_config::configs::api::MaxResponseSize;
-use zksync_node_api_server::web3::{state::InternalApiConfig, ApiBuilder, ApiServer, Namespace};
+use zksync_node_api_server::web3::{
+    state::{BridgeAddressesHandle, InternalApiConfig, SealedL2BlockNumber},
+    ApiBuilder, ApiServer, Namespace,
+};
 
 use crate::{
-    implementations::resources::{
-        circuit_breakers::CircuitBreakersResource,
-        healthcheck::AppHealthCheckResource,
-        pools::{PoolResource, ReplicaPool},
-        sync_state::SyncStateResource,
-        web3_api::{MempoolCacheResource, TreeApiClientResource, TxSenderResource},
+    implementations::{
+        layers::web3_api::server::{
+            bridge_addresses::BridgeAddressesUpdaterTask, sealed_l2_block::SealedL2BlockUpdaterTask,
+        },
+        resources::{
+            circuit_breakers::CircuitBreakersResource,
+            healthcheck::AppHealthCheckResource,
+            main_node_client::MainNodeClientResource,
+            pools::{PoolResource, ReplicaPool},
+            sync_state::SyncStateResource,
+            web3_api::{MempoolCacheResource, TreeApiClientResource, TxSenderResource},
+        },
     },
     service::StopReceiver,
     task::{Task, TaskId},
@@ -19,6 +28,9 @@ use crate::{
     FromContext, IntoContext,
 };
 
+mod bridge_addresses;
+mod sealed_l2_block;
+
 /// Set of optional variables that can be altered to modify the behavior of API builder.
 #[derive(Debug, Default)]
 pub struct Web3ServerOptionalConfig {
@@ -33,6 +45,8 @@ pub struct Web3ServerOptionalConfig {
     pub replication_lag_limit: Option<Duration>,
     // Used by the external node.
     pub pruning_info_refresh_interval: Option<Duration>,
+    // Used by the external node.
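+    // (When unset, `BridgeAddressesUpdaterTask` falls back to its 30-second `DEFAULT_INTERVAL`.)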
+    pub bridge_addresses_refresh_interval: Option<Duration>,
     pub polling_interval: Option<Duration>,
 }
 
@@ -61,6 +75,10 @@ impl Web3ServerOptionalConfig {
         if let Some(polling_interval) = self.polling_interval {
             api_builder = api_builder.with_polling_interval(polling_interval);
         }
+        if let Some(pruning_info_refresh_interval) = self.pruning_info_refresh_interval {
+            api_builder =
+                api_builder.with_pruning_info_refresh_interval(pruning_info_refresh_interval);
+        }
         api_builder = api_builder.with_extended_tracing(self.with_extended_tracing);
         api_builder
     }
@@ -109,6 +127,7 @@ pub struct Input {
     pub circuit_breakers: CircuitBreakersResource,
     #[context(default)]
     pub app_health: AppHealthCheckResource,
+    pub main_node_client: Option<MainNodeClientResource>,
 }
 
 #[derive(Debug, IntoContext)]
@@ -118,6 +137,10 @@ pub struct Output {
     pub web3_api_task: Web3ApiTask,
     #[context(task)]
     pub garbage_collector_task: ApiTaskGarbageCollector,
+    #[context(task)]
+    pub sealed_l2_block_updater_task: SealedL2BlockUpdaterTask,
+    #[context(task)]
+    pub bridge_addresses_updater_task: Option<BridgeAddressesUpdaterTask>,
 }
 
 impl Web3ServerLayer {
@@ -163,20 +186,39 @@ impl WiringLayer for Web3ServerLayer {
     async fn wire(self, input: Self::Input) -> Result<Self::Output, WiringError> {
         // Get required resources.
         let replica_resource_pool = input.replica_pool;
-        let updaters_pool = replica_resource_pool.get_custom(2).await?;
+        let updaters_pool = replica_resource_pool.get_custom(1).await?;
         let replica_pool = replica_resource_pool.get().await?;
         let TxSenderResource(tx_sender) = input.tx_sender;
         let MempoolCacheResource(mempool_cache) = input.mempool_cache;
         let sync_state = input.sync_state.map(|state| state.0);
         let tree_api_client = input.tree_api_client.map(|client| client.0);
 
+        let sealed_l2_block_handle = SealedL2BlockNumber::default();
+        let bridge_addresses_handle =
+            BridgeAddressesHandle::new(self.internal_api_config.bridge_addresses.clone());
+
+        let sealed_l2_block_updater_task = SealedL2BlockUpdaterTask {
+            number_updater: sealed_l2_block_handle.clone(),
+            pool: updaters_pool,
+        };
+        // Bridge addresses updater task must be started for ENs and only for ENs.
+        let bridge_addresses_updater_task =
+            input
+                .main_node_client
+                .map(|main_node_client| BridgeAddressesUpdaterTask {
+                    bridge_address_updater: bridge_addresses_handle.clone(),
+                    main_node_client: main_node_client.0,
+                    update_interval: self.optional_config.bridge_addresses_refresh_interval,
+                });
+
         // Build server.
         let mut api_builder =
             ApiBuilder::jsonrpsee_backend(self.internal_api_config, replica_pool.clone())
-                .with_updaters_pool(updaters_pool)
                 .with_tx_sender(tx_sender)
                 .with_mempool_cache(mempool_cache)
-                .with_extended_tracing(self.optional_config.with_extended_tracing);
+                .with_extended_tracing(self.optional_config.with_extended_tracing)
+                .with_sealed_l2_block_handle(sealed_l2_block_handle)
+                .with_bridge_addresses_handle(bridge_addresses_handle);
         if let Some(client) = tree_api_client {
             api_builder = api_builder.with_tree_api(client);
         }
@@ -191,14 +233,9 @@ impl WiringLayer for Web3ServerLayer {
         if let Some(sync_state) = sync_state {
             api_builder = api_builder.with_sync_state(sync_state);
         }
-        if let Some(pruning_info_refresh_interval) =
-            self.optional_config.pruning_info_refresh_interval
-        {
-            api_builder =
-                api_builder.with_pruning_info_refresh_interval(pruning_info_refresh_interval);
-        }
         let replication_lag_limit = self.optional_config.replication_lag_limit;
         api_builder = self.optional_config.apply(api_builder);
+
         let server = api_builder.build()?;
 
         // Insert healthcheck.
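// Editor's sketch (illustrative, not part of the patch): how an external node opts into the new
// bridge-address refresh. `Duration::from_secs(30)` is an assumed value, not a recommended default;
// `Web3ServerOptionalConfig` derives `Default`, so all other knobs keep their defaults:
//
//     let optional_config = Web3ServerOptionalConfig {
//         bridge_addresses_refresh_interval: Some(Duration::from_secs(30)),
//         ..Web3ServerOptionalConfig::default()
//     };
//
// The updater task itself is only spawned when `main_node_client` is present in the wiring input,
// matching the "ENs and only ENs" comment above.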
@@ -230,6 +267,8 @@ impl WiringLayer for Web3ServerLayer {
         Ok(Output {
             web3_api_task,
             garbage_collector_task,
+            sealed_l2_block_updater_task,
+            bridge_addresses_updater_task,
         })
     }
 }
diff --git a/core/node/node_framework/src/implementations/layers/web3_api/server/sealed_l2_block.rs b/core/node/node_framework/src/implementations/layers/web3_api/server/sealed_l2_block.rs
new file mode 100644
index 00000000000..02552e212cd
--- /dev/null
+++ b/core/node/node_framework/src/implementations/layers/web3_api/server/sealed_l2_block.rs
@@ -0,0 +1,50 @@
+use std::time::Duration;
+
+use zksync_dal::{Core, CoreDal};
+use zksync_db_connection::connection_pool::ConnectionPool;
+use zksync_node_api_server::web3::state::SealedL2BlockNumber;
+
+use crate::{StopReceiver, Task, TaskId};
+
+#[derive(Debug)]
+pub struct SealedL2BlockUpdaterTask {
+    pub number_updater: SealedL2BlockNumber,
+    pub pool: ConnectionPool<Core>,
+}
+
+#[async_trait::async_trait]
+impl Task for SealedL2BlockUpdaterTask {
+    fn id(&self) -> TaskId {
+        "api_sealed_l2_block_updater_task".into()
+    }
+
+    async fn run(self: Box<Self>, mut stop_receiver: StopReceiver) -> anyhow::Result<()> {
+        // Chosen to be significantly smaller than the interval between L2 blocks, but larger than
+        // the latency of getting the latest sealed L2 block number from Postgres. If the API server
+        // processes enough requests, information about the latest sealed L2 block will be updated
+        // by reporting block difference metrics, so the actual update lag would be much smaller than this value.
+        const UPDATE_INTERVAL: Duration = Duration::from_millis(25);
+
+        while !*stop_receiver.0.borrow_and_update() {
+            let mut connection = self.pool.connection_tagged("api").await.unwrap();
+            let Some(last_sealed_l2_block) =
+                connection.blocks_dal().get_sealed_l2_block_number().await?
+            else {
+                tokio::time::sleep(UPDATE_INTERVAL).await;
+                continue;
+            };
+            drop(connection);
+
+            self.number_updater.update(last_sealed_l2_block);
+
+            if tokio::time::timeout(UPDATE_INTERVAL, stop_receiver.0.changed())
+                .await
+                .is_ok()
+            {
+                break;
+            }
+        }
+
+        Ok(())
+    }
+}
diff --git a/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs b/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs
index 3574b8e8c24..a09938055fa 100644
--- a/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs
+++ b/core/node/node_framework/src/implementations/layers/web3_api/tx_sender.rs
@@ -3,10 +3,10 @@ use std::{sync::Arc, time::Duration};
 use tokio::sync::RwLock;
 use zksync_node_api_server::{
     execution_sandbox::{VmConcurrencyBarrier, VmConcurrencyLimiter},
-    tx_sender::{ApiContracts, TxSenderBuilder, TxSenderConfig},
+    tx_sender::{SandboxExecutorOptions, TxSenderBuilder, TxSenderConfig},
 };
 use zksync_state::{PostgresStorageCaches, PostgresStorageCachesTask};
-use zksync_types::Address;
+use zksync_types::{AccountTreeId, Address};
 use zksync_web3_decl::{
     client::{DynClient, L2},
     jsonrpsee,
@@ -58,7 +58,6 @@ pub struct TxSenderLayer {
     tx_sender_config: TxSenderConfig,
     postgres_storage_caches_config: PostgresStorageCachesConfig,
     max_vm_concurrency: usize,
-    api_contracts: ApiContracts,
     whitelisted_tokens_for_aa_cache: bool,
 }
 
@@ -89,13 +88,11 @@ impl TxSenderLayer {
         tx_sender_config: TxSenderConfig,
         postgres_storage_caches_config: PostgresStorageCachesConfig,
         max_vm_concurrency: usize,
-        api_contracts: ApiContracts,
     ) -> Self {
         Self {
             tx_sender_config,
             postgres_storage_caches_config,
             max_vm_concurrency,
-            api_contracts,
             whitelisted_tokens_for_aa_cache: false,
         }
    }
@@ -148,8 +145,17 @@ impl WiringLayer for TxSenderLayer {
         let (vm_concurrency_limiter, vm_concurrency_barrier) =
             VmConcurrencyLimiter::new(self.max_vm_concurrency);
 
+        // TODO (BFT-138): Allow to dynamically reload API contracts
+        let config = self.tx_sender_config;
+        let executor_options = SandboxExecutorOptions::new(
+            config.chain_id,
+            AccountTreeId::new(config.fee_account_addr),
+            config.validation_computational_gas_limit,
+        )
+        .await?;
+
         // Build `TxSender`.
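+        // (`SandboxExecutorOptions::new` is async because it presumably loads the sandbox's base
+        // system contracts — the data previously supplied via the removed `ApiContracts` argument.
+        // Callers now build the layer as `TxSenderLayer::new(tx_sender_config,
+        // postgres_storage_caches_config, max_vm_concurrency)`, with no contracts parameter.)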
- let mut tx_sender = TxSenderBuilder::new(self.tx_sender_config, replica_pool, tx_sink); + let mut tx_sender = TxSenderBuilder::new(config, replica_pool, tx_sink); if let Some(sealer) = sealer { tx_sender = tx_sender.with_sealer(sealer); } @@ -176,7 +182,7 @@ impl WiringLayer for TxSenderLayer { let tx_sender = tx_sender.build( fee_input, Arc::new(vm_concurrency_limiter), - self.api_contracts, + executor_options, storage_caches, ); diff --git a/core/node/node_sync/src/external_io.rs b/core/node/node_sync/src/external_io.rs index b7b8930c495..6075ff048bf 100644 --- a/core/node/node_sync/src/external_io.rs +++ b/core/node/node_sync/src/external_io.rs @@ -49,7 +49,7 @@ impl ExternalIO { main_node_client: Box, chain_id: L2ChainId, ) -> anyhow::Result { - let l1_batch_params_provider = L1BatchParamsProvider::new(); + let l1_batch_params_provider = L1BatchParamsProvider::uninitialized(); Ok(Self { pool, l1_batch_params_provider, diff --git a/core/node/proof_data_handler/src/tests.rs b/core/node/proof_data_handler/src/tests.rs index 6ab7e4dec43..86cc5323448 100644 --- a/core/node/proof_data_handler/src/tests.rs +++ b/core/node/proof_data_handler/src/tests.rs @@ -131,9 +131,9 @@ async fn submit_tee_proof() { // send a request to the /tee/submit_proofs endpoint, using a mocked TEE proof let tee_proof_request_str = r#"{ - "signature": [ 0, 1, 2, 3, 4 ], - "pubkey": [ 5, 6, 7, 8, 9 ], - "proof": [ 10, 11, 12, 13, 14 ], + "signature": "0001020304", + "pubkey": "0506070809", + "proof": "0A0B0C0D0E", "tee_type": "sgx" }"#; let tee_proof_request = diff --git a/core/node/state_keeper/Cargo.toml b/core/node/state_keeper/Cargo.toml index 1810cc00de5..0e924b9f066 100644 --- a/core/node/state_keeper/Cargo.toml +++ b/core/node/state_keeper/Cargo.toml @@ -44,6 +44,7 @@ hex.workspace = true [dev-dependencies] assert_matches.workspace = true +rand.workspace = true tempfile.workspace = true test-casing.workspace = true futures.workspace = true diff --git a/core/node/state_keeper/src/executor/tests/mod.rs b/core/node/state_keeper/src/executor/tests/mod.rs index 6fa4522d43f..04fb016ab63 100644 --- a/core/node/state_keeper/src/executor/tests/mod.rs +++ b/core/node/state_keeper/src/executor/tests/mod.rs @@ -1,8 +1,8 @@ // FIXME: move storage-agnostic tests to VM executor crate use assert_matches::assert_matches; +use rand::{thread_rng, Rng}; use test_casing::{test_casing, Product}; -use tester::AccountFailedCall; use zksync_dal::{ConnectionPool, Core}; use zksync_multivm::interface::{BatchTransactionExecutionResult, ExecutionResult, Halt}; use zksync_test_account::Account; @@ -10,7 +10,9 @@ use zksync_types::{ get_nonce_key, utils::storage_key_for_eth_balance, vm::FastVmMode, PriorityOpId, }; -use self::tester::{AccountLoadNextExecutable, StorageSnapshot, TestConfig, Tester}; +use self::tester::{ + AccountFailedCall, AccountLoadNextExecutable, StorageSnapshot, TestConfig, Tester, +}; mod read_storage_factory; mod tester; @@ -425,7 +427,7 @@ async fn bootloader_out_of_gas_for_any_tx(vm_mode: FastVmMode) { let mut tester = Tester::with_config( connection_pool, TestConfig { - save_call_traces: false, + trace_calls: false, vm_gas_limit: Some(10), validation_computational_gas_limit: u32::MAX, fast_vm_mode: vm_mode, @@ -470,7 +472,7 @@ async fn bootloader_tip_out_of_gas() { // Just a bit below the gas used for the previous batch execution should be fine to execute the tx // but not enough to execute the block tip. 
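     // (The limit is derived from the previous batch's `block_tip_execution_result`, as seen below.)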
tester.set_config(TestConfig { - save_call_traces: false, + trace_calls: false, vm_gas_limit: Some( finished_batch .block_tip_execution_result @@ -538,3 +540,60 @@ async fn catchup_rocksdb_cache() { let res = executor.execute_tx(tx).await.unwrap(); assert_rejected(&res); } + +#[test_casing(3, FAST_VM_MODES)] +#[tokio::test] +async fn execute_tx_with_large_packable_bytecode(vm_mode: FastVmMode) { + // The rough length of the packed bytecode should be 350_000 / 4 = 87500, + // which should fit into a batch + const BYTECODE_LEN: usize = 350_016 + 32; // +32 to ensure validity of the bytecode + + let connection_pool = ConnectionPool::::constrained_test_pool(1).await; + let mut alice = Account::random(); + let mut tester = Tester::new(connection_pool, vm_mode); + let mut rng = thread_rng(); + + tester.genesis().await; + tester.fund(&[alice.address()]).await; + let mut executor = tester + .create_batch_executor(StorageType::AsyncRocksdbCache) + .await; + + let mut packable_bytecode = vec![]; + while packable_bytecode.len() < BYTECODE_LEN { + packable_bytecode.extend_from_slice(&if rng.gen() { [0_u8; 8] } else { [0xff_u8; 8] }); + } + let tx = alice.execute_with_factory_deps(vec![packable_bytecode.clone()]); + + let res = executor.execute_tx(tx).await.unwrap(); + assert_matches!(res.tx_result.result, ExecutionResult::Success { .. }); + assert_eq!(res.compressed_bytecodes.len(), 1); + assert_eq!(res.compressed_bytecodes[0].original, packable_bytecode); + assert!(res.compressed_bytecodes[0].compressed.len() < BYTECODE_LEN / 2); + + executor.finish_batch().await.unwrap(); +} + +#[test_casing(2, [FastVmMode::Old, FastVmMode::Shadow])] // new VM doesn't support call tracing yet +#[tokio::test] +async fn execute_tx_with_call_traces(vm_mode: FastVmMode) { + let connection_pool = ConnectionPool::::constrained_test_pool(1).await; + let mut alice = Account::random(); + let mut tester = Tester::with_config( + connection_pool, + TestConfig { + trace_calls: true, + ..TestConfig::new(vm_mode) + }, + ); + + tester.genesis().await; + tester.fund(&[alice.address()]).await; + let mut executor = tester + .create_batch_executor(StorageType::AsyncRocksdbCache) + .await; + let res = executor.execute_tx(alice.execute()).await.unwrap(); + + assert_matches!(res.tx_result.result, ExecutionResult::Success { .. }); + assert!(!res.call_traces.is_empty()); +} diff --git a/core/node/state_keeper/src/executor/tests/tester.rs b/core/node/state_keeper/src/executor/tests/tester.rs index 8256435f2f5..7a1871dbfea 100644 --- a/core/node/state_keeper/src/executor/tests/tester.rs +++ b/core/node/state_keeper/src/executor/tests/tester.rs @@ -35,7 +35,7 @@ use zksync_types::{ StorageLog, Transaction, H256, L2_BASE_TOKEN_ADDRESS, U256, }; use zksync_utils::u256_to_h256; -use zksync_vm_executor::batch::MainBatchExecutorFactory; +use zksync_vm_executor::batch::{MainBatchExecutorFactory, TraceCalls}; use super::{read_storage_factory::RocksdbStorageFactory, StorageType}; use crate::{ @@ -49,7 +49,7 @@ use crate::{ /// Has sensible defaults for most tests, each of which can be overridden. 
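 /// (Individual tests override these via `Tester::with_config` / `Tester::set_config`.)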
#[derive(Debug)] pub(super) struct TestConfig { - pub(super) save_call_traces: bool, + pub(super) trace_calls: bool, pub(super) vm_gas_limit: Option, pub(super) validation_computational_gas_limit: u32, pub(super) fast_vm_mode: FastVmMode, @@ -60,8 +60,8 @@ impl TestConfig { let config = StateKeeperConfig::for_tests(); Self { + trace_calls: false, vm_gas_limit: None, - save_call_traces: false, validation_computational_gas_limit: config.validation_computational_gas_limit, fast_vm_mode, } @@ -149,16 +149,21 @@ impl Tester { l1_batch_env: L1BatchEnv, system_env: SystemEnv, ) -> Box> { - let mut batch_executor = MainBatchExecutorFactory::new(self.config.save_call_traces, false); - batch_executor.set_fast_vm_mode(self.config.fast_vm_mode); - let (_stop_sender, stop_receiver) = watch::channel(false); let storage = storage_factory .access_storage(&stop_receiver, l1_batch_env.number - 1) .await .expect("failed creating VM storage") .unwrap(); - batch_executor.init_batch(storage, l1_batch_env, system_env) + if self.config.trace_calls { + let mut executor = MainBatchExecutorFactory::::new(false); + executor.set_fast_vm_mode(self.config.fast_vm_mode); + executor.init_batch(storage, l1_batch_env, system_env) + } else { + let mut executor = MainBatchExecutorFactory::<()>::new(false); + executor.set_fast_vm_mode(self.config.fast_vm_mode); + executor.init_batch(storage, l1_batch_env, system_env) + } } pub(super) async fn recover_batch_executor( @@ -319,6 +324,9 @@ pub trait AccountLoadNextExecutable { /// Returns a valid `execute` transaction. /// Automatically increments nonce of the account. fn execute(&mut self) -> Transaction; + /// Returns an `execute` transaction with custom factory deps (which aren't used in a transaction, + /// so they are mostly useful to test bytecode compression). + fn execute_with_factory_deps(&mut self, factory_deps: Vec>) -> Transaction; fn loadnext_custom_writes_call( &mut self, address: Address, @@ -377,6 +385,18 @@ impl AccountLoadNextExecutable for Account { self.execute_with_gas_limit(1_000_000) } + fn execute_with_factory_deps(&mut self, factory_deps: Vec>) -> Transaction { + self.get_l2_tx_for_execute( + Execute { + contract_address: Some(Address::random()), + calldata: vec![], + value: Default::default(), + factory_deps, + }, + Some(testonly::fee(30_000_000)), + ) + } + /// Returns a transaction to the loadnext contract with custom amount of write requests. /// Increments the account nonce. 
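     /// (The `writes` argument feeds `LoadnextContractExecutionParams::writes`; reads stay fixed at 100.)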
fn loadnext_custom_writes_call( @@ -396,7 +416,7 @@ impl AccountLoadNextExecutable for Account { self.get_l2_tx_for_execute( Execute { - contract_address: address, + contract_address: Some(address), calldata: LoadnextContractExecutionParams { reads: 100, writes: writes as usize, @@ -432,7 +452,7 @@ impl AccountLoadNextExecutable for Account { self.get_l2_tx_for_execute( Execute { - contract_address: address, + contract_address: Some(address), calldata, value: Default::default(), factory_deps: vec![], diff --git a/core/node/state_keeper/src/io/common/tests.rs b/core/node/state_keeper/src/io/common/tests.rs index 9ea699234f8..b2a24acb495 100644 --- a/core/node/state_keeper/src/io/common/tests.rs +++ b/core/node/state_keeper/src/io/common/tests.rs @@ -103,8 +103,7 @@ async fn waiting_for_l1_batch_params_with_genesis() { .await .unwrap(); - let mut provider = L1BatchParamsProvider::new(); - provider.initialize(&mut storage).await.unwrap(); + let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); let (hash, timestamp) = provider .wait_for_l1_batch_params(&mut storage, L1BatchNumber(0)) .await @@ -143,8 +142,7 @@ async fn waiting_for_l1_batch_params_after_snapshot_recovery() { let snapshot_recovery = prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), L2BlockNumber(42), &[]).await; - let mut provider = L1BatchParamsProvider::new(); - provider.initialize(&mut storage).await.unwrap(); + let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); let (hash, timestamp) = provider .wait_for_l1_batch_params(&mut storage, snapshot_recovery.l1_batch_number) .await @@ -192,8 +190,7 @@ async fn getting_first_l2_block_in_batch_with_genesis() { .await .unwrap(); - let mut provider = L1BatchParamsProvider::new(); - provider.initialize(&mut storage).await.unwrap(); + let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); let mut batches_and_l2_blocks = HashMap::from([ (L1BatchNumber(0), Ok(Some(L2BlockNumber(0)))), (L1BatchNumber(1), Ok(Some(L2BlockNumber(1)))), @@ -264,8 +261,7 @@ async fn getting_first_l2_block_in_batch_after_snapshot_recovery() { let snapshot_recovery = prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), L2BlockNumber(42), &[]).await; - let mut provider = L1BatchParamsProvider::new(); - provider.initialize(&mut storage).await.unwrap(); + let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); let mut batches_and_l2_blocks = HashMap::from([ (L1BatchNumber(1), Err(())), (snapshot_recovery.l1_batch_number, Err(())), @@ -321,24 +317,20 @@ async fn loading_pending_batch_with_genesis() { ) .await; - let mut provider = L1BatchParamsProvider::new(); - provider.initialize(&mut storage).await.unwrap(); - let first_l2_block_in_batch = provider - .load_first_l2_block_in_batch(&mut storage, L1BatchNumber(1)) - .await - .unwrap() - .expect("no first L2 block"); - assert_eq!(first_l2_block_in_batch.number(), L2BlockNumber(1)); - + let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); let (system_env, l1_batch_env) = provider - .load_l1_batch_params( + .load_l1_batch_env( &mut storage, - &first_l2_block_in_batch, + L1BatchNumber(1), u32::MAX, L2ChainId::default(), ) .await - .unwrap(); + .unwrap() + .expect("no L1 batch"); + + assert_eq!(l1_batch_env.first_l2_block.number, 1); + let pending_batch = load_pending_batch(&mut storage, system_env, l1_batch_env) .await .unwrap(); @@ -403,27 +395,17 @@ async fn loading_pending_batch_after_snapshot_recovery() { ) .await; - let mut provider = L1BatchParamsProvider::new(); 
- provider.initialize(&mut storage).await.unwrap(); - let first_l2_block_in_batch = provider - .load_first_l2_block_in_batch(&mut storage, snapshot_recovery.l1_batch_number + 1) - .await - .unwrap() - .expect("no first L2 block"); - assert_eq!( - first_l2_block_in_batch.number(), - snapshot_recovery.l2_block_number + 1 - ); - + let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); let (system_env, l1_batch_env) = provider - .load_l1_batch_params( + .load_l1_batch_env( &mut storage, - &first_l2_block_in_batch, + snapshot_recovery.l1_batch_number + 1, u32::MAX, L2ChainId::default(), ) .await - .unwrap(); + .unwrap() + .expect("no L1 batch"); let pending_batch = load_pending_batch(&mut storage, system_env, l1_batch_env) .await .unwrap(); @@ -466,8 +448,7 @@ async fn getting_batch_version_with_genesis() { .await .unwrap(); - let mut provider = L1BatchParamsProvider::new(); - provider.initialize(&mut storage).await.unwrap(); + let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); let version = provider .load_l1_batch_protocol_version(&mut storage, L1BatchNumber(0)) .await @@ -506,8 +487,7 @@ async fn getting_batch_version_after_snapshot_recovery() { let snapshot_recovery = prepare_recovery_snapshot(&mut storage, L1BatchNumber(23), L2BlockNumber(42), &[]).await; - let mut provider = L1BatchParamsProvider::new(); - provider.initialize(&mut storage).await.unwrap(); + let provider = L1BatchParamsProvider::new(&mut storage).await.unwrap(); let version = provider .load_l1_batch_protocol_version(&mut storage, snapshot_recovery.l1_batch_number) .await diff --git a/core/node/state_keeper/src/io/mempool.rs b/core/node/state_keeper/src/io/mempool.rs index 5734977538b..108283122bc 100644 --- a/core/node/state_keeper/src/io/mempool.rs +++ b/core/node/state_keeper/src/io/mempool.rs @@ -97,30 +97,18 @@ impl StateKeeperIO for MempoolIO { L2BlockSealProcess::clear_pending_l2_block(&mut storage, cursor.next_l2_block - 1).await?; - let pending_l2_block_header = self + let Some((system_env, l1_batch_env)) = self .l1_batch_params_provider - .load_first_l2_block_in_batch(&mut storage, cursor.l1_batch) - .await - .with_context(|| { - format!( - "failed loading first L2 block for L1 batch #{}", - cursor.l1_batch - ) - })?; - let Some(pending_l2_block_header) = pending_l2_block_header else { - return Ok((cursor, None)); - }; - - let (system_env, l1_batch_env) = self - .l1_batch_params_provider - .load_l1_batch_params( + .load_l1_batch_env( &mut storage, - &pending_l2_block_header, + cursor.l1_batch, self.validation_computational_gas_limit, self.chain_id, ) - .await - .with_context(|| format!("failed loading params for L1 batch #{}", cursor.l1_batch))?; + .await? 
+ else { + return Ok((cursor, None)); + }; let pending_batch_data = load_pending_batch(&mut storage, system_env, l1_batch_env) .await .with_context(|| { @@ -436,7 +424,7 @@ impl MempoolIO { l2_block_max_payload_size_sealer: L2BlockMaxPayloadSizeSealer::new(config), filter: L2TxFilter::default(), // ^ Will be initialized properly on the first newly opened batch - l1_batch_params_provider: L1BatchParamsProvider::new(), + l1_batch_params_provider: L1BatchParamsProvider::uninitialized(), fee_account, validation_computational_gas_limit: config.validation_computational_gas_limit, max_allowed_tx_gas_limit: config.max_allowed_l2_tx_gas_limit.into(), diff --git a/core/node/state_keeper/src/testonly/mod.rs b/core/node/state_keeper/src/testonly/mod.rs index 23aec8af49f..edcf3ccc4f5 100644 --- a/core/node/state_keeper/src/testonly/mod.rs +++ b/core/node/state_keeper/src/testonly/mod.rs @@ -131,7 +131,7 @@ pub fn fee(gas_limit: u32) -> Fee { pub fn l2_transaction(account: &mut Account, gas_limit: u32) -> Transaction { account.get_l2_tx_for_execute( Execute { - contract_address: Address::random(), + contract_address: Some(Address::random()), calldata: vec![], value: Default::default(), factory_deps: vec![], @@ -143,7 +143,7 @@ pub fn l2_transaction(account: &mut Account, gas_limit: u32) -> Transaction { pub fn l1_transaction(account: &mut Account, serial_id: PriorityOpId) -> Transaction { account.get_l1_tx( Execute { - contract_address: Address::random(), + contract_address: Some(Address::random()), value: Default::default(), calldata: vec![], factory_deps: vec![], diff --git a/core/node/tee_verifier_input_producer/src/lib.rs b/core/node/tee_verifier_input_producer/src/lib.rs index 08382903ad6..8a99aa07ae5 100644 --- a/core/node/tee_verifier_input_producer/src/lib.rs +++ b/core/node/tee_verifier_input_producer/src/lib.rs @@ -77,34 +77,24 @@ impl TeeVerifierInputProducer { .with_context(|| format!("header is missing for L1 batch #{l1_batch_number}"))? .unwrap(); - let mut l1_batch_params_provider = L1BatchParamsProvider::new(); - l1_batch_params_provider - .initialize(&mut connection) + let l1_batch_params_provider = L1BatchParamsProvider::new(&mut connection) .await .context("failed initializing L1 batch params provider")?; - let first_miniblock_in_batch = l1_batch_params_provider - .load_first_l2_block_in_batch(&mut connection, l1_batch_number) - .await - .with_context(|| { - format!("failed loading first miniblock in L1 batch #{l1_batch_number}") - })? - .with_context(|| format!("no miniblocks persisted for L1 batch #{l1_batch_number}"))?; - // In the state keeper, this value is used to reject execution. // All batches have already been executed by State Keeper. // This means we don't want to reject any execution, therefore we're using MAX as an allow all. let validation_computational_gas_limit = u32::MAX; let (system_env, l1_batch_env) = l1_batch_params_provider - .load_l1_batch_params( + .load_l1_batch_env( &mut connection, - &first_miniblock_in_batch, + l1_batch_number, validation_computational_gas_limit, l2_chain_id, ) - .await - .context("expected miniblock to be executed and sealed")?; + .await? 
+ .with_context(|| format!("expected L1 batch #{l1_batch_number} to be sealed"))?; let used_contract_hashes = l1_batch_header .used_contract_hashes diff --git a/core/node/test_utils/src/lib.rs b/core/node/test_utils/src/lib.rs index 7d81745877d..a065ebc7182 100644 --- a/core/node/test_utils/src/lib.rs +++ b/core/node/test_utils/src/lib.rs @@ -96,7 +96,7 @@ pub fn create_l1_batch_metadata(number: u32) -> L1BatchMetadata { events_queue_commitment: Some(H256::zero()), bootloader_initial_content_commitment: Some(H256::zero()), state_diffs_compressed: vec![], - da_blob_id: Some(vec![]) + da_blob_id: Some(vec![]), } } @@ -139,7 +139,7 @@ pub fn create_l2_transaction(fee_per_gas: u64, gas_per_pubdata: u64) -> L2Tx { gas_per_pubdata_limit: gas_per_pubdata.into(), }; let mut tx = L2Tx::new_signed( - Address::random(), + Some(Address::random()), vec![], Nonce(0), fee, diff --git a/core/node/vm_runner/Cargo.toml b/core/node/vm_runner/Cargo.toml index ceb11a98247..9c235ad6b29 100644 --- a/core/node/vm_runner/Cargo.toml +++ b/core/node/vm_runner/Cargo.toml @@ -24,6 +24,7 @@ zksync_vm_executor.workspace = true zksync_health_check.workspace = true serde.workspace = true +serde_json.workspace = true tokio = { workspace = true, features = ["time"] } anyhow.workspace = true async-trait.workspace = true diff --git a/core/node/vm_runner/src/impls/playground.rs b/core/node/vm_runner/src/impls/playground.rs index dc21d5a3203..618a3a86c82 100644 --- a/core/node/vm_runner/src/impls/playground.rs +++ b/core/node/vm_runner/src/impls/playground.rs @@ -1,4 +1,5 @@ use std::{ + hash::{DefaultHasher, Hash, Hasher}, io, num::NonZeroU32, path::{Path, PathBuf}, @@ -14,10 +15,14 @@ use tokio::{ }; use zksync_dal::{Connection, ConnectionPool, Core, CoreDal}; use zksync_health_check::{Health, HealthStatus, HealthUpdater, ReactiveHealthCheck}; +use zksync_object_store::{Bucket, ObjectStore}; use zksync_state::RocksdbStorage; use zksync_types::{vm::FastVmMode, L1BatchNumber, L2ChainId}; use zksync_vm_executor::batch::MainBatchExecutorFactory; -use zksync_vm_interface::{L1BatchEnv, L2BlockEnv, SystemEnv}; +use zksync_vm_interface::{ + utils::{DivergenceHandler, VmDump}, + L1BatchEnv, L2BlockEnv, SystemEnv, +}; use crate::{ storage::{PostgresLoader, StorageLoader}, @@ -82,7 +87,7 @@ enum VmPlaygroundStorage { #[derive(Debug)] pub struct VmPlayground { pool: ConnectionPool, - batch_executor_factory: MainBatchExecutorFactory, + batch_executor_factory: MainBatchExecutorFactory<()>, storage: VmPlaygroundStorage, chain_id: L2ChainId, io: VmPlaygroundIo, @@ -95,6 +100,7 @@ impl VmPlayground { /// Creates a new playground. 
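     /// (`dumps_object_store`, added below, receives JSON dumps of diverged VM states; see `dump_vm_state`.)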
     pub async fn new(
         pool: ConnectionPool<Core>,
+        dumps_object_store: Option<Arc<dyn ObjectStore>>,
         vm_mode: FastVmMode,
         storage: VmPlaygroundStorageOptions,
         chain_id: L2ChainId,
@@ -127,9 +133,25 @@ impl VmPlayground {
             latest_processed_batch.unwrap_or(cursor.first_processed_batch)
         };
 
-        let mut batch_executor_factory = MainBatchExecutorFactory::new(false, false);
+        let mut batch_executor_factory = MainBatchExecutorFactory::new(false);
         batch_executor_factory.set_fast_vm_mode(vm_mode);
         batch_executor_factory.observe_storage_metrics();
+        let handle = tokio::runtime::Handle::current();
+        if let Some(store) = dumps_object_store {
+            tracing::info!("Using object store for VM dumps: {store:?}");
+
+            let handler = DivergenceHandler::new(move |err, dump| {
+                let err_message = err.to_string();
+                if let Err(err) = handle.block_on(Self::dump_vm_state(&*store, &err_message, &dump))
+                {
+                    let l1_batch_number = dump.l1_batch_number();
+                    tracing::error!(
+                        "Saving VM dump for L1 batch #{l1_batch_number} failed: {err:#}"
+                    );
+                }
+            });
+            batch_executor_factory.set_divergence_handler(handler);
+        }
 
         let io = VmPlaygroundIo {
             cursor_file_path,
@@ -176,6 +198,27 @@ impl VmPlayground {
         ))
     }
 
+    async fn dump_vm_state(
+        object_store: &dyn ObjectStore,
+        err_message: &str,
+        dump: &VmDump,
+    ) -> anyhow::Result<()> {
+        // Deduplicate VM dumps by the error hash so that we don't create a lot of dumps for the same error.
+        let mut hasher = DefaultHasher::new();
+        err_message.hash(&mut hasher);
+        let err_hash = hasher.finish();
+        let batch_number = dump.l1_batch_number().0;
+        let dump_filename = format!("shadow_vm_dump_batch{batch_number:08}_{err_hash:x}.json");
+
+        tracing::info!("Dumping diverged VM state to `{dump_filename}`");
+        let dump = serde_json::to_string(&dump).context("failed serializing VM dump")?;
+        object_store
+            .put_raw(Bucket::VmDumps, &dump_filename, dump.into_bytes())
+            .await
+            .context("failed putting VM dump to object store")?;
+        Ok(())
+    }
+
     /// Returns a health check for this component.
pub fn health_check(&self) -> ReactiveHealthCheck { self.io.health_updater.subscribe() diff --git a/core/node/vm_runner/src/impls/protective_reads.rs b/core/node/vm_runner/src/impls/protective_reads.rs index b1aff9fe382..7f968ee24bb 100644 --- a/core/node/vm_runner/src/impls/protective_reads.rs +++ b/core/node/vm_runner/src/impls/protective_reads.rs @@ -38,7 +38,7 @@ impl ProtectiveReadsWriter { let output_handler_factory = ProtectiveReadsOutputHandlerFactory { pool: pool.clone() }; let (output_handler_factory, output_handler_factory_task) = ConcurrentOutputHandlerFactory::new(pool.clone(), io.clone(), output_handler_factory); - let batch_processor = MainBatchExecutorFactory::new(false, false); + let batch_processor = MainBatchExecutorFactory::<()>::new(false); let vm_runner = VmRunner::new( pool, Arc::new(io), diff --git a/core/node/vm_runner/src/storage.rs b/core/node/vm_runner/src/storage.rs index cd746e4e1d9..2285455ba24 100644 --- a/core/node/vm_runner/src/storage.rs +++ b/core/node/vm_runner/src/storage.rs @@ -48,9 +48,8 @@ pub(crate) struct PostgresLoader { impl PostgresLoader { pub async fn new(pool: ConnectionPool, chain_id: L2ChainId) -> anyhow::Result { - let mut l1_batch_params_provider = L1BatchParamsProvider::new(); let mut conn = pool.connection_tagged("vm_runner").await?; - l1_batch_params_provider.initialize(&mut conn).await?; + let l1_batch_params_provider = L1BatchParamsProvider::new(&mut conn).await?; Ok(Self { pool, l1_batch_params_provider, @@ -151,12 +150,11 @@ impl VmRunnerStorage { chain_id: L2ChainId, ) -> anyhow::Result<(Self, StorageSyncTask)> { let mut conn = pool.connection_tagged(io.name()).await?; - let mut l1_batch_params_provider = L1BatchParamsProvider::new(); - l1_batch_params_provider - .initialize(&mut conn) + let l1_batch_params_provider = L1BatchParamsProvider::new(&mut conn) .await .context("Failed initializing L1 batch params provider")?; drop(conn); + let state = Arc::new(RwLock::new(State { rocksdb: None, l1_batch_number: L1BatchNumber(0), @@ -263,9 +261,7 @@ impl StorageSyncTask { state: Arc>, ) -> anyhow::Result { let mut conn = pool.connection_tagged(io.name()).await?; - let mut l1_batch_params_provider = L1BatchParamsProvider::new(); - l1_batch_params_provider - .initialize(&mut conn) + let l1_batch_params_provider = L1BatchParamsProvider::new(&mut conn) .await .context("Failed initializing L1 batch params provider")?; let target_l1_batch_number = io.latest_processed_batch(&mut conn).await?; @@ -398,29 +394,20 @@ pub(crate) async fn load_batch_execute_data( l1_batch_params_provider: &L1BatchParamsProvider, chain_id: L2ChainId, ) -> anyhow::Result> { - let first_l2_block_in_batch = l1_batch_params_provider - .load_first_l2_block_in_batch(conn, l1_batch_number) - .await - .with_context(|| { - format!( - "Failed loading first L2 block for L1 batch #{}", - l1_batch_number - ) - })?; - let Some(first_l2_block_in_batch) = first_l2_block_in_batch else { - return Ok(None); - }; - let (system_env, l1_batch_env) = l1_batch_params_provider - .load_l1_batch_params( + let Some((system_env, l1_batch_env)) = l1_batch_params_provider + .load_l1_batch_env( conn, - &first_l2_block_in_batch, + l1_batch_number, // `validation_computational_gas_limit` is only relevant when rejecting txs, but we // are re-executing so none of them should be rejected u32::MAX, chain_id, ) - .await - .with_context(|| format!("Failed loading params for L1 batch #{}", l1_batch_number))?; + .await? 
+ else { + return Ok(None); + }; + let l2_blocks = conn .transactions_dal() .get_l2_blocks_to_execute_for_l1_batch(l1_batch_number) diff --git a/core/node/vm_runner/src/tests/mod.rs b/core/node/vm_runner/src/tests/mod.rs index 9fe9e99e92c..53bef106a8f 100644 --- a/core/node/vm_runner/src/tests/mod.rs +++ b/core/node/vm_runner/src/tests/mod.rs @@ -202,7 +202,7 @@ pub fn create_l2_transaction( }; let tx = account.get_l2_tx_for_execute( Execute { - contract_address: Address::random(), + contract_address: Some(Address::random()), calldata: vec![], value: Default::default(), factory_deps: vec![], diff --git a/core/node/vm_runner/src/tests/playground.rs b/core/node/vm_runner/src/tests/playground.rs index aaaf4b45b1a..92cd149f405 100644 --- a/core/node/vm_runner/src/tests/playground.rs +++ b/core/node/vm_runner/src/tests/playground.rs @@ -74,6 +74,7 @@ async fn run_playground( let (playground, playground_tasks) = VmPlayground::new( pool.clone(), + None, FastVmMode::Shadow, storage, genesis_params.config().l2_chain_id, @@ -255,6 +256,7 @@ async fn using_larger_window_size(window_size: u32) { }; let (playground, playground_tasks) = VmPlayground::new( pool.clone(), + None, FastVmMode::Shadow, VmPlaygroundStorageOptions::from(&rocksdb_dir), genesis_params.config().l2_chain_id, diff --git a/core/node/vm_runner/src/tests/process.rs b/core/node/vm_runner/src/tests/process.rs index 115410ce8fb..8e9bd66f3c9 100644 --- a/core/node/vm_runner/src/tests/process.rs +++ b/core/node/vm_runner/src/tests/process.rs @@ -54,7 +54,7 @@ async fn process_batches((batch_count, window): (u32, u32)) -> anyhow::Result<() tokio::task::spawn(async move { task.run(output_stop_receiver).await.unwrap() }); let storage = Arc::new(storage); - let batch_executor = MainBatchExecutorFactory::new(false, false); + let batch_executor = MainBatchExecutorFactory::<()>::new(false); let vm_runner = VmRunner::new( connection_pool, io.clone(), diff --git a/core/node/vm_runner/src/tests/storage_writer.rs b/core/node/vm_runner/src/tests/storage_writer.rs index c377cf95b5a..28a244308fe 100644 --- a/core/node/vm_runner/src/tests/storage_writer.rs +++ b/core/node/vm_runner/src/tests/storage_writer.rs @@ -181,7 +181,7 @@ pub(super) async fn write_storage_logs(pool: ConnectionPool, insert_protec .await .unwrap(); let loader = Arc::new(loader); - let batch_executor = MainBatchExecutorFactory::new(false, false); + let batch_executor = MainBatchExecutorFactory::<()>::new(false); let vm_runner = VmRunner::new(pool, io.clone(), loader, io, Box::new(batch_executor)); let (stop_sender, stop_receiver) = watch::channel(false); let vm_runner_handle = tokio::spawn(async move { vm_runner.run(&stop_receiver).await }); @@ -239,7 +239,7 @@ async fn storage_writer_works(insert_protective_reads: bool) { let (output_factory, output_factory_task) = ConcurrentOutputHandlerFactory::new(pool.clone(), io.clone(), TestOutputFactory::default()); let output_factory_handle = tokio::spawn(output_factory_task.run(stop_receiver.clone())); - let batch_executor = MainBatchExecutorFactory::new(false, false); + let batch_executor = MainBatchExecutorFactory::<()>::new(false); let vm_runner = VmRunner::new( pool, io.clone(), diff --git a/core/tests/loadnext/src/sdk/operations/deploy_contract.rs b/core/tests/loadnext/src/sdk/operations/deploy_contract.rs index 161d156a53e..67e877ae8ef 100644 --- a/core/tests/loadnext/src/sdk/operations/deploy_contract.rs +++ b/core/tests/loadnext/src/sdk/operations/deploy_contract.rs @@ -145,7 +145,7 @@ where let mut factory_deps = 
self.factory_deps.clone().unwrap_or_default(); factory_deps.push(bytecode); let l2_tx = L2Tx::new( - CONTRACT_DEPLOYER_ADDRESS, + Some(CONTRACT_DEPLOYER_ADDRESS), Execute::encode_deploy_params_create(Default::default(), main_contract_hash, calldata), Nonce(0), Default::default(), diff --git a/core/tests/loadnext/src/sdk/operations/execute_contract.rs b/core/tests/loadnext/src/sdk/operations/execute_contract.rs index d5fe57c7b79..627e889ed01 100644 --- a/core/tests/loadnext/src/sdk/operations/execute_contract.rs +++ b/core/tests/loadnext/src/sdk/operations/execute_contract.rs @@ -144,7 +144,7 @@ where .unwrap_or_default(); let execute = L2Tx::new( - contract_address, + Some(contract_address), calldata, Nonce(0), Default::default(), diff --git a/core/tests/loadnext/src/sdk/operations/transfer.rs b/core/tests/loadnext/src/sdk/operations/transfer.rs index 94ee3aeb608..651fabeb788 100644 --- a/core/tests/loadnext/src/sdk/operations/transfer.rs +++ b/core/tests/loadnext/src/sdk/operations/transfer.rs @@ -153,7 +153,7 @@ where let tx = if token.is_zero() || token == L2_BASE_TOKEN_ADDRESS { // ETH estimate Execute { - contract_address: to, + contract_address: Some(to), calldata: Default::default(), factory_deps: vec![], value: amount, @@ -161,7 +161,7 @@ where } else { // ERC-20 estimate Execute { - contract_address: token, + contract_address: Some(token), calldata: create_transfer_calldata(to, amount), factory_deps: vec![], value: Default::default(), diff --git a/core/tests/loadnext/src/sdk/signer.rs b/core/tests/loadnext/src/sdk/signer.rs index 0f4b1cf2971..6f98f674ed9 100644 --- a/core/tests/loadnext/src/sdk/signer.rs +++ b/core/tests/loadnext/src/sdk/signer.rs @@ -51,7 +51,7 @@ impl Signer { // Sign Ether transfer if token.is_zero() || token == L2_BASE_TOKEN_ADDRESS { let mut transfer = L2Tx::new( - to, + Some(to), Default::default(), nonce, fee, @@ -73,7 +73,7 @@ impl Signer { // Sign ERC-20 transfer let data = create_transfer_calldata(to, amount); let mut transfer = L2Tx::new( - token, + Some(token), data, nonce, fee, @@ -122,7 +122,7 @@ impl Signer { paymaster_params: PaymasterParams, ) -> Result { let mut execute_contract = L2Tx::new( - contract, + Some(contract), calldata, nonce, fee, diff --git a/core/tests/recovery-test/src/index.ts b/core/tests/recovery-test/src/index.ts index 6599e7c5d29..462404af606 100644 --- a/core/tests/recovery-test/src/index.ts +++ b/core/tests/recovery-test/src/index.ts @@ -271,7 +271,7 @@ export class FundedWallet { await depositTx.waitFinalize(); } - /** Generates at least one L1 batch by transfering funds to itself. */ + /** Generates at least one L1 batch by transferring funds to itself. */ async generateL1Batch(): Promise { const transactionResponse = await this.wallet.transfer({ to: this.wallet.address, @@ -279,15 +279,15 @@ export class FundedWallet { token: zksync.utils.ETH_ADDRESS }); console.log('Generated a transaction from funded wallet', transactionResponse); - const receipt = await transactionResponse.wait(); - console.log('Got finalized transaction receipt', receipt); - // Wait until an L1 batch with the transaction is sealed. 
- const pastL1BatchNumber = await this.wallet.provider.getL1BatchNumber(); - let newL1BatchNumber: number; - while ((newL1BatchNumber = await this.wallet.provider.getL1BatchNumber()) <= pastL1BatchNumber) { + let receipt: zksync.types.TransactionReceipt; + while (!(receipt = await transactionResponse.wait()).l1BatchNumber) { + console.log('Transaction is not included in L1 batch; sleeping'); await sleep(1000); } + + console.log('Got finalized transaction receipt', receipt); + const newL1BatchNumber = receipt.l1BatchNumber; console.log(`Sealed L1 batch #${newL1BatchNumber}`); return newL1BatchNumber; } diff --git a/core/tests/test_account/src/lib.rs b/core/tests/test_account/src/lib.rs index 28e3d609e63..999ea6eb6e0 100644 --- a/core/tests/test_account/src/lib.rs +++ b/core/tests/test_account/src/lib.rs @@ -2,7 +2,7 @@ use ethabi::Token; use zksync_contracts::{ deployer_contract, load_contract, test_contracts::LoadnextContractExecutionParams, }; -use zksync_eth_signer::{EthereumSigner, PrivateKeySigner, TransactionParameters}; +use zksync_eth_signer::{PrivateKeySigner, TransactionParameters}; use zksync_system_constants::{ CONTRACT_DEPLOYER_ADDRESS, DEFAULT_L2_TX_GAS_PER_PUBDATA_BYTE, REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE, @@ -129,7 +129,7 @@ impl Account { .expect("failed to encode parameters"); let execute = Execute { - contract_address: CONTRACT_DEPLOYER_ADDRESS, + contract_address: Some(CONTRACT_DEPLOYER_ADDRESS), calldata, factory_deps, value: U256::zero(), @@ -158,7 +158,7 @@ impl Account { tx: abi::L2CanonicalTransaction { tx_type: PRIORITY_OPERATION_L2_TX_TYPE.into(), from: address_to_u256(&self.address), - to: address_to_u256(&execute.contract_address), + to: address_to_u256(&execute.contract_address.unwrap_or_default()), gas_limit, gas_per_pubdata_byte_limit: REQUIRED_L1_TO_L2_GAS_PER_PUBDATA_BYTE.into(), max_fee_per_gas, @@ -216,7 +216,7 @@ impl Account { .expect("failed to encode parameters"); let execute = Execute { - contract_address: address, + contract_address: Some(address), calldata, value: value.unwrap_or_default(), factory_deps: vec![], @@ -235,7 +235,7 @@ impl Account { ) -> Transaction { let calldata = params.to_bytes(); let execute = Execute { - contract_address: address, + contract_address: Some(address), calldata, value: U256::zero(), factory_deps: vec![], @@ -255,8 +255,8 @@ impl Account { PrivateKeySigner::new(self.private_key.clone()) } - pub async fn sign_legacy_tx(&self, tx: TransactionParameters) -> Vec { + pub fn sign_legacy_tx(&self, tx: TransactionParameters) -> Vec { let pk_signer = self.get_pk_signer(); - pk_signer.sign_transaction(tx).await.unwrap() + pk_signer.sign_transaction(tx) } } diff --git a/core/tests/ts-integration/jest.config.json b/core/tests/ts-integration/jest.config.json index 8fa5ea1eb72..1756de1bb02 100644 --- a/core/tests/ts-integration/jest.config.json +++ b/core/tests/ts-integration/jest.config.json @@ -14,6 +14,7 @@ "testTimeout": 605000, "globalSetup": "/src/jest-setup/global-setup.ts", "globalTeardown": "/src/jest-setup/global-teardown.ts", + "testEnvironment": "/src/jest-setup/env.ts", "setupFilesAfterEnv": [ "/src/jest-setup/add-matchers.ts" ], diff --git a/core/tests/ts-integration/package.json b/core/tests/ts-integration/package.json index 9d53420edaa..8e5c0cf7470 100644 --- a/core/tests/ts-integration/package.json +++ b/core/tests/ts-integration/package.json @@ -25,6 +25,7 @@ "ethers": "^6.7.1", "hardhat": "=2.22.2", "jest": "^29.0.3", + "jest-environment-node": "^29.0.3", "jest-matcher-utils": "^29.0.3", "node-fetch": 
"^2.6.1", "ts-jest": "^29.0.1", diff --git a/core/tests/ts-integration/src/context-owner.ts b/core/tests/ts-integration/src/context-owner.ts index 71c8227af2c..e77cdf1a053 100644 --- a/core/tests/ts-integration/src/context-owner.ts +++ b/core/tests/ts-integration/src/context-owner.ts @@ -7,6 +7,7 @@ import { lookupPrerequisites } from './prerequisites'; import { Reporter } from './reporter'; import { scaledGasPrice } from './helpers'; import { RetryProvider } from './retry-provider'; +import { killPidWithAllChilds } from 'utils/build/kill'; // These amounts of ETH would be provided to each test suite through its "main" account. // It is assumed to be enough to run a set of "normal" transactions. @@ -624,6 +625,11 @@ export class TestContextOwner { // Then propagate the exception. throw error; } + if (this.env.l2NodePid !== undefined) { + this.reporter.startAction(`Terminating L2 node process`); + await killPidWithAllChilds(this.env.l2NodePid, 9); + this.reporter.finishAction(); + } } /** @@ -648,6 +654,10 @@ export class TestContextOwner { // into account. If the same wallet would be reused (e.g. on stage), it'll just have to // deposit less next time. } + + setL2NodePid(newPid: number) { + this.env.l2NodePid = newPid; + } } /** diff --git a/core/tests/ts-integration/src/env.ts b/core/tests/ts-integration/src/env.ts index ffef0fce5ce..1de917c2362 100644 --- a/core/tests/ts-integration/src/env.ts +++ b/core/tests/ts-integration/src/env.ts @@ -6,7 +6,17 @@ import { DataAvailabityMode, NodeMode, TestEnvironment } from './types'; import { Reporter } from './reporter'; import * as yaml from 'yaml'; import { L2_BASE_TOKEN_ADDRESS } from 'zksync-ethers/build/utils'; -import { loadConfig, loadEcosystem, shouldLoadConfigFromFile } from 'utils/build/file-configs'; +import { FileConfig, loadConfig, loadEcosystem, shouldLoadConfigFromFile } from 'utils/build/file-configs'; +import { NodeSpawner } from './utils'; +import { logsTestPath } from 'utils/build/logs'; +import * as nodefs from 'node:fs/promises'; +import { exec } from 'utils'; + +const enableConsensus = process.env.ENABLE_CONSENSUS === 'true'; + +async function logsPath(chain: string, name: string): Promise { + return await logsTestPath(chain, 'logs/server/', name); +} /** * Attempts to connect to server. @@ -60,8 +70,10 @@ function getMainWalletPk(pathToHome: string): string { /* Loads the environment for file based configs. 
*/ -async function loadTestEnvironmentFromFile(chain: string): Promise { +async function loadTestEnvironmentFromFile(fileConfig: FileConfig): Promise { + let chain = fileConfig.chain!; const pathToHome = path.join(__dirname, '../../../..'); + let spawnNode = process.env.SPAWN_NODE; let nodeMode; if (process.env.EXTERNAL_NODE == 'true') { nodeMode = NodeMode.External; @@ -75,18 +87,42 @@ async function loadTestEnvironmentFromFile(chain: string): Promise { - const { loadFromFile, chain } = shouldLoadConfigFromFile(); + const fileConfig = shouldLoadConfigFromFile(); - if (loadFromFile) { - return await loadTestEnvironmentFromFile(chain); + if (fileConfig.loadFromFile) { + return await loadTestEnvironmentFromFile(fileConfig); } return await loadTestEnvironmentFromEnv(); } @@ -257,6 +294,7 @@ export async function loadTestEnvironmentFromEnv(): Promise { network, mainWalletPK, l2NodeUrl, + l2NodePid: undefined, l1NodeUrl, wsL2NodeUrl, healthcheckPort, diff --git a/core/tests/ts-integration/src/jest-setup/env.ts b/core/tests/ts-integration/src/jest-setup/env.ts new file mode 100644 index 00000000000..77bbfc92911 --- /dev/null +++ b/core/tests/ts-integration/src/jest-setup/env.ts @@ -0,0 +1,14 @@ +import NodeEnvironment from 'jest-environment-node'; +import type { EnvironmentContext, JestEnvironmentConfig } from '@jest/environment'; + +export default class IntegrationTestEnvironment extends NodeEnvironment { + constructor(config: JestEnvironmentConfig, context: EnvironmentContext) { + super(config, context); + } + + override async setup() { + await super.setup(); + // Provide access to raw console in order to produce less cluttered debug messages + this.global.rawWriteToConsole = console.log; + } +} diff --git a/core/tests/ts-integration/src/jest-setup/global-setup.ts b/core/tests/ts-integration/src/jest-setup/global-setup.ts index d84d70fe69d..27a47a1e933 100644 --- a/core/tests/ts-integration/src/jest-setup/global-setup.ts +++ b/core/tests/ts-integration/src/jest-setup/global-setup.ts @@ -11,16 +11,14 @@ declare global { */ async function performSetup(_globalConfig: any, _projectConfig: any) { // Perform the test initialization. - // This is an expensive operation that preceeds running any tests, as we need + // This is an expensive operation that precedes running any tests, as we need // to deposit & distribute funds, deploy some contracts, and perform basic server checks. // Jest writes an initial message without a newline, so we have to do it manually. console.log(''); + globalThis.rawWriteToConsole = console.log; - // Before starting any actual logic, we need to ensure that the server is running (it may not - // be the case, for example, right after deployment on stage). 
-
-    const testEnvironment = await loadTestEnvironment();
+    let testEnvironment = await loadTestEnvironment();
     const testContextOwner = new TestContextOwner(testEnvironment);
     const testContext = await testContextOwner.setupContext();
diff --git a/core/tests/ts-integration/src/l1-provider.ts b/core/tests/ts-integration/src/l1-provider.ts
new file mode 100644
index 00000000000..39b0397cd06
--- /dev/null
+++ b/core/tests/ts-integration/src/l1-provider.ts
@@ -0,0 +1,82 @@
+import {
+    ethers,
+    JsonRpcProvider,
+    Network,
+    TransactionRequest,
+    TransactionResponse,
+    TransactionResponseParams
+} from 'ethers';
+import { Reporter } from './reporter';
+import { AugmentedTransactionResponse } from './transaction-response';
+
+export class L1Provider extends JsonRpcProvider {
+    readonly reporter: Reporter;
+
+    constructor(url: string, reporter?: Reporter) {
+        super(url, undefined, { batchMaxCount: 1 });
+        this.reporter = reporter ?? new Reporter();
+    }
+
+    override _wrapTransactionResponse(tx: TransactionResponseParams, network: Network): L1TransactionResponse {
+        const base = super._wrapTransactionResponse(tx, network);
+        return new L1TransactionResponse(base, this.reporter);
+    }
+}
+
+class L1TransactionResponse extends ethers.TransactionResponse implements AugmentedTransactionResponse {
+    public readonly kind = 'L1';
+    private isWaitingReported: boolean = false;
+    private isReceiptReported: boolean = false;
+
+    constructor(base: ethers.TransactionResponse, public readonly reporter: Reporter) {
+        super(base, base.provider);
+    }
+
+    override async wait(confirmations?: number, timeout?: number) {
+        if (!this.isWaitingReported) {
+            this.reporter.debug(
+                `Started waiting for L1 transaction ${this.hash} (from=${this.from}, nonce=${this.nonce})`
+            );
+            this.isWaitingReported = true;
+        }
+
+        const receipt = await super.wait(confirmations, timeout);
+        if (receipt !== null && !this.isReceiptReported) {
+            this.reporter.debug(
+                `Obtained receipt for L1 transaction ${this.hash}: blockNumber=${receipt.blockNumber}, status=${receipt.status}`
+            );
+            this.isReceiptReported = true;
+        }
+        return receipt;
+    }
+
+    override replaceableTransaction(startBlock: number): L1TransactionResponse {
+        const base = super.replaceableTransaction(startBlock);
+        return new L1TransactionResponse(base, this.reporter);
+    }
+}
+
+/** Wallet that retries `sendTransaction` requests on "nonce expired" errors, provided that it's possible (i.e., no nonce is set in the request). */
+export class RetryableL1Wallet extends ethers.Wallet {
+    constructor(key: string, provider: L1Provider) {
+        super(key, provider);
+    }
+
+    override async sendTransaction(tx: TransactionRequest): Promise<TransactionResponse> {
+        const reporter = (<L1Provider>this.provider!).reporter;
+        while (true) {
+            try {
+                return await super.sendTransaction(tx);
+            } catch (err: any) {
+                // For unknown reason, `reth` sometimes returns outdated transaction count under load, leading to transactions getting rejected.
+                // This is a workaround for this issue.
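+                // Only requests without an explicit nonce are retried (checked below); others surface the error.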
+ reporter.debug('L1 transaction request failed', tx, err); + if (err.code === 'NONCE_EXPIRED' && (tx.nonce === null || tx.nonce === undefined)) { + reporter.debug('Retrying L1 transaction request', tx); + } else { + throw err; + } + } + } + } +} diff --git a/core/tests/ts-integration/src/matchers/transaction.ts b/core/tests/ts-integration/src/matchers/transaction.ts index 89e90b6d5f1..ac5bf8e77ea 100644 --- a/core/tests/ts-integration/src/matchers/transaction.ts +++ b/core/tests/ts-integration/src/matchers/transaction.ts @@ -1,7 +1,8 @@ import { TestMessage } from './matcher-helpers'; import { MatcherModifier } from '../modifiers'; import * as zksync from 'zksync-ethers'; -import { AugmentedTransactionResponse } from '../retry-provider'; +import { AugmentedTransactionResponse } from '../transaction-response'; +import { ethers } from 'ethers'; // This file contains implementation of matchers for ZKsync/ethereum transaction. // For actual doc-comments, see `typings/jest.d.ts` file. @@ -207,7 +208,7 @@ function fail(message: string) { * * @returns If check has failed, returns a Jest error object. Otherwise, returns `undefined`. */ -function checkReceiptFields(request: zksync.types.TransactionResponse, receipt: zksync.types.TransactionReceipt) { +function checkReceiptFields(request: ethers.TransactionResponseParams, receipt: zksync.types.TransactionReceipt) { const errorMessageBuilder = new TestMessage() .matcherHint('.checkReceiptFields') .line('Transaction receipt is not properly formatted. Transaction request:') diff --git a/core/tests/ts-integration/src/reporter.ts b/core/tests/ts-integration/src/reporter.ts index 903ff3101ef..e6a11f0725b 100644 --- a/core/tests/ts-integration/src/reporter.ts +++ b/core/tests/ts-integration/src/reporter.ts @@ -102,7 +102,7 @@ export class Reporter { // Timestamps only make sense to include in tests. const timestampString = testName === undefined ? '' : timestamp(`${new Date().toISOString()} `); const testString = testName ? info(` [${testName}]`) : ''; - console.log(this.indent(`${timestampString}DEBUG${testString}: ${message}`), ...args); + rawWriteToConsole(this.indent(`${timestampString}DEBUG${testString}: ${message}`), ...args); } } diff --git a/core/tests/ts-integration/src/retry-provider.ts b/core/tests/ts-integration/src/retry-provider.ts index 1763c0e4edf..51d88357c6c 100644 --- a/core/tests/ts-integration/src/retry-provider.ts +++ b/core/tests/ts-integration/src/retry-provider.ts @@ -1,12 +1,15 @@ import * as zksync from 'zksync-ethers'; import * as ethers from 'ethers'; import { Reporter } from './reporter'; +import { AugmentedTransactionResponse } from './transaction-response'; +import { L1Provider, RetryableL1Wallet } from './l1-provider'; /** * RetryProvider retries every RPC request if it detects a timeout-related issue on the server side. 
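 * (Transaction responses are wrapped into `L2TransactionResponse` below so that waiting and receipts are debug-logged only once.)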
 */
 export class RetryProvider extends zksync.Provider {
     private readonly reporter: Reporter;
+    private readonly knownTransactionHashes: Set<string> = new Set();
 
     constructor(_url?: string | { url: string; timeout: number }, network?: ethers.Networkish, reporter?: Reporter) {
         let url;
@@ -55,15 +58,63 @@ export class RetryProvider extends zksync.Provider {
         }
     }
 
+    override _wrapTransactionResponse(txResponse: any): L2TransactionResponse {
+        const base = super._wrapTransactionResponse(txResponse);
+        this.knownTransactionHashes.add(base.hash);
+        return new L2TransactionResponse(base, this.reporter);
+    }
+
     override _wrapTransactionReceipt(receipt: any): zksync.types.TransactionReceipt {
         const wrapped = super._wrapTransactionReceipt(receipt);
-        this.reporter.debug(
-            `Obtained receipt for transaction ${receipt.transactionHash}: blockNumber=${receipt.blockNumber}, status=${receipt.status}`
-        );
+        if (!this.knownTransactionHashes.has(receipt.transactionHash)) {
+            this.knownTransactionHashes.add(receipt.transactionHash);
+            this.reporter.debug(
+                `Obtained receipt for L2 transaction ${receipt.transactionHash}: blockNumber=${receipt.blockNumber}, status=${receipt.status}`
+            );
+        }
         return wrapped;
     }
 }
 
-export interface AugmentedTransactionResponse extends zksync.types.TransactionResponse {
-    readonly reporter?: Reporter;
+class L2TransactionResponse extends zksync.types.TransactionResponse implements AugmentedTransactionResponse {
+    public readonly kind = 'L2';
+    private isWaitingReported: boolean = false;
+    private isReceiptReported: boolean = false;
+
+    constructor(base: zksync.types.TransactionResponse, public readonly reporter: Reporter) {
+        super(base, base.provider);
+    }
+
+    override async wait(confirmations?: number) {
+        if (!this.isWaitingReported) {
+            this.reporter.debug(
+                `Started waiting for L2 transaction ${this.hash} (from=${this.from}, nonce=${this.nonce})`
+            );
+            this.isWaitingReported = true;
+        }
+        const receipt = await super.wait(confirmations);
+        if (receipt !== null && !this.isReceiptReported) {
+            this.reporter.debug(
+                `Obtained receipt for L2 transaction ${this.hash}: blockNumber=${receipt.blockNumber}, status=${receipt.status}`
+            );
+            this.isReceiptReported = true;
+        }
+        return receipt;
+    }
+
+    override replaceableTransaction(startBlock: number): L2TransactionResponse {
+        const base = super.replaceableTransaction(startBlock);
+        return new L2TransactionResponse(base, this.reporter);
+    }
+}
+
+/** Wallet that retries expired nonce errors for L1 transactions.
+ */
+export class RetryableWallet extends zksync.Wallet {
+    constructor(privateKey: string, l2Provider: RetryProvider, l1Provider: L1Provider) {
+        super(privateKey, l2Provider, l1Provider);
+    }
+
+    override ethWallet(): RetryableL1Wallet {
+        return new RetryableL1Wallet(this.privateKey, this._providerL1() as L1Provider);
+    }
 }
diff --git a/core/tests/ts-integration/src/test-master.ts b/core/tests/ts-integration/src/test-master.ts
index 09fddd1589c..297116b0b51 100644
--- a/core/tests/ts-integration/src/test-master.ts
+++ b/core/tests/ts-integration/src/test-master.ts
@@ -2,9 +2,10 @@ import * as zksync from 'zksync-ethers';
 import * as ethers from 'ethers';
 import { TestEnvironment, TestContext } from './types';
 import { claimEtherBack } from './context-owner';
-import { RetryProvider } from './retry-provider';
+import { RetryableWallet, RetryProvider } from './retry-provider';
 import { Reporter } from './reporter';
 import { bigIntReviver } from './helpers';
+import { L1Provider } from './l1-provider';
 
 /**
  * Test master is a singleton class (per suite) that is capable of providing wallets to the suite.
@@ -19,8 +20,8 @@ export class TestMaster {
     private readonly env: TestEnvironment;
     readonly reporter: Reporter;
 
-    private readonly l1Provider: ethers.JsonRpcProvider;
-    private readonly l2Provider: zksync.Provider;
+    private readonly l1Provider: L1Provider;
+    private readonly l2Provider: RetryProvider;
 
     private readonly mainWallet: zksync.Wallet;
     private readonly subAccounts: zksync.Wallet[] = [];
@@ -52,7 +53,7 @@ export class TestMaster {
         if (!suiteWalletPK) {
             throw new Error(`Wallet for ${suiteName} suite was not provided`);
         }
-        this.l1Provider = new ethers.JsonRpcProvider(this.env.l1NodeUrl);
+        this.l1Provider = new L1Provider(this.env.l1NodeUrl, this.reporter);
         this.l2Provider = new RetryProvider(
             {
                 url: this.env.l2NodeUrl,
@@ -71,7 +72,7 @@ export class TestMaster {
             this.l2Provider.pollingInterval = 5000;
         }
 
-        this.mainWallet = new zksync.Wallet(suiteWalletPK, this.l2Provider, this.l1Provider);
+        this.mainWallet = new RetryableWallet(suiteWalletPK, this.l2Provider, this.l1Provider);
     }
 
     /**
@@ -112,7 +113,7 @@ export class TestMaster {
      */
     newEmptyAccount(): zksync.Wallet {
         const randomPK = ethers.Wallet.createRandom().privateKey;
-        const newWallet = new zksync.Wallet(randomPK, this.l2Provider, this.l1Provider);
+        const newWallet = new RetryableWallet(randomPK, this.l2Provider, this.l1Provider);
         this.subAccounts.push(newWallet);
         return newWallet;
     }
diff --git a/core/tests/ts-integration/src/transaction-response.ts b/core/tests/ts-integration/src/transaction-response.ts
new file mode 100644
index 00000000000..a104b0107ed
--- /dev/null
+++ b/core/tests/ts-integration/src/transaction-response.ts
@@ -0,0 +1,9 @@
+import { ethers } from 'ethers';
+import { Reporter } from './reporter';
+
+export interface AugmentedTransactionResponse extends ethers.TransactionResponseParams {
+    readonly kind: 'L1' | 'L2';
+    readonly reporter?: Reporter;
+
+    wait(confirmations?: number, timeout?: number): Promise<ethers.TransactionReceipt | null>;
+}
diff --git a/core/tests/ts-integration/src/types.ts b/core/tests/ts-integration/src/types.ts
index 4975b7b612c..c513480c1b4 100644
--- a/core/tests/ts-integration/src/types.ts
+++ b/core/tests/ts-integration/src/types.ts
@@ -55,6 +55,10 @@ export interface TestEnvironment {
      * Mode of the l2 node
      */
     nodeMode: NodeMode;
+    /*
+     * L2 node PID
+     */
+    l2NodePid: number | undefined;
     /**
      * Plaintext name of the L1 network name (i.e. `localhost` or `goerli`).
     */
diff --git a/core/tests/ts-integration/src/utils.ts b/core/tests/ts-integration/src/utils.ts
new file mode 100644
index 00000000000..128d0be57d0
--- /dev/null
+++ b/core/tests/ts-integration/src/utils.ts
@@ -0,0 +1,197 @@
+import { spawn as _spawn, ChildProcessWithoutNullStreams, type ProcessEnvOptions } from 'child_process';
+import { assert } from 'chai';
+import { FileConfig } from 'utils/build/file-configs';
+import { killPidWithAllChilds } from 'utils/build/kill';
+import * as utils from 'utils';
+import fs from 'node:fs/promises';
+import * as zksync from 'zksync-ethers';
+import {
+    deleteInternalEnforcedL1GasPrice,
+    deleteInternalEnforcedPubdataPrice,
+    setInternalEnforcedL1GasPrice,
+    setInternalEnforcedPubdataPrice,
+    setTransactionSlots
+} from '../tests/utils';
+
+// executes a command in background and returns a child process handle
+// by default pipes data to parent's stdio but this can be overridden
+export function runServerInBackground({
+    components,
+    stdio,
+    cwd,
+    env,
+    useZkInception,
+    chain
+}: {
+    components?: string[];
+    stdio: any;
+    cwd?: ProcessEnvOptions['cwd'];
+    env?: ProcessEnvOptions['env'];
+    useZkInception?: boolean;
+    newL1GasPrice?: string;
+    newPubdataPrice?: string;
+    chain?: string;
+}): ChildProcessWithoutNullStreams {
+    let command = '';
+    if (useZkInception) {
+        command = 'zk_inception server';
+        if (chain) {
+            command += ` --chain ${chain}`;
+        }
+    } else {
+        command = 'zk server';
+    }
+    if (components && components.length > 0) {
+        command += ` --components=${components.join(',')}`;
+    }
+    command = command.replace(/\n/g, ' ');
+    console.log(`Run command ${command}`);
+    return _spawn(command, { stdio: stdio, shell: true, detached: true, cwd, env });
+}
+
+export interface MainNodeSpawnOptions {
+    enableConsensus: boolean;
+    ethClientWeb3Url: string;
+    apiWeb3JsonRpcHttpUrl: string;
+    baseTokenAddress: string;
+}
+
+export enum NodeType {
+    MAIN = 'zksync_server',
+    EXT = 'zksync_external_node'
+}
+
+export class Node<TYPE extends NodeType> {
+    constructor(public proc: ChildProcessWithoutNullStreams, public l2NodeUrl: string, private readonly type: TYPE) {}
+
+    public async terminate() {
+        try {
+            await killPidWithAllChilds(this.proc.pid!, 9);
+        } catch (err) {
+            console.log(`ignored error: ${err}`);
+        }
+    }
+
+    /**
+     * Terminates all main node processes running.
+     *
+     * WARNING: This is not safe to use when running nodes on multiple chains.
+     */
+    public static async killAll(type: NodeType) {
+        try {
+            await utils.exec(`killall -KILL ${type}`);
+        } catch (err) {
+            console.log(`ignored error: ${err}`);
+        }
+    }
+
+    public async killAndWaitForShutdown() {
+        await this.terminate();
+        // Wait until it's really stopped.
+        let iter = 0;
+        while (iter < 30) {
+            try {
+                let provider = new zksync.Provider(this.l2NodeUrl);
+                await provider.getBlockNumber();
+                await utils.sleep(2);
+                iter += 1;
+            } catch (_) {
+                // When exception happens, we assume that server died.
+                return;
+            }
+        }
+        // It's going to panic anyway, since the server is a singleton entity, so better to exit early.
+        throw new Error(`${this.type} didn't stop after a kill request`);
+    }
+}
+
+export class NodeSpawner {
+    public constructor(
+        private readonly pathToHome: string,
+        private readonly logs: fs.FileHandle,
+        private readonly fileConfig: FileConfig,
+        private readonly options: MainNodeSpawnOptions,
+        private env?: ProcessEnvOptions['env']
+    ) {}
+
+    public async spawnMainNode(newL1GasPrice?: string, newPubdataPrice?: string): Promise<Node<NodeType.MAIN>> {
+        const env = this.env ??
process.env; + const { fileConfig, pathToHome, options, logs } = this; + + const testMode = newPubdataPrice || newL1GasPrice; + + console.log('New L1 Gas Price: ', newL1GasPrice); + console.log('New Pubdata Price: ', newPubdataPrice); + + if (fileConfig.loadFromFile) { + setTransactionSlots(pathToHome, fileConfig, testMode ? 1 : 8192); + + if (newL1GasPrice) { + setInternalEnforcedL1GasPrice(pathToHome, fileConfig, parseFloat(newL1GasPrice)); + } else { + deleteInternalEnforcedL1GasPrice(pathToHome, fileConfig); + } + + if (newPubdataPrice) { + setInternalEnforcedPubdataPrice(pathToHome, fileConfig, parseFloat(newPubdataPrice)); + } else { + deleteInternalEnforcedPubdataPrice(pathToHome, fileConfig); + } + } else { + env['DATABASE_MERKLE_TREE_MODE'] = 'full'; + + if (newPubdataPrice) { + env['ETH_SENDER_GAS_ADJUSTER_INTERNAL_ENFORCED_PUBDATA_PRICE'] = newPubdataPrice; + } + + if (newL1GasPrice) { + // We need to ensure that each transaction gets into its own batch for more fair comparison. + env['ETH_SENDER_GAS_ADJUSTER_INTERNAL_ENFORCED_L1_GAS_PRICE'] = newL1GasPrice; + } + + if (testMode) { + // We need to ensure that each transaction gets into its own batch for more fair comparison. + env['CHAIN_STATE_KEEPER_TRANSACTION_SLOTS'] = '1'; + } + } + + let components = 'api,tree,eth,state_keeper,da_dispatcher,vm_runner_protective_reads'; + if (options.enableConsensus) { + components += ',consensus'; + } + if (options.baseTokenAddress != zksync.utils.LEGACY_ETH_ADDRESS) { + components += ',base_token_ratio_persister'; + } + let proc = runServerInBackground({ + components: [components], + stdio: ['ignore', logs, logs], + cwd: pathToHome, + env: env, + useZkInception: fileConfig.loadFromFile, + chain: fileConfig.chain + }); + + // Wait until the main node starts responding. 
+ await waitForNodeToStart(proc, options.apiWeb3JsonRpcHttpUrl); + return new Node(proc, options.apiWeb3JsonRpcHttpUrl, NodeType.MAIN); + } +} + +async function waitForNodeToStart(proc: ChildProcessWithoutNullStreams, l2Url: string) { + while (true) { + try { + const l2Provider = new zksync.Provider(l2Url); + const blockNumber = await l2Provider.getBlockNumber(); + if (blockNumber != 0) { + console.log(`Initialized node API on ${l2Url}; latest block: ${blockNumber}`); + break; + } + } catch (err) { + if (proc.exitCode != null) { + assert.fail(`server failed to start, exitCode = ${proc.exitCode}`); + } + console.log(`Node waiting for API on ${l2Url}`); + await utils.sleep(1); + } + } +} diff --git a/core/tests/ts-integration/tests/api/debug.test.ts b/core/tests/ts-integration/tests/api/debug.test.ts index 054aa57cf64..2af18c8438b 100644 --- a/core/tests/ts-integration/tests/api/debug.test.ts +++ b/core/tests/ts-integration/tests/api/debug.test.ts @@ -50,7 +50,7 @@ describe('Debug methods', () => { output: '0x', revertReason: 'Error function_selector = 0x, data = 0x', to: BOOTLOADER_FORMAL_ADDRESS, - type: 'Call', + type: 'call', value: expect.any(String), calls: expect.any(Array) }; @@ -75,7 +75,7 @@ describe('Debug methods', () => { input: expect.any(String), output: '0x', to: BOOTLOADER_FORMAL_ADDRESS, - type: 'Call', + type: 'call', value: expect.any(String), calls: expect.any(Array) // We intentionally skip `error` and `revertReason` fields: the block may contain failing txs @@ -99,7 +99,7 @@ describe('Debug methods', () => { output: '0x', revertReason: null, to: BOOTLOADER_FORMAL_ADDRESS, - type: 'Call', + type: 'call', value: '0x0', calls: expect.any(Array) }; diff --git a/core/tests/ts-integration/tests/api/web3.test.ts b/core/tests/ts-integration/tests/api/web3.test.ts index 79789e74447..6f1b6c3aa6b 100644 --- a/core/tests/ts-integration/tests/api/web3.test.ts +++ b/core/tests/ts-integration/tests/api/web3.test.ts @@ -843,6 +843,10 @@ describe('web3 API compatibility tests', () => { expect(parseInt(response.oldestBlock)).toEqual(receipt.blockNumber - 1); expect(response.baseFeePerGas).toHaveLength(3); + expect(response.baseFeePerBlobGas).toHaveLength(3); + expect(response.gasUsedRatio).toHaveLength(2); + expect(response.blobGasUsedRatio).toHaveLength(2); + expect(response.l2PubdataPrice).toHaveLength(2); for (let i = 0; i < 2; i += 1) { const expectedBaseFee = (await alice.provider.getBlock(receipt.blockNumber - 1 + i)).baseFeePerGas; expect(BigInt(response.baseFeePerGas[i])).toEqual(expectedBaseFee); diff --git a/core/tests/ts-integration/tests/fees.test.ts b/core/tests/ts-integration/tests/fees.test.ts index 8d5b7a23a94..c41f5943efb 100644 --- a/core/tests/ts-integration/tests/fees.test.ts +++ b/core/tests/ts-integration/tests/fees.test.ts @@ -9,23 +9,27 @@ * sure that the test is maintained does not get broken. 
 *
 */
-import * as utils from 'utils';
-import * as fs from 'fs';
-import { TestMaster } from '../src';
+import fs from 'node:fs/promises';
+import { TestContextOwner, TestMaster } from '../src';
 import * as zksync from 'zksync-ethers';
 import * as ethers from 'ethers';
 import { DataAvailabityMode, Token } from '../src/types';
 import { SYSTEM_CONTEXT_ADDRESS, getTestContract } from '../src/helpers';
+import { loadConfig, shouldLoadConfigFromFile } from 'utils/build/file-configs';
+import { logsTestPath } from 'utils/build/logs';
+import path from 'path';
+import { NodeSpawner, Node, NodeType } from '../src/utils';
+import { deleteInternalEnforcedL1GasPrice, deleteInternalEnforcedPubdataPrice, setTransactionSlots } from './utils';
+import { killPidWithAllChilds } from 'utils/build/kill';
+
+declare global {
+    var __ZKSYNC_TEST_CONTEXT_OWNER__: TestContextOwner;
+}
 
 const UINT32_MAX = 2n ** 32n - 1n;
 const MAX_GAS_PER_PUBDATA = 50_000n;
 
-const logs = fs.createWriteStream('fees.log', { flags: 'a' });
-
-// Unless `RUN_FEE_TEST` is provided, skip the test suite
-const testFees = process.env.RUN_FEE_TEST ? describe : describe.skip;
-
 // The L1 gas prices under which the test will be conducted.
 // For CI we use only 2 gas prices to not slow it down too much.
 const L1_GAS_PRICES_TO_TEST = process.env.CI
@@ -47,22 +51,84 @@ const L1_GAS_PRICES_TO_TEST = process.env.CI
           2_000_000_000_000n // 2000 gwei
       ];
 
-testFees('Test fees', () => {
+// Unless `RUN_FEE_TEST` is provided, skip the test suite
+const testFees = process.env.RUN_FEE_TEST ? describe : describe.skip;
+
+testFees('Test fees', function () {
     let testMaster: TestMaster;
     let alice: zksync.Wallet;
     let tokenDetails: Token;
     let aliceErc20: zksync.Contract;
 
-    beforeAll(() => {
+    let mainLogs: fs.FileHandle;
+    let baseTokenAddress: string;
+    let ethClientWeb3Url: string;
+    let apiWeb3JsonRpcHttpUrl: string;
+    let mainNodeSpawner: NodeSpawner;
+    let mainNode: Node<NodeType.MAIN>;
+
+    const fileConfig = shouldLoadConfigFromFile();
+    const pathToHome = path.join(__dirname, '../../../..');
+    const enableConsensus = process.env.ENABLE_CONSENSUS == 'true';
+
+    async function logsPath(chain: string | undefined, name: string): Promise<string> {
+        chain = chain ?
chain : 'default'; + return await logsTestPath(chain, 'logs/server/fees', name); + } + + beforeAll(async () => { testMaster = TestMaster.getInstance(__filename); - alice = testMaster.mainAccount(); + let l2Node = testMaster.environment().l2NodePid; + if (l2Node !== undefined) { + await killPidWithAllChilds(l2Node, 9); + } + + if (!fileConfig.loadFromFile) { + ethClientWeb3Url = process.env.ETH_CLIENT_WEB3_URL!; + apiWeb3JsonRpcHttpUrl = process.env.API_WEB3_JSON_RPC_HTTP_URL!; + baseTokenAddress = process.env.CONTRACTS_BASE_TOKEN_ADDR!; + } else { + const generalConfig = loadConfig({ + pathToHome, + chain: fileConfig.chain, + config: 'general.yaml' + }); + const secretsConfig = loadConfig({ + pathToHome, + chain: fileConfig.chain, + config: 'secrets.yaml' + }); + const contractsConfig = loadConfig({ + pathToHome, + chain: fileConfig.chain, + config: 'contracts.yaml' + }); + + ethClientWeb3Url = secretsConfig.l1.l1_rpc_url; + apiWeb3JsonRpcHttpUrl = generalConfig.api.web3_json_rpc.http_url; + baseTokenAddress = contractsConfig.l1.base_token_addr; + } + + const pathToMainLogs = await logsPath(fileConfig.chain, 'server.log'); + mainLogs = await fs.open(pathToMainLogs, 'a'); + console.log(`Writing server logs to ${pathToMainLogs}`); + + mainNodeSpawner = new NodeSpawner(pathToHome, mainLogs, fileConfig, { + enableConsensus, + ethClientWeb3Url, + apiWeb3JsonRpcHttpUrl, + baseTokenAddress + }); + + mainNode = await mainNodeSpawner.spawnMainNode(); + alice = testMaster.mainAccount(); tokenDetails = testMaster.environment().erc20Token; aliceErc20 = new ethers.Contract(tokenDetails.l1Address, zksync.utils.IERC20, alice.ethWallet()); }); - test('Test fees', async () => { + test('Test all fees', async () => { const receiver = ethers.Wallet.createRandom().address; // Getting ETH price in gas. @@ -110,6 +176,10 @@ testFees('Test fees', () => { 'ERC20 transfer (to old):\n\n' ]; for (const gasPrice of L1_GAS_PRICES_TO_TEST) { + // For the sake of simplicity, we'll use the same pubdata price as the L1 gas price. + await mainNode.killAndWaitForShutdown(); + mainNode = await mainNodeSpawner.spawnMainNode(gasPrice.toString(), gasPrice.toString()); + reports = await appendResults( alice, [feeTestL1Receipt, feeTestL1Receipt, feeTestL1ReceiptERC20, feeTestL1ReceiptERC20], @@ -163,8 +233,8 @@ testFees('Test fees', () => { // that the gasLimit is indeed over u32::MAX, which is the most important tested property. 
         const requiredPubdataPrice = minimalL2GasPrice * 100_000n;
 
-        await setInternalL1GasPrice(
-            alice._providerL2(),
+        await mainNode.killAndWaitForShutdown();
+        mainNode = await mainNodeSpawner.spawnMainNode(
             requiredPubdataPrice.toString(),
             requiredPubdataPrice.toString()
         );
@@ -176,6 +246,7 @@ testFees('Test fees', () => {
         const tx = await l1Messenger.sendToL1(largeData, { type: 0 });
         expect(tx.gasLimit > UINT32_MAX).toBeTruthy();
         const receipt = await tx.wait();
+        console.log(`Gas used ${receipt.gasUsed}`);
         expect(receipt.gasUsed > UINT32_MAX).toBeTruthy();
 
         // Let's also check that the same transaction would work as eth_call
@@ -207,10 +278,16 @@ testFees('Test fees', () => {
     });
 
     afterAll(async () => {
+        await testMaster.deinitialize();
+        await mainNode.killAndWaitForShutdown();
         // Returning the pubdata price to the default one
-        await setInternalL1GasPrice(alice._providerL2(), undefined, undefined, true);
-        await testMaster.deinitialize();
+        // Restore defaults
+        setTransactionSlots(pathToHome, fileConfig, 8192);
+        deleteInternalEnforcedL1GasPrice(pathToHome, fileConfig);
+        deleteInternalEnforcedPubdataPrice(pathToHome, fileConfig);
+        mainNode = await mainNodeSpawner.spawnMainNode();
+        __ZKSYNC_TEST_CONTEXT_OWNER__.setL2NodePid(mainNode.proc.pid!);
     });
 });
 
@@ -221,9 +298,6 @@ async function appendResults(
     newL1GasPrice: bigint,
     reports: string[]
 ): Promise<string[]> {
-    // For the sake of simplicity, we'll use the same pubdata price as the L1 gas price.
-    await setInternalL1GasPrice(sender._providerL2(), newL1GasPrice.toString(), newL1GasPrice.toString());
-
     if (originalL1Receipts.length !== reports.length && originalL1Receipts.length !== transactionRequests.length) {
         throw new Error('The array of receipts and reports have different length');
     }
@@ -274,75 +348,3 @@ async function updateReport(
 
     return oldReport + gasReport;
 }
-
-async function killServerAndWaitForShutdown(provider: zksync.Provider) {
-    await utils.exec('pkill zksync_server');
-    // Wait until it's really stopped.
-    let iter = 0;
-    while (iter < 30) {
-        try {
-            await provider.getBlockNumber();
-            await utils.sleep(2);
-            iter += 1;
-        } catch (_) {
-            // When exception happens, we assume that server died.
-            return;
-        }
-    }
-    // It's going to panic anyway, since the server is a singleton entity, so better to exit early.
-    throw new Error("Server didn't stop after a kill request");
-}
-
-async function setInternalL1GasPrice(
-    provider: zksync.Provider,
-    newL1GasPrice?: string,
-    newPubdataPrice?: string,
-    disconnect?: boolean
-) {
-    // Make sure server isn't running.
-    try {
-        await killServerAndWaitForShutdown(provider);
-    } catch (_) {}
-
-    // Run server in background.
-    let command = 'zk server --components api,tree,eth,state_keeper,da_dispatcher,vm_runner_protective_reads';
-    command = `DATABASE_MERKLE_TREE_MODE=full ${command}`;
-
-    if (newPubdataPrice) {
-        command = `ETH_SENDER_GAS_ADJUSTER_INTERNAL_ENFORCED_PUBDATA_PRICE=${newPubdataPrice} ${command}`;
-    }
-
-    if (newL1GasPrice) {
-        // We need to ensure that each transaction gets into its own batch for more fair comparison.
-        command = `ETH_SENDER_GAS_ADJUSTER_INTERNAL_ENFORCED_L1_GAS_PRICE=${newL1GasPrice} ${command}`;
-    }
-
-    const testMode = newPubdataPrice || newL1GasPrice;
-    if (testMode) {
-        // We need to ensure that each transaction gets into its own batch for more fair comparison.
- command = `CHAIN_STATE_KEEPER_TRANSACTION_SLOTS=1 ${command}`; - } - - const zkSyncServer = utils.background({ command, stdio: [null, logs, logs] }); - - if (disconnect) { - zkSyncServer.unref(); - } - - // Server may need some time to recompile if it's a cold run, so wait for it. - let iter = 0; - let mainContract; - while (iter < 30 && !mainContract) { - try { - mainContract = await provider.getMainContractAddress(); - } catch (_) { - await utils.sleep(2); - iter += 1; - } - } - if (!mainContract) { - throw new Error('Server did not start'); - } - - await utils.sleep(10); -} diff --git a/core/tests/ts-integration/tests/utils.ts b/core/tests/ts-integration/tests/utils.ts new file mode 100644 index 00000000000..24df8a170c2 --- /dev/null +++ b/core/tests/ts-integration/tests/utils.ts @@ -0,0 +1,81 @@ +import * as fs from 'fs'; +import { getConfigPath } from 'utils/build/file-configs'; + +export function setInternalEnforcedPubdataPrice(pathToHome: string, fileConfig: any, value: number) { + setGasAdjusterProperty(pathToHome, fileConfig, 'internal_enforced_pubdata_price', value); +} + +export function setInternalEnforcedL1GasPrice(pathToHome: string, fileConfig: any, value: number) { + setGasAdjusterProperty(pathToHome, fileConfig, 'internal_enforced_l1_gas_price', value); +} + +export function deleteInternalEnforcedPubdataPrice(pathToHome: string, fileConfig: any) { + deleteProperty(pathToHome, fileConfig, 'internal_enforced_pubdata_price'); +} + +export function deleteInternalEnforcedL1GasPrice(pathToHome: string, fileConfig: any) { + deleteProperty(pathToHome, fileConfig, 'internal_enforced_l1_gas_price'); +} + +export function setTransactionSlots(pathToHome: string, fileConfig: any, value: number) { + setPropertyInGeneralConfig(pathToHome, fileConfig, 'transaction_slots', value); +} + +function setPropertyInGeneralConfig(pathToHome: string, fileConfig: any, property: string, value: number) { + const generalConfigPath = getConfigPath({ + pathToHome, + chain: fileConfig.chain, + configsFolder: 'configs', + config: 'general.yaml' + }); + const generalConfig = fs.readFileSync(generalConfigPath, 'utf8'); + + const regex = new RegExp(`${property}:\\s*\\d+(\\.\\d+)?`, 'g'); + const newGeneralConfig = generalConfig.replace(regex, `${property}: ${value}`); + + fs.writeFileSync(generalConfigPath, newGeneralConfig, 'utf8'); +} + +function setGasAdjusterProperty(pathToHome: string, fileConfig: any, property: string, value: number) { + const generalConfigPath = getConfigPath({ + pathToHome, + chain: fileConfig.chain, + configsFolder: 'configs', + config: 'general.yaml' + }); + const generalConfig = fs.readFileSync(generalConfigPath, 'utf8'); + + // Define the regex pattern to check if the property already exists + const propertyRegex = new RegExp(`(^\\s*${property}:\\s*\\d+(\\.\\d+)?$)`, 'm'); + const gasAdjusterRegex = new RegExp('(^\\s*gas_adjuster:.*$)', 'gm'); + + let newGeneralConfig; + + if (propertyRegex.test(generalConfig)) { + // If the property exists, modify its value + newGeneralConfig = generalConfig.replace(propertyRegex, ` ${property}: ${value}`); + } else { + // If the property does not exist, add it under the gas_adjuster section + newGeneralConfig = generalConfig.replace(gasAdjusterRegex, `$1\n ${property}: ${value}`); + } + + fs.writeFileSync(generalConfigPath, newGeneralConfig, 'utf8'); +} + +function deleteProperty(pathToHome: string, fileConfig: any, property: string) { + const generalConfigPath = getConfigPath({ + pathToHome, + chain: fileConfig.chain, + configsFolder: 
'configs',
+        config: 'general.yaml'
+    });
+    const generalConfig = fs.readFileSync(generalConfigPath, 'utf8');
+
+    // Define the regex pattern to find the property line and remove it completely
+    const propertyRegex = new RegExp(`^\\s*${property}:.*\\n?`, 'm');
+
+    // Remove the line if the property exists
+    const newGeneralConfig = generalConfig.replace(propertyRegex, '');
+
+    fs.writeFileSync(generalConfigPath, newGeneralConfig, 'utf8');
+}
diff --git a/core/tests/ts-integration/typings/jest.d.ts b/core/tests/ts-integration/typings/jest.d.ts
index 4d8f1c3530c..3bb62732cf7 100644
--- a/core/tests/ts-integration/typings/jest.d.ts
+++ b/core/tests/ts-integration/typings/jest.d.ts
@@ -1,6 +1,8 @@
 import { MatcherModifier } from '../src/matchers/transaction-modifiers';
 
 export declare global {
+    function rawWriteToConsole(message: string, ...args: any[]);
+
     namespace jest {
         interface Matchers<R> {
             // Generic matchers
diff --git a/core/tests/vm-benchmark/src/transaction.rs b/core/tests/vm-benchmark/src/transaction.rs
index 90e1c6360b8..d5fedfa4df9 100644
--- a/core/tests/vm-benchmark/src/transaction.rs
+++ b/core/tests/vm-benchmark/src/transaction.rs
@@ -47,7 +47,7 @@ pub fn get_deploy_tx_with_gas_limit(code: &[u8], gas_limit: u32, nonce: u32) ->
         .collect();
 
     let mut signed = L2Tx::new_signed(
-        CONTRACT_DEPLOYER_ADDRESS,
+        Some(CONTRACT_DEPLOYER_ADDRESS),
         calldata,
         Nonce(nonce),
         tx_fee(gas_limit),
@@ -76,7 +76,7 @@ fn tx_fee(gas_limit: u32) -> Fee {
 
 pub fn get_transfer_tx(nonce: u32) -> Transaction {
     let mut signed = L2Tx::new_signed(
-        PRIVATE_KEY.address(),
+        Some(PRIVATE_KEY.address()),
         vec![], // calldata
         Nonce(nonce),
         tx_fee(1_000_000),
@@ -109,7 +109,7 @@ pub fn get_load_test_deploy_tx() -> Transaction {
     factory_deps.push(LOAD_TEST_CONTRACT.bytecode.clone());
 
     let mut signed = L2Tx::new_signed(
-        CONTRACT_DEPLOYER_ADDRESS,
+        Some(CONTRACT_DEPLOYER_ADDRESS),
         create_calldata,
         Nonce(0),
         tx_fee(100_000_000),
@@ -147,7 +147,7 @@ pub fn get_load_test_tx(nonce: u32, gas_limit: u32, params: LoadTestParams) -> T
         .expect("cannot encode `execute` inputs");
 
     let mut signed = L2Tx::new_signed(
-        *LOAD_TEST_CONTRACT_ADDRESS,
+        Some(*LOAD_TEST_CONTRACT_ADDRESS),
         calldata,
         Nonce(nonce),
         tx_fee(gas_limit),
diff --git a/core/tests/vm-benchmark/src/vm.rs b/core/tests/vm-benchmark/src/vm.rs
index f3c00667c7d..f4a0010f29e 100644
--- a/core/tests/vm-benchmark/src/vm.rs
+++ b/core/tests/vm-benchmark/src/vm.rs
@@ -88,7 +88,7 @@ impl BenchmarkingVmFactory for Fast {
         system_env: SystemEnv,
         storage: &'static InMemoryStorage,
     ) -> Self::Instance {
-        vm_fast::Vm::new(batch_env, system_env, storage)
+        vm_fast::Vm::custom(batch_env, system_env, storage)
     }
 }
 
@@ -157,11 +157,9 @@ impl BenchmarkingVm {
 
     pub fn run_transaction_full(&mut self, tx: &Transaction) -> VmExecutionResultAndLogs {
         self.0.make_snapshot();
-        let (compression_result, tx_result) = self.0.inspect_transaction_with_bytecode_compression(
-            Default::default(),
-            tx.clone(),
-            true,
-        );
+        let (compression_result, tx_result) = self
+            .0
+            .execute_transaction_with_bytecode_compression(tx.clone(), true);
         compression_result.expect("compressing bytecodes failed");
         if matches!(tx_result.result, ExecutionResult::Halt { .. }) {
@@ -175,7 +173,7 @@ impl BenchmarkingVm {
     pub fn instruction_count(&mut self, tx: &Transaction) -> usize {
         self.0.push_transaction(tx.clone());
         let count = Rc::new(RefCell::new(0));
-        self.0.inspect(Default::default(), VmExecutionMode::OneTx); // FIXME: re-enable instruction counting once new tracers are merged
+        self.0.execute(VmExecutionMode::OneTx); // FIXME: re-enable instruction counting once new tracers are merged
         count.take()
     }
 }
diff --git a/deny.toml b/deny.toml
index c2775fc057c..dc5a32c2c07 100644
--- a/deny.toml
+++ b/deny.toml
@@ -8,12 +8,13 @@ feature-depth = 1
 
 [advisories]
 ignore = [
+    "RUSTSEC-2024-0375", # atty dependency being unmaintained, dependency of clap and criterion, we would need to update to newer major of dependencies
     "RUSTSEC-2024-0320", # yaml_rust dependency being unmaintained, dependency in core, we should consider moving to yaml_rust2 fork
     "RUSTSEC-2020-0168", # mach dependency being unmaintained, dependency in consensus, we should consider moving to mach2 fork
     "RUSTSEC-2024-0370", # `cs_derive` needs to be updated to not rely on `proc-macro-error`
     # all below caused by StructOpt which we still use and we should move to clap v3 instead
     "RUSTSEC-2021-0145",
     "RUSTSEC-2021-0139",
 ]
 
 [licenses]
diff --git a/docker-compose-cpu-runner.yml b/docker-compose-cpu-runner.yml
index e0f751130eb..beb54f3ade9 100644
--- a/docker-compose-cpu-runner.yml
+++ b/docker-compose-cpu-runner.yml
@@ -11,7 +11,7 @@ services:
         source: ./etc/reth/chaindata
         target: /chaindata
 
-    command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 600ms --chain /chaindata/reth_config
+    command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 300ms --chain /chaindata/reth_config
     ports:
       - 127.0.0.1:8545:8545
 
diff --git a/docker-compose-gpu-runner-cuda-12-0.yml b/docker-compose-gpu-runner-cuda-12-0.yml
index f2089446a41..35a0faeb962 100644
--- a/docker-compose-gpu-runner-cuda-12-0.yml
+++ b/docker-compose-gpu-runner-cuda-12-0.yml
@@ -11,7 +11,7 @@ services:
         source: ./etc/reth/chaindata
         target: /chaindata
 
-    command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 600ms --chain /chaindata/reth_config
+    command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 300ms --chain /chaindata/reth_config
     ports:
       - 127.0.0.1:8545:8545
 
diff --git a/docker-compose-gpu-runner.yml b/docker-compose-gpu-runner.yml
index 35c6c3778f2..f95ae0d5f54 100644
--- a/docker-compose-gpu-runner.yml
+++ b/docker-compose-gpu-runner.yml
@@ -11,7 +11,7 @@ services:
         source: ./etc/reth/chaindata
         target: /chaindata
 
-    command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 600ms --chain /chaindata/reth_config
+    command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 300ms --chain /chaindata/reth_config
     ports:
       - 127.0.0.1:8545:8545
 
diff --git a/docker-compose.yml b/docker-compose.yml
index f2471c6bd17..d805ccc844e 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -13,7 +13,7 @@ services:
         source: ./etc/reth/chaindata
         target: /chaindata
 
-    command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 600ms --chain /chaindata/reth_config
+    command: node --dev --datadir /rethdata --http --http.addr 0.0.0.0 --http.port 8545 --http.corsdomain "*" --dev.block-time 300ms --chain /chaindata/reth_config
 
   postgres:
     image: "postgres:14"
diff --git a/docker/Makefile b/docker/Makefile
new file mode 100644
index 00000000000..c469587c8ff
--- /dev/null
+++ b/docker/Makefile
@@ -0,0 +1,110 @@
+DOCKER_BUILD_CMD=docker build
+PROTOCOL_VERSION=2.0
+CONTEXT=../
+# Additional package versions
+DOCKER_VERSION_MIN=27.1.2
+NODE_VERSION_MIN=20.17.0
+YARN_VERSION_MIN=1.22.19
+RUST_VERSION=nightly-2024-08-01
+SQLX_CLI_VERSION=0.8.1
+FORGE_MIN_VERSION=0.2.0
+
+# Versions and packages checks
+check-nodejs:
+	@command -v node >/dev/null 2>&1 || { echo >&2 "Node.js is not installed. Please install Node.js v$(NODE_VERSION_MIN) or higher."; exit 1; }
+	@NODE_VERSION=$$(node --version | sed 's/v//'); \
+	if [ "$$(echo $$NODE_VERSION $(NODE_VERSION_MIN) | tr " " "\n" | sort -V | head -n1)" != "$(NODE_VERSION_MIN)" ]; then \
+		echo "Node.js version $$NODE_VERSION is too low. Please update to v$(NODE_VERSION_MIN)."; exit 1; \
+	fi
+
+check-yarn:
+	@command -v yarn >/dev/null 2>&1 || { echo >&2 "Yarn is not installed. Please install Yarn v$(YARN_VERSION_MIN) or higher."; exit 1; }
+	@YARN_VERSION=$$(yarn --version); \
+	if [ "$$(echo $$YARN_VERSION $(YARN_VERSION_MIN) | tr " " "\n" | sort -V | head -n1)" != "$(YARN_VERSION_MIN)" ]; then \
+		echo "Yarn version $$YARN_VERSION is too low. Please update to v$(YARN_VERSION_MIN)."; exit 1; \
+	fi
+
+check-rust:
+	@command -v rustc >/dev/null 2>&1 || { echo >&2 "Rust is not installed. Please install Rust v$(RUST_VERSION)."; exit 1; }
+	@command rustup install $(RUST_VERSION) >/dev/null 2>&1
+
+check-sqlx-cli:
+	@command -v sqlx >/dev/null 2>&1 || { echo >&2 "sqlx-cli is not installed. Please install sqlx-cli v$(SQLX_CLI_VERSION)"; exit 1; }
+	@SQLX_CLI_VERSION_LOCAL=$$(sqlx --version | cut -d' ' -f2); \
+	if [ "$$(echo $$SQLX_CLI_VERSION_LOCAL $(SQLX_CLI_VERSION) | tr " " "\n" | sort -V | head -n1)" != "$(SQLX_CLI_VERSION)" ]; then \
+		echo "sqlx-cli version $$SQLX_CLI_VERSION_LOCAL is wrong. Please update to v$(SQLX_CLI_VERSION)"; exit 1; \
+	fi
+
+check-docker:
+	@command -v docker >/dev/null 2>&1 || { echo >&2 "Docker is not installed. Please install Docker v$(DOCKER_VERSION_MIN) or higher."; exit 1; }
+	@DOCKER_VERSION=$$(docker --version | grep -oE '[0-9]+\.[0-9]+\.[0-9]+'); \
+	if [ "$$(echo $$DOCKER_VERSION $(DOCKER_VERSION_MIN) | tr " " "\n" | sort -V | head -n1)" != "$(DOCKER_VERSION_MIN)" ]; then \
+		echo "Docker version $$DOCKER_VERSION is too low. Please update to v$(DOCKER_VERSION_MIN) or higher."; exit 1; \
+	fi
+
+check-foundry:
+	@command -v forge >/dev/null 2>&1 || { echo >&2 "Foundry is not installed. Please install Foundry"; exit 1; }
+	@FORGE_VERSION=$$(forge --version | grep -oE '[0-9]+\.[0-9]+\.[0-9]+'); \
+	if [ "$$(echo $$FORGE_VERSION $(FORGE_MIN_VERSION) | tr " " "\n" | sort -V | head -n1)" != "$(FORGE_MIN_VERSION)" ]; then \
+		echo "Forge version $$FORGE_VERSION is too low. Please update to v$(FORGE_MIN_VERSION) or higher."; exit 1; \
+	fi
+
+# Check for all required tools
+check-tools: check-nodejs check-yarn check-rust check-sqlx-cli check-docker check-foundry
+	@echo "All required tools are installed."
+
+# Check that contracts are checked out properly
+check-contracts:
+	@if [ ! -f ../contracts/l1-contracts/lib/forge-std/foundry.toml ] || [ -z "$$(ls -A ../contracts/l1-contracts/lib/forge-std/foundry.toml)" ]; then \
+		echo "l1-contracts git submodule is missing. Please re-download repo with 'git clone --recurse-submodules https://github.com/matter-labs/zksync-era.git'"; \
+		exit 1; \
+	fi
+
+# Build and download needed contracts
+# TODO Remove mkdir once we use foundry inside contracts repo
+prepare-contracts: check-tools check-contracts
+	@cd ../ && \
+	export ZKSYNC_HOME=$$(pwd) && \
+	export PATH=$$PATH:$${ZKSYNC_HOME}/bin && \
+	zkt || true && \
+	zk_supervisor contracts && \
+	mkdir -p contracts/l1-contracts/artifacts
+
+# Download setup-key
+prepare-keys:
+	@cd ../ && \
+	curl -LO https://storage.googleapis.com/matterlabs-setup-keys-us/setup-keys/setup_2\^26.key
+
+# Targets for building each container
+build-contract-verifier: check-tools prepare-contracts prepare-keys
+	$(DOCKER_BUILD_CMD) --file contract-verifier/Dockerfile --load \
+		--tag contract-verifier:$(PROTOCOL_VERSION) $(CONTEXT)
+
+build-server-v2: check-tools prepare-contracts prepare-keys
+	$(DOCKER_BUILD_CMD) --file server-v2/Dockerfile --load \
+		--tag server-v2:$(PROTOCOL_VERSION) $(CONTEXT)
+
+build-circuit-prover-gpu: check-tools prepare-keys
+	$(DOCKER_BUILD_CMD) --file circuit-prover-gpu/Dockerfile --load \
+		--platform linux/amd64 \
+		--tag prover:$(PROTOCOL_VERSION) $(CONTEXT)
+
+build-witness-generator: check-tools prepare-keys
+	$(DOCKER_BUILD_CMD) --file witness-generator/Dockerfile --load \
+		--tag witness-generator:$(PROTOCOL_VERSION) $(CONTEXT)
+
+build-external-node: check-tools prepare-contracts
+	$(DOCKER_BUILD_CMD) --file external-node/Dockerfile --load \
+		--tag external-node:$(PROTOCOL_VERSION) $(CONTEXT)
+
+# Build all containers
+build-all: build-contract-verifier build-server-v2 build-witness-generator build-circuit-prover-gpu build-external-node
+
+# Clean generated images
+clean-all:
+	@git submodule update --recursive
+	docker rmi contract-verifier:$(PROTOCOL_VERSION) >/dev/null 2>&1
+	docker rmi server-v2:$(PROTOCOL_VERSION) >/dev/null 2>&1
+	docker rmi prover:$(PROTOCOL_VERSION) >/dev/null 2>&1
+	docker rmi witness-generator:$(PROTOCOL_VERSION) >/dev/null 2>&1
+	docker rmi external-node:$(PROTOCOL_VERSION) >/dev/null 2>&1
diff --git a/docker/circuit-prover-gpu-gar/Dockerfile b/docker/circuit-prover-gpu-gar/Dockerfile
new file mode 100644
index 00000000000..3dfc6bdf9ad
--- /dev/null
+++ b/docker/circuit-prover-gpu-gar/Dockerfile
@@ -0,0 +1,15 @@
+ARG PROVER_IMAGE
+FROM us-docker.pkg.dev/matterlabs-infra/matterlabs-docker/circuit-prover-gpu:2.0-$PROVER_IMAGE as prover
+
+FROM nvidia/cuda:12.2.0-runtime-ubuntu22.04 as app
+
+# HACK copying to root is the only way to make Docker layer caching work for these files for some reason
+COPY *.bin /
+
+RUN apt-get update && apt-get install -y libpq5 ca-certificates openssl && rm -rf /var/lib/apt/lists/*
+
+# copy finalization hints required for assembly generation
+COPY --from=prover prover/data/keys/ /prover/data/keys/
+COPY --from=prover /usr/bin/zksync_circuit_prover /usr/bin/
+
+ENTRYPOINT ["zksync_circuit_prover"]
diff --git a/docker/circuit-prover-gpu/Dockerfile b/docker/circuit-prover-gpu/Dockerfile
new file mode 100644
index 00000000000..8e193e20589
--- /dev/null
+++ b/docker/circuit-prover-gpu/Dockerfile
@@ -0,0 +1,57 @@
+FROM nvidia/cuda:12.2.0-devel-ubuntu22.04 AS builder
+
+ARG DEBIAN_FRONTEND=noninteractive
+
+ARG CUDA_ARCH=89
+ENV CUDAARCHS=${CUDA_ARCH}
+
+# set of args for use of sccache
+ARG SCCACHE_GCS_BUCKET=""
+ARG SCCACHE_GCS_SERVICE_ACCOUNT=""
+ARG SCCACHE_GCS_RW_MODE=""
+ARG RUSTC_WRAPPER=""
+
+ENV SCCACHE_GCS_BUCKET=${SCCACHE_GCS_BUCKET}
+ENV
SCCACHE_GCS_SERVICE_ACCOUNT=${SCCACHE_GCS_SERVICE_ACCOUNT} +ENV SCCACHE_GCS_RW_MODE=${SCCACHE_GCS_RW_MODE} +ENV RUSTC_WRAPPER=${RUSTC_WRAPPER} + +RUN apt-get update && apt-get install -y curl clang openssl libssl-dev gcc g++ \ + pkg-config build-essential libclang-dev && \ + rm -rf /var/lib/apt/lists/* + +ENV RUSTUP_HOME=/usr/local/rustup \ + CARGO_HOME=/usr/local/cargo \ + PATH=/usr/local/cargo/bin:$PATH + +RUN curl https://sh.rustup.rs -sSf | bash -s -- -y && \ + rustup install nightly-2024-08-01 && \ + rustup default nightly-2024-08-01 + +RUN curl -Lo cmake-3.24.2-linux-x86_64.sh https://github.com/Kitware/CMake/releases/download/v3.24.2/cmake-3.24.2-linux-x86_64.sh && \ + chmod +x cmake-3.24.2-linux-x86_64.sh && \ + ./cmake-3.24.2-linux-x86_64.sh --skip-license --prefix=/usr/local + +# install sccache +RUN curl -Lo sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz https://github.com/mozilla/sccache/releases/download/v0.8.1/sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz && \ + tar -xzf sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz && \ + cp sccache-v0.8.1-x86_64-unknown-linux-musl/sccache /usr/local/sbin/ && \ + rm -rf sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz && \ + rm -rf sccache-v0.8.1-x86_64-unknown-linux-musl && \ + chmod +x /usr/local/sbin/sccache + +WORKDIR /usr/src/zksync +COPY . . + +RUN cd prover && cargo build --release --bin zksync_circuit_prover + +FROM nvidia/cuda:12.2.0-runtime-ubuntu22.04 + +RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* + +# copy finalization hints required for assembly generation +COPY prover/data/keys/ /prover/data/keys/ + +COPY --from=builder /usr/src/zksync/prover/target/release/zksync_circuit_prover /usr/bin/ + +ENTRYPOINT ["zksync_circuit_prover"] diff --git a/docker/contract-verifier/Dockerfile b/docker/contract-verifier/Dockerfile index 7ed1906b857..7943dae835a 100644 --- a/docker/contract-verifier/Dockerfile +++ b/docker/contract-verifier/Dockerfile @@ -1,14 +1,27 @@ # syntax=docker/dockerfile:experimental -FROM matterlabs/zksync-build-base:latest as builder +FROM ghcr.io/matter-labs/zksync-build-base:latest AS builder + +ARG CUDA_ARCH=89 +ENV CUDAARCHS=${CUDA_ARCH} +# set of args for use of sccache +ARG SCCACHE_GCS_BUCKET="" +ARG SCCACHE_GCS_SERVICE_ACCOUNT="" +ARG SCCACHE_GCS_RW_MODE="" +ARG RUSTC_WRAPPER="" + +ENV SCCACHE_GCS_BUCKET=${SCCACHE_GCS_BUCKET} +ENV SCCACHE_GCS_SERVICE_ACCOUNT=${SCCACHE_GCS_SERVICE_ACCOUNT} +ENV SCCACHE_GCS_RW_MODE=${SCCACHE_GCS_RW_MODE} +ENV RUSTC_WRAPPER=${RUSTC_WRAPPER} WORKDIR /usr/src/zksync COPY . . 
RUN cargo build --release -FROM debian:bookworm-slim +FROM ghcr.io/matter-labs/zksync-runtime-base:latest -RUN apt-get update && apt-get install -y curl libpq5 ca-certificates wget python3 jq && rm -rf /var/lib/apt/lists/* +RUN apt-get update && apt-get install -y wget python3 jq && rm -rf /var/lib/apt/lists/* # install zksolc 1.3.x RUN skip_versions="v1.3.12 v1.3.15 v1.3.20" && \ @@ -34,7 +47,7 @@ RUN mkdir -p /etc/zksolc-bin/vm-1.5.0-a167aa3 && \ chmod +x /etc/zksolc-bin/vm-1.5.0-a167aa3/zksolc # install zksolc 1.5.x -RUN for VERSION in $(seq -f "v1.5.%g" 0 3); do \ +RUN for VERSION in $(seq -f "v1.5.%g" 0 4); do \ mkdir -p /etc/zksolc-bin/$VERSION && \ wget https://github.com/matter-labs/zksolc-bin/raw/main/linux-amd64/zksolc-linux-amd64-musl-$VERSION -O /etc/zksolc-bin/$VERSION/zksolc && \ chmod +x /etc/zksolc-bin/$VERSION/zksolc; \ diff --git a/docker/contract-verifier/install-all-solc.sh b/docker/contract-verifier/install-all-solc.sh index bc7cec143cc..4fe992f8357 100755 --- a/docker/contract-verifier/install-all-solc.sh +++ b/docker/contract-verifier/install-all-solc.sh @@ -26,7 +26,7 @@ done # Download zkVM solc list=( "0.8.25-1.0.0" "0.8.24-1.0.0" "0.8.23-1.0.0" "0.8.22-1.0.0" "0.8.21-1.0.0" "0.8.20-1.0.0" "0.8.19-1.0.0" "0.8.18-1.0.0" "0.8.17-1.0.0" "0.8.16-1.0.0" "0.8.15-1.0.0" "0.8.14-1.0.0" "0.8.13-1.0.0" "0.8.12-1.0.0" "0.8.11-1.0.0" "0.8.10-1.0.0" "0.8.9-1.0.0" "0.8.8-1.0.0" "0.8.7-1.0.0" "0.8.6-1.0.0" "0.8.5-1.0.0" "0.8.4-1.0.0" "0.8.3-1.0.0" "0.8.2-1.0.0" "0.8.1-1.0.0" "0.8.0-1.0.0" "0.7.6-1.0.0" "0.7.5-1.0.0" "0.7.4-1.0.0" "0.7.3-1.0.0" "0.7.2-1.0.0" "0.7.1-1.0.0" "0.7.0-1.0.0" "0.6.12-1.0.0" "0.6.11-1.0.0" "0.6.10-1.0.0" "0.6.9-1.0.0" "0.6.8-1.0.0" "0.6.7-1.0.0" "0.6.6-1.0.0" "0.6.5-1.0.0" "0.6.4-1.0.0" "0.6.3-1.0.0" "0.6.2-1.0.0" "0.6.1-1.0.0" "0.6.0-1.0.0" "0.5.17-1.0.0" "0.5.16-1.0.0" "0.5.15-1.0.0" "0.5.14-1.0.0" "0.5.13-1.0.0" "0.5.12-1.0.0" "0.5.11-1.0.0" "0.5.10-1.0.0" "0.5.9-1.0.0" "0.5.8-1.0.0" "0.5.7-1.0.0" "0.5.6-1.0.0" "0.5.5-1.0.0" "0.5.4-1.0.0" "0.5.3-1.0.0" "0.5.2-1.0.0" "0.5.1-1.0.0" "0.5.0-1.0.0" "0.4.26-1.0.0" "0.4.25-1.0.0" "0.4.24-1.0.0" "0.4.23-1.0.0" "0.4.22-1.0.0" "0.4.21-1.0.0" "0.4.20-1.0.0" "0.4.19-1.0.0" "0.4.18-1.0.0" "0.4.17-1.0.0" "0.4.16-1.0.0" "0.4.15-1.0.0" "0.4.14-1.0.0" "0.4.13-1.0.0" "0.4.12-1.0.0" - "0.8.26-1.0.1" "0.8.25-1.0.1" "0.8.24-1.0.1" "0.8.23-1.0.1" "0.8.22-1.0.1" "0.8.21-1.0.1" "0.8.20-1.0.1" "0.8.19-1.0.1" "0.8.18-1.0.1" "0.8.17-1.0.1" "0.8.16-1.0.1" "0.8.15-1.0.1" "0.8.14-1.0.1" "0.8.13-1.0.1" "0.8.12-1.0.1" "0.8.11-1.0.1" "0.8.10-1.0.1" "0.8.9-1.0.1" "0.8.8-1.0.1" "0.8.7-1.0.1" "0.8.6-1.0.1" "0.8.5-1.0.1" "0.8.4-1.0.1" "0.8.3-1.0.1" "0.8.2-1.0.1" "0.8.1-1.0.1" "0.8.0-1.0.1" "0.7.6-1.0.1" "0.7.5-1.0.1" "0.7.4-1.0.1" "0.7.3-1.0.1" "0.7.2-1.0.1" "0.7.1-1.0.1" "0.7.0-1.0.1" "0.6.12-1.0.1" "0.6.11-1.0.1" "0.6.10-1.0.1" "0.6.9-1.0.1" "0.6.8-1.0.1" "0.6.7-1.0.1" "0.6.6-1.0.1" "0.6.5-1.0.1" "0.6.4-1.0.1" "0.6.3-1.0.1" "0.6.2-1.0.1" "0.6.1-1.0.1" "0.6.0-1.0.1" "0.5.17-1.0.1" "0.5.16-1.0.1" "0.5.15-1.0.1" "0.5.14-1.0.1" "0.5.13-1.0.1" "0.5.12-1.0.1" "0.5.11-1.0.1" "0.5.10-1.0.1" "0.5.9-1.0.1" "0.5.8-1.0.1" "0.5.7-1.0.1" "0.5.6-1.0.1" "0.5.5-1.0.1" "0.5.4-1.0.1" "0.5.3-1.0.1" "0.5.2-1.0.1" "0.5.1-1.0.1" "0.5.0-1.0.1" "0.4.26-1.0.1" "0.4.25-1.0.1" "0.4.24-1.0.1" "0.4.23-1.0.1" "0.4.22-1.0.1" "0.4.21-1.0.1" "0.4.20-1.0.1" "0.4.19-1.0.1" "0.4.18-1.0.1" "0.4.17-1.0.1" "0.4.16-1.0.1" "0.4.15-1.0.1" "0.4.14-1.0.1" "0.4.13-1.0.1" "0.4.12-1.0.1" + "0.8.27-1.0.1" "0.8.26-1.0.1" "0.8.25-1.0.1" "0.8.24-1.0.1" "0.8.23-1.0.1" "0.8.22-1.0.1" 
"0.8.21-1.0.1" "0.8.20-1.0.1" "0.8.19-1.0.1" "0.8.18-1.0.1" "0.8.17-1.0.1" "0.8.16-1.0.1" "0.8.15-1.0.1" "0.8.14-1.0.1" "0.8.13-1.0.1" "0.8.12-1.0.1" "0.8.11-1.0.1" "0.8.10-1.0.1" "0.8.9-1.0.1" "0.8.8-1.0.1" "0.8.7-1.0.1" "0.8.6-1.0.1" "0.8.5-1.0.1" "0.8.4-1.0.1" "0.8.3-1.0.1" "0.8.2-1.0.1" "0.8.1-1.0.1" "0.8.0-1.0.1" "0.7.6-1.0.1" "0.7.5-1.0.1" "0.7.4-1.0.1" "0.7.3-1.0.1" "0.7.2-1.0.1" "0.7.1-1.0.1" "0.7.0-1.0.1" "0.6.12-1.0.1" "0.6.11-1.0.1" "0.6.10-1.0.1" "0.6.9-1.0.1" "0.6.8-1.0.1" "0.6.7-1.0.1" "0.6.6-1.0.1" "0.6.5-1.0.1" "0.6.4-1.0.1" "0.6.3-1.0.1" "0.6.2-1.0.1" "0.6.1-1.0.1" "0.6.0-1.0.1" "0.5.17-1.0.1" "0.5.16-1.0.1" "0.5.15-1.0.1" "0.5.14-1.0.1" "0.5.13-1.0.1" "0.5.12-1.0.1" "0.5.11-1.0.1" "0.5.10-1.0.1" "0.5.9-1.0.1" "0.5.8-1.0.1" "0.5.7-1.0.1" "0.5.6-1.0.1" "0.5.5-1.0.1" "0.5.4-1.0.1" "0.5.3-1.0.1" "0.5.2-1.0.1" "0.5.1-1.0.1" "0.5.0-1.0.1" "0.4.26-1.0.1" "0.4.25-1.0.1" "0.4.24-1.0.1" "0.4.23-1.0.1" "0.4.22-1.0.1" "0.4.21-1.0.1" "0.4.20-1.0.1" "0.4.19-1.0.1" "0.4.18-1.0.1" "0.4.17-1.0.1" "0.4.16-1.0.1" "0.4.15-1.0.1" "0.4.14-1.0.1" "0.4.13-1.0.1" "0.4.12-1.0.1" ) for version in ${list[@]}; do diff --git a/docker/external-node/Dockerfile b/docker/external-node/Dockerfile index 7d276941dc4..1012eecfc16 100644 --- a/docker/external-node/Dockerfile +++ b/docker/external-node/Dockerfile @@ -1,14 +1,23 @@ # Will work locally only after prior contracts build -FROM matterlabs/zksync-build-base:latest as builder +FROM ghcr.io/matter-labs/zksync-build-base:latest AS builder + +# set of args for use of sccache +ARG SCCACHE_GCS_BUCKET="" +ARG SCCACHE_GCS_SERVICE_ACCOUNT="" +ARG SCCACHE_GCS_RW_MODE="" +ARG RUSTC_WRAPPER="" + +ENV SCCACHE_GCS_BUCKET=${SCCACHE_GCS_BUCKET} +ENV SCCACHE_GCS_SERVICE_ACCOUNT=${SCCACHE_GCS_SERVICE_ACCOUNT} +ENV SCCACHE_GCS_RW_MODE=${SCCACHE_GCS_RW_MODE} +ENV RUSTC_WRAPPER=${RUSTC_WRAPPER} WORKDIR /usr/src/zksync COPY . . 
RUN cargo build --release -FROM debian:bookworm-slim - -RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* +FROM ghcr.io/matter-labs/zksync-runtime-base:latest COPY --from=builder /usr/src/zksync/target/release/zksync_external_node /usr/bin COPY --from=builder /usr/src/zksync/target/release/block_reverter /usr/bin @@ -19,9 +28,9 @@ COPY contracts/system-contracts/contracts-preprocessed/artifacts/ /contracts/sys COPY contracts/system-contracts/contracts-preprocessed/precompiles/artifacts/ /contracts/system-contracts/contracts-preprocessed/precompiles/artifacts/ COPY contracts/system-contracts/artifacts-zk /contracts/system-contracts/artifacts-zk COPY contracts/l1-contracts/out/ /contracts/l1-contracts/out/ +COPY contracts/l2-contracts/artifacts-zk/ /contracts/l2-contracts/artifacts-zk/ # TODO Remove once we use foundry inside contracts repo COPY contracts/l1-contracts/artifacts/ /contracts/l1-contracts/artifacts/ -COPY contracts/l2-contracts/artifacts-zk/ /contracts/l2-contracts/artifacts-zk/ COPY etc/tokens/ /etc/tokens/ COPY etc/ERC20/ /etc/ERC20/ COPY etc/multivm_bootloaders/ /etc/multivm_bootloaders/ diff --git a/docker/proof-fri-gpu-compressor/Dockerfile b/docker/proof-fri-gpu-compressor/Dockerfile index 45f2ffa51b0..e744787c825 100644 --- a/docker/proof-fri-gpu-compressor/Dockerfile +++ b/docker/proof-fri-gpu-compressor/Dockerfile @@ -1,10 +1,20 @@ # Will work locally only after prior universal setup key download -FROM nvidia/cuda:12.2.0-devel-ubuntu22.04 as builder +FROM nvidia/cuda:12.2.0-devel-ubuntu22.04 AS builder ARG DEBIAN_FRONTEND=noninteractive ARG CUDA_ARCH=89 ENV CUDAARCHS=${CUDA_ARCH} +# set of args for use of sccache +ARG SCCACHE_GCS_BUCKET="" +ARG SCCACHE_GCS_SERVICE_ACCOUNT="" +ARG SCCACHE_GCS_RW_MODE="" +ARG RUSTC_WRAPPER="" + +ENV SCCACHE_GCS_BUCKET=${SCCACHE_GCS_BUCKET} +ENV SCCACHE_GCS_SERVICE_ACCOUNT=${SCCACHE_GCS_SERVICE_ACCOUNT} +ENV SCCACHE_GCS_RW_MODE=${SCCACHE_GCS_RW_MODE} +ENV RUSTC_WRAPPER=${RUSTC_WRAPPER} RUN apt-get update && apt-get install -y curl clang openssl libssl-dev gcc g++ git \ pkg-config build-essential libclang-dev && \ @@ -22,6 +32,14 @@ RUN curl -Lo cmake-3.24.2-linux-x86_64.sh https://github.com/Kitware/CMake/relea chmod +x cmake-3.24.2-linux-x86_64.sh && \ ./cmake-3.24.2-linux-x86_64.sh --skip-license --prefix=/usr/local +# install sccache +RUN curl -Lo sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz https://github.com/mozilla/sccache/releases/download/v0.8.1/sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz && \ + tar -xzf sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz && \ + cp sccache-v0.8.1-x86_64-unknown-linux-musl/sccache /usr/local/sbin/ && \ + rm -rf sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz && \ + rm -rf sccache-v0.8.1-x86_64-unknown-linux-musl && \ + chmod +x /usr/local/sbin/sccache + WORKDIR /usr/src/zksync COPY . . 
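# The sccache ARG/ENV block repeated across the Dockerfiles above stays inert unless the
# corresponding build args are passed at build time. A minimal sketch of enabling the cache
# for one image (the bucket name and service account below are illustrative placeholders,
# not values taken from this repository):
#
#   docker build -f docker/external-node/Dockerfile \
#     --build-arg RUSTC_WRAPPER=sccache \
#     --build-arg SCCACHE_GCS_BUCKET=my-sccache-bucket \
#     --build-arg SCCACHE_GCS_RW_MODE=READ_WRITE \
#     --build-arg SCCACHE_GCS_SERVICE_ACCOUNT=ci-builder@example-project.iam.gserviceaccount.com \
#     .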
diff --git a/docker/prover-fri-gateway/Dockerfile b/docker/prover-fri-gateway/Dockerfile index de59451fee8..3e631b35156 100644 --- a/docker/prover-fri-gateway/Dockerfile +++ b/docker/prover-fri-gateway/Dockerfile @@ -1,14 +1,24 @@ -FROM matterlabs/zksync-build-base:latest as builder +FROM ghcr.io/matter-labs/zksync-build-base:latest AS builder ARG DEBIAN_FRONTEND=noninteractive +# set of args for use of sccache +ARG SCCACHE_GCS_BUCKET="" +ARG SCCACHE_GCS_SERVICE_ACCOUNT="" +ARG SCCACHE_GCS_RW_MODE="" +ARG RUSTC_WRAPPER="" + +ENV SCCACHE_GCS_BUCKET=${SCCACHE_GCS_BUCKET} +ENV SCCACHE_GCS_SERVICE_ACCOUNT=${SCCACHE_GCS_SERVICE_ACCOUNT} +ENV SCCACHE_GCS_RW_MODE=${SCCACHE_GCS_RW_MODE} +ENV RUSTC_WRAPPER=${RUSTC_WRAPPER} + WORKDIR /usr/src/zksync COPY . . RUN cd prover && cargo build --release --bin zksync_prover_fri_gateway -FROM debian:bookworm-slim -RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* +FROM ghcr.io/matter-labs/zksync-runtime-base:latest # copy VK required for proof wrapping COPY prover/data/keys/ /prover/data/keys/ diff --git a/docker/prover-gpu-fri/Dockerfile b/docker/prover-gpu-fri/Dockerfile index ad3ff1ff719..2a680a49c5d 100644 --- a/docker/prover-gpu-fri/Dockerfile +++ b/docker/prover-gpu-fri/Dockerfile @@ -1,10 +1,21 @@ -FROM nvidia/cuda:12.2.0-devel-ubuntu22.04 as builder +FROM nvidia/cuda:12.2.0-devel-ubuntu22.04 AS builder ARG DEBIAN_FRONTEND=noninteractive ARG CUDA_ARCH=89 ENV CUDAARCHS=${CUDA_ARCH} +# set of args for use of sccache +ARG SCCACHE_GCS_BUCKET="" +ARG SCCACHE_GCS_SERVICE_ACCOUNT="" +ARG SCCACHE_GCS_RW_MODE="" +ARG RUSTC_WRAPPER="" + +ENV SCCACHE_GCS_BUCKET=${SCCACHE_GCS_BUCKET} +ENV SCCACHE_GCS_SERVICE_ACCOUNT=${SCCACHE_GCS_SERVICE_ACCOUNT} +ENV SCCACHE_GCS_RW_MODE=${SCCACHE_GCS_RW_MODE} +ENV RUSTC_WRAPPER=${RUSTC_WRAPPER} + RUN apt-get update && apt-get install -y curl clang openssl libssl-dev gcc g++ \ pkg-config build-essential libclang-dev && \ rm -rf /var/lib/apt/lists/* @@ -21,6 +32,14 @@ RUN curl -Lo cmake-3.24.2-linux-x86_64.sh https://github.com/Kitware/CMake/relea chmod +x cmake-3.24.2-linux-x86_64.sh && \ ./cmake-3.24.2-linux-x86_64.sh --skip-license --prefix=/usr/local +# install sccache +RUN curl -Lo sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz https://github.com/mozilla/sccache/releases/download/v0.8.1/sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz && \ + tar -xzf sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz && \ + cp sccache-v0.8.1-x86_64-unknown-linux-musl/sccache /usr/local/sbin/ && \ + rm -rf sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz && \ + rm -rf sccache-v0.8.1-x86_64-unknown-linux-musl && \ + chmod +x /usr/local/sbin/sccache + WORKDIR /usr/src/zksync COPY . . diff --git a/docker/prover-job-monitor/Dockerfile b/docker/prover-job-monitor/Dockerfile index 25d5dcd3af9..88b46df27ff 100644 --- a/docker/prover-job-monitor/Dockerfile +++ b/docker/prover-job-monitor/Dockerfile @@ -1,14 +1,24 @@ -FROM matterlabs/zksync-build-base:latest as builder +FROM ghcr.io/matter-labs/zksync-build-base:latest AS builder ARG DEBIAN_FRONTEND=noninteractive +# set of args for use of sccache +ARG SCCACHE_GCS_BUCKET="" +ARG SCCACHE_GCS_SERVICE_ACCOUNT="" +ARG SCCACHE_GCS_RW_MODE="" +ARG RUSTC_WRAPPER="" + +ENV SCCACHE_GCS_BUCKET=${SCCACHE_GCS_BUCKET} +ENV SCCACHE_GCS_SERVICE_ACCOUNT=${SCCACHE_GCS_SERVICE_ACCOUNT} +ENV SCCACHE_GCS_RW_MODE=${SCCACHE_GCS_RW_MODE} +ENV RUSTC_WRAPPER=${RUSTC_WRAPPER} + WORKDIR /usr/src/zksync COPY . . 
RUN cd prover && cargo build --release --bin zksync_prover_job_monitor -FROM debian:bookworm-slim -RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/* +FROM ghcr.io/matter-labs/zksync-runtime-base:latest COPY --from=builder /usr/src/zksync/prover/target/release/zksync_prover_job_monitor /usr/bin/ diff --git a/docker/runtime-base/Dockerfile b/docker/runtime-base/Dockerfile new file mode 100644 index 00000000000..09d920b1c43 --- /dev/null +++ b/docker/runtime-base/Dockerfile @@ -0,0 +1,9 @@ +FROM debian:bookworm-slim + +RUN apt-get update && \ + apt-get install -y \ + curl \ + libpq5 \ + ca-certificates \ + && \ + rm -rf /var/lib/apt/lists/* diff --git a/docker/server-v2/Dockerfile b/docker/server-v2/Dockerfile index 07611a1d7b4..13a39133327 100644 --- a/docker/server-v2/Dockerfile +++ b/docker/server-v2/Dockerfile @@ -1,6 +1,17 @@ # Will work locally only after prior contracts build # syntax=docker/dockerfile:experimental -FROM matterlabs/zksync-build-base:latest as builder +FROM ghcr.io/matter-labs/zksync-build-base:latest AS builder + +# set of args for use of sccache +ARG SCCACHE_GCS_BUCKET="" +ARG SCCACHE_GCS_SERVICE_ACCOUNT="" +ARG SCCACHE_GCS_RW_MODE="" +ARG RUSTC_WRAPPER="" + +ENV SCCACHE_GCS_BUCKET=${SCCACHE_GCS_BUCKET} +ENV SCCACHE_GCS_SERVICE_ACCOUNT=${SCCACHE_GCS_SERVICE_ACCOUNT} +ENV SCCACHE_GCS_RW_MODE=${SCCACHE_GCS_RW_MODE} +ENV RUSTC_WRAPPER=${RUSTC_WRAPPER} WORKDIR /usr/src/zksync @@ -8,9 +19,9 @@ COPY . . RUN cargo build --release --features=rocksdb/io-uring -FROM debian:bookworm-slim +FROM ghcr.io/matter-labs/zksync-runtime-base:latest -RUN apt-get update && apt-get install -y curl libpq5 liburing-dev ca-certificates && \ +RUN apt-get update && apt-get install -y liburing-dev && \ rm -rf /var/lib/apt/lists/* EXPOSE 3000 @@ -25,9 +36,9 @@ COPY contracts/system-contracts/contracts-preprocessed/artifacts/ /contracts/sys COPY contracts/system-contracts/contracts-preprocessed/precompiles/artifacts/ /contracts/system-contracts/contracts-preprocessed/precompiles/artifacts/ COPY contracts/system-contracts/artifacts-zk /contracts/system-contracts/artifacts-zk COPY contracts/l1-contracts/out/ /contracts/l1-contracts/out/ +COPY contracts/l2-contracts/artifacts-zk/ /contracts/l2-contracts/artifacts-zk/ # TODO Remove once we use foundry inside contracts repo COPY contracts/l1-contracts/artifacts/ /contracts/l1-contracts/artifacts/ -COPY contracts/l2-contracts/artifacts-zk/ /contracts/l2-contracts/artifacts-zk/ COPY etc/tokens/ /etc/tokens/ COPY etc/ERC20/ /etc/ERC20/ COPY etc/multivm_bootloaders/ /etc/multivm_bootloaders/ diff --git a/docker/snapshots-creator/Dockerfile b/docker/snapshots-creator/Dockerfile index 10eef06dfbb..2d3c8306498 100644 --- a/docker/snapshots-creator/Dockerfile +++ b/docker/snapshots-creator/Dockerfile @@ -1,14 +1,25 @@ # syntax=docker/dockerfile:experimental -FROM matterlabs/zksync-build-base:latest as builder +FROM ghcr.io/matter-labs/zksync-build-base:latest AS builder + +# set of args for use of sccache +ARG SCCACHE_GCS_BUCKET="" +ARG SCCACHE_GCS_SERVICE_ACCOUNT="" +ARG SCCACHE_GCS_RW_MODE="" +ARG RUSTC_WRAPPER="" + +ENV SCCACHE_GCS_BUCKET=${SCCACHE_GCS_BUCKET} +ENV SCCACHE_GCS_SERVICE_ACCOUNT=${SCCACHE_GCS_SERVICE_ACCOUNT} +ENV SCCACHE_GCS_RW_MODE=${SCCACHE_GCS_RW_MODE} +ENV RUSTC_WRAPPER=${RUSTC_WRAPPER} WORKDIR /usr/src/zksync COPY . . 
RUN cargo build --release --bin snapshots_creator -FROM debian:bookworm-slim +FROM ghcr.io/matter-labs/zksync-runtime-base:latest -RUN apt-get update && apt-get install -y curl libpq5 liburing-dev ca-certificates && \ +RUN apt-get update && apt-get install -y liburing-dev && \ rm -rf /var/lib/apt/lists/* COPY --from=builder /usr/src/zksync/target/release/snapshots_creator /usr/bin diff --git a/docker/verified-sources-fetcher/Dockerfile b/docker/verified-sources-fetcher/Dockerfile index 972f85d0faf..87475f3187f 100644 --- a/docker/verified-sources-fetcher/Dockerfile +++ b/docker/verified-sources-fetcher/Dockerfile @@ -1,14 +1,26 @@ -FROM matterlabs/zksync-build-base:latest as builder +FROM ghcr.io/matter-labs/zksync-build-base:latest AS builder ARG DEBIAN_FRONTEND=noninteractive +# set of args for use of sccache +ARG SCCACHE_GCS_BUCKET="" +ARG SCCACHE_GCS_SERVICE_ACCOUNT="" +ARG SCCACHE_GCS_RW_MODE="" +ARG RUSTC_WRAPPER="" + +ENV SCCACHE_GCS_BUCKET=${SCCACHE_GCS_BUCKET} +ENV SCCACHE_GCS_SERVICE_ACCOUNT=${SCCACHE_GCS_SERVICE_ACCOUNT} +ENV SCCACHE_GCS_RW_MODE=${SCCACHE_GCS_RW_MODE} +ENV RUSTC_WRAPPER=${RUSTC_WRAPPER} + WORKDIR /usr/src/zksync COPY . . RUN cargo build --release --bin verified_sources_fetcher -FROM debian:bookworm-slim -RUN apt-get update && apt-get install -y apt-transport-https ca-certificates gnupg curl git && rm -rf /var/lib/apt/lists/* +FROM ghcr.io/matter-labs/zksync-runtime-base:latest + +RUN apt-get update && apt-get install -y apt-transport-https gnupg git && rm -rf /var/lib/apt/lists/* RUN curl https://packages.cloud.google.com/apt/doc/apt-key.gpg | gpg --dearmor -o /usr/share/keyrings/cloud.google.gpg RUN echo "deb [signed-by=/usr/share/keyrings/cloud.google.gpg] https://packages.cloud.google.com/apt cloud-sdk main" | tee -a /etc/apt/sources.list.d/google-cloud-sdk.list diff --git a/docker/witness-generator/Dockerfile b/docker/witness-generator/Dockerfile index 2eebe07515e..29e22698771 100644 --- a/docker/witness-generator/Dockerfile +++ b/docker/witness-generator/Dockerfile @@ -1,17 +1,27 @@ -FROM matterlabs/zksync-build-base:latest AS builder +FROM ghcr.io/matter-labs/zksync-build-base:latest AS builder ARG DEBIAN_FRONTEND=noninteractive ARG RUST_FLAGS="" ENV RUSTFLAGS=${RUST_FLAGS} +ENV ZKSYNC_USE_CUDA_STUBS=true + +# set of args for use of sccache +ARG SCCACHE_GCS_BUCKET="" +ARG SCCACHE_GCS_SERVICE_ACCOUNT="" +ARG SCCACHE_GCS_RW_MODE="" +ARG RUSTC_WRAPPER="" + +ENV SCCACHE_GCS_BUCKET=${SCCACHE_GCS_BUCKET} +ENV SCCACHE_GCS_SERVICE_ACCOUNT=${SCCACHE_GCS_SERVICE_ACCOUNT} +ENV SCCACHE_GCS_RW_MODE=${SCCACHE_GCS_RW_MODE} +ENV RUSTC_WRAPPER=${RUSTC_WRAPPER} WORKDIR /usr/src/zksync COPY . . 
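+# With ZKSYNC_USE_CUDA_STUBS=true set above, the prover-side build below links against CUDA stubs
+# instead of requiring a real CUDA toolkit in this image.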
RUN cd prover && cargo build --release --bin zksync_witness_generator

-FROM debian:bookworm-slim
-
-RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/*
+FROM ghcr.io/matter-labs/zksync-runtime-base:latest

COPY prover/data/keys/ /prover/data/keys/
diff --git a/docker/witness-vector-generator/Dockerfile b/docker/witness-vector-generator/Dockerfile
index e315f670101..b305f89b001 100644
--- a/docker/witness-vector-generator/Dockerfile
+++ b/docker/witness-vector-generator/Dockerfile
@@ -1,15 +1,25 @@
-FROM matterlabs/zksync-build-base:latest as builder
+FROM ghcr.io/matter-labs/zksync-build-base:latest AS builder

ARG DEBIAN_FRONTEND=noninteractive

+ENV ZKSYNC_USE_CUDA_STUBS=true
+
+# set of args for use of sccache
+ARG SCCACHE_GCS_BUCKET=""
+ARG SCCACHE_GCS_SERVICE_ACCOUNT=""
+ARG SCCACHE_GCS_RW_MODE=""
+ARG RUSTC_WRAPPER=""
+
+ENV SCCACHE_GCS_BUCKET=${SCCACHE_GCS_BUCKET}
+ENV SCCACHE_GCS_SERVICE_ACCOUNT=${SCCACHE_GCS_SERVICE_ACCOUNT}
+ENV SCCACHE_GCS_RW_MODE=${SCCACHE_GCS_RW_MODE}
+ENV RUSTC_WRAPPER=${RUSTC_WRAPPER}

WORKDIR /usr/src/zksync
COPY . .

RUN cd prover && cargo build --release --bin zksync_witness_vector_generator

-FROM debian:bookworm-slim
-
-RUN apt-get update && apt-get install -y curl libpq5 ca-certificates && rm -rf /var/lib/apt/lists/*
+FROM ghcr.io/matter-labs/zksync-runtime-base:latest

# copy finalization hints required for witness vector generation
COPY prover/data/keys/ /prover/data/keys/
diff --git a/docs/guides/advanced/05_how_call_works.md b/docs/guides/advanced/05_how_call_works.md
index 7f283cf8e0c..5b9458ddce8 100644
--- a/docs/guides/advanced/05_how_call_works.md
+++ b/docs/guides/advanced/05_how_call_works.md
@@ -110,10 +110,10 @@ In this article, we covered the 'life of a call' from the RPC to the inner worki
https://github.com/matter-labs/zksync-era/blob/edd48fc37bdd58f9f9d85e27d684c01ef2cac8ae/core/bin/zksync_core/src/api_server/web3/backend_jsonrpc/namespaces/eth.rs
'namespaces RPC api'
[namespaces_rpc_impl]:
- https://github.com/matter-labs/zksync-era/blob/main/core/lib/zksync_core/src/api_server/web3/namespaces/eth.rs#L94
+ https://github.com/matter-labs/zksync-era/blob/main/core/node/api_server/src/web3/namespaces/eth.rs
'namespaces RPC implementation'
[execution_sandbox]:
- https://github.com/matter-labs/zksync-era/blob/main/core/lib/zksync_core/src/api_server/execution_sandbox/execute.rs
+ https://github.com/matter-labs/zksync-era/blob/main/core/node/api_server/src/execution_sandbox/execute.rs
'execution sandbox'
[vm_code]:
https://github.com/matter-labs/zksync-era/blob/ccd13ce88ff52c3135d794c6f92bec3b16f2210f/core/lib/multivm/src/versions/vm_latest/implementation/execution.rs#L108
diff --git a/docs/guides/build-docker.md b/docs/guides/build-docker.md
new file mode 100644
index 00000000000..6b0608275d8
--- /dev/null
+++ b/docs/guides/build-docker.md
@@ -0,0 +1,49 @@
+# Build docker images
+
+This document explains how to build Docker images from the source code, instead of using the prebuilt ones we
+distribute.
+
+## Prerequisites
+
+Install prerequisites: see
+[Installing dependencies](./setup-dev.md)
+
+## Build docker files
+
+You may build all images with the [Makefile](../../docker/Makefile) located in the [docker](../../docker) directory of
+this repository.
+
+> All commands should be run from the root directory of the repository.
+
+```shell
+make -C ./docker build-all
+```
+
+You will get these images:
+
+```shell
+contract-verifier:2.0
+server-v2:2.0
+prover:2.0
+witness-generator:2.0
+external-node:2.0
+```
+
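+The Dockerfiles used by these targets also accept optional build args for `sccache` (`SCCACHE_GCS_BUCKET`,
+`SCCACHE_GCS_SERVICE_ACCOUNT`, `SCCACHE_GCS_RW_MODE` and `RUSTC_WRAPPER`). As a minimal sketch, assuming a GCS bucket
+and service account you control (the values below are placeholders), a direct build of a single image with caching
+enabled might look like:
+
+```shell
+# Placeholder bucket and service account; substitute your own
+docker build -f ./docker/server-v2/Dockerfile -t server-v2:2.0 \
+  --build-arg SCCACHE_GCS_BUCKET=my-sccache-bucket \
+  --build-arg SCCACHE_GCS_SERVICE_ACCOUNT=sccache@my-project.iam.gserviceaccount.com \
+  --build-arg SCCACHE_GCS_RW_MODE=READ_WRITE \
+  --build-arg RUSTC_WRAPPER=sccache \
+  .
+```
+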
+Alternatively, you may build only the needed components; the available targets are:
+
+```shell
+make -C ./docker build-contract-verifier
+make -C ./docker build-server-v2
+make -C ./docker build-circuit-prover-gpu
+make -C ./docker build-witness-generator
+make -C ./docker build-external-node
+```
+
+## Building updated images
+
+Simply run:
+
+```shell
+make -C ./docker clean-all
+make -C ./docker build-all
+```
diff --git a/docs/guides/external-node/00_quick_start.md b/docs/guides/external-node/00_quick_start.md
index 3902fdc1556..67a1b89eef5 100644
--- a/docs/guides/external-node/00_quick_start.md
+++ b/docs/guides/external-node/00_quick_start.md
@@ -10,32 +10,31 @@ To start a mainnet instance, run:

```sh
cd docker-compose-examples
-docker compose --file mainnet-external-node-docker-compose.yml up
+sudo docker compose --file mainnet-external-node-docker-compose.yml up
```

To reset its state, run:

```sh
cd docker-compose-examples
-docker compose --file mainnet-external-node-docker-compose.yml down --volumes
+sudo docker compose --file mainnet-external-node-docker-compose.yml down --volumes
```

To start a testnet instance, run:

```sh
cd docker-compose-examples
-docker compose --file testnet-external-node-docker-compose.yml up
+sudo docker compose --file testnet-external-node-docker-compose.yml up
```

To reset its state, run:

```sh
cd docker-compose-examples
-docker compose --file testnet-external-node-docker-compose.yml down --volumes
+sudo docker compose --file testnet-external-node-docker-compose.yml down --volumes
```

-You can see the status of the node (after recovery) in
-[local grafana dashboard](http://localhost:3000/d/0/external-node).
+You can see the status of the node (after recovery) in the [local Grafana dashboard](http://localhost:3000/dashboards).

Those commands start ZKsync node locally inside docker.

@@ -45,8 +44,6 @@ The HTTP JSON-RPC API can be accessed on port `3060` and WebSocket API can be ac
>
> The node will recover from a snapshot on it's first run, this may take up to 10h. Before the recovery is finished, the
> API server won't serve any requests.
-
-> [!NOTE]
>
> If you need access to historical transaction data, please use recovery from DB dumps (see Advanced setup section)

@@ -54,24 +51,18 @@ The HTTP JSON-RPC API can be accessed on port `3060` and WebSocket API can be ac

> [!NOTE]
>
-> This configuration is only for nodes that use snapshots recovery (the default for docker-compose setup), for
-> requirements for nodes running from DB dump see
-> [03_running.md](https://github.com/matter-labs/zksync-era/blob/main/docs/guides/external-node/03_running.md). DB dumps
-> are a way to start ZKsync node with full historical transactions history
-
-> [!NOTE]
+> Those are requirements for nodes that use snapshot recovery and history pruning (the default for the docker-compose
+> setup).
>
-> Those are requirements for a freshly started node and the the state grows about 1TB per month for mainnet
-
-> [!NOTE]
+> For requirements for nodes running from a DB dump, see the [running](03_running.md) section. DB dumps are a way to
+> start a ZKsync node with the full historical transaction history.
>
-> To stop state growth, you can enable state pruning by uncommenting `EN_PRUNING_ENABLED: true` in docker compose file,
-> you can read more about pruning in
-> [08_pruning.md](https://github.com/matter-labs/zksync-era/blob/main/docs/guides/external-node/08_pruning.md)
+> For nodes with pruning disabled, expect the storage requirements on mainnet to grow at 1TB per month.
If you want to
+> stop historical DB pruning, you can read more about this in the [pruning](08_pruning.md) section.

- 32 GB of RAM and a relatively modern CPU
-- 30 GB of storage for testnet nodes
-- 300 GB of storage for mainnet nodes
+- 50 GB of storage for testnet nodes
+- 500 GB of storage for mainnet nodes
- 100 Mbps connection (1 Gbps+ recommended)

## Advanced setup
diff --git a/docs/guides/external-node/01_intro.md b/docs/guides/external-node/01_intro.md
index c9d01d9a87f..10fc55acac2 100644
--- a/docs/guides/external-node/01_intro.md
+++ b/docs/guides/external-node/01_intro.md
@@ -5,28 +5,27 @@ This documentation explains the basics of the ZKsync Node.

## Disclaimers

- The ZKsync node is in the alpha phase, and should be used with caution.
-- The ZKsync node is a read-only replica of the main node. We are currently working on decentralizing our infrastructure
- by creating a consensus node. The ZKsync node is not going to be the consensus node.
+- The ZKsync node is a read-only replica of the main node.

## What is the ZKsync node

The ZKsync node is a read-replica of the main (centralized) node that can be run by external parties. It functions by
-fetching data from the ZKsync API and re-applying transactions locally, starting from the genesis block. The ZKsync node
-shares most of its codebase with the main node. Consequently, when it re-applies transactions, it does so exactly as the
-main node did in the past.
+receiving blocks from the ZKsync network and re-applying transactions locally, starting from the genesis block. The
+ZKsync node shares most of its codebase with the main node. Consequently, when it re-applies transactions, it does so
+exactly as the main node did in the past.

**It has two modes of initialization:**

- recovery from a DB dump, in Ethereum terms this corresponds to archival node
- recovery from a snapshot, in Ethereum terms this corresponds to light node, such nodes will only have access to
- transactions data from after the node was initialized. The database is currently not pruned on such nodes.
+ transaction data from after the node was initialized. The database can be pruned on such nodes.

## High-level overview

At a high level, the ZKsync node can be seen as an application that has the following modules:

- API server that provides the publicly available Web3 interface.
-- Synchronization layer that interacts with the main node and retrieves transactions and blocks to re-execute.
+- Consensus layer that interacts with the peer network and retrieves transactions and blocks to re-execute.
- Sequencer component that actually executes and persists transactions received from the synchronization layer.
- Several checker modules that ensure the consistency of the ZKsync node state.

@@ -44,7 +43,7 @@ With the EN, you _can not_:

- Generate proofs.
- Submit data to L1.

-A more detailed overview of the EN's components is provided in the [components](./06_components.md) section.
+A more detailed overview of the EN's components is provided in the [components](06_components.md) section.
## API overview
diff --git a/docs/guides/external-node/03_running.md b/docs/guides/external-node/03_running.md
index 5789c34cdaa..caa528238ae 100644
--- a/docs/guides/external-node/03_running.md
+++ b/docs/guides/external-node/03_running.md
@@ -2,10 +2,10 @@

> [!NOTE]
>
-> If you want to just run node with recommended default setting, please see directory docker-compose-examples
+> If you want to just run the node with the recommended default settings, please see the [quick start](00_quick_start.md) page.

This section assumes that you have prepared a configuration file as described on the
-[previous page](./02_configuration.md).
+[previous page](02_configuration.md).

## System Requirements for nodes started from DB dumps

@@ -80,4 +80,4 @@ If you've been running the ZKsync node for some time and are going to redeploy i
- Start the EN

Monitoring the node behavior and analyzing the state it's in is covered in the
-[observability section](./04_observability.md).
+[observability section](04_observability.md).
diff --git a/docs/guides/external-node/04_observability.md b/docs/guides/external-node/04_observability.md
index 538c1130b62..05b39b74c5d 100644
--- a/docs/guides/external-node/04_observability.md
+++ b/docs/guides/external-node/04_observability.md
@@ -1,7 +1,7 @@
# ZKsync node Observability

The ZKsync node provides several options for setting up observability. Configuring logs and sentry is described in the
-[configuration](./02_configuration.md) section, so this section focuses on the exposed metrics.
+[configuration](02_configuration.md) section, so this section focuses on the exposed metrics.

This section is written with the assumption that you're familiar with
[Prometheus](https://prometheus.io/docs/introduction/overview/) and [Grafana](https://grafana.com/docs/).

@@ -38,17 +38,5 @@ memory leaking.

| `api_web3_call` | Histogram | `method` | Duration of Web3 API calls |
| `sql_connection_acquire` | Histogram | - | Time to get an SQL connection from the connection pool |

-## Interpretation
-
-After applying a dump, the ZKsync node has to rebuild the Merkle tree to verify the correctness of the state in
-PostgreSQL. During this stage, `server_block_number { stage='tree_lightweight_mode' }` is increasing from 0 to
-`server_block_number { stage='sealed' }`, while the latter does not increase (ZKsync node needs the tree to be
-up-to-date to progress).
-
-After that, the ZKsync node has to sync with the main node. `server_block_number { stage='sealed' }` is increasing, and
-`external_node_sync_lag` is decreasing.
-
-Once the node is synchronized, it is indicated by the `external_node_synced`.
-
Metrics can be used to detect anomalies in configuration, which is described in more detail in the
-[next section](./05_troubleshooting.md).
+[next section](05_troubleshooting.md).
diff --git a/docs/guides/external-node/07_snapshots_recovery.md b/docs/guides/external-node/07_snapshots_recovery.md
index dfbc7a5366c..0053717af06 100644
--- a/docs/guides/external-node/07_snapshots_recovery.md
+++ b/docs/guides/external-node/07_snapshots_recovery.md
@@ -2,8 +2,8 @@

Instead of initializing a node using a Postgres dump, it's possible to configure a node to recover from a protocol-level
snapshot. This process is much faster and requires much less storage. Postgres database of a mainnet node recovered from
-a snapshot is only about 300GB. Note that without [pruning](08_pruning.md) enabled, the node state will continuously
-grow at a rate about 15GB per day.
+a snapshot is less than 500GB.
Note that without [pruning](08_pruning.md) enabled, the node state will continuously grow
+at a rate of about 15GB per day.

@@ -94,8 +94,6 @@ An example of snapshot recovery logs during the first node start:

Recovery logic also exports some metrics, the main of which are as follows:

-| Metric name | Type | Labels | Description |
-| ------------------------------------------------------- | --------- | ------------ | --------------------------------------------------------------------- |
-| `snapshots_applier_storage_logs_chunks_left_to_process` | Gauge | - | Number of storage log chunks left to process during Postgres recovery |
-| `db_pruner_pruning_chunk_duration_seconds` | Histogram | `prune_type` | Latency of a single pruning iteration |
-| `merkle_tree_pruning_deleted_stale_key_versions` | Gauge | `bound` | Versions (= L1 batches) pruned from the Merkle tree |
+| Metric name | Type | Labels | Description |
+| ------------------------------------------------------- | ----- | ------ | --------------------------------------------------------------------- |
+| `snapshots_applier_storage_logs_chunks_left_to_process` | Gauge | - | Number of storage log chunks left to process during Postgres recovery |
diff --git a/docs/guides/external-node/08_pruning.md b/docs/guides/external-node/08_pruning.md
index 83c127f3826..7f7dfc34d4a 100644
--- a/docs/guides/external-node/08_pruning.md
+++ b/docs/guides/external-node/08_pruning.md
@@ -48,7 +48,7 @@ of the first block in the batch). You can configure the retention period using:

EN_PRUNING_DATA_RETENTION_SEC: '259200' # 3 days
```

-The retention period can be set to any value, but for mainnet values under 21h will be ignored because a batch can only
+The retention period can be set to any value, but for mainnet, values under 24h will be ignored because a batch can only
be pruned after it has been executed on Ethereum.

Pruning can be disabled or enabled and the data retention period can be freely changed during the node lifetime.
diff --git a/docs/guides/external-node/09_decentralization.md b/docs/guides/external-node/09_decentralization.md
deleted file mode 100644
index caf93a85a92..00000000000
--- a/docs/guides/external-node/09_decentralization.md
+++ /dev/null
@@ -1,14 +0,0 @@
-# Decentralization
-
-In the default setup, the ZKsync node will fetch data from the ZKsync API endpoint maintained by Matter Labs. To reduce
-the reliance on this centralized endpoint we have developed a decentralized p2p networking stack (aka gossipnet) which
-will eventually be used instead of ZKsync API for synchronizing data.
-
-On the gossipnet, the data integrity will be protected by the BFT (byzantine fault-tolerant) consensus algorithm
-(currently data is signed just by the main node though).
-
-### Add `--enable-consensus` flag to your entry point command
-
-For the consensus configuration to take effect you have to add `--enable-consensus` flag when running the node. You can
-do that by editing the docker compose files (mainnet-external-node-docker-compose.yml or
-testnet-external-node-docker-compose.yml) and uncommenting the line with `--enable-consensus`.
diff --git a/docs/guides/external-node/10_treeless_mode.md b/docs/guides/external-node/09_treeless_mode.md similarity index 100% rename from docs/guides/external-node/10_treeless_mode.md rename to docs/guides/external-node/09_treeless_mode.md diff --git a/docs/guides/external-node/building-from-scratch/Dockerfile b/docs/guides/external-node/building-from-scratch/Dockerfile deleted file mode 100644 index 5b015a4545b..00000000000 --- a/docs/guides/external-node/building-from-scratch/Dockerfile +++ /dev/null @@ -1,27 +0,0 @@ -FROM matterlabs/zk-environment:latest2.0-lightweight - -RUN git clone https://github.com/matter-labs/zksync-era - -WORKDIR /usr/src/zksync/zksync-era - -# core 24.16.0 (#2608), see: https://github.com/matter-labs/zksync-era/releases -RUN git reset --hard 1ac52c5 - -ENV ZKSYNC_HOME=/usr/src/zksync/zksync-era -ENV PATH="${ZKSYNC_HOME}/bin:${PATH}" - -# build zk tool -RUN zkt - -# build rust -RUN cargo build --release -RUN cp target/release/zksync_external_node /usr/bin - -# build contracts -RUN git submodule update --init --recursive -RUN zk_supervisor contracts - -# copy migrations (node expects them to be in specific directory) -RUN cp -r core/lib/dal/migrations/ migrations - -ENTRYPOINT [ "sh", "docker/external-node/entrypoint.sh"] diff --git a/docs/guides/external-node/docker-compose-examples/configs/mainnet_consensus_config.yaml b/docs/guides/external-node/docker-compose-examples/configs/mainnet_consensus_config.yaml index 01c9d323a93..c2bef23b2e4 100644 --- a/docs/guides/external-node/docker-compose-examples/configs/mainnet_consensus_config.yaml +++ b/docs/guides/external-node/docker-compose-examples/configs/mainnet_consensus_config.yaml @@ -1,5 +1,6 @@ server_addr: '0.0.0.0:3054' public_addr: '127.0.0.1:3054' +debug_page_addr: '127.0.0.1:5000' max_payload_size: 5000000 gossip_dynamic_inbound_limit: 100 gossip_static_outbound: diff --git a/docs/guides/external-node/docker-compose-examples/configs/testnet_consensus_config.yaml b/docs/guides/external-node/docker-compose-examples/configs/testnet_consensus_config.yaml index cfcc6b9d43e..7a82705990c 100644 --- a/docs/guides/external-node/docker-compose-examples/configs/testnet_consensus_config.yaml +++ b/docs/guides/external-node/docker-compose-examples/configs/testnet_consensus_config.yaml @@ -1,5 +1,6 @@ server_addr: '0.0.0.0:3054' public_addr: '127.0.0.1:3054' +debug_page_addr: '127.0.0.1:5000' max_payload_size: 5000000 gossip_dynamic_inbound_limit: 100 gossip_static_outbound: diff --git a/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/Consensus.json b/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/Consensus.json new file mode 100644 index 00000000000..be869ead40b --- /dev/null +++ b/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/Consensus.json @@ -0,0 +1,1207 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "datasource", + "uid": "grafana" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 0, + "id": 3, + "links": [], + "liveNow": false, + "panels": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "description": "Number of the latest L2 block persisted in this node's database.", + "fieldConfig": 
{ + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 0 + }, + "id": 16, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "builder", + "expr": "zksync_consensus_storage_block_store_next_persisted_block - 1", + "legendFormat": "Number", + "range": true, + "refId": "A" + } + ], + "title": "L2 block number", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "description": "Number of the latest L1 batch persisted in this node's database.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 0 + }, + "id": 35, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "builder", + "expr": "zksync_consensus_storage_batch_store_next_persisted_batch - 1", + "legendFormat": "Number", + "range": true, + "refId": "A" + } + ], + "title": "L1 batch number", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "description": "Number of L2 blocks persisted in this node's database per minute.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + 
"scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 8 + }, + "id": 17, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "builder", + "expr": "rate(zksync_consensus_storage_block_store_next_persisted_block[1m])", + "legendFormat": "Rate", + "range": true, + "refId": "A" + } + ], + "title": "L2 block rate (blocks/min)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "description": "Number of L1 batches persisted in this node's database per hour.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 8 + }, + "id": 36, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "builder", + "expr": "rate(zksync_consensus_storage_batch_store_next_persisted_batch[1h])", + "legendFormat": "Rate", + "range": true, + "refId": "A" + } + ], + "title": "L1 batch rate (batches/h)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "description": "How many inbound and outbound connections we have to peers in the gossip network", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 0, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + 
"overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 16 + }, + "id": 14, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "builder", + "expr": "network_gossip_inbound_connections", + "hide": false, + "interval": "", + "legendFormat": "Inbound", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "builder", + "expr": "network_gossip_outbound_connections", + "interval": "", + "legendFormat": "Outbound", + "range": true, + "refId": "B" + } + ], + "title": "Peer connections", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "description": "Display the number of the last sealed batch and of the last attested batch. Attested batch are sealed batches that have been signed by a quorum of attesters.\nIt also displays the last batch number for which we have at least one vote. Note that attesters might be malicious and provide votes for batches far in the future, so this metric might provide misleading results. ", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 0, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 16 + }, + "id": 29, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "builder", + "expr": "zksync_consensus_storage_batch_store_last_persisted_batch_qc", + "legendFormat": "Attested", + "range": true, + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "builder", + "expr": "zksync_consensus_storage_batch_store_next_persisted_batch-1", + "hide": false, + "legendFormat": "Sealed", + "range": true, + "refId": "B" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "builder", + "expr": "network_gossip_batch_votes_last_added_vote_batch_number", + "hide": false, + "legendFormat": "Voted", + "range": true, + "refId": "C" + } + ], + "title": "L1 batch attestation", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "description": "Data received over time.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": 
"", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "Bps" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 24 + }, + "id": 26, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "builder", + "expr": "rate(network_tcp_received_bytes[1m])", + "legendFormat": "Download", + "range": true, + "refId": "A" + } + ], + "title": "Download speed", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "description": "Data sent over time.", + "fieldConfig": { + "defaults": { + "color": { + "fixedColor": "yellow", + "mode": "fixed" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "Bps" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 24 + }, + "id": 27, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "9.3.6", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "builder", + "expr": "rate(network_tcp_sent_bytes[1m])", + "legendFormat": "Upload", + "range": true, + "refId": "A" + } + ], + "title": "Upload speed", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "description": "Latency of RPC client requests, in seconds.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + 
"mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 32 + }, + "id": 19, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "builder", + "expr": "sum by(method) (increase(network_rpc_latency_seconds_sum{type=\"client_send_recv\"}[10m])) / sum by(method) (increase(network_rpc_latency_seconds_count{type=\"client_send_recv\"}[10m]))", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "RPC latency client", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "description": "Latency of RPC server responses, in seconds.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 32 + }, + "id": 20, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "builder", + "expr": "sum by(method) (increase(network_rpc_latency_seconds_sum{type=\"server_recv_send\"}[$__rate_interval])) / sum by(method) (increase(network_rpc_latency_seconds_count{type=\"server_recv_send\"}[$__rate_interval]))", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "RPC latency server", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "description": "Rate of RPC client requests, in packets per second.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "pps" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + 
"w": 12, + "x": 0, + "y": 40 + }, + "id": 21, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "builder", + "expr": "sum by(method) (increase(network_rpc_latency_seconds_count{type=\"client_send_recv\"}[10m]))", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "RPC rate client", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "description": "Rate of RPC server responses, in packets per second.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green" + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "pps" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 40 + }, + "id": 22, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "builder", + "expr": "sum by(method) (increase(network_rpc_latency_seconds_count{type=\"server_recv_send\"}[10m]))", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "RPC rate server", + "type": "timeseries" + } + ], + "refresh": "5s", + "schemaVersion": 37, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-6h", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "Consensus", + "uid": "STAAEORNk", + "version": 4, + "weekStart": "" +} \ No newline at end of file diff --git a/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/General.json b/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/General.json index 0f4bedebf08..d7177ae802e 100644 --- a/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/General.json +++ b/docs/guides/external-node/docker-compose-examples/grafana/provisioning/dashboards/General.json @@ -24,7 +24,7 @@ "editable": true, "fiscalYearStartMonth": 0, "graphTooltip": 0, - "id": 1, + "id": 2, "links": [], "liveNow": false, "panels": [ @@ -33,6 +33,7 @@ "type": "prometheus", "uid": "PBFA97CFB590B2093" }, + "description": "Protocol version that this node is running.", "fieldConfig": { "defaults": { "color": { @@ -56,11 +57,151 @@ "overrides": [] }, "gridPos": { - "h": 9, + "h": 8, "w": 12, "x": 0, "y": 0 }, + "id": 8, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": 
true + }, + "pluginVersion": "9.3.6", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "builder", + "expr": "external_node_protocol_version", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Protocol version", + "type": "gauge" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + } + }, + "mappings": [], + "unit": "bytes" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 0 + }, + "id": 2, + "options": { + "legend": { + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "pieType": "pie", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "9.3.6", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "exemplar": true, + "expr": "sum by (job) (rocksdb_total_sst_size)", + "interval": "", + "legendFormat": "RocksDB", + "refId": "A" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "exemplar": true, + "expr": "sum by (job) (postgres_table_total_size_bytes)", + "hide": false, + "interval": "", + "legendFormat": "PostgresSQL", + "refId": "B" + } + ], + "title": "Total disk space usage", + "type": "piechart" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 8 + }, "id": 6, "options": { "colorMode": "value", @@ -83,10 +224,12 @@ "type": "prometheus", "uid": "PBFA97CFB590B2093" }, + "editorMode": "builder", "exemplar": true, - "expr": "sum by (stage) (rate(server_processed_txs{stage=~\"mempool_added\"}[$__rate_interval]))", + "expr": "sum by(stage) (rate(server_processed_txs{stage=\"state_keeper\"}[10m]))", "interval": "", "legendFormat": "", + "range": true, "refId": "A" } ], @@ -94,6 +237,75 @@ "transformations": [], "type": "stat" }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "description": "Priority transactions (aka L1 transactions) per second", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 8 + }, + "id": 37, + "options": { + "colorMode": "value", + "graphMode": "area", + "justifyMode": "center", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "textMode": "auto" + }, + "pluginVersion": "9.3.6", + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "builder", + "exemplar": true, + "expr": "sum by(stage) (rate(server_processed_l1_txs{stage=\"state_keeper\"}[10m]))", + "interval": "", + "legendFormat": "", + "range": true, + "refId": "A" + } + ], + "title": "Priority 
TPS", + "transformations": [], + "type": "stat" + }, { "datasource": { "type": "prometheus", @@ -155,10 +367,10 @@ "overrides": [] }, "gridPos": { - "h": 9, + "h": 8, "w": 12, - "x": 12, - "y": 0 + "x": 0, + "y": 16 }, "id": 5, "options": { @@ -179,10 +391,12 @@ "type": "prometheus", "uid": "PBFA97CFB590B2093" }, + "editorMode": "code", "exemplar": true, "expr": "sum by (stage) (external_node_fetcher_l1_batch)", "interval": "", - "legendFormat": "", + "legendFormat": "{{label_name}}", + "range": true, "refId": "A" } ], @@ -251,10 +465,10 @@ "overrides": [] }, "gridPos": { - "h": 9, + "h": 8, "w": 12, - "x": 0, - "y": 9 + "x": 12, + "y": 16 }, "id": 4, "options": { @@ -291,6 +505,7 @@ "type": "prometheus", "uid": "PBFA97CFB590B2093" }, + "description": "The number of transactions that are being reverted or that are succeeding.", "fieldConfig": { "defaults": { "color": { @@ -303,7 +518,7 @@ "axisPlacement": "auto", "barAlignment": 0, "drawStyle": "line", - "fillOpacity": 30, + "fillOpacity": 0, "gradientMode": "none", "hideFrom": { "legend": false, @@ -311,16 +526,16 @@ "viz": false }, "lineInterpolation": "linear", - "lineWidth": 1, + "lineWidth": 2, "pointSize": 5, "scaleDistribution": { "type": "linear" }, "showPoints": "auto", - "spanNulls": true, + "spanNulls": false, "stacking": { "group": "A", - "mode": "normal" + "mode": "none" }, "thresholdsStyle": { "mode": "off" @@ -339,24 +554,116 @@ "value": 80 } ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 24 + }, + "id": 31, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" }, - "unit": "bytes" + "editorMode": "builder", + "expr": "sum by(status) (increase(server_state_keeper_tx_execution_result[1h]))", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Transactions execution status", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "description": "The percentage of transactions that are being reverted or that are succeeding.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "bars", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 2, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "percent" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } }, "overrides": [] }, "gridPos": { - "h": 9, + "h": 8, "w": 12, "x": 12, - "y": 9 + "y": 24 }, - "id": 2, + "id": 38, "options": { "legend": { "calcs": [], "displayMode": "list", "placement": "bottom", - "showLegend": false + "showLegend": true }, "tooltip": { "mode": "single", @@ -369,26 +676,198 @@ "type": "prometheus", "uid": "PBFA97CFB590B2093" }, - "exemplar": true, - "expr": "sum by (job) (rocksdb_total_sst_size)", - "interval": "", - "legendFormat": "RocksDB", + "editorMode": "builder", 
+ "expr": "sum by(status) (increase(server_state_keeper_tx_execution_result[1h]))", + "legendFormat": "__auto", + "range": true, + "refId": "A" + } + ], + "title": "Transactions execution status (%)", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 32 + }, + "id": 33, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "editorMode": "builder", + "expr": "increase(server_state_keeper_miniblock_transactions_in_miniblock_sum[5m]) / increase(server_state_keeper_miniblock_transactions_in_miniblock_count[5m])", + "legendFormat": "Number", + "range": true, "refId": "A" + } + ], + "title": "Avg number of transactions in L2 block", + "type": "timeseries" + }, + { + "datasource": { + "type": "prometheus", + "uid": "PBFA97CFB590B2093" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 32 + }, + "id": 34, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom", + "showLegend": true }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "targets": [ { "datasource": { "type": "prometheus", "uid": "PBFA97CFB590B2093" }, - "exemplar": true, - "expr": "sum by (job) (postgres_table_total_size_bytes)", - "hide": false, - "interval": "", - "legendFormat": "PostgresSQL", - "refId": "B" + "editorMode": "builder", + "expr": "increase(server_state_keeper_l1_batch_transactions_in_l1_batch_sum[1h]) / increase(server_state_keeper_l1_batch_transactions_in_l1_batch_count[1h])", + "legendFormat": "Number", + "range": true, + "refId": "A" } ], - "title": "Total disk space usage", + "title": "Avg number of transactions in L1 
batch", "type": "timeseries" } ], @@ -400,13 +879,13 @@ "list": [] }, "time": { - "from": "now-5m", + "from": "now-6h", "to": "now" }, "timepicker": {}, "timezone": "", - "title": "External Node", - "uid": "0", - "version": 1, + "title": "General", + "uid": "1", + "version": 9, "weekStart": "" -} +} \ No newline at end of file diff --git a/docs/guides/external-node/docker-compose-examples/mainnet-external-node-docker-compose.yml b/docs/guides/external-node/docker-compose-examples/mainnet-external-node-docker-compose.yml index 64bef02b17a..9c8c5bb3142 100644 --- a/docs/guides/external-node/docker-compose-examples/mainnet-external-node-docker-compose.yml +++ b/docs/guides/external-node/docker-compose-examples/mainnet-external-node-docker-compose.yml @@ -65,8 +65,7 @@ services: entrypoint: [ "/usr/bin/entrypoint.sh", - # Uncomment the following line to enable consensus - # "--enable-consensus", + "--enable-consensus", ] restart: always depends_on: @@ -79,6 +78,7 @@ services: - "127.0.0.1:3060:3060" - "127.0.0.1:3061:3061" - "127.0.0.1:3081:3081" + - "127.0.0.1:5000:5000" volumes: - rocksdb:/db - ./configs:/configs @@ -96,7 +96,7 @@ services: EN_MAIN_NODE_URL: https://zksync2-mainnet.zksync.io EN_L1_CHAIN_ID: 1 EN_L2_CHAIN_ID: 324 - # EN_PRUNING_ENABLED: true + EN_PRUNING_ENABLED: true EN_STATE_CACHE_PATH: "./db/ext-node/state_keeper" EN_MERKLE_TREE_PATH: "./db/ext-node/lightweight" diff --git a/docs/guides/external-node/docker-compose-examples/testnet-external-node-docker-compose.yml b/docs/guides/external-node/docker-compose-examples/testnet-external-node-docker-compose.yml index f865f500c5b..42c5861b79d 100644 --- a/docs/guides/external-node/docker-compose-examples/testnet-external-node-docker-compose.yml +++ b/docs/guides/external-node/docker-compose-examples/testnet-external-node-docker-compose.yml @@ -65,8 +65,7 @@ services: entrypoint: [ "/usr/bin/entrypoint.sh", - # Uncomment the following line to enable consensus - # "--enable-consensus", + "--enable-consensus", ] restart: always depends_on: @@ -79,6 +78,7 @@ services: - "127.0.0.1:3060:3060" - "127.0.0.1:3061:3061" - "127.0.0.1:3081:3081" + - "127.0.0.1:5000:5000" volumes: - rocksdb:/db - ./configs:/configs @@ -96,7 +96,7 @@ services: EN_MAIN_NODE_URL: https://sepolia.era.zksync.dev EN_L1_CHAIN_ID: 11155111 EN_L2_CHAIN_ID: 300 - # EN_PRUNING_ENABLED: true + EN_PRUNING_ENABLED: true EN_STATE_CACHE_PATH: "./db/ext-node/state_keeper" EN_MERKLE_TREE_PATH: "./db/ext-node/lightweight" diff --git a/docs/guides/setup-dev.md b/docs/guides/setup-dev.md index 10eb329628c..4eef211cd3d 100644 --- a/docs/guides/setup-dev.md +++ b/docs/guides/setup-dev.md @@ -48,6 +48,10 @@ cargo install sqlx-cli --version 0.8.1 # Foundry curl -L https://foundry.paradigm.xyz | bash foundryup --branch master + +# Non CUDA (GPU) setup, can be skipped if the machine has a CUDA installed for provers +# Don't do that if you intend to run provers on your machine. Check the prover docs for a setup instead. +echo "export ZKSYNC_USE_CUDA_STUBS=true" >> ~/.bashrc # You will need to reload your `*rc` file here # Clone the repo to the desired location @@ -237,6 +241,31 @@ Go to the zksync folder and run `nix develop`. After it finishes, you are in a s [Foundry](https://book.getfoundry.sh/getting-started/installation) can be utilized for deploying smart contracts. For commands related to deployment, you can pass flags for Foundry integration. +## Non-GPU setup + +Circuit Prover requires a CUDA bindings to run. 
If you still want to be able to build everything locally on a non-CUDA +setup, you'll need to use CUDA stubs. + +For a single run, it's enough to export the variable in your shell: + +``` +export ZKSYNC_USE_CUDA_STUBS=true +``` + +For persistent runs, you can append it to your ~/.rc file: + +``` +echo "export ZKSYNC_USE_CUDA_STUBS=true" >> ~/.rc +``` + +Note that the same can be achieved with RUSTFLAGS (discouraged). The flag is `--cfg=no_cuda`. You can either set +RUSTFLAGS as an env var, or pass it in `config.toml` (either project level or global). The config would need the following: + +```toml +[build] +rustflags = ["--cfg=no_cuda"] +``` + ## Environment Edit the lines below and add them to your shell profile file (e.g. `~/.bash_profile`, `~/.zshrc`): diff --git a/eigenda-integration.md b/eigenda-integration.md index 638719e2b4d..7291971b304 100644 --- a/eigenda-integration.md +++ b/eigenda-integration.md @@ -96,7 +96,8 @@ Modify `etc/env/file_based/wallets.yaml` and `configs/wallets.yaml` with the fol # Use your own holesky wallets, be sure they have enough funds ``` -> ⚠️ Some steps distribute ~5000ETH to some wallets, modify `AMOUNT_FOR_DISTRIBUTION_TO_WALLETS` to a lower value if needed. +> ⚠️ Some steps distribute ~5000ETH to some wallets; modify `AMOUNT_FOR_DISTRIBUTION_TO_WALLETS` to a lower value if +> needed. ### EigenProxy RPC @@ -157,7 +158,8 @@ zk_inception server --chain holesky_eigen_da ## Backup and restoration -It's possible to run the zk stack on one computer, and then migrate it to another, this is specially useful for holesky testing. +It's possible to run the zk stack on one computer, and then migrate it to another; this is especially useful for holesky +testing. ### Backup @@ -171,7 +173,8 @@ This will generate a directory inside of `ecosystem_backups` with the name `hole ### Restoration -1. Move the `ecoystem_backups/holesky_eigen_da` directory to the other computer, it should be placed in the root of the project. +1. Move the `ecosystem_backups/holesky_eigen_da` directory to the other computer; it should be placed in the root of the + project. 2. Restore the ecosystem with: diff --git a/etc/env/base/eth_sender.toml b/etc/env/base/eth_sender.toml index 31fe626c87f..ad5709551c4 100644 --- a/etc/env/base/eth_sender.toml +++ b/etc/env/base/eth_sender.toml @@ -55,8 +55,8 @@ default_priority_fee_per_gas = 1_000_000_000 max_base_fee_samples = 10_000 # These two are parameters of the base_fee_per_gas formula in GasAdjuster. # The possible formulas are: -# 1. base_fee_median * (A + B * time_in_mempool) -# 2. base_fee_median * A * B ^ time_in_mempool +# 1. base_fee_median * (A + B * time_in_mempool_in_l1_blocks) +# 2. base_fee_median * A * B ^ time_in_mempool_in_l1_blocks # Currently the second is used.
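# Illustrative arithmetic (B is an assumed value here; only A = 1.5 is set below): with A = 1.5 and B = 1.001, # a transaction that has waited 10 L1 blocks gets base_fee_median * 1.5 * 1.001^10 ≈ 1.515 * base_fee_median.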
# To confirm, see core/bin/zksync_core/src/eth_sender/gas_adjuster/mod.rs pricing_formula_parameter_a = 1.5 diff --git a/etc/env/base/fri_prover_gateway.toml b/etc/env/base/fri_prover_gateway.toml index 8974298a57c..f77b5d59258 100644 --- a/etc/env/base/fri_prover_gateway.toml +++ b/etc/env/base/fri_prover_gateway.toml @@ -1,6 +1,6 @@ [fri_prover_gateway] api_url="http://127.0.0.1:3320" -api_poll_duration_secs=1000 +api_poll_duration_secs=15 prometheus_listener_port=3314 prometheus_pushgateway_url="http://127.0.0.1:9091" prometheus_push_interval_ms=100 diff --git a/etc/env/base/prover_job_monitor.toml b/etc/env/base/prover_job_monitor.toml index 40cdf76b8b1..ce206c74ffd 100644 --- a/etc/env/base/prover_job_monitor.toml +++ b/etc/env/base/prover_job_monitor.toml @@ -13,3 +13,4 @@ proof_compressor_queue_reporter_run_interval_ms = 10000 prover_queue_reporter_run_interval_ms = 10000 witness_generator_queue_reporter_run_interval_ms = 10000 witness_job_queuer_run_interval_ms = 10000 +http_port = 3074 diff --git a/etc/env/base/rust.toml b/etc/env/base/rust.toml index 1bb69374ab1..d8bef020c64 100644 --- a/etc/env/base/rust.toml +++ b/etc/env/base/rust.toml @@ -7,7 +7,7 @@ RUST_LOG="""\ zksync_node_framework=info,\ zksync_block_reverter=info,\ -zksync_commitment_generator=info,\ +zksync_commitment_generator=debug,\ zksync_node_db_pruner=info,\ zksync_eth_sender=info,\ zksync_node_fee_model=info,\ diff --git a/etc/env/configs/ext-node.toml b/etc/env/configs/ext-node.toml index b2f74006559..a5eb22db5ec 100644 --- a/etc/env/configs/ext-node.toml +++ b/etc/env/configs/ext-node.toml @@ -63,7 +63,7 @@ zksync_node_consensus=info,\ zksync_consensus_bft=info,\ zksync_consensus_network=info,\ zksync_consensus_storage=info,\ -zksync_commitment_generator=info,\ +zksync_commitment_generator=debug,\ zksync_core=debug,\ zksync_dal=info,\ zksync_db_connection=info,\ diff --git a/etc/env/file_based/general.yaml b/etc/env/file_based/general.yaml index fad9469e90d..8f1792e8907 100644 --- a/etc/env/file_based/general.yaml +++ b/etc/env/file_based/general.yaml @@ -80,7 +80,7 @@ operations_manager: contract_verifier: compilation_timeout: 240 polling_interval: 1000 - prometheus_port: 3314 + prometheus_port: 3318 port: 3070 url: http://127.0.0.1:3070 threads_per_server: 128 @@ -172,7 +172,7 @@ data_handler: tee_support: true prover_gateway: api_url: http://127.0.0.1:3320 - api_poll_duration_secs: 1000 + api_poll_duration_secs: 15 prometheus_listener_port: 3310 prometheus_pushgateway_url: http://127.0.0.1:9091 prometheus_push_interval_ms: 100 @@ -287,6 +287,7 @@ prover_job_monitor: prover_queue_reporter_run_interval_ms: 10000 witness_generator_queue_reporter_run_interval_ms: 10000 witness_job_queuer_run_interval_ms: 10000 + http_port: 3074 base_token_adjuster: @@ -311,7 +312,7 @@ prometheus: observability: log_format: plain - log_directives: 
"zksync_node_test_utils=info,zksync_state_keeper=info,zksync_reorg_detector=info,zksync_consistency_checker=info,zksync_metadata_calculator=info,zksync_node_sync=info,zksync_node_consensus=info,zksync_contract_verification_server=info,zksync_node_api_server=info,zksync_tee_verifier_input_producer=info,zksync_node_framework=info,zksync_block_reverter=info,zksync_commitment_generator=info,zksync_node_db_pruner=info,zksync_eth_sender=info,zksync_node_fee_model=info,zksync_node_genesis=info,zksync_house_keeper=info,zksync_proof_data_handler=info,zksync_shared_metrics=info,zksync_node_test_utils=info,zksync_vm_runner=info,zksync_consensus_bft=info,zksync_consensus_network=info,zksync_consensus_storage=info,zksync_core_leftovers=debug,zksync_server=debug,zksync_contract_verifier=debug,zksync_dal=info,zksync_db_connection=info,zksync_eth_client=info,zksync_eth_watch=debug,zksync_storage=info,zksync_db_manager=info,zksync_merkle_tree=info,zksync_state=debug,zksync_utils=debug,zksync_queued_job_processor=info,zksync_types=info,zksync_mempool=debug,loadnext=info,vm=info,zksync_object_store=info,zksync_external_node=info,zksync_witness_generator=info,zksync_prover_fri=info,zksync_witness_vector_generator=info,zksync_web3_decl=debug,zksync_health_check=debug,zksync_proof_fri_compressor=info,vise_exporter=error,snapshots_creator=debug,zksync_base_token_adjuster=debug,zksync_external_price_api=debug,zksync_external_proof_integration_api=info" + log_directives: "zksync_node_test_utils=info,zksync_state_keeper=info,zksync_reorg_detector=info,zksync_consistency_checker=info,zksync_metadata_calculator=info,zksync_node_sync=info,zksync_node_consensus=info,zksync_contract_verification_server=info,zksync_node_api_server=info,zksync_tee_verifier_input_producer=info,zksync_node_framework=info,zksync_block_reverter=info,zksync_commitment_generator=debug,zksync_node_db_pruner=info,zksync_eth_sender=info,zksync_node_fee_model=info,zksync_node_genesis=info,zksync_house_keeper=info,zksync_proof_data_handler=info,zksync_shared_metrics=info,zksync_node_test_utils=info,zksync_vm_runner=info,zksync_consensus_bft=info,zksync_consensus_network=info,zksync_consensus_storage=info,zksync_core_leftovers=debug,zksync_server=debug,zksync_contract_verifier=debug,zksync_dal=info,zksync_db_connection=info,zksync_eth_client=info,zksync_eth_watch=debug,zksync_storage=info,zksync_db_manager=info,zksync_merkle_tree=info,zksync_state=debug,zksync_utils=debug,zksync_queued_job_processor=info,zksync_types=info,zksync_mempool=debug,loadnext=info,vm=info,zksync_object_store=info,zksync_external_node=info,zksync_witness_generator=info,zksync_prover_fri=info,zksync_witness_vector_generator=info,zksync_web3_decl=debug,zksync_health_check=debug,zksync_proof_fri_compressor=info,vise_exporter=error,snapshots_creator=debug,zksync_base_token_adjuster=debug,zksync_external_price_api=debug,zksync_external_proof_integration_api=info" # Uncomment only if needed # sentry: # url: unset diff --git a/etc/utils/src/kill.ts b/etc/utils/src/kill.ts index 7fdab85afad..71b76e71d80 100644 --- a/etc/utils/src/kill.ts +++ b/etc/utils/src/kill.ts @@ -13,7 +13,10 @@ export async function killPidWithAllChilds(pid: number, signalNumber: number) { } // We always run the test using additional tools, that means we have to kill not the main process, but the child process for (let i = childs.length - 1; i >= 0; i--) { - console.log(`kill ${childs[i]}`); - await promisify(exec)(`kill -${signalNumber} ${childs[i]}`); + try { + await promisify(exec)(`kill -${signalNumber} 
${childs[i]}`); + } catch (e) { + console.log(`Failed to kill ${childs[i]} with ${e}`); + } } } diff --git a/flake.nix b/flake.nix index cc14faebfed..ef618816f9c 100644 --- a/flake.nix +++ b/flake.nix @@ -67,7 +67,6 @@ }; craneLib = (crane.mkLib pkgs).overrideToolchain rustVersion; - NIX_OUTPATH_USED_AS_RANDOM_SEED = "aaaaaaaaaa"; commonArgs = { nativeBuildInputs = with pkgs;[ @@ -81,6 +80,8 @@ snappy.dev lz4.dev bzip2.dev + rocksdb + snappy.dev ]; src = with pkgs.lib.fileset; toSource { @@ -97,7 +98,9 @@ env = { OPENSSL_NO_VENDOR = "1"; - inherit NIX_OUTPATH_USED_AS_RANDOM_SEED; + ROCKSDB_LIB_DIR = "${pkgs.rocksdb.out}/lib"; + SNAPPY_LIB_DIR = "${pkgs.snappy.out}/lib"; + NIX_OUTPATH_USED_AS_RANDOM_SEED = "aaaaaaaaaa"; }; doCheck = false; diff --git a/get_all_blobs/getallblobs.js b/get_all_blobs/getallblobs.js index e1336c2d64d..d91a00645a8 100644 --- a/get_all_blobs/getallblobs.js +++ b/get_all_blobs/getallblobs.js @@ -2,236 +2,241 @@ const { Web3 } = require('web3'); const web3 = new Web3('http://127.0.0.1:8545'); const fs = require('fs'); - const abi = [ - { - "inputs": [ - { - "internalType": "uint256", - "name": "_chainId", - "type": "uint256" - }, - { - "components": [ - { - "internalType": "uint64", - "name": "batchNumber", - "type": "uint64" - }, - { - "internalType": "bytes32", - "name": "batchHash", - "type": "bytes32" - }, - { - "internalType": "uint64", - "name": "indexRepeatedStorageChanges", - "type": "uint64" - }, - { - "internalType": "uint256", - "name": "numberOfLayer1Txs", - "type": "uint256" - }, - { - "internalType": "bytes32", - "name": "priorityOperationsHash", - "type": "bytes32" - }, - { - "internalType": "bytes32", - "name": "l2LogsTreeRoot", - "type": "bytes32" - }, - { - "internalType": "uint256", - "name": "timestamp", - "type": "uint256" - }, - { - "internalType": "bytes32", - "name": "commitment", - "type": "bytes32" - } - ], - "internalType": "struct IExecutor.StoredBatchInfo", - "name": "", - "type": "tuple" - }, - { - "components": [ - { - "internalType": "uint64", - "name": "batchNumber", - "type": "uint64" - }, - { - "internalType": "uint64", - "name": "timestamp", - "type": "uint64" - }, - { - "internalType": "uint64", - "name": "indexRepeatedStorageChanges", - "type": "uint64" - }, - { - "internalType": "bytes32", - "name": "newStateRoot", - "type": "bytes32" - }, - { - "internalType": "uint256", - "name": "numberOfLayer1Txs", - "type": "uint256" - }, - { - "internalType": "bytes32", - "name": "priorityOperationsHash", - "type": "bytes32" - }, - { - "internalType": "bytes32", - "name": "bootloaderHeapInitialContentsHash", - "type": "bytes32" - }, - { - "internalType": "bytes32", - "name": "eventsQueueStateHash", - "type": "bytes32" - }, - { - "internalType": "bytes", - "name": "systemLogs", - "type": "bytes" - }, - { - "internalType": "bytes", - "name": "pubdataCommitments", - "type": "bytes" - } + { + inputs: [ + { + internalType: 'uint256', + name: '_chainId', + type: 'uint256' + }, + { + components: [ + { + internalType: 'uint64', + name: 'batchNumber', + type: 'uint64' + }, + { + internalType: 'bytes32', + name: 'batchHash', + type: 'bytes32' + }, + { + internalType: 'uint64', + name: 'indexRepeatedStorageChanges', + type: 'uint64' + }, + { + internalType: 'uint256', + name: 'numberOfLayer1Txs', + type: 'uint256' + }, + { + internalType: 'bytes32', + name: 'priorityOperationsHash', + type: 'bytes32' + }, + { + internalType: 'bytes32', + name: 'l2LogsTreeRoot', + type: 'bytes32' + }, + { + internalType: 'uint256', + name: 'timestamp', + type: 
'uint256' + }, + { + internalType: 'bytes32', + name: 'commitment', + type: 'bytes32' + } + ], + internalType: 'struct IExecutor.StoredBatchInfo', + name: '', + type: 'tuple' + }, + { + components: [ + { + internalType: 'uint64', + name: 'batchNumber', + type: 'uint64' + }, + { + internalType: 'uint64', + name: 'timestamp', + type: 'uint64' + }, + { + internalType: 'uint64', + name: 'indexRepeatedStorageChanges', + type: 'uint64' + }, + { + internalType: 'bytes32', + name: 'newStateRoot', + type: 'bytes32' + }, + { + internalType: 'uint256', + name: 'numberOfLayer1Txs', + type: 'uint256' + }, + { + internalType: 'bytes32', + name: 'priorityOperationsHash', + type: 'bytes32' + }, + { + internalType: 'bytes32', + name: 'bootloaderHeapInitialContentsHash', + type: 'bytes32' + }, + { + internalType: 'bytes32', + name: 'eventsQueueStateHash', + type: 'bytes32' + }, + { + internalType: 'bytes', + name: 'systemLogs', + type: 'bytes' + }, + { + internalType: 'bytes', + name: 'pubdataCommitments', + type: 'bytes' + } + ], + internalType: 'struct IExecutor.CommitBatchInfo[]', + name: '_newBatchesData', + type: 'tuple[]' + } ], - "internalType": "struct IExecutor.CommitBatchInfo[]", - "name": "_newBatchesData", - "type": "tuple[]" - } - ], - "name": "commitBatchesSharedBridge", - "outputs": [], - "stateMutability": "nonpayable", - "type": "function" - } -] + name: 'commitBatchesSharedBridge', + outputs: [], + stateMutability: 'nonpayable', + type: 'function' + } +]; const contract = new web3.eth.Contract(abi); function hexToUtf8(hex) { - // Remove the '0x' prefix if present - if (hex.startsWith('0x')) { - hex = hex.slice(2); - } - - // Ensure the hex string has an even length - if (hex.length % 2 !== 0) { - throw new Error('Invalid hex string length'); - } - - // Convert hex string to a byte array - const bytes = []; - for (let i = 0; i < hex.length; i += 2) { - bytes.push(parseInt(hex.substr(i, 2), 16)); - } - - // Convert byte array to UTF-8 string - const utf8String = new TextDecoder('utf-8').decode(new Uint8Array(bytes)); - return utf8String; + // Remove the '0x' prefix if present + if (hex.startsWith('0x')) { + hex = hex.slice(2); + } + + // Ensure the hex string has an even length + if (hex.length % 2 !== 0) { + throw new Error('Invalid hex string length'); + } + + // Convert hex string to a byte array + const bytes = []; + for (let i = 0; i < hex.length; i += 2) { + bytes.push(parseInt(hex.substr(i, 2), 16)); + } + + // Convert byte array to UTF-8 string + const utf8String = new TextDecoder('utf-8').decode(new Uint8Array(bytes)); + return utf8String; } function uint8ArrayToHex(uint8Array) { - return Array.from(uint8Array) - .map(byte => byte.toString(16).padStart(2, '0')) // Convert each byte to a 2-digit hex value - .join(''); // Join all the hex values into a single string + return Array.from(uint8Array) + .map((byte) => byte.toString(16).padStart(2, '0')) // Convert each byte to a 2-digit hex value + .join(''); // Join all the hex values into a single string } async function getTransactions(validatorTimelockAddress, commitBatchesSharedBridge_functionSelector) { - const latestBlock = await web3.eth.getBlockNumber(); - let jsonArray = []; - for (let i = 0; i <= latestBlock; i++) { - const block = await web3.eth.getBlock(i, true); - await Promise.all(block.transactions.map(async tx => { - if (tx.to && (tx.to.toLowerCase() == validatorTimelockAddress.toLowerCase())) { - const input = tx.input; - const txSelector = input.slice(0, 10); - if (txSelector == 
commitBatchesSharedBridge_functionSelector) { - const functionAbi = contract.options.jsonInterface.find(item => item.name === 'commitBatchesSharedBridge'); - const decodedParams = web3.eth.abi.decodeParameters( - functionAbi.inputs, - input.slice(10) // Remove the function selector (first 10 characters of the calldata) - ); - commitment = hexToUtf8(decodedParams._newBatchesData[0].pubdataCommitments.slice(4)); - let blob = await get(commitment); - const blobHex = uint8ArrayToHex(blob); - jsonArray.push({ - commitment: commitment, - blob: blobHex - }); - } - } - })); - } - const jsonString = JSON.stringify(jsonArray, null, 2); - fs.writeFileSync("blob_data.json", jsonString, 'utf8'); + const latestBlock = await web3.eth.getBlockNumber(); + let jsonArray = []; + for (let i = 0; i <= latestBlock; i++) { + const block = await web3.eth.getBlock(i, true); + await Promise.all( + block.transactions.map(async (tx) => { + if (tx.to && tx.to.toLowerCase() == validatorTimelockAddress.toLowerCase()) { + const input = tx.input; + const txSelector = input.slice(0, 10); + if (txSelector == commitBatchesSharedBridge_functionSelector) { + const functionAbi = contract.options.jsonInterface.find( + (item) => item.name === 'commitBatchesSharedBridge' + ); + const decodedParams = web3.eth.abi.decodeParameters( + functionAbi.inputs, + input.slice(10) // Remove the function selector (first 10 characters of the calldata) + ); + commitment = hexToUtf8(decodedParams._newBatchesData[0].pubdataCommitments.slice(4)); + let blob = await get(commitment); + const blobHex = uint8ArrayToHex(blob); + jsonArray.push({ + commitment: commitment, + blob: blobHex + }); + } + } + }) + ); + } + const jsonString = JSON.stringify(jsonArray, null, 2); + fs.writeFileSync('blob_data.json', jsonString, 'utf8'); } async function get(commitment) { - try { - const url = `http://localhost:4242/get/0x${commitment}`; - - const response = await fetch(url); - - if (response.ok) { - // Expecting the response body to be binary data - const body = await response.arrayBuffer(); - return new Uint8Array(body); - } else { - return []; // Return empty array if the response is not successful - } - } catch (error) { - // Handle any errors - console.error('Error fetching data:', error); - throw error; // Re-throw the error or return an empty array, depending on requirements - } + try { + const url = `http://localhost:4242/get/0x${commitment}`; + + const response = await fetch(url); + + if (response.ok) { + // Expecting the response body to be binary data + const body = await response.arrayBuffer(); + return new Uint8Array(body); + } else { + return []; // Return empty array if the response is not successful + } + } catch (error) { + // Handle any errors + console.error('Error fetching data:', error); + throw error; // Re-throw the error or return an empty array, depending on requirements + } } function getArguments() { - const args = process.argv.slice(2); // Get arguments after the first two (which are node and script path) - - let validatorTimelockAddress = null; - let commitBatchesSharedBridge_functionSelector = null; - args.forEach(arg => { - const [key, value] = arg.split('='); - if (key === 'validatorTimelockAddress') { - validatorTimelockAddress = value; - } else if (key === 'commitBatchesSharedBridge_functionSelector') { - commitBatchesSharedBridge_functionSelector = value; - } - }); - - // Check if both arguments are provided - if (!validatorTimelockAddress || !commitBatchesSharedBridge_functionSelector) { - console.error('Usage: node 
getallblobs.js validatorTimelockAddress= commitBatchesSharedBridge_functionSelector='); - process.exit(1); // Exit with error - } - - return { validatorTimelockAddress, commitBatchesSharedBridge_functionSelector }; + const args = process.argv.slice(2); // Get arguments after the first two (which are node and script path) + + let validatorTimelockAddress = null; + let commitBatchesSharedBridge_functionSelector = null; + args.forEach((arg) => { + const [key, value] = arg.split('='); + if (key === 'validatorTimelockAddress') { + validatorTimelockAddress = value; + } else if (key === 'commitBatchesSharedBridge_functionSelector') { + commitBatchesSharedBridge_functionSelector = value; + } + }); + + // Check if both arguments are provided + if (!validatorTimelockAddress || !commitBatchesSharedBridge_functionSelector) { + console.error( + 'Usage: node getallblobs.js validatorTimelockAddress= commitBatchesSharedBridge_functionSelector=' + ); + process.exit(1); // Exit with error + } + + return { validatorTimelockAddress, commitBatchesSharedBridge_functionSelector }; } function main() { - // Values for local node: - // validatorTimelockAddress = check in zk init - // commitBatchesSharedBridge_functionSelector = "0x6edd4f12" - const { validatorTimelockAddress, commitBatchesSharedBridge_functionSelector } = getArguments(); - getTransactions(validatorTimelockAddress, commitBatchesSharedBridge_functionSelector); + // Values for local node: + // validatorTimelockAddress = check in zk init + // commitBatchesSharedBridge_functionSelector = "0x6edd4f12" + const { validatorTimelockAddress, commitBatchesSharedBridge_functionSelector } = getArguments(); + getTransactions(validatorTimelockAddress, commitBatchesSharedBridge_functionSelector); } main(); diff --git a/infrastructure/zk/src/docker.ts b/infrastructure/zk/src/docker.ts index 035061a8ed0..063777a671b 100644 --- a/infrastructure/zk/src/docker.ts +++ b/infrastructure/zk/src/docker.ts @@ -11,6 +11,7 @@ const IMAGES = [ 'witness-generator', 'prover-gpu-fri', 'witness-vector-generator', + 'circuit-prover-gpu', 'prover-fri-gateway', 'prover-job-monitor', 'proof-fri-gpu-compressor', @@ -87,7 +88,8 @@ function defaultTagList(image: string, imageTagSha: string, imageTagShaTS: strin 'prover-fri-gateway', 'prover-gpu-fri', 'witness-generator', - 'witness-vector-generator' + 'witness-vector-generator', + 'circuit-prover-gpu' ].includes(image) ) { tagList.push(`2.0-${protocolVersionTag}-${imageTagShaTS}`, `${protocolVersionTag}-${imageTagShaTS}`); diff --git a/prover/Cargo.lock b/prover/Cargo.lock index d29f0110f21..92366b0912b 100644 --- a/prover/Cargo.lock +++ b/prover/Cargo.lock @@ -313,6 +313,8 @@ dependencies = [ "http 1.1.0", "http-body 1.0.0", "http-body-util", + "hyper 1.3.1", + "hyper-util", "itoa", "matchit", "memchr", @@ -321,10 +323,15 @@ dependencies = [ "pin-project-lite", "rustversion", "serde", + "serde_json", + "serde_path_to_error", + "serde_urlencoded", "sync_wrapper 1.0.1", + "tokio", "tower", "tower-layer", "tower-service", + "tracing", ] [[package]] @@ -345,6 +352,7 @@ dependencies = [ "sync_wrapper 0.1.2", "tower-layer", "tower-service", + "tracing", ] [[package]] @@ -642,9 +650,9 @@ dependencies = [ [[package]] name = "boojum-cuda" -version = "0.150.7" +version = "0.150.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac7735446f2263e8d12435fc4d5a02c7727838eaffc7c518a961b3e839fb59e7" +checksum = "04f9a6d958dd58a0899737e5a1fc6597aefcf7980bf8be5be5329e701cbd45ca" dependencies = [ "boojum", "cmake", @@ -731,9 
+739,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.6.0" +version = "1.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" +checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3" [[package]] name = "cc" @@ -1406,18 +1414,18 @@ dependencies = [ [[package]] name = "derive_more" -version = "1.0.0-beta.6" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7abbfc297053be59290e3152f8cbcd52c8642e0728b69ee187d991d4c1af08d" +checksum = "4a9b99b9cbbe49445b21764dc0625032a89b145a2642e67603e1c936f5458d05" dependencies = [ "derive_more-impl", ] [[package]] name = "derive_more-impl" -version = "1.0.0-beta.6" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bba3e9872d7c58ce7ef0fcf1844fcc3e23ef2a58377b50df35dd98e42a5726e" +checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" dependencies = [ "proc-macro2 1.0.85", "quote 1.0.36", @@ -1682,9 +1690,9 @@ checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" [[package]] name = "era_cudart" -version = "0.150.7" +version = "0.150.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f76aa50bd291b43ad56fb7da3e63c4c3cecb3c7e19db76c8097856371bc0d84a" +checksum = "51f0d6e329b2c11d134c3140951209be968ef316ed64ddde75640eaed7f10264" dependencies = [ "bitflags 2.6.0", "era_cudart_sys", @@ -1693,9 +1701,9 @@ dependencies = [ [[package]] name = "era_cudart_sys" -version = "0.150.7" +version = "0.150.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7d2db304df6b72141d45b140ec6df68ecd2300a7ab27de18b3e0e3af38c9776" +checksum = "060e8186234c7a281021fb95614e06e94e1fc7ab78938360a5c27af0f8fc6105" dependencies = [ "serde_json", ] @@ -1802,18 +1810,6 @@ dependencies = [ "subtle", ] -[[package]] -name = "ff_ce" -version = "0.14.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b538e4231443a5b9c507caee3356f016d832cf7393d2d90f03ea3180d4e3fbc" -dependencies = [ - "byteorder", - "hex", - "rand 0.4.6", - "serde", -] - [[package]] name = "fiat-crypto" version = "0.2.9" @@ -5112,9 +5108,9 @@ checksum = "a3f0bf26fd526d2a95683cd0f87bf103b8539e2ca1ef48ce002d67aad59aa0b4" [[package]] name = "serde" -version = "1.0.203" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7253ab4de971e72fb7be983802300c30b5a7f0c2e56fab8abfc6a214307c0094" +checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" dependencies = [ "serde_derive", ] @@ -5131,9 +5127,9 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.203" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "500cbc0ebeb6f46627f50f3f5811ccf6bf00643be300b4c3eabc0ef55dc5b5ba" +checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2 1.0.85", "quote 1.0.36", @@ -5151,6 +5147,16 @@ dependencies = [ "serde", ] +[[package]] +name = "serde_path_to_error" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af99884400da37c88f5e9146b7f1fd0fbcae8f6eec4e9da38b67d05486f814a6" +dependencies = [ + "itoa", + "serde", +] + [[package]] name = "serde_urlencoded" version = "0.7.1" @@ -5295,9 +5301,9 @@ checksum = 
"24188a676b6ae68c3b2cb3a01be17fbf7240ce009799bb56d5b1409051e78fde" [[package]] name = "shivini" -version = "0.150.7" +version = "0.150.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f11e6942c89861aecb72261f8220800a1b69b8a5463c07c24df75b81fd809b0" +checksum = "ebb6d928451f0779f14da02ee9d51d4bde560328edc6471f0d5c5c11954345c4" dependencies = [ "bincode", "blake2 0.10.6", @@ -7261,9 +7267,9 @@ dependencies = [ [[package]] name = "zksync-gpu-ffi" -version = "0.150.7" +version = "0.150.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aecd7f624185b785e9d8457986ac34685d478e2baa78417d51b102b7d0fa27fd" +checksum = "86511b3957adfe415ecdbd1ee01c51aa3ca131a607e61ca024976312f613b0f9" dependencies = [ "bindgen 0.59.2", "cmake", @@ -7277,9 +7283,9 @@ dependencies = [ [[package]] name = "zksync-gpu-prover" -version = "0.150.7" +version = "0.150.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a089b11fcdbd37065acaf427545cb50b87e6712951a10f3761b3d370e4b8f9bc" +checksum = "9e4c00f2db603d1b696bc2e9d822bb4c087050de5b65559067fc2232786cbc93" dependencies = [ "bit-vec", "cfg-if", @@ -7294,9 +7300,9 @@ dependencies = [ [[package]] name = "zksync-wrapper-prover" -version = "0.150.7" +version = "0.150.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc764c21d4ae15c5bc2c07c14c814c5e3ba8d194ddcca543b8cec95456031832" +checksum = "d58df1ec10e0d5eb58563bb01abda5ed185c9b9621502e361848ca40eb7868ac" dependencies = [ "circuit_definitions", "zkevm_test_harness", @@ -7312,6 +7318,7 @@ dependencies = [ "ethabi", "hex", "num_enum 0.7.2", + "secrecy", "serde", "serde_json", "serde_with", @@ -7344,11 +7351,38 @@ dependencies = [ "zksync_pairing", ] +[[package]] +name = "zksync_circuit_prover" +version = "0.1.0" +dependencies = [ + "anyhow", + "async-trait", + "bincode", + "clap 4.5.4", + "shivini", + "tokio", + "tokio-util", + "tracing", + "vise", + "zkevm_test_harness", + "zksync_config", + "zksync_core_leftovers", + "zksync_env_config", + "zksync_object_store", + "zksync_prover_dal", + "zksync_prover_fri_types", + "zksync_prover_fri_utils", + "zksync_prover_keystore", + "zksync_queued_job_processor", + "zksync_types", + "zksync_utils", +] + [[package]] name = "zksync_concurrency" -version = "0.1.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1c8cf6c689ab5922b52d81b775cd2d9cffbfc8fb8da65985e11b06546dfb3bf" +checksum = "a4724d51934e475c846ba9e6ed169e25587385188b928a9ecfbbf616092a1c17" dependencies = [ "anyhow", "once_cell", @@ -7382,20 +7416,18 @@ dependencies = [ [[package]] name = "zksync_consensus_crypto" -version = "0.1.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc7baced4e811015038322dad10239f2d631d9e339e8d6b7b6e6b146bee30f41" +checksum = "a7760e7a140f16f0435fbf2ad9a4b09feaad74568d05b553751d222f4803a42e" dependencies = [ "anyhow", "blst", "ed25519-dalek", "elliptic-curve 0.13.8", - "ff_ce", "hex", "k256 0.13.3", "num-bigint 0.4.5", "num-traits", - "rand 0.4.6", "rand 0.8.5", "sha3 0.10.8", "thiserror", @@ -7405,9 +7437,9 @@ dependencies = [ [[package]] name = "zksync_consensus_roles" -version = "0.1.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0aab4ddf62f6001903c5fe9f65afb1bdc42464928c9d1c6ce52e4d7e9944f5dc" +checksum = "96f903187836210602beba27655e111e22efb229ef90bd2a95a3d6799b31685c" dependencies = [ "anyhow", "bit-vec", @@ -7427,9 
+7459,9 @@ dependencies = [ [[package]] name = "zksync_consensus_storage" -version = "0.1.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b9dbcb923fa201af03f49f70c11a923b416915d2ddf8b2de3a2e861f22898a4" +checksum = "ff43cfd03ea205c763e74362dc6ec5a4d74b6b1baef0fb134dde92a8880397f7" dependencies = [ "anyhow", "async-trait", @@ -7447,9 +7479,9 @@ dependencies = [ [[package]] name = "zksync_consensus_utils" -version = "0.1.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29e69dffc0fbc7c096548c997f5ca157a490b34b3d49fd524fa3d51840f7fb22" +checksum = "1020308512c01ab80327fb874b5b61c6fd513a6b26c8a5fce3e077600da04e4b" dependencies = [ "anyhow", "rand 0.8.5", @@ -7533,6 +7565,7 @@ dependencies = [ "tokio", "tracing", "vise", + "zksync_concurrency", "zksync_consensus_roles", "zksync_consensus_storage", "zksync_contracts", @@ -7596,7 +7629,8 @@ dependencies = [ "async-trait", "rlp", "thiserror", - "zksync_types", + "zksync_basic_types", + "zksync_crypto_primitives", ] [[package]] @@ -7666,7 +7700,6 @@ dependencies = [ "hex", "itertools 0.10.5", "once_cell", - "pretty_assertions", "thiserror", "tracing", "vise", @@ -7755,9 +7788,9 @@ dependencies = [ [[package]] name = "zksync_protobuf" -version = "0.1.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df5467dfe2f845ca1fd6ceec623bbd32187589793d3c4023dcd2f5172369d198" +checksum = "1d2d9ce9b9697daae6023c8da5cfe8764690a9d9c91ff32b8e1e54a7c8301fb3" dependencies = [ "anyhow", "bit-vec", @@ -7776,9 +7809,9 @@ dependencies = [ [[package]] name = "zksync_protobuf_build" -version = "0.1.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33d35280660b11be2a4ebdf531184eb729acebfdc3368d27176ec104f8bf9c5f" +checksum = "903c23a12e160a703f9b68d0dd961daa24156af912ca1bc9efb74969f3acc645" dependencies = [ "anyhow", "heck 0.5.0", @@ -7928,13 +7961,16 @@ version = "0.1.0" dependencies = [ "anyhow", "async-trait", + "axum", "clap 4.5.4", "ctrlc", + "serde", "tokio", "tracing", "vise", "zksync_config", "zksync_core_leftovers", + "zksync_db_connection", "zksync_prover_dal", "zksync_types", "zksync_utils", @@ -7948,6 +7984,7 @@ dependencies = [ "anyhow", "bincode", "circuit_definitions", + "futures 0.3.30", "hex", "md5", "once_cell", @@ -7955,6 +7992,7 @@ dependencies = [ "serde_json", "sha3 0.10.8", "shivini", + "tokio", "tracing", "zkevm_test_harness", "zksync_basic_types", @@ -8002,6 +8040,7 @@ dependencies = [ "secp256k1", "serde", "serde_json", + "serde_with", "strum", "thiserror", "tracing", @@ -8084,8 +8123,8 @@ dependencies = [ [[package]] name = "zksync_vm2" -version = "0.1.0" -source = "git+https://github.com/matter-labs/vm2.git?rev=cd6136c42ec56856e0abcf2a98d1a9e120161482#cd6136c42ec56856e0abcf2a98d1a9e120161482" +version = "0.2.1" +source = "git+https://github.com/matter-labs/vm2.git?rev=a233d44bbe61dc6a758a754c3b78fe4f83e56699#a233d44bbe61dc6a758a754c3b78fe4f83e56699" dependencies = [ "enum_dispatch", "primitive-types", @@ -8096,8 +8135,8 @@ dependencies = [ [[package]] name = "zksync_vm2_interface" -version = "0.1.0" -source = "git+https://github.com/matter-labs/vm2.git?rev=cd6136c42ec56856e0abcf2a98d1a9e120161482#cd6136c42ec56856e0abcf2a98d1a9e120161482" +version = "0.2.1" +source = "git+https://github.com/matter-labs/vm2.git?rev=a233d44bbe61dc6a758a754c3b78fe4f83e56699#a233d44bbe61dc6a758a754c3b78fe4f83e56699" dependencies = [ "primitive-types", ] @@ -8109,6 
+8148,7 @@ dependencies = [ "anyhow", "async-trait", "hex", + "pretty_assertions", "serde", "thiserror", "tracing", diff --git a/prover/Cargo.toml b/prover/Cargo.toml index 624661adc8d..e95bae3d4c1 100644 --- a/prover/Cargo.toml +++ b/prover/Cargo.toml @@ -19,6 +19,7 @@ categories = ["cryptography"] [workspace.dependencies] # Common dependencies anyhow = "1.0" +axum = "0.7.5" async-trait = "0.1" bincode = "1" chrono = "0.4.38" @@ -50,6 +51,7 @@ structopt = "0.3.26" strum = { version = "0.26" } tempfile = "3" tokio = "1" +tokio-util = "0.7.11" toml_edit = "0.14.4" tracing = "0.1" tracing-subscriber = "0.3" @@ -61,8 +63,8 @@ circuit_sequencer_api = "=0.150.5" zkevm_test_harness = "=0.150.5" # GPU proving dependencies -wrapper_prover = { package = "zksync-wrapper-prover", version = "=0.150.7" } -shivini = "=0.150.7" +wrapper_prover = { package = "zksync-wrapper-prover", version = "=0.150.9" } +shivini = "=0.150.9" # Core workspace dependencies zksync_multivm = { path = "../core/lib/multivm", version = "0.1.0" } diff --git a/prover/crates/bin/circuit_prover/Cargo.toml b/prover/crates/bin/circuit_prover/Cargo.toml new file mode 100644 index 00000000000..a5751a4cd9a --- /dev/null +++ b/prover/crates/bin/circuit_prover/Cargo.toml @@ -0,0 +1,38 @@ +[package] +name = "zksync_circuit_prover" +version.workspace = true +edition.workspace = true +authors.workspace = true +homepage.workspace = true +repository.workspace = true +license.workspace = true +keywords.workspace = true +categories.workspace = true + +[dependencies] +tokio = { workspace = true, features = ["macros", "time"] } +tokio-util.workspace = true +anyhow.workspace = true +async-trait.workspace = true +tracing.workspace = true +bincode.workspace = true +clap = { workspace = true, features = ["derive"] } + +zksync_config.workspace = true +zksync_object_store.workspace = true +zksync_prover_dal.workspace = true +zksync_prover_fri_types.workspace = true +zksync_prover_fri_utils.workspace = true +zksync_queued_job_processor.workspace = true +zksync_types.workspace = true +zksync_prover_keystore = { workspace = true, features = ["gpu"] } +zksync_env_config.workspace = true +zksync_core_leftovers.workspace = true +zksync_utils.workspace = true + +vise.workspace = true +shivini = { workspace = true, features = [ + "circuit_definitions", + "zksync", +] } +zkevm_test_harness.workspace = true diff --git a/prover/crates/bin/circuit_prover/src/backoff.rs b/prover/crates/bin/circuit_prover/src/backoff.rs new file mode 100644 index 00000000000..6ddb3d94be3 --- /dev/null +++ b/prover/crates/bin/circuit_prover/src/backoff.rs @@ -0,0 +1,39 @@ +use std::{ops::Mul, time::Duration}; + +/// Backoff - convenience structure that takes care of backoff timings. +#[derive(Debug, Clone)] +pub struct Backoff { + base_delay: Duration, + current_delay: Duration, + max_delay: Duration, +} + +impl Backoff { + /// The delay multiplication coefficient. + // Currently it's hardcoded, but could be provided in the constructor. + const DELAY_MULTIPLIER: u32 = 2; + + /// Create a backoff with base_delay (first delay) and max_delay (maximum delay possible). 
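+ /// Illustration (not part of the API): with `base_delay` = 5s and `max_delay` = 30s, + /// successive `delay()` calls yield 5s, 10s, 20s, 30s, 30s, ... until `reset()` is called.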
+ pub fn new(base_delay: Duration, max_delay: Duration) -> Self { + Backoff { + base_delay, + current_delay: base_delay, + max_delay, + } + } + + /// Get the current delay and advance it for the next call (doubled, capped at `max_delay`). + pub fn delay(&mut self) -> Duration { + let delay = self.current_delay; + self.current_delay = self + .current_delay + .mul(Self::DELAY_MULTIPLIER) + .min(self.max_delay); + delay + } + + /// Reset the backoff time to the base delay. + pub fn reset(&mut self) { + self.current_delay = self.base_delay; + } +} diff --git a/prover/crates/bin/circuit_prover/src/circuit_prover.rs b/prover/crates/bin/circuit_prover/src/circuit_prover.rs new file mode 100644 index 00000000000..1a5f8aa0d97 --- /dev/null +++ b/prover/crates/bin/circuit_prover/src/circuit_prover.rs @@ -0,0 +1,397 @@ +use std::{sync::Arc, time::Instant}; + +use anyhow::Context; +use shivini::{ + gpu_proof_config::GpuProofConfig, gpu_prove_from_external_witness_data, ProverContext, + ProverContextConfig, +}; +use tokio::{sync::mpsc::Receiver, task::JoinHandle}; +use tokio_util::sync::CancellationToken; +use zkevm_test_harness::prover_utils::{verify_base_layer_proof, verify_recursion_layer_proof}; +use zksync_object_store::ObjectStore; +use zksync_prover_dal::{ConnectionPool, Prover, ProverDal}; +use zksync_prover_fri_types::{ + circuit_definitions::{ + base_layer_proof_config, + boojum::{ + cs::implementations::{pow::NoPow, witness::WitnessVec}, + field::goldilocks::GoldilocksField, + worker::Worker, + }, + circuit_definitions::{ + base_layer::ZkSyncBaseLayerProof, recursion_layer::ZkSyncRecursionLayerProof, + }, + recursion_layer_proof_config, + }, + CircuitWrapper, FriProofWrapper, ProverArtifacts, WitnessVectorArtifactsTemp, +}; +use zksync_prover_keystore::GoldilocksGpuProverSetupData; +use zksync_types::protocol_version::ProtocolSemanticVersion; +use zksync_utils::panic_extractor::try_extract_panic_message; + +use crate::{ + metrics::CIRCUIT_PROVER_METRICS, + types::{DefaultTranscript, DefaultTreeHasher, Proof, VerificationKey}, + SetupDataCache, +}; + +/// In charge of proving circuits, given a Witness Vector source. +/// Both job runner & job executor. +#[derive(Debug)] +pub struct CircuitProver { + connection_pool: ConnectionPool<Prover>, + object_store: Arc<dyn ObjectStore>, + protocol_version: ProtocolSemanticVersion, + /// Witness Vector source receiver + receiver: Receiver<WitnessVectorArtifactsTemp>, + /// Setup Data used for proving & proof verification + setup_data_cache: SetupDataCache, +} + +impl CircuitProver { + pub fn new( + connection_pool: ConnectionPool<Prover>, + object_store: Arc<dyn ObjectStore>, + protocol_version: ProtocolSemanticVersion, + receiver: Receiver<WitnessVectorArtifactsTemp>, + max_allocation: Option<usize>, + setup_data_cache: SetupDataCache, + ) -> anyhow::Result<(Self, ProverContext)> { + // VRAM allocation + let prover_context = match max_allocation { + Some(max_allocation) => ProverContext::create_with_config( + ProverContextConfig::default().with_maximum_device_allocation(max_allocation), + ) + .context("failed initializing fixed gpu prover context")?, + None => ProverContext::create().context("failed initializing gpu prover context")?, + }; + Ok(( + Self { + connection_pool, + object_store, + protocol_version, + receiver, + setup_data_cache, + }, + prover_context, + )) + } + + /// Continuously polls `receiver` for Witness Vectors and proves them. + /// All job executions are persisted.
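+ /// Shuts down cleanly on cancellation; errors out if the witness vector channel is closed + /// (i.e. all Witness Vector Generators have dropped their senders).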
+ pub async fn run(mut self, cancellation_token: CancellationToken) -> anyhow::Result<()> { + while !cancellation_token.is_cancelled() { + let time = Instant::now(); + + let artifact = self + .receiver + .recv() + .await + .context("no Witness Vector Generators are available")?; + tracing::info!( + "Circuit Prover received job {:?} after: {:?}", + artifact.prover_job.job_id, + time.elapsed() + ); + CIRCUIT_PROVER_METRICS.job_wait_time.observe(time.elapsed()); + + self.prove(artifact, cancellation_token.clone()) + .await + .context("failed to prove circuit proof")?; + } + tracing::info!("Circuit Prover shut down."); + Ok(()) + } + + /// Proves a job, with persistence of execution. + async fn prove( + &self, + artifact: WitnessVectorArtifactsTemp, + cancellation_token: CancellationToken, + ) -> anyhow::Result<()> { + let time = Instant::now(); + let block_number = artifact.prover_job.block_number; + let job_id = artifact.prover_job.job_id; + let job_start_time = artifact.time; + let setup_data_key = artifact.prover_job.setup_data_key.crypto_setup_key(); + let setup_data = self + .setup_data_cache + .get(&setup_data_key) + .context(format!( + "failed to get setup data for key {setup_data_key:?}" + ))? + .clone(); + let task = tokio::task::spawn_blocking(move || { + let _span = tracing::info_span!("prove_circuit_proof", %block_number).entered(); + Self::prove_circuit_proof(artifact, setup_data).context("failed to prove circuit") + }); + + self.finish_task( + job_id, + time, + job_start_time, + task, + cancellation_token.clone(), + ) + .await?; + tracing::info!( + "Circuit Prover finished job {:?} in: {:?}", + job_id, + time.elapsed() + ); + CIRCUIT_PROVER_METRICS + .job_finished_time + .observe(time.elapsed()); + CIRCUIT_PROVER_METRICS + .full_proving_time + .observe(job_start_time.elapsed()); + Ok(()) + } + + /// Proves a job using crypto primitives (proof generation & proof verification). + #[tracing::instrument( + name = "Prover::prove_circuit_proof", + skip_all, + fields(l1_batch = % witness_vector_artifacts.prover_job.block_number) + )] + pub fn prove_circuit_proof( + witness_vector_artifacts: WitnessVectorArtifactsTemp, + setup_data: Arc<GoldilocksGpuProverSetupData>, + ) -> anyhow::Result<ProverArtifacts> { + let time = Instant::now(); + let WitnessVectorArtifactsTemp { + witness_vector, + prover_job, + .. + } = witness_vector_artifacts; + + let job_id = prover_job.job_id; + let circuit_wrapper = prover_job.circuit_wrapper; + let block_number = prover_job.block_number; + + let (proof, circuit_id) = + Self::generate_proof(&circuit_wrapper, witness_vector, &setup_data) + .context(format!("failed to generate proof for job id {job_id}"))?; + + Self::verify_proof(&circuit_wrapper, &proof, &setup_data.vk).context(format!( + "failed to verify proof with job_id {job_id}, circuit_id: {circuit_id}" + ))?; + + let proof_wrapper = match &circuit_wrapper { + CircuitWrapper::Base(_) => { + FriProofWrapper::Base(ZkSyncBaseLayerProof::from_inner(circuit_id, proof)) + } + CircuitWrapper::Recursive(_) => { + FriProofWrapper::Recursive(ZkSyncRecursionLayerProof::from_inner(circuit_id, proof)) + } + CircuitWrapper::BasePartial(_) => { + return Self::partial_proof_error(); + } + }; + CIRCUIT_PROVER_METRICS + .crypto_primitives_time + .observe(time.elapsed()); + Ok(ProverArtifacts::new(block_number, proof_wrapper)) + } + + /// Generates a proof using crypto primitives.
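+ /// Returns the proof together with the numeric circuit type it was generated for.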
+ fn generate_proof( + circuit_wrapper: &CircuitWrapper, + witness_vector: WitnessVec<GoldilocksField>, + setup_data: &Arc<GoldilocksGpuProverSetupData>, + ) -> anyhow::Result<(Proof, u8)> { + let time = Instant::now(); + + let worker = Worker::new(); + + let (gpu_proof_config, proof_config, circuit_id) = match circuit_wrapper { + CircuitWrapper::Base(circuit) => ( + GpuProofConfig::from_base_layer_circuit(circuit), + base_layer_proof_config(), + circuit.numeric_circuit_type(), + ), + CircuitWrapper::Recursive(circuit) => ( + GpuProofConfig::from_recursive_layer_circuit(circuit), + recursion_layer_proof_config(), + circuit.numeric_circuit_type(), + ), + CircuitWrapper::BasePartial(_) => { + return Self::partial_proof_error(); + } + }; + + let proof = + gpu_prove_from_external_witness_data::<DefaultTranscript, DefaultTreeHasher, NoPow, _>( + &gpu_proof_config, + &witness_vector, + proof_config, + &setup_data.setup, + &setup_data.vk, + (), + &worker, + ) + .context("crypto primitive: failed to generate proof")?; + CIRCUIT_PROVER_METRICS + .generate_proof_time + .observe(time.elapsed()); + Ok((proof.into(), circuit_id)) + } + + /// Verifies a proof using crypto primitives. + fn verify_proof( + circuit_wrapper: &CircuitWrapper, + proof: &Proof, + verification_key: &VerificationKey, + ) -> anyhow::Result<()> { + let time = Instant::now(); + + let is_valid = match circuit_wrapper { + CircuitWrapper::Base(base_circuit) => { + verify_base_layer_proof::<NoPow>(base_circuit, proof, verification_key) + } + CircuitWrapper::Recursive(recursive_circuit) => { + verify_recursion_layer_proof::<NoPow>(recursive_circuit, proof, verification_key) + } + CircuitWrapper::BasePartial(_) => { + return Self::partial_proof_error(); + } + }; + + CIRCUIT_PROVER_METRICS + .verify_proof_time + .observe(time.elapsed()); + + if !is_valid { + return Err(anyhow::anyhow!("crypto primitive: failed to verify proof")); + } + Ok(()) + } + + /// This code path should never trigger. All proofs are hydrated during Witness Vector Generator. + /// If this triggers, it means that proof hydration in Witness Vector Generator was not done -- logic bug. + fn partial_proof_error<T>() -> anyhow::Result<T> { + Err(anyhow::anyhow!("received unexpected dehydrated proof")) + } + + /// Runs task to completion and persists result. + /// NOTE: Task may be cancelled mid-flight. + async fn finish_task( + &self, + job_id: u32, + time: Instant, + job_start_time: Instant, + task: JoinHandle<anyhow::Result<ProverArtifacts>>, + cancellation_token: CancellationToken, + ) -> anyhow::Result<()> { + tokio::select! { + _ = cancellation_token.cancelled() => { + tracing::info!("Stop signal received, shutting down Circuit Prover..."); + return Ok(()) + } + result = task => { + let error_message = match result { + Ok(Ok(prover_artifact)) => { + tracing::info!("Circuit Prover executed job {:?} in: {:?}", job_id, time.elapsed()); + CIRCUIT_PROVER_METRICS.execution_time.observe(time.elapsed()); + self + .save_result(job_id, job_start_time, prover_artifact) + .await.context("failed to save result")?; + return Ok(()) + } + Ok(Err(error)) => error.to_string(), + Err(error) => try_extract_panic_message(error), + }; + tracing::error!( + "Circuit Prover failed on job {:?} with error {:?}", + job_id, + error_message + ); + + self.save_failure(job_id, error_message).await.context("failed to save failure")?; + } + } + + Ok(()) + } + + /// Persists the generated proof. + /// Job metadata is saved to the database, whilst artifacts go to the object store.
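+ /// Scheduler proofs additionally enqueue a proof compression job.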
+ async fn save_result( + &self, + job_id: u32, + job_start_time: Instant, + artifacts: ProverArtifacts, + ) -> anyhow::Result<()> { + let time = Instant::now(); + let mut connection = self + .connection_pool + .connection() + .await + .context("failed to get db connection")?; + let proof = artifacts.proof_wrapper; + + let (_circuit_type, is_scheduler_proof) = match &proof { + FriProofWrapper::Base(base) => (base.numeric_circuit_type(), false), + FriProofWrapper::Recursive(recursive_circuit) => match recursive_circuit { + ZkSyncRecursionLayerProof::SchedulerCircuit(_) => { + (recursive_circuit.numeric_circuit_type(), true) + } + _ => (recursive_circuit.numeric_circuit_type(), false), + }, + }; + + let upload_time = Instant::now(); + let blob_url = self + .object_store + .put(job_id, &proof) + .await + .context("failed to upload to object store")?; + CIRCUIT_PROVER_METRICS + .artifact_upload_time + .observe(upload_time.elapsed()); + + let mut transaction = connection + .start_transaction() + .await + .context("failed to start db transaction")?; + transaction + .fri_prover_jobs_dal() + .save_proof(job_id, job_start_time.elapsed(), &blob_url) + .await; + if is_scheduler_proof { + transaction + .fri_proof_compressor_dal() + .insert_proof_compression_job( + artifacts.block_number, + &blob_url, + self.protocol_version, + ) + .await; + } + transaction + .commit() + .await + .context("failed to commit db transaction")?; + + tracing::info!( + "Circuit Prover saved job {:?} after {:?}", + job_id, + time.elapsed() + ); + CIRCUIT_PROVER_METRICS.save_time.observe(time.elapsed()); + + Ok(()) + } + + /// Persists job execution error to database. + async fn save_failure(&self, job_id: u32, error: String) -> anyhow::Result<()> { + self.connection_pool + .connection() + .await + .context("failed to get db connection")? + .fri_prover_jobs_dal() + .save_proof_error(job_id, error) + .await; + Ok(()) + } +} diff --git a/prover/crates/bin/circuit_prover/src/lib.rs b/prover/crates/bin/circuit_prover/src/lib.rs new file mode 100644 index 00000000000..7d7ce1d9668 --- /dev/null +++ b/prover/crates/bin/circuit_prover/src/lib.rs @@ -0,0 +1,13 @@ +#![allow(incomplete_features)] // We have to use generic const exprs. 
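+// `generic_const_exprs` is an incomplete nightly-only feature, so building this crate requires a nightly Rust toolchain.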
+#![feature(generic_const_exprs)] +pub use backoff::Backoff; +pub use circuit_prover::CircuitProver; +pub use metrics::PROVER_BINARY_METRICS; +pub use types::{FinalizationHintsCache, SetupDataCache}; +pub use witness_vector_generator::WitnessVectorGenerator; + +mod backoff; +mod circuit_prover; +mod metrics; +mod types; +mod witness_vector_generator; diff --git a/prover/crates/bin/circuit_prover/src/main.rs b/prover/crates/bin/circuit_prover/src/main.rs new file mode 100644 index 00000000000..e26f29ca995 --- /dev/null +++ b/prover/crates/bin/circuit_prover/src/main.rs @@ -0,0 +1,201 @@ +use std::{ + path::PathBuf, + sync::Arc, + time::{Duration, Instant}, +}; + +use anyhow::Context as _; +use clap::Parser; +use tokio_util::sync::CancellationToken; +use zksync_circuit_prover::{ + Backoff, CircuitProver, FinalizationHintsCache, SetupDataCache, WitnessVectorGenerator, + PROVER_BINARY_METRICS, +}; +use zksync_config::{ + configs::{FriProverConfig, ObservabilityConfig}, + ObjectStoreConfig, +}; +use zksync_core_leftovers::temp_config_store::{load_database_secrets, load_general_config}; +use zksync_object_store::{ObjectStore, ObjectStoreFactory}; +use zksync_prover_dal::{ConnectionPool, Prover}; +use zksync_prover_fri_types::PROVER_PROTOCOL_SEMANTIC_VERSION; +use zksync_prover_keystore::keystore::Keystore; +use zksync_utils::wait_for_tasks::ManagedTasks; + +#[derive(Debug, Parser)] +#[command(author = "Matter Labs", version)] +struct Cli { + #[arg(long)] + pub(crate) config_path: Option<PathBuf>, + #[arg(long)] + pub(crate) secrets_path: Option<PathBuf>, + /// Number of WVG jobs to run in parallel. + /// Default value is 1. + #[arg(long, default_value_t = 1)] + pub(crate) witness_vector_generator_count: usize, + /// Max VRAM to allocate. Useful if you want to limit the size of VRAM used. + /// None corresponds to allocating all available VRAM. + #[arg(long)] + pub(crate) max_allocation: Option<usize>, +} + +#[tokio::main] +async fn main() -> anyhow::Result<()> { + let time = Instant::now(); + let opt = Cli::parse(); + + let (observability_config, prover_config, object_store_config) = load_configs(opt.config_path)?; + + let _observability_guard = observability_config + .install() + .context("failed to install observability")?; + + let wvg_count = opt.witness_vector_generator_count as u32; + + let (connection_pool, object_store, setup_data_cache, hints) = load_resources( + opt.secrets_path, + object_store_config, + prover_config.setup_data_path.into(), + wvg_count, + ) + .await + .context("failed to load resources")?; + + PROVER_BINARY_METRICS.start_up.observe(time.elapsed()); + + let cancellation_token = CancellationToken::new(); + let backoff = Backoff::new(Duration::from_secs(5), Duration::from_secs(30)); + + let mut tasks = vec![]; + + let (sender, receiver) = tokio::sync::mpsc::channel(5); + + tracing::info!("Starting {wvg_count} Witness Vector Generators."); + + for _ in 0..wvg_count { + let wvg = WitnessVectorGenerator::new( + object_store.clone(), + connection_pool.clone(), + PROVER_PROTOCOL_SEMANTIC_VERSION, + sender.clone(), + hints.clone(), + ); + tasks.push(tokio::spawn( + wvg.run(cancellation_token.clone(), backoff.clone()), + )); + } + + // NOTE: Prover Context is the way VRAM is allocated. If it is dropped, the claim on VRAM allocation is dropped as well. + // It has to be kept until the prover dies. While it could be kept in the prover struct, during cancellation the struct may be `drop`ped while the thread doing the processing is still alive.
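+ // Hence `_prover_context` is bound just below and kept alive for the lifetime of `main`.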
+ // This setup prevents segmentation faults and other nasty behavior during shutdown. + let (prover, _prover_context) = CircuitProver::new( + connection_pool, + object_store, + PROVER_PROTOCOL_SEMANTIC_VERSION, + receiver, + opt.max_allocation, + setup_data_cache, + ) + .context("failed to create circuit prover")?; + tasks.push(tokio::spawn(prover.run(cancellation_token.clone()))); + + let mut tasks = ManagedTasks::new(tasks); + tokio::select! { + _ = tasks.wait_single() => {}, + result = tokio::signal::ctrl_c() => { + match result { + Ok(_) => { + tracing::info!("Stop signal received, shutting down..."); + cancellation_token.cancel(); + }, + Err(_err) => { + tracing::error!("failed to set up ctrl c listener"); + } + } + } + } + PROVER_BINARY_METRICS.run_time.observe(time.elapsed()); + tasks.complete(Duration::from_secs(5)).await; + + Ok(()) +} + +/// Loads configs necessary for proving. +/// - observability config - for observability setup +/// - prover config - necessary for setup data +/// - object store config - for retrieving artifacts for WVG & CP +fn load_configs( + config_path: Option<PathBuf>, +) -> anyhow::Result<(ObservabilityConfig, FriProverConfig, ObjectStoreConfig)> { + tracing::info!("loading configs..."); + let general_config = + load_general_config(config_path).context("failed loading general config")?; + let observability_config = general_config + .observability + .context("failed loading observability config")?; + let prover_config = general_config + .prover_config + .context("failed loading prover config")?; + let object_store_config = prover_config + .prover_object_store + .clone() + .context("failed loading prover object store config")?; + tracing::info!("Loaded configs."); + Ok((observability_config, prover_config, object_store_config)) +} + +/// Loads resources necessary for proving.
+/// - connection pool - necessary to pick & store jobs from the database +/// - object store - necessary for loading and storing artifacts to the object store +/// - setup data - necessary for circuit proving +/// - finalization hints - necessary for generating witness vectors +async fn load_resources( + secrets_path: Option<PathBuf>, + object_store_config: ObjectStoreConfig, + setup_data_path: PathBuf, + wvg_count: u32, +) -> anyhow::Result<( + ConnectionPool<Prover>, + Arc<dyn ObjectStore>, + SetupDataCache, + FinalizationHintsCache, +)> { + let database_secrets = + load_database_secrets(secrets_path).context("failed to load database secrets")?; + let database_url = database_secrets + .prover_url + .context("no prover DB URL present")?; + + // 1 connection for the prover and one for each vector generator + let max_connections = 1 + wvg_count; + let connection_pool = ConnectionPool::<Prover>::builder(database_url, max_connections) + .build() + .await + .context("failed to build connection pool")?; + + let object_store = ObjectStoreFactory::new(object_store_config) + .create_store() + .await + .context("failed to create object store")?; + + tracing::info!("Loading mappings from disk..."); + + let keystore = Keystore::locate().with_setup_path(Some(setup_data_path)); + let setup_data_cache = keystore + .load_all_setup_key_mapping() + .await + .context("failed to load setup key mapping")?; + let finalization_hints = keystore + .load_all_finalization_hints_mapping() + .await + .context("failed to load finalization hints mapping")?; + + tracing::info!("Loaded mappings from disk."); + + Ok(( + connection_pool, + object_store, + setup_data_cache, + finalization_hints, + )) +} diff --git a/prover/crates/bin/circuit_prover/src/metrics.rs b/prover/crates/bin/circuit_prover/src/metrics.rs new file mode 100644 index 00000000000..e9f44591479 --- /dev/null +++ b/prover/crates/bin/circuit_prover/src/metrics.rs @@ -0,0 +1,80 @@ +use std::time::Duration; + +use vise::{Buckets, Histogram, Metrics}; + +#[derive(Debug, Metrics)] +#[metrics(prefix = "prover_binary")] +pub struct ProverBinaryMetrics { + /// How long does it take for the prover to load data before it can produce proofs? + #[metrics(buckets = Buckets::LATENCIES)] + pub start_up: Histogram<Duration>, + /// How long has the prover been running? + #[metrics(buckets = Buckets::LATENCIES)] + pub run_time: Histogram<Duration>, +} + +#[vise::register] +pub static PROVER_BINARY_METRICS: vise::Global<ProverBinaryMetrics> = vise::Global::new(); + +#[derive(Debug, Metrics)] +#[metrics(prefix = "witness_vector_generator")] +pub struct WitnessVectorGeneratorMetrics { + /// How long does the witness vector generator wait before a job is available? + #[metrics(buckets = Buckets::LATENCIES)] + pub job_wait_time: Histogram<Duration>, + /// How long does it take to load object store artifacts for a witness vector job? + #[metrics(buckets = Buckets::LATENCIES)] + pub artifact_download_time: Histogram<Duration>, + /// How long does the crypto witness generation primitive take? + #[metrics(buckets = Buckets::LATENCIES)] + pub crypto_primitive_time: Histogram<Duration>, + /// How long does it take for a job to be executed, from the moment it's loaded? + #[metrics(buckets = Buckets::LATENCIES)] + pub execution_time: Histogram<Duration>, + /// How long does it take to send a job to the prover? + /// This is relevant because the prover queue can apply back-pressure. + #[metrics(buckets = Buckets::LATENCIES)] + pub send_time: Histogram<Duration>, + /// How long does it take for a job to be considered finished, from the moment it's been loaded?
diff --git a/prover/crates/bin/circuit_prover/src/metrics.rs b/prover/crates/bin/circuit_prover/src/metrics.rs
new file mode 100644
index 00000000000..e9f44591479
--- /dev/null
+++ b/prover/crates/bin/circuit_prover/src/metrics.rs
@@ -0,0 +1,80 @@
+use std::time::Duration;
+
+use vise::{Buckets, Histogram, Metrics};
+
+#[derive(Debug, Metrics)]
+#[metrics(prefix = "prover_binary")]
+pub struct ProverBinaryMetrics {
+    /// How long does it take for the prover to load data before it can produce proofs?
+    #[metrics(buckets = Buckets::LATENCIES)]
+    pub start_up: Histogram<Duration>,
+    /// How long has the prover been running?
+    #[metrics(buckets = Buckets::LATENCIES)]
+    pub run_time: Histogram<Duration>,
+}
+
+#[vise::register]
+pub static PROVER_BINARY_METRICS: vise::Global<ProverBinaryMetrics> = vise::Global::new();
+
+#[derive(Debug, Metrics)]
+#[metrics(prefix = "witness_vector_generator")]
+pub struct WitnessVectorGeneratorMetrics {
+    /// How long does the witness vector generator wait before a job is available?
+    #[metrics(buckets = Buckets::LATENCIES)]
+    pub job_wait_time: Histogram<Duration>,
+    /// How long does it take to load object store artifacts for a witness vector job?
+    #[metrics(buckets = Buckets::LATENCIES)]
+    pub artifact_download_time: Histogram<Duration>,
+    /// How long does the crypto witness generation primitive take?
+    #[metrics(buckets = Buckets::LATENCIES)]
+    pub crypto_primitive_time: Histogram<Duration>,
+    /// How long does it take for a job to be executed, from the moment it's loaded?
+    #[metrics(buckets = Buckets::LATENCIES)]
+    pub execution_time: Histogram<Duration>,
+    /// How long does it take to send a job to the prover?
+    /// This is relevant because the prover queue can apply back-pressure.
+    #[metrics(buckets = Buckets::LATENCIES)]
+    pub send_time: Histogram<Duration>,
+    /// How long does it take for a job to be considered finished, from the moment it's been loaded?
+    #[metrics(buckets = Buckets::LATENCIES)]
+    pub job_finished_time: Histogram<Duration>,
+}
+
+#[vise::register]
+pub static WITNESS_VECTOR_GENERATOR_METRICS: vise::Global<WitnessVectorGeneratorMetrics> =
+    vise::Global::new();
+
+#[derive(Debug, Metrics)]
+#[metrics(prefix = "circuit_prover")]
+pub struct CircuitProverMetrics {
+    /// How long does the circuit prover wait before a job is available?
+    #[metrics(buckets = Buckets::LATENCIES)]
+    pub job_wait_time: Histogram<Duration>,
+    /// How long do the crypto primitives (proof generation & verification) take?
+    #[metrics(buckets = Buckets::LATENCIES)]
+    pub crypto_primitives_time: Histogram<Duration>,
+    /// How long does proof generation (crypto primitive) take?
+    #[metrics(buckets = Buckets::LATENCIES)]
+    pub generate_proof_time: Histogram<Duration>,
+    /// How long does proof verification (crypto primitive) take?
+    #[metrics(buckets = Buckets::LATENCIES)]
+    pub verify_proof_time: Histogram<Duration>,
+    /// How long does it take for a job to be executed, from the moment it's loaded?
+    #[metrics(buckets = Buckets::LATENCIES)]
+    pub execution_time: Histogram<Duration>,
+    /// How long does it take to upload a proof to the object store?
+    #[metrics(buckets = Buckets::LATENCIES)]
+    pub artifact_upload_time: Histogram<Duration>,
+    /// How long does it take to save a job?
+    #[metrics(buckets = Buckets::LATENCIES)]
+    pub save_time: Histogram<Duration>,
+    /// How long does it take for a job to be considered finished, from the moment it's been loaded?
+    #[metrics(buckets = Buckets::LATENCIES)]
+    pub job_finished_time: Histogram<Duration>,
+    /// How long does it take a job to go from witness generation to having the proof saved?
+    #[metrics(buckets = Buckets::LATENCIES)]
+    pub full_proving_time: Histogram<Duration>,
+}
+
+#[vise::register]
+pub static CIRCUIT_PROVER_METRICS: vise::Global<CircuitProverMetrics> = vise::Global::new();
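All of these histograms follow the same usage pattern: measure a stage with `Instant`, then `observe` the elapsed `Duration`; the registry plumbing is handled by `#[vise::register]`. A minimal sketch against the structs above:

```rust
use std::time::Instant;

// Sketch only: times an arbitrary stage and records it into one of the
// histograms defined in this file.
fn timed_stage() {
    let started_at = Instant::now();
    // ... do the actual work, e.g. synthesize a witness vector ...
    WITNESS_VECTOR_GENERATOR_METRICS
        .crypto_primitive_time
        .observe(started_at.elapsed());
}
```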
diff --git a/prover/crates/bin/circuit_prover/src/types.rs b/prover/crates/bin/circuit_prover/src/types.rs
new file mode 100644
index 00000000000..52cdd48b6b5
--- /dev/null
+++ b/prover/crates/bin/circuit_prover/src/types.rs
@@ -0,0 +1,31 @@
+use std::{collections::HashMap, sync::Arc};
+
+use zksync_prover_fri_types::{
+    circuit_definitions::boojum::{
+        algebraic_props::{
+            round_function::AbsorptionModeOverwrite, sponge::GoldilocksPoseidon2Sponge,
+        },
+        cs::implementations::{
+            proof::Proof as CryptoProof, setup::FinalizationHintsForProver,
+            transcript::GoldilocksPoisedon2Transcript,
+            verifier::VerificationKey as CryptoVerificationKey,
+        },
+        field::goldilocks::{GoldilocksExt2, GoldilocksField},
+    },
+    ProverServiceDataKey,
+};
+use zksync_prover_keystore::GoldilocksGpuProverSetupData;
+
+// prover types
+pub type DefaultTranscript = GoldilocksPoisedon2Transcript;
+pub type DefaultTreeHasher = GoldilocksPoseidon2Sponge<AbsorptionModeOverwrite>;
+
+type F = GoldilocksField;
+type H = GoldilocksPoseidon2Sponge<AbsorptionModeOverwrite>;
+type Ext = GoldilocksExt2;
+pub type Proof = CryptoProof<F, H, Ext>;
+pub type VerificationKey = CryptoVerificationKey<F, H>;
+
+// cache types
+pub type SetupDataCache = HashMap<ProverServiceDataKey, Arc<GoldilocksGpuProverSetupData>>;
+pub type FinalizationHintsCache = HashMap<ProverServiceDataKey, Arc<FinalizationHintsForProver>>;
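Both cache aliases are plain `HashMap`s keyed by `ProverServiceDataKey` with `Arc`-wrapped values, so handing an entry to a worker is a cheap pointer clone rather than a copy of the (large) setup data. An illustrative lookup helper:

```rust
use std::sync::Arc;

// Sketch only: SetupDataCache and GoldilocksGpuProverSetupData are the
// aliases/types defined in this file.
fn setup_data_for(
    cache: &SetupDataCache,
    key: ProverServiceDataKey,
) -> Option<Arc<GoldilocksGpuProverSetupData>> {
    // Arc::clone is O(1); the underlying setup data is shared, not duplicated.
    cache.get(&key).cloned()
}
```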
diff --git a/prover/crates/bin/circuit_prover/src/witness_vector_generator.rs b/prover/crates/bin/circuit_prover/src/witness_vector_generator.rs
new file mode 100644
index 00000000000..cb2d2a256df
--- /dev/null
+++ b/prover/crates/bin/circuit_prover/src/witness_vector_generator.rs
@@ -0,0 +1,345 @@
+use std::{collections::HashMap, sync::Arc, time::Instant};
+
+use anyhow::Context;
+use tokio::{sync::mpsc::Sender, task::JoinHandle};
+use tokio_util::sync::CancellationToken;
+use zksync_object_store::ObjectStore;
+use zksync_prover_dal::{ConnectionPool, Prover, ProverDal};
+use zksync_prover_fri_types::{
+    circuit_definitions::{
+        boojum::{
+            cs::implementations::setup::FinalizationHintsForProver,
+            field::goldilocks::GoldilocksField,
+            gadgets::queue::full_state_queue::FullStateCircuitQueueRawWitness,
+        },
+        circuit_definitions::base_layer::ZkSyncBaseLayerCircuit,
+    },
+    get_current_pod_name,
+    keys::RamPermutationQueueWitnessKey,
+    CircuitAuxData, CircuitWrapper, ProverJob, ProverServiceDataKey, RamPermutationQueueWitness,
+    WitnessVectorArtifactsTemp,
+};
+use zksync_types::{protocol_version::ProtocolSemanticVersion, L1BatchNumber};
+use zksync_utils::panic_extractor::try_extract_panic_message;
+
+use crate::{metrics::WITNESS_VECTOR_GENERATOR_METRICS, Backoff, FinalizationHintsCache};
+
+/// In charge of generating Witness Vectors and sending them to Circuit Prover.
+/// Both job runner & job executor.
+#[derive(Debug)]
+pub struct WitnessVectorGenerator {
+    object_store: Arc<dyn ObjectStore>,
+    connection_pool: ConnectionPool<Prover>,
+    protocol_version: ProtocolSemanticVersion,
+    /// Finalization Hints used for Witness Vector generation
+    finalization_hints_cache: FinalizationHintsCache,
+    /// Witness Vector sender for Circuit Prover
+    sender: Sender<WitnessVectorArtifactsTemp>,
+    pod_name: String,
+}
+
+impl WitnessVectorGenerator {
+    pub fn new(
+        object_store: Arc<dyn ObjectStore>,
+        connection_pool: ConnectionPool<Prover>,
+        protocol_version: ProtocolSemanticVersion,
+        sender: Sender<WitnessVectorArtifactsTemp>,
+        finalization_hints: HashMap<ProverServiceDataKey, Arc<FinalizationHintsForProver>>,
+    ) -> Self {
+        Self {
+            object_store,
+            connection_pool,
+            protocol_version,
+            finalization_hints_cache: finalization_hints,
+            sender,
+            pod_name: get_current_pod_name(),
+        }
+    }
+
+    /// Continuously polls database for new prover jobs and generates witness vectors for them.
+    /// All job executions are persisted.
+    pub async fn run(
+        self,
+        cancellation_token: CancellationToken,
+        mut backoff: Backoff,
+    ) -> anyhow::Result<()> {
+        let mut get_job_timer = Instant::now();
+        while !cancellation_token.is_cancelled() {
+            if let Some(prover_job) = self
+                .get_job()
+                .await
+                .context("failed to get next witness generation job")?
+            {
+                tracing::info!(
+                    "Witness Vector Generator received job {:?} after: {:?}",
+                    prover_job.job_id,
+                    get_job_timer.elapsed()
+                );
+                WITNESS_VECTOR_GENERATOR_METRICS
+                    .job_wait_time
+                    .observe(get_job_timer.elapsed());
+                if let e @ Err(_) = self.generate(prover_job, cancellation_token.clone()).await {
+                    // the witness vector receiver is closed because the prover is shutting down;
+                    // no need to report the error, just return
+                    if cancellation_token.is_cancelled() {
+                        return Ok(());
+                    }
+                    e.context("failed to generate witness")?
+                }
+
+                // the wait-for-job timer starts as soon as the previous job is finished
+                get_job_timer = Instant::now();
+                backoff.reset();
+                continue;
+            };
+            self.backoff(&mut backoff, cancellation_token.clone()).await;
+        }
+        tracing::info!("Witness Vector Generator shut down.");
+        Ok(())
+    }
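`Backoff` is defined elsewhere in this crate and is not part of this excerpt; from the `delay()`/`reset()` calls in `run()` it behaves like a resettable, capped exponential backoff. A minimal sketch consistent with that usage (field names and the doubling policy are assumptions, not the PR's actual implementation):

```rust
use std::time::Duration;

// Sketch only: capped exponential backoff matching how run() uses it.
#[derive(Debug, Clone)]
pub struct Backoff {
    base: Duration,
    current: Duration,
    max: Duration,
}

impl Backoff {
    pub fn new(base: Duration, max: Duration) -> Self {
        Self { base, current: base, max }
    }

    /// Returns the next delay, doubling it up to `max`.
    pub fn delay(&mut self) -> Duration {
        let delay = self.current;
        self.current = self.current.saturating_mul(2).min(self.max);
        delay
    }

    /// Called after a successful poll so the next idle period starts small.
    pub fn reset(&mut self) {
        self.current = self.base;
    }
}
```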
+
+    /// Retrieves a prover job from database, loads artifacts from object store and hydrates them.
+    async fn get_job(&self) -> anyhow::Result<Option<ProverJob>> {
+        let mut connection = self
+            .connection_pool
+            .connection()
+            .await
+            .context("failed to get db connection")?;
+        let prover_job_metadata = match connection
+            .fri_prover_jobs_dal()
+            .get_job(self.protocol_version, &self.pod_name)
+            .await
+        {
+            None => return Ok(None),
+            Some(job) => job,
+        };
+
+        let time = Instant::now();
+        let circuit_wrapper = self
+            .object_store
+            .get(prover_job_metadata.into())
+            .await
+            .context("failed to get circuit_wrapper from object store")?;
+        let artifact = match circuit_wrapper {
+            a @ CircuitWrapper::Base(_) => a,
+            a @ CircuitWrapper::Recursive(_) => a,
+            CircuitWrapper::BasePartial((circuit, aux_data)) => self
+                .fill_witness(circuit, aux_data, prover_job_metadata.block_number)
+                .await
+                .context("failed to fill witness")?,
+        };
+        WITNESS_VECTOR_GENERATOR_METRICS
+            .artifact_download_time
+            .observe(time.elapsed());
+
+        let setup_data_key = ProverServiceDataKey {
+            circuit_id: prover_job_metadata.circuit_id,
+            round: prover_job_metadata.aggregation_round,
+        }
+        .crypto_setup_key();
+        let prover_job = ProverJob::new(
+            prover_job_metadata.block_number,
+            prover_job_metadata.id,
+            artifact,
+            setup_data_key,
+        );
+        Ok(Some(prover_job))
+    }
+
+    /// Prover artifact hydration.
+    async fn fill_witness(
+        &self,
+        circuit: ZkSyncBaseLayerCircuit,
+        aux_data: CircuitAuxData,
+        l1_batch_number: L1BatchNumber,
+    ) -> anyhow::Result<CircuitWrapper> {
+        if let ZkSyncBaseLayerCircuit::RAMPermutation(circuit_instance) = circuit {
+            let sorted_witness_key = RamPermutationQueueWitnessKey {
+                block_number: l1_batch_number,
+                circuit_subsequence_number: aux_data.circuit_subsequence_number as usize,
+                is_sorted: true,
+            };
+            let sorted_witness: RamPermutationQueueWitness = self
+                .object_store
+                .get(sorted_witness_key)
+                .await
+                .context("failed to load sorted witness")?;
+
+            let unsorted_witness_key = RamPermutationQueueWitnessKey {
+                block_number: l1_batch_number,
+                circuit_subsequence_number: aux_data.circuit_subsequence_number as usize,
+                is_sorted: false,
+            };
+            let unsorted_witness: RamPermutationQueueWitness = self
+                .object_store
+                .get(unsorted_witness_key)
+                .await
+                .context("failed to load unsorted witness")?;
+
+            let mut witness = circuit_instance.witness.take().unwrap();
+            witness.unsorted_queue_witness = FullStateCircuitQueueRawWitness {
+                elements: unsorted_witness.witness.into(),
+            };
+            witness.sorted_queue_witness = FullStateCircuitQueueRawWitness {
+                elements: sorted_witness.witness.into(),
+            };
+            circuit_instance.witness.store(Some(witness));
+
+            return Ok(CircuitWrapper::Base(
+                ZkSyncBaseLayerCircuit::RAMPermutation(circuit_instance),
+            ));
+        }
+        Err(anyhow::anyhow!(
+            "unexpected circuit received with partial witness, expected RAM permutation, got {:?}",
+            circuit.short_description()
+        ))
+    }
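`BasePartial` circuits arrive dehydrated: the bulky RAM permutation queue witnesses are stored under their own object-store keys and re-attached above. The two halves for one sub-circuit differ only in `is_sorted`, as this small sketch of the key pair makes explicit:

```rust
// Sketch only: the pair of keys fill_witness() fetches for one sub-circuit.
fn witness_keys(
    block_number: L1BatchNumber,
    circuit_subsequence_number: usize,
) -> (RamPermutationQueueWitnessKey, RamPermutationQueueWitnessKey) {
    let sorted = RamPermutationQueueWitnessKey {
        block_number,
        circuit_subsequence_number,
        is_sorted: true,
    };
    let unsorted = RamPermutationQueueWitnessKey {
        block_number,
        circuit_subsequence_number,
        is_sorted: false,
    };
    (sorted, unsorted)
}
```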
+
+    /// Generates witness vector, with persistence of execution.
+    async fn generate(
+        &self,
+        prover_job: ProverJob,
+        cancellation_token: CancellationToken,
+    ) -> anyhow::Result<()> {
+        let start_time = Instant::now();
+        let finalization_hints = self
+            .finalization_hints_cache
+            .get(&prover_job.setup_data_key)
+            .context(format!(
+                "failed to get finalization hints for key {:?}",
+                &prover_job.setup_data_key
+            ))?
+            .clone();
+        let job_id = prover_job.job_id;
+        let task = tokio::task::spawn_blocking(move || {
+            let block_number = prover_job.block_number;
+            let _span = tracing::info_span!("witness_vector_generator", %block_number).entered();
+            Self::generate_witness_vector(prover_job, finalization_hints)
+        });
+
+        self.finish_task(job_id, start_time, task, cancellation_token.clone())
+            .await?;
+
+        tracing::info!(
+            "Witness Vector Generator finished job {:?} in: {:?}",
+            job_id,
+            start_time.elapsed()
+        );
+        WITNESS_VECTOR_GENERATOR_METRICS
+            .job_finished_time
+            .observe(start_time.elapsed());
+        Ok(())
+    }
+
+    /// Generates witness vector using crypto primitives.
+    #[tracing::instrument(
+        skip_all,
+        fields(l1_batch = % prover_job.block_number)
+    )]
+    pub fn generate_witness_vector(
+        prover_job: ProverJob,
+        finalization_hints: Arc<FinalizationHintsForProver>,
+    ) -> anyhow::Result<WitnessVectorArtifactsTemp> {
+        let time = Instant::now();
+        let cs = match prover_job.circuit_wrapper.clone() {
+            CircuitWrapper::Base(base_circuit) => {
+                base_circuit.synthesis::<GoldilocksField>(&finalization_hints)
+            }
+            CircuitWrapper::Recursive(recursive_circuit) => {
+                recursive_circuit.synthesis::<GoldilocksField>(&finalization_hints)
+            }
+            // circuit must be hydrated during `get_job`
+            CircuitWrapper::BasePartial(_) => {
+                return Err(anyhow::anyhow!("received unexpected dehydrated proof"));
+            }
+        };
+        WITNESS_VECTOR_GENERATOR_METRICS
+            .crypto_primitive_time
+            .observe(time.elapsed());
+        Ok(WitnessVectorArtifactsTemp::new(
+            cs.witness.unwrap(),
+            prover_job,
+            time,
+        ))
+    }
+
+    /// Runs task to completion and persists result.
+    /// NOTE: Task may be cancelled mid-flight.
+    async fn finish_task(
+        &self,
+        job_id: u32,
+        time: Instant,
+        task: JoinHandle<anyhow::Result<WitnessVectorArtifactsTemp>>,
+        cancellation_token: CancellationToken,
+    ) -> anyhow::Result<()> {
+        tokio::select! {
+            _ = cancellation_token.cancelled() => {
+                tracing::info!("Stop signal received, shutting down Witness Vector Generator...");
+                return Ok(())
+            }
+            result = task => {
+                let error_message = match result {
+                    Ok(Ok(witness_vector)) => {
+                        tracing::info!("Witness Vector Generator executed job {:?} in: {:?}", job_id, time.elapsed());
+                        WITNESS_VECTOR_GENERATOR_METRICS.execution_time.observe(time.elapsed());
+                        self
+                            .save_result(witness_vector, job_id)
+                            .await
+                            .context("failed to save result")?;
+                        return Ok(())
+                    }
+                    Ok(Err(error)) => error.to_string(),
+                    Err(error) => try_extract_panic_message(error),
+                };
+                tracing::error!("Witness Vector Generator failed on job {job_id:?} with error {error_message:?}");
+
+                self.save_failure(job_id, error_message).await.context("failed to save failure")?;
+            }
+        }
+
+        Ok(())
+    }
+
+    /// Sends the witness vector to Circuit Prover.
+    async fn save_result(
+        &self,
+        artifacts: WitnessVectorArtifactsTemp,
+        job_id: u32,
+    ) -> anyhow::Result<()> {
+        let time = Instant::now();
+        self.sender
+            .send(artifacts)
+            .await
+            .context("failed to send witness vector to prover")?;
+        tracing::info!(
+            "Witness Vector Generator sent job {:?} after {:?}",
+            job_id,
+            time.elapsed()
+        );
+        WITNESS_VECTOR_GENERATOR_METRICS
+            .send_time
+            .observe(time.elapsed());
+        Ok(())
+    }
+
+    /// Persists job execution error to database.
+    async fn save_failure(&self, job_id: u32, error: String) -> anyhow::Result<()> {
+        self.connection_pool
+            .connection()
+            .await
+            .context("failed to get db connection")?
+            .fri_prover_jobs_dal()
+            .save_proof_error(job_id, error)
+            .await;
+        Ok(())
+    }
+
+    /// Backs off, whilst being cancellation aware.
+    async fn backoff(&self, backoff: &mut Backoff, cancellation_token: CancellationToken) {
+        let backoff_duration = backoff.delay();
+        tracing::info!("Backing off for {:?}...", backoff_duration);
+        // Error here corresponds to a timeout w/o receiving task cancel; we're OK with this.
+        tokio::time::timeout(backoff_duration, cancellation_token.cancelled())
+            .await
+            .ok();
+    }
+}
diff --git a/prover/crates/bin/prover_fri/src/prover_job_processor.rs b/prover/crates/bin/prover_fri/src/prover_job_processor.rs
index bbfb1d5a832..5e8740d1b72 100644
--- a/prover/crates/bin/prover_fri/src/prover_job_processor.rs
+++ b/prover/crates/bin/prover_fri/src/prover_job_processor.rs
@@ -90,7 +90,7 @@ impl Prover {
         let started_at = Instant::now();
         let artifact: GoldilocksProverSetupData = self
             .keystore
-            .load_cpu_setup_data_for_circuit_type(key.clone())
+            .load_cpu_setup_data_for_circuit_type(key)
             .context("get_cpu_setup_data_for_circuit_type()")?;
         METRICS.gpu_setup_data_load_time[&key.circuit_id.to_string()]
             .observe(started_at.elapsed());
@@ -226,7 +226,7 @@ impl JobProcessor for Prover {
         _started_at: Instant,
     ) -> JoinHandle<anyhow::Result<Self::JobArtifacts>> {
         let config = Arc::clone(&self.config);
-        let setup_data = self.get_setup_data(job.setup_data_key.clone());
+        let setup_data = self.get_setup_data(job.setup_data_key);
         tokio::task::spawn_blocking(move || {
             let block_number = job.block_number;
             let _span = tracing::info_span!("cpu_prove", %block_number).entered();
@@ -307,7 +307,7 @@ pub fn load_setup_data_cache(
     for prover_setup_metadata in prover_setup_metadata_list {
         let key = setup_metadata_to_setup_data_key(&prover_setup_metadata);
         let setup_data = keystore
-            .load_cpu_setup_data_for_circuit_type(key.clone())
+            .load_cpu_setup_data_for_circuit_type(key)
            .context("get_cpu_setup_data_for_circuit_type()")?;
         cache.insert(key, Arc::new(setup_data));
     }
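The dropped `.clone()` calls in these hunks read as `ProverServiceDataKey` having become `Copy` (the derive itself is outside this excerpt, so treat that as an inference). With `Copy`, passing the key by value no longer moves it:

```rust
// Sketch only: a stand-in key type illustrating the by-value reuse above.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
struct Key {
    circuit_id: u8,
    round: u8,
}

fn load_twice(key: Key) {
    lookup(key); // key is copied into the call...
    lookup(key); // ...and still usable afterwards, no clone() needed
}

fn lookup(_key: Key) {}
```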
diff --git a/prover/crates/bin/prover_fri/src/utils.rs b/prover/crates/bin/prover_fri/src/utils.rs
index 181dc857c36..86e6568f8e4 100644
--- a/prover/crates/bin/prover_fri/src/utils.rs
+++ b/prover/crates/bin/prover_fri/src/utils.rs
@@ -200,7 +200,7 @@ mod tests {
             round: AggregationRound::BasicCircuits,
         };
 
-        let result = get_setup_data_key(key.clone());
+        let result = get_setup_data_key(key);
 
         // Check if the key has remained same
         assert_eq!(key, result);
diff --git a/prover/crates/bin/prover_job_monitor/Cargo.toml b/prover/crates/bin/prover_job_monitor/Cargo.toml
index 160d3a603e3..a4bf8765a94 100644
--- a/prover/crates/bin/prover_job_monitor/Cargo.toml
+++ b/prover/crates/bin/prover_job_monitor/Cargo.toml
@@ -16,6 +16,7 @@ zksync_prover_dal.workspace = true
 zksync_utils.workspace = true
 zksync_types.workspace = true
 zksync_config = { workspace = true, features = ["observability_ext"] }
+zksync_db_connection.workspace = true
 
 vise.workspace = true
 
@@ -25,3 +26,5 @@ clap = { workspace = true, features = ["derive"] }
 ctrlc = { workspace = true, features = ["termination"] }
 tracing.workspace = true
 async-trait.workspace = true
+serde.workspace = true
+axum.workspace = true
diff --git a/prover/crates/bin/prover_job_monitor/src/autoscaler_queue_reporter.rs b/prover/crates/bin/prover_job_monitor/src/autoscaler_queue_reporter.rs
new file mode 100644
index 00000000000..aff78409dbb
--- /dev/null
+++ b/prover/crates/bin/prover_job_monitor/src/autoscaler_queue_reporter.rs
@@ -0,0 +1,176 @@
+use std::collections::HashMap;
+
+use axum::{
+    http::StatusCode,
+    response::{IntoResponse, Response},
+    routing::get,
+    Json, Router,
+};
+use serde::{Deserialize, Serialize};
+use zksync_db_connection::error::DalError;
+use zksync_prover_dal::{ConnectionPool, Prover, ProverDal};
+use zksync_types::{
+    basic_fri_types::AggregationRound, protocol_version::ProtocolSemanticVersion,
+    prover_dal::JobCountStatistics,
+};
+
+#[derive(Debug, Clone)]
+pub struct AutoscalerQueueReporter {
+    connection_pool: ConnectionPool<Prover>,
+}
+
+#[derive(Default, Debug, Serialize, Deserialize)]
+pub struct QueueReport {
+    pub basic_witness_jobs: JobCountStatistics,
+    pub leaf_witness_jobs: JobCountStatistics,
+    pub node_witness_jobs: JobCountStatistics,
+    pub recursion_tip_witness_jobs: JobCountStatistics,
+    pub scheduler_witness_jobs: JobCountStatistics,
+    pub prover_jobs: JobCountStatistics,
+    pub proof_compressor_jobs: JobCountStatistics,
+}
+
+#[derive(Default, Debug, Serialize, Deserialize)]
+pub struct VersionedQueueReport {
+    pub version: ProtocolSemanticVersion,
+    pub report: QueueReport,
+}
+
+impl AutoscalerQueueReporter {
+    pub fn new(connection_pool: ConnectionPool<Prover>) -> Self {
+        Self { connection_pool }
+    }
+
+    pub async fn get_report(&self) -> Result<Json<Vec<VersionedQueueReport>>, ProcessorError> {
+        tracing::debug!("Received request to get queue report");
+
+        let mut result = HashMap::<ProtocolSemanticVersion, QueueReport>::new();
+
+        for round in AggregationRound::ALL_ROUNDS {
+            self.get_witness_jobs_report(round, &mut result).await?;
+        }
+
+        self.get_prover_jobs_report(&mut result).await?;
+        self.get_proof_compressor_jobs_report(&mut result).await?;
+
+        Ok(Json(
+            result
+                .into_iter()
+                .map(|(version, report)| VersionedQueueReport { version, report })
+                .collect(),
+        ))
+    }
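`VersionedQueueReport` derives `Serialize`, so the endpoint's payload is ordinary serde JSON; a sketch of inspecting that shape (`serde_json` availability is assumed here, it is not a dependency added by this hunk):

```rust
// Sketch only: serializes a report to inspect the wire format,
// e.g. {"version": ..., "report": {"basic_witness_jobs": {...}, ...}}.
fn report_json(report: &VersionedQueueReport) -> serde_json::Result<String> {
    serde_json::to_string_pretty(report)
}
```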
+
+    async fn get_witness_jobs_report(
+        &self,
+        aggregation_round: AggregationRound,
+        state: &mut HashMap<ProtocolSemanticVersion, QueueReport>,
+    ) -> anyhow::Result<()> {
+        let stats = self
+            .connection_pool
+            .connection()
+            .await?
+            .fri_witness_generator_dal()
+            .get_witness_jobs_stats(aggregation_round)
+            .await;
+
+        for (protocol_version, job_stats) in stats {
+            let report = state.entry(protocol_version).or_default();
+
+            match aggregation_round {
+                AggregationRound::BasicCircuits => report.basic_witness_jobs = job_stats,
+                AggregationRound::LeafAggregation => report.leaf_witness_jobs = job_stats,
+                AggregationRound::NodeAggregation => report.node_witness_jobs = job_stats,
+                AggregationRound::RecursionTip => report.recursion_tip_witness_jobs = job_stats,
+                AggregationRound::Scheduler => report.scheduler_witness_jobs = job_stats,
+            }
+        }
+        Ok(())
+    }
+
+    async fn get_prover_jobs_report(
+        &self,
+        state: &mut HashMap<ProtocolSemanticVersion, QueueReport>,
+    ) -> anyhow::Result<()> {
+        let stats = self
+            .connection_pool
+            .connection()
+            .await?
+            .fri_prover_jobs_dal()
+            .get_generic_prover_jobs_stats()
+            .await;
+
+        for (protocol_version, stats) in stats {
+            let report = state.entry(protocol_version).or_default();
+
+            report.prover_jobs = stats;
+        }
+        Ok(())
+    }
+
+    async fn get_proof_compressor_jobs_report(
+        &self,
+        state: &mut HashMap<ProtocolSemanticVersion, QueueReport>,
+    ) -> anyhow::Result<()> {
+        let stats = self
+            .connection_pool
+            .connection()
+            .await?
+            .fri_proof_compressor_dal()
+            .get_jobs_stats()
+            .await;
+
+        for (protocol_version, stats) in stats {
+            let report = state.entry(protocol_version).or_default();
+
+            report.proof_compressor_jobs = stats;
+        }
+
+        Ok(())
+    }
+}
+
+pub fn get_queue_reporter_router(connection_pool: ConnectionPool<Prover>) -> Router {
+    let autoscaler_queue_reporter = AutoscalerQueueReporter::new(connection_pool);
+
+    Router::new().route(
+        "/queue_report",
+        get(move || async move { autoscaler_queue_reporter.get_report().await }),
+    )
+}
+
+pub enum ProcessorError {
+    Dal(DalError),
+    Custom(String),
+}
+
+impl From<DalError> for ProcessorError {
+    fn from(err: DalError) -> Self {
+        ProcessorError::Dal(err)
+    }
+}
+
+impl From<anyhow::Error> for ProcessorError {
+    fn from(err: anyhow::Error) -> Self {
+        ProcessorError::Custom(err.to_string())
+    }
+}
+
+impl IntoResponse for ProcessorError {
+    fn into_response(self) -> Response {
+        let (status_code, message) = match self {
+            ProcessorError::Dal(err) => {
+                tracing::error!("Sqlx error: {:?}", err);
+                (
+                    StatusCode::INTERNAL_SERVER_ERROR,
+                    "Failed getting data from database",
+                )
+            }
+            ProcessorError::Custom(err) => {
+                tracing::error!("Custom error invoked: {:?}", &err);
+                (StatusCode::INTERNAL_SERVER_ERROR, "Internal error")
+            }
+        };
+        (status_code, message).into_response()
+    }
+}
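On the consuming side, the autoscaler just issues an HTTP GET against this router and decodes the JSON. A hedged client sketch (`reqwest` with its `json` feature is an assumption, not a dependency introduced by this PR; the port comes from `ProverJobMonitorConfig::http_port`, wired up in `main.rs` below):

```rust
// Sketch only: fetch and decode the queue report from a running PJM instance.
async fn fetch_report(port: u16) -> anyhow::Result<Vec<VersionedQueueReport>> {
    let url = format!("http://localhost:{port}/queue_report");
    let reports = reqwest::get(&url)
        .await?
        .error_for_status()?
        .json::<Vec<VersionedQueueReport>>()
        .await?;
    Ok(reports)
}
```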
diff --git a/prover/crates/bin/prover_job_monitor/src/lib.rs b/prover/crates/bin/prover_job_monitor/src/lib.rs
index 60d8be297cf..0d6a0ebe104 100644
--- a/prover/crates/bin/prover_job_monitor/src/lib.rs
+++ b/prover/crates/bin/prover_job_monitor/src/lib.rs
@@ -1,4 +1,5 @@
 pub mod archiver;
+pub mod autoscaler_queue_reporter;
 pub mod job_requeuer;
 pub(crate) mod metrics;
 pub mod queue_reporter;
diff --git a/prover/crates/bin/prover_job_monitor/src/main.rs b/prover/crates/bin/prover_job_monitor/src/main.rs
index 734a4bac38a..9195b92882d 100644
--- a/prover/crates/bin/prover_job_monitor/src/main.rs
+++ b/prover/crates/bin/prover_job_monitor/src/main.rs
@@ -1,3 +1,5 @@
+use std::{future::IntoFuture, net::SocketAddr};
+
 use anyhow::Context as _;
 use clap::Parser;
 use tokio::{
@@ -12,6 +14,7 @@ use zksync_core_leftovers::temp_config_store::{load_database_secrets, load_gener
 use zksync_prover_dal::{ConnectionPool, Prover};
 use zksync_prover_job_monitor::{
     archiver::{GpuProverArchiver, ProverJobsArchiver},
+    autoscaler_queue_reporter::get_queue_reporter_router,
     job_requeuer::{ProofCompressorJobRequeuer, ProverJobRequeuer, WitnessGeneratorJobRequeuer},
     queue_reporter::{
         ProofCompressorQueueReporter, ProverQueueReporter, WitnessGeneratorQueueReporter,
@@ -85,21 +88,42 @@ async fn main() -> anyhow::Result<()> {
     let mut tasks = vec![tokio::spawn(exporter_config.run(stop_receiver.clone()))];
 
     tasks.extend(get_tasks(
-        connection_pool,
-        prover_job_monitor_config,
+        connection_pool.clone(),
+        prover_job_monitor_config.clone(),
         proof_compressor_config,
         prover_config,
         witness_generator_config,
         prover_group_config,
-        stop_receiver,
+        stop_receiver.clone(),
     )?);
     let mut tasks = ManagedTasks::new(tasks);
 
+    let bind_address = SocketAddr::from(([0, 0, 0, 0], prover_job_monitor_config.http_port));
+
+    tracing::info!("Starting PJM server on {bind_address}");
+
+    let listener = tokio::net::TcpListener::bind(bind_address)
+        .await
+        .with_context(|| format!("Failed binding PJM server to {bind_address}"))?;
+
+    let mut receiver = stop_receiver.clone();
+    let app = axum::serve(listener, get_queue_reporter_router(connection_pool))
+        .with_graceful_shutdown(async move {
+            if receiver.changed().await.is_err() {
+                tracing::warn!(
+                    "Stop signal sender for PJM server was dropped without sending a signal"
+                );
+            }
+            tracing::info!("Stop signal received, PJM server is shutting down");
+        })
+        .into_future();
+
     tokio::select! {
         _ = tasks.wait_single() => {},
         _ = stop_signal_receiver => {
             tracing::info!("Stop signal received, shutting down");
         }
+        _ = app => {}
     }
     stop_sender.send(true).ok();
     tasks.complete(graceful_shutdown_timeout).await;
diff --git a/prover/crates/bin/prover_job_monitor/src/queue_reporter/witness_generator_queue_reporter.rs b/prover/crates/bin/prover_job_monitor/src/queue_reporter/witness_generator_queue_reporter.rs
index 5f507a75364..914f2e9ca85 100644
--- a/prover/crates/bin/prover_job_monitor/src/queue_reporter/witness_generator_queue_reporter.rs
+++ b/prover/crates/bin/prover_job_monitor/src/queue_reporter/witness_generator_queue_reporter.rs
@@ -58,7 +58,7 @@ impl Task for WitnessGeneratorQueueReporter {
             .fri_witness_generator_dal()
             .get_witness_jobs_stats(round)
             .await;
-        for ((round, semantic_protocol_version), job_stats) in stats {
+        for (semantic_protocol_version, job_stats) in stats {
             Self::emit_metrics_for_round(round, semantic_protocol_version, &job_stats);
         }
     }
diff --git a/prover/crates/bin/witness_generator/src/artifacts.rs b/prover/crates/bin/witness_generator/src/artifacts.rs
index f509d3b2f64..0c6044692dd 100644
--- a/prover/crates/bin/witness_generator/src/artifacts.rs
+++ b/prover/crates/bin/witness_generator/src/artifacts.rs
@@ -1,50 +1,40 @@
-use std::time::Instant;
+use std::{sync::Arc, time::Instant};
 
 use async_trait::async_trait;
 use zksync_object_store::ObjectStore;
 use zksync_prover_dal::{ConnectionPool, Prover};
 
 #[derive(Debug)]
-pub(crate) struct AggregationBlobUrls {
-    pub aggregations_urls: String,
+pub struct AggregationBlobUrls {
+    pub aggregation_urls: String,
     pub circuit_ids_and_urls: Vec<(u8, String)>,
 }
 
-#[derive(Debug)]
-pub(crate) struct SchedulerBlobUrls {
-    pub circuit_ids_and_urls: Vec<(u8, String)>,
-    pub closed_form_inputs_and_urls: Vec<(u8, String, usize)>,
-    pub scheduler_witness_url: String,
-}
-
-pub(crate) enum BlobUrls {
-    Url(String),
-    Aggregation(AggregationBlobUrls),
-    Scheduler(SchedulerBlobUrls),
-}
-
 #[async_trait]
-pub(crate) trait ArtifactsManager {
+pub trait ArtifactsManager {
     type InputMetadata;
     type InputArtifacts;
-    type OutputArtifacts;
+    type OutputArtifacts: Send + Clone + 'static;
+    type BlobUrls;
 
     async fn get_artifacts(
         metadata: &Self::InputMetadata,
         object_store: &dyn ObjectStore,
     ) -> anyhow::Result<Self::InputArtifacts>;
 
-    async fn save_artifacts(
+    async fn save_to_bucket(
         job_id: u32,
         artifacts: Self::OutputArtifacts,
         object_store: &dyn ObjectStore,
-    ) -> BlobUrls;
+        shall_save_to_public_bucket: bool,
+        public_blob_store: Option<Arc<dyn ObjectStore>>,
+    ) -> Self::BlobUrls;
 
-    async fn update_database(
+    async fn save_to_database(
         connection_pool: &ConnectionPool<Prover>,
         job_id: u32,
         started_at: Instant,
-        blob_urls: BlobUrls,
+        blob_urls: Self::BlobUrls,
         artifacts: Self::OutputArtifacts,
     ) -> anyhow::Result<()>;
 }
diff --git a/prover/crates/bin/witness_generator/src/basic_circuits/job_processor.rs b/prover/crates/bin/witness_generator/src/basic_circuits/job_processor.rs
deleted file mode 100644
index 08732689e3a..00000000000
--- a/prover/crates/bin/witness_generator/src/basic_circuits/job_processor.rs
+++ /dev/null
@@ -1,153 +0,0 @@
-use std::{sync::Arc, time::Instant};
-
-use anyhow::Context as _;
-use tracing::Instrument;
-use zksync_prover_dal::ProverDal;
-use zksync_prover_fri_types::{get_current_pod_name, AuxOutputWitnessWrapper};
-use zksync_queued_job_processor::{async_trait,
JobProcessor}; -use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber}; - -use crate::{ - artifacts::{ArtifactsManager, BlobUrls, SchedulerBlobUrls}, - basic_circuits::{BasicCircuitArtifacts, BasicWitnessGenerator, BasicWitnessGeneratorJob}, - metrics::WITNESS_GENERATOR_METRICS, -}; - -#[async_trait] -impl JobProcessor for BasicWitnessGenerator { - type Job = BasicWitnessGeneratorJob; - type JobId = L1BatchNumber; - // The artifact is optional to support skipping blocks when sampling is enabled. - type JobArtifacts = Option; - - const SERVICE_NAME: &'static str = "fri_basic_circuit_witness_generator"; - - async fn get_next_job(&self) -> anyhow::Result> { - let mut prover_connection = self.prover_connection_pool.connection().await?; - let last_l1_batch_to_process = self.config.last_l1_batch_to_process(); - let pod_name = get_current_pod_name(); - match prover_connection - .fri_witness_generator_dal() - .get_next_basic_circuit_witness_job( - last_l1_batch_to_process, - self.protocol_version, - &pod_name, - ) - .await - { - Some(block_number) => { - tracing::info!( - "Processing FRI basic witness-gen for block {}", - block_number - ); - let started_at = Instant::now(); - let job = Self::get_artifacts(&block_number, &*self.object_store).await?; - - WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::BasicCircuits.into()] - .observe(started_at.elapsed()); - - Ok(Some((block_number, job))) - } - None => Ok(None), - } - } - - async fn save_failure(&self, job_id: L1BatchNumber, _started_at: Instant, error: String) -> () { - self.prover_connection_pool - .connection() - .await - .unwrap() - .fri_witness_generator_dal() - .mark_witness_job_failed(&error, job_id) - .await; - } - - #[allow(clippy::async_yields_async)] - async fn process_job( - &self, - _job_id: &Self::JobId, - job: BasicWitnessGeneratorJob, - started_at: Instant, - ) -> tokio::task::JoinHandle>> { - let object_store = Arc::clone(&self.object_store); - let max_circuits_in_flight = self.config.max_circuits_in_flight; - tokio::spawn(async move { - let block_number = job.block_number; - Ok( - Self::process_job_impl(object_store, job, started_at, max_circuits_in_flight) - .instrument(tracing::info_span!("basic_circuit", %block_number)) - .await, - ) - }) - } - - #[tracing::instrument(skip_all, fields(l1_batch = %job_id))] - async fn save_result( - &self, - job_id: L1BatchNumber, - started_at: Instant, - optional_artifacts: Option, - ) -> anyhow::Result<()> { - match optional_artifacts { - None => Ok(()), - Some(artifacts) => { - let blob_started_at = Instant::now(); - let circuit_urls = artifacts.circuit_urls.clone(); - let queue_urls = artifacts.queue_urls.clone(); - - let aux_output_witness_wrapper = - AuxOutputWitnessWrapper(artifacts.aux_output_witness.clone()); - if self.config.shall_save_to_public_bucket { - self.public_blob_store.as_deref() - .expect("public_object_store shall not be empty while running with shall_save_to_public_bucket config") - .put(job_id, &aux_output_witness_wrapper) - .await - .unwrap(); - } - - let scheduler_witness_url = - match Self::save_artifacts(job_id.0, artifacts.clone(), &*self.object_store) - .await - { - BlobUrls::Url(url) => url, - _ => unreachable!(), - }; - - WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::BasicCircuits.into()] - .observe(blob_started_at.elapsed()); - - Self::update_database( - &self.prover_connection_pool, - job_id.0, - started_at, - BlobUrls::Scheduler(SchedulerBlobUrls { - circuit_ids_and_urls: circuit_urls, - closed_form_inputs_and_urls: 
queue_urls, - scheduler_witness_url, - }), - artifacts, - ) - .await?; - Ok(()) - } - } - } - - fn max_attempts(&self) -> u32 { - self.config.max_attempts - } - - async fn get_job_attempts(&self, job_id: &L1BatchNumber) -> anyhow::Result { - let mut prover_storage = self - .prover_connection_pool - .connection() - .await - .context("failed to acquire DB connection for BasicWitnessGenerator")?; - prover_storage - .fri_witness_generator_dal() - .get_basic_circuit_witness_job_attempts(*job_id) - .await - .map(|attempts| attempts.unwrap_or(0)) - .context("failed to get job attempts for BasicWitnessGenerator") - } -} diff --git a/prover/crates/bin/witness_generator/src/leaf_aggregation/job_processor.rs b/prover/crates/bin/witness_generator/src/leaf_aggregation/job_processor.rs deleted file mode 100644 index e032084151e..00000000000 --- a/prover/crates/bin/witness_generator/src/leaf_aggregation/job_processor.rs +++ /dev/null @@ -1,124 +0,0 @@ -use std::time::Instant; - -use anyhow::Context as _; -use async_trait::async_trait; -use zksync_prover_dal::ProverDal; -use zksync_prover_fri_types::get_current_pod_name; -use zksync_queued_job_processor::JobProcessor; -use zksync_types::basic_fri_types::AggregationRound; - -use crate::{ - artifacts::ArtifactsManager, - leaf_aggregation::{ - prepare_leaf_aggregation_job, LeafAggregationArtifacts, LeafAggregationWitnessGenerator, - LeafAggregationWitnessGeneratorJob, - }, - metrics::WITNESS_GENERATOR_METRICS, -}; - -#[async_trait] -impl JobProcessor for LeafAggregationWitnessGenerator { - type Job = LeafAggregationWitnessGeneratorJob; - type JobId = u32; - type JobArtifacts = LeafAggregationArtifacts; - - const SERVICE_NAME: &'static str = "fri_leaf_aggregation_witness_generator"; - - async fn get_next_job(&self) -> anyhow::Result> { - let mut prover_connection = self.prover_connection_pool.connection().await?; - let pod_name = get_current_pod_name(); - let Some(metadata) = prover_connection - .fri_witness_generator_dal() - .get_next_leaf_aggregation_job(self.protocol_version, &pod_name) - .await - else { - return Ok(None); - }; - tracing::info!("Processing leaf aggregation job {:?}", metadata.id); - Ok(Some(( - metadata.id, - prepare_leaf_aggregation_job(metadata, &*self.object_store, self.keystore.clone()) - .await - .context("prepare_leaf_aggregation_job()")?, - ))) - } - - async fn save_failure(&self, job_id: u32, _started_at: Instant, error: String) -> () { - self.prover_connection_pool - .connection() - .await - .unwrap() - .fri_witness_generator_dal() - .mark_leaf_aggregation_job_failed(&error, job_id) - .await; - } - - #[allow(clippy::async_yields_async)] - async fn process_job( - &self, - _job_id: &Self::JobId, - job: LeafAggregationWitnessGeneratorJob, - started_at: Instant, - ) -> tokio::task::JoinHandle> { - let object_store = self.object_store.clone(); - let max_circuits_in_flight = self.config.max_circuits_in_flight; - tokio::spawn(async move { - Ok(Self::process_job_impl(job, started_at, object_store, max_circuits_in_flight).await) - }) - } - - async fn save_result( - &self, - job_id: u32, - started_at: Instant, - artifacts: LeafAggregationArtifacts, - ) -> anyhow::Result<()> { - let block_number = artifacts.block_number; - let circuit_id = artifacts.circuit_id; - tracing::info!( - "Saving leaf aggregation artifacts for block {} with circuit {}", - block_number.0, - circuit_id, - ); - - let blob_save_started_at = Instant::now(); - - let blob_urls = Self::save_artifacts(job_id, artifacts.clone(), &*self.object_store).await; - - 
WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::LeafAggregation.into()] - .observe(blob_save_started_at.elapsed()); - - tracing::info!( - "Saved leaf aggregation artifacts for block {} with circuit {}", - block_number.0, - circuit_id, - ); - Self::update_database( - &self.prover_connection_pool, - job_id, - started_at, - blob_urls, - artifacts, - ) - .await?; - Ok(()) - } - - fn max_attempts(&self) -> u32 { - self.config.max_attempts - } - - async fn get_job_attempts(&self, job_id: &u32) -> anyhow::Result { - let mut prover_storage = self - .prover_connection_pool - .connection() - .await - .context("failed to acquire DB connection for LeafAggregationWitnessGenerator")?; - prover_storage - .fri_witness_generator_dal() - .get_leaf_aggregation_job_attempts(*job_id) - .await - .map(|attempts| attempts.unwrap_or(0)) - .context("failed to get job attempts for LeafAggregationWitnessGenerator") - } -} diff --git a/prover/crates/bin/witness_generator/src/leaf_aggregation/mod.rs b/prover/crates/bin/witness_generator/src/leaf_aggregation/mod.rs deleted file mode 100644 index d669a4cc97e..00000000000 --- a/prover/crates/bin/witness_generator/src/leaf_aggregation/mod.rs +++ /dev/null @@ -1,263 +0,0 @@ -use std::{sync::Arc, time::Instant}; - -use anyhow::Context as _; -use circuit_definitions::circuit_definitions::recursion_layer::base_circuit_type_into_recursive_leaf_circuit_type; -use tokio::sync::Semaphore; -use zkevm_test_harness::{ - witness::recursive_aggregation::{ - compute_leaf_params, create_leaf_witness, split_recursion_queue, - }, - zkevm_circuits::scheduler::aux::BaseLayerCircuitType, -}; -use zksync_config::configs::FriWitnessGeneratorConfig; -use zksync_object_store::ObjectStore; -use zksync_prover_dal::{ConnectionPool, Prover}; -use zksync_prover_fri_types::{ - circuit_definitions::{ - boojum::field::goldilocks::GoldilocksField, - circuit_definitions::base_layer::{ - ZkSyncBaseLayerClosedFormInput, ZkSyncBaseLayerVerificationKey, - }, - encodings::recursion_request::RecursionQueueSimulator, - zkevm_circuits::recursion::leaf_layer::input::RecursionLeafParametersWitness, - }, - FriProofWrapper, -}; -use zksync_prover_keystore::keystore::Keystore; -use zksync_types::{ - basic_fri_types::AggregationRound, protocol_version::ProtocolSemanticVersion, - prover_dal::LeafAggregationJobMetadata, L1BatchNumber, -}; - -use crate::{ - artifacts::ArtifactsManager, - metrics::WITNESS_GENERATOR_METRICS, - utils::{ - load_proofs_for_job_ids, save_recursive_layer_prover_input_artifacts, - ClosedFormInputWrapper, - }, -}; - -mod artifacts; -mod job_processor; - -pub struct LeafAggregationWitnessGeneratorJob { - pub(crate) circuit_id: u8, - pub(crate) block_number: L1BatchNumber, - pub(crate) closed_form_inputs: ClosedFormInputWrapper, - pub(crate) proofs_ids: Vec, - pub(crate) base_vk: ZkSyncBaseLayerVerificationKey, - pub(crate) leaf_params: RecursionLeafParametersWitness, -} - -#[derive(Debug)] -pub struct LeafAggregationWitnessGenerator { - config: FriWitnessGeneratorConfig, - object_store: Arc, - prover_connection_pool: ConnectionPool, - protocol_version: ProtocolSemanticVersion, - keystore: Keystore, -} - -#[derive(Clone)] -pub struct LeafAggregationArtifacts { - circuit_id: u8, - block_number: L1BatchNumber, - pub aggregations: Vec<(u64, RecursionQueueSimulator)>, - pub circuit_ids_and_urls: Vec<(u8, String)>, - #[allow(dead_code)] - closed_form_inputs: Vec>, -} - -impl LeafAggregationWitnessGenerator { - pub fn new( - config: FriWitnessGeneratorConfig, - object_store: Arc, - 
prover_connection_pool: ConnectionPool, - protocol_version: ProtocolSemanticVersion, - keystore: Keystore, - ) -> Self { - Self { - config, - object_store, - prover_connection_pool, - protocol_version, - keystore, - } - } - - #[tracing::instrument( - skip_all, - fields(l1_batch = %leaf_job.block_number, circuit_id = %leaf_job.circuit_id) - )] - pub async fn process_job_impl( - leaf_job: LeafAggregationWitnessGeneratorJob, - started_at: Instant, - object_store: Arc, - max_circuits_in_flight: usize, - ) -> LeafAggregationArtifacts { - tracing::info!( - "Starting witness generation of type {:?} for block {} with circuit {}", - AggregationRound::LeafAggregation, - leaf_job.block_number.0, - leaf_job.circuit_id, - ); - process_leaf_aggregation_job(started_at, leaf_job, object_store, max_circuits_in_flight) - .await - } -} - -#[tracing::instrument( - skip_all, - fields(l1_batch = %metadata.block_number, circuit_id = %metadata.circuit_id) -)] -pub async fn prepare_leaf_aggregation_job( - metadata: LeafAggregationJobMetadata, - object_store: &dyn ObjectStore, - keystore: Keystore, -) -> anyhow::Result { - let started_at = Instant::now(); - let closed_form_input = - LeafAggregationWitnessGenerator::get_artifacts(&metadata, object_store).await?; - - WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::LeafAggregation.into()] - .observe(started_at.elapsed()); - - let started_at = Instant::now(); - let base_vk = keystore - .load_base_layer_verification_key(metadata.circuit_id) - .context("get_base_layer_vk_for_circuit_type()")?; - - let leaf_circuit_id = base_circuit_type_into_recursive_leaf_circuit_type( - BaseLayerCircuitType::from_numeric_value(metadata.circuit_id), - ) as u8; - - let leaf_vk = keystore - .load_recursive_layer_verification_key(leaf_circuit_id) - .context("get_recursive_layer_vk_for_circuit_type()")?; - let leaf_params = compute_leaf_params(metadata.circuit_id, base_vk.clone(), leaf_vk); - - WITNESS_GENERATOR_METRICS.prepare_job_time[&AggregationRound::LeafAggregation.into()] - .observe(started_at.elapsed()); - - Ok(LeafAggregationWitnessGeneratorJob { - circuit_id: metadata.circuit_id, - block_number: metadata.block_number, - closed_form_inputs: closed_form_input, - proofs_ids: metadata.prover_job_ids_for_proofs, - base_vk, - leaf_params, - }) -} - -#[tracing::instrument( - skip_all, - fields(l1_batch = %job.block_number, circuit_id = %job.circuit_id) -)] -pub async fn process_leaf_aggregation_job( - started_at: Instant, - job: LeafAggregationWitnessGeneratorJob, - object_store: Arc, - max_circuits_in_flight: usize, -) -> LeafAggregationArtifacts { - let circuit_id = job.circuit_id; - let queues = split_recursion_queue(job.closed_form_inputs.1); - - assert_eq!(circuit_id, job.base_vk.numeric_circuit_type()); - - let aggregations = queues - .iter() - .cloned() - .map(|queue| (circuit_id as u64, queue)) - .collect(); - - let mut proof_ids_iter = job.proofs_ids.into_iter(); - let mut proofs_ids = vec![]; - for queue in queues.iter() { - let proofs_ids_for_queue: Vec<_> = (&mut proof_ids_iter) - .take(queue.num_items as usize) - .collect(); - assert_eq!(queue.num_items as usize, proofs_ids_for_queue.len()); - proofs_ids.push(proofs_ids_for_queue); - } - - let semaphore = Arc::new(Semaphore::new(max_circuits_in_flight)); - - let mut handles = vec![]; - for (circuit_idx, (queue, proofs_ids_for_queue)) in - queues.into_iter().zip(proofs_ids).enumerate() - { - let semaphore = semaphore.clone(); - - let object_store = object_store.clone(); - let queue = queue.clone(); - let base_vk 
= job.base_vk.clone(); - let leaf_params = (circuit_id, job.leaf_params.clone()); - - let handle = tokio::task::spawn(async move { - let _permit = semaphore - .acquire() - .await - .expect("failed to get permit to process queues chunk"); - - let proofs = load_proofs_for_job_ids(&proofs_ids_for_queue, &*object_store).await; - let base_proofs = proofs - .into_iter() - .map(|wrapper| match wrapper { - FriProofWrapper::Base(base_proof) => base_proof, - FriProofWrapper::Recursive(_) => { - panic!( - "Expected only base proofs for leaf agg {} {}", - job.circuit_id, job.block_number - ); - } - }) - .collect(); - - let (_, circuit) = create_leaf_witness( - circuit_id.into(), - queue, - base_proofs, - &base_vk, - &leaf_params, - ); - - save_recursive_layer_prover_input_artifacts( - job.block_number, - circuit_idx, - vec![circuit], - AggregationRound::LeafAggregation, - 0, - &*object_store, - None, - ) - .await - }); - - handles.push(handle); - } - - let circuit_ids_and_urls_results = futures::future::join_all(handles).await; - let circuit_ids_and_urls = circuit_ids_and_urls_results - .into_iter() - .flat_map(|x| x.unwrap()) - .collect(); - - WITNESS_GENERATOR_METRICS.witness_generation_time[&AggregationRound::LeafAggregation.into()] - .observe(started_at.elapsed()); - - tracing::info!( - "Leaf witness generation for block {} with circuit id {}: is complete in {:?}.", - job.block_number.0, - circuit_id, - started_at.elapsed(), - ); - - LeafAggregationArtifacts { - circuit_id, - block_number: job.block_number, - aggregations, - circuit_ids_and_urls, - closed_form_inputs: job.closed_form_inputs.0, - } -} diff --git a/prover/crates/bin/witness_generator/src/lib.rs b/prover/crates/bin/witness_generator/src/lib.rs index c0ac9718c6e..651535e2e80 100644 --- a/prover/crates/bin/witness_generator/src/lib.rs +++ b/prover/crates/bin/witness_generator/src/lib.rs @@ -2,13 +2,9 @@ #![feature(generic_const_exprs)] pub mod artifacts; -pub mod basic_circuits; -pub mod leaf_aggregation; pub mod metrics; -pub mod node_aggregation; pub mod precalculated_merkle_paths_provider; -pub mod recursion_tip; -pub mod scheduler; +pub mod rounds; mod storage_oracle; #[cfg(test)] mod tests; diff --git a/prover/crates/bin/witness_generator/src/main.rs b/prover/crates/bin/witness_generator/src/main.rs index 9d75d8ddc6f..38aacf5d7a5 100644 --- a/prover/crates/bin/witness_generator/src/main.rs +++ b/prover/crates/bin/witness_generator/src/main.rs @@ -20,9 +20,10 @@ use zksync_types::{basic_fri_types::AggregationRound, protocol_version::Protocol use zksync_utils::wait_for_tasks::ManagedTasks; use zksync_vlog::prometheus::PrometheusExporterConfig; use zksync_witness_generator::{ - basic_circuits::BasicWitnessGenerator, leaf_aggregation::LeafAggregationWitnessGenerator, - metrics::SERVER_METRICS, node_aggregation::NodeAggregationWitnessGenerator, - recursion_tip::RecursionTipWitnessGenerator, scheduler::SchedulerWitnessGenerator, + metrics::SERVER_METRICS, + rounds::{ + BasicCircuits, LeafAggregation, NodeAggregation, RecursionTip, Scheduler, WitnessGenerator, + }, }; #[cfg(not(target_env = "msvc"))] @@ -132,15 +133,14 @@ async fn main() -> anyhow::Result<()> { .listener_port }; - let prover_connection_pool = - ConnectionPool::::singleton(database_secrets.prover_url()?) - .build() - .await - .context("failed to build a prover_connection_pool")?; + let connection_pool = ConnectionPool::::singleton(database_secrets.prover_url()?) 
+        .build()
+        .await
+        .context("failed to build a prover_connection_pool")?;
 
     let (stop_sender, stop_receiver) = watch::channel(false);
 
     let protocol_version = PROVER_PROTOCOL_SEMANTIC_VERSION;
 
-    ensure_protocol_alignment(&prover_connection_pool, protocol_version, &keystore)
+    ensure_protocol_alignment(&connection_pool, protocol_version, &keystore)
         .await
         .unwrap_or_else(|err| panic!("Protocol alignment check failed: {:?}", err));
@@ -191,65 +191,71 @@
         &protocol_version
     );
 
+    let public_blob_store = match config.shall_save_to_public_bucket {
+        false => None,
+        true => Some(
+            ObjectStoreFactory::new(
+                prover_config
+                    .public_object_store
+                    .clone()
+                    .expect("public_object_store"),
+            )
+            .create_store()
+            .await?,
+        ),
+    };
+
     let witness_generator_task = match round {
         AggregationRound::BasicCircuits => {
-            let public_blob_store = match config.shall_save_to_public_bucket {
-                false => None,
-                true => Some(
-                    ObjectStoreFactory::new(
-                        prover_config
-                            .public_object_store
-                            .clone()
-                            .expect("public_object_store"),
-                    )
-                    .create_store()
-                    .await?,
-                ),
-            };
-            let generator = BasicWitnessGenerator::new(
+            let generator = WitnessGenerator::<BasicCircuits>::new(
                 config.clone(),
                 store_factory.create_store().await?,
                 public_blob_store,
-                prover_connection_pool.clone(),
+                connection_pool.clone(),
                 protocol_version,
+                keystore.clone(),
             );
             generator.run(stop_receiver.clone(), opt.batch_size)
         }
         AggregationRound::LeafAggregation => {
-            let generator = LeafAggregationWitnessGenerator::new(
+            let generator = WitnessGenerator::<LeafAggregation>::new(
                 config.clone(),
                 store_factory.create_store().await?,
-                prover_connection_pool.clone(),
+                public_blob_store,
+                connection_pool.clone(),
                 protocol_version,
                 keystore.clone(),
             );
             generator.run(stop_receiver.clone(), opt.batch_size)
         }
         AggregationRound::NodeAggregation => {
-            let generator = NodeAggregationWitnessGenerator::new(
+            let generator = WitnessGenerator::<NodeAggregation>::new(
                 config.clone(),
                 store_factory.create_store().await?,
-                prover_connection_pool.clone(),
+                public_blob_store,
+                connection_pool.clone(),
                 protocol_version,
                 keystore.clone(),
            );
             generator.run(stop_receiver.clone(), opt.batch_size)
         }
         AggregationRound::RecursionTip => {
-            let generator = RecursionTipWitnessGenerator::new(
+            let generator = WitnessGenerator::<RecursionTip>::new(
                 config.clone(),
                 store_factory.create_store().await?,
-                prover_connection_pool.clone(),
+                public_blob_store,
+                connection_pool.clone(),
                 protocol_version,
                 keystore.clone(),
             );
             generator.run(stop_receiver.clone(), opt.batch_size)
         }
         AggregationRound::Scheduler => {
-            let generator = SchedulerWitnessGenerator::new(
+            let generator = WitnessGenerator::<Scheduler>::new(
                 config.clone(),
                 store_factory.create_store().await?,
-                prover_connection_pool.clone(),
+                public_blob_store,
+                connection_pool.clone(),
                 protocol_version,
                 keystore.clone(),
             );
diff --git a/prover/crates/bin/witness_generator/src/node_aggregation/job_processor.rs b/prover/crates/bin/witness_generator/src/node_aggregation/job_processor.rs
deleted file mode 100644
index a015462cd6f..00000000000
--- a/prover/crates/bin/witness_generator/src/node_aggregation/job_processor.rs
+++ /dev/null
@@ -1,115 +0,0 @@
-use std::time::Instant;
-
-use anyhow::Context as _;
-use async_trait::async_trait;
-use zksync_prover_dal::ProverDal;
-use zksync_prover_fri_types::get_current_pod_name;
-use zksync_queued_job_processor::JobProcessor;
-use zksync_types::basic_fri_types::AggregationRound;
-
-use crate::{
-    artifacts::ArtifactsManager,
-    metrics::WITNESS_GENERATOR_METRICS,
-    node_aggregation::{
-        prepare_job,
NodeAggregationArtifacts, NodeAggregationWitnessGenerator, - NodeAggregationWitnessGeneratorJob, - }, -}; - -#[async_trait] -impl JobProcessor for NodeAggregationWitnessGenerator { - type Job = NodeAggregationWitnessGeneratorJob; - type JobId = u32; - type JobArtifacts = NodeAggregationArtifacts; - - const SERVICE_NAME: &'static str = "fri_node_aggregation_witness_generator"; - - async fn get_next_job(&self) -> anyhow::Result> { - let mut prover_connection = self.prover_connection_pool.connection().await?; - let pod_name = get_current_pod_name(); - let Some(metadata) = prover_connection - .fri_witness_generator_dal() - .get_next_node_aggregation_job(self.protocol_version, &pod_name) - .await - else { - return Ok(None); - }; - tracing::info!("Processing node aggregation job {:?}", metadata.id); - Ok(Some(( - metadata.id, - prepare_job(metadata, &*self.object_store, self.keystore.clone()) - .await - .context("prepare_job()")?, - ))) - } - - async fn save_failure(&self, job_id: u32, _started_at: Instant, error: String) -> () { - self.prover_connection_pool - .connection() - .await - .unwrap() - .fri_witness_generator_dal() - .mark_node_aggregation_job_failed(&error, job_id) - .await; - } - - #[allow(clippy::async_yields_async)] - async fn process_job( - &self, - _job_id: &Self::JobId, - job: NodeAggregationWitnessGeneratorJob, - started_at: Instant, - ) -> tokio::task::JoinHandle> { - let object_store = self.object_store.clone(); - let max_circuits_in_flight = self.config.max_circuits_in_flight; - tokio::spawn(async move { - Ok(Self::process_job_impl(job, started_at, object_store, max_circuits_in_flight).await) - }) - } - - #[tracing::instrument( - skip_all, - fields(l1_batch = % artifacts.block_number, circuit_id = % artifacts.circuit_id) - )] - async fn save_result( - &self, - job_id: u32, - started_at: Instant, - artifacts: NodeAggregationArtifacts, - ) -> anyhow::Result<()> { - let blob_save_started_at = Instant::now(); - - let blob_urls = Self::save_artifacts(job_id, artifacts.clone(), &*self.object_store).await; - - WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::NodeAggregation.into()] - .observe(blob_save_started_at.elapsed()); - - Self::update_database( - &self.prover_connection_pool, - job_id, - started_at, - blob_urls, - artifacts, - ) - .await?; - Ok(()) - } - - fn max_attempts(&self) -> u32 { - self.config.max_attempts - } - - async fn get_job_attempts(&self, job_id: &u32) -> anyhow::Result { - let mut prover_storage = self - .prover_connection_pool - .connection() - .await - .context("failed to acquire DB connection for NodeAggregationWitnessGenerator")?; - prover_storage - .fri_witness_generator_dal() - .get_node_aggregation_job_attempts(*job_id) - .await - .map(|attempts| attempts.unwrap_or(0)) - .context("failed to get job attempts for NodeAggregationWitnessGenerator") - } -} diff --git a/prover/crates/bin/witness_generator/src/recursion_tip/job_processor.rs b/prover/crates/bin/witness_generator/src/recursion_tip/job_processor.rs deleted file mode 100644 index f114724cfec..00000000000 --- a/prover/crates/bin/witness_generator/src/recursion_tip/job_processor.rs +++ /dev/null @@ -1,130 +0,0 @@ -use std::time::Instant; - -use anyhow::Context as _; -use async_trait::async_trait; -use zksync_prover_dal::ProverDal; -use zksync_prover_fri_types::get_current_pod_name; -use zksync_queued_job_processor::JobProcessor; -use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber}; - -use crate::{ - artifacts::ArtifactsManager, - metrics::WITNESS_GENERATOR_METRICS, - 
recursion_tip::{ - prepare_job, RecursionTipArtifacts, RecursionTipWitnessGenerator, - RecursionTipWitnessGeneratorJob, - }, -}; - -#[async_trait] -impl JobProcessor for RecursionTipWitnessGenerator { - type Job = RecursionTipWitnessGeneratorJob; - type JobId = L1BatchNumber; - type JobArtifacts = RecursionTipArtifacts; - - const SERVICE_NAME: &'static str = "recursion_tip_witness_generator"; - - async fn get_next_job(&self) -> anyhow::Result> { - let mut prover_connection = self.prover_connection_pool.connection().await?; - let pod_name = get_current_pod_name(); - let Some((l1_batch_number, number_of_final_node_jobs)) = prover_connection - .fri_witness_generator_dal() - .get_next_recursion_tip_witness_job(self.protocol_version, &pod_name) - .await - else { - return Ok(None); - }; - - let final_node_proof_job_ids = prover_connection - .fri_prover_jobs_dal() - .get_final_node_proof_job_ids_for(l1_batch_number) - .await; - - assert_eq!( - final_node_proof_job_ids.len(), - number_of_final_node_jobs as usize, - "recursion tip witness job was scheduled without all final node jobs being completed; expected {}, got {}", - number_of_final_node_jobs, final_node_proof_job_ids.len() - ); - - Ok(Some(( - l1_batch_number, - prepare_job( - l1_batch_number, - final_node_proof_job_ids, - &*self.object_store, - self.keystore.clone(), - ) - .await - .context("prepare_job()")?, - ))) - } - - async fn save_failure(&self, job_id: L1BatchNumber, _started_at: Instant, error: String) -> () { - self.prover_connection_pool - .connection() - .await - .unwrap() - .fri_witness_generator_dal() - .mark_recursion_tip_job_failed(&error, job_id) - .await; - } - - #[allow(clippy::async_yields_async)] - async fn process_job( - &self, - _job_id: &Self::JobId, - job: RecursionTipWitnessGeneratorJob, - started_at: Instant, - ) -> tokio::task::JoinHandle> { - tokio::task::spawn_blocking(move || Ok(Self::process_job_sync(job, started_at))) - } - - #[tracing::instrument( - skip_all, - fields(l1_batch = %job_id) - )] - async fn save_result( - &self, - job_id: L1BatchNumber, - started_at: Instant, - artifacts: RecursionTipArtifacts, - ) -> anyhow::Result<()> { - let blob_save_started_at = Instant::now(); - - let blob_urls = - Self::save_artifacts(job_id.0, artifacts.clone(), &*self.object_store).await; - - WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::RecursionTip.into()] - .observe(blob_save_started_at.elapsed()); - - Self::update_database( - &self.prover_connection_pool, - job_id.0, - started_at, - blob_urls, - artifacts, - ) - .await?; - - Ok(()) - } - - fn max_attempts(&self) -> u32 { - self.config.max_attempts - } - - async fn get_job_attempts(&self, job_id: &L1BatchNumber) -> anyhow::Result { - let mut prover_storage = self - .prover_connection_pool - .connection() - .await - .context("failed to acquire DB connection for RecursionTipWitnessGenerator")?; - prover_storage - .fri_witness_generator_dal() - .get_recursion_tip_witness_job_attempts(*job_id) - .await - .map(|attempts| attempts.unwrap_or(0)) - .context("failed to get job attempts for RecursionTipWitnessGenerator") - } -} diff --git a/prover/crates/bin/witness_generator/src/recursion_tip/mod.rs b/prover/crates/bin/witness_generator/src/recursion_tip/mod.rs deleted file mode 100644 index 4abb56a7d78..00000000000 --- a/prover/crates/bin/witness_generator/src/recursion_tip/mod.rs +++ /dev/null @@ -1,226 +0,0 @@ -use std::{sync::Arc, time::Instant}; - -use anyhow::Context; -use circuit_definitions::{ - circuit_definitions::recursion_layer::{ - 
recursion_tip::RecursionTipCircuit, ZkSyncRecursionLayerStorageType, - ZkSyncRecursionLayerVerificationKey, ZkSyncRecursiveLayerCircuit, RECURSION_TIP_ARITY, - }, - recursion_layer_proof_config, -}; -use zkevm_test_harness::{ - boojum::{ - field::{ - goldilocks::{GoldilocksExt2, GoldilocksField}, - Field, U64Representable, - }, - gadgets::{ - queue::QueueState, recursion::recursive_tree_hasher::CircuitGoldilocksPoseidon2Sponge, - traits::allocatable::CSAllocatable, - }, - }, - witness::{ - recursive_aggregation::compute_node_vk_commitment, - utils::take_sponge_like_queue_state_from_simulator, - }, - zkevm_circuits::{ - recursion::{ - leaf_layer::input::RecursionLeafParametersWitness, - recursion_tip::{ - input::{RecursionTipInputWitness, RecursionTipInstanceWitness}, - RecursionTipConfig, - }, - }, - scheduler::aux::BaseLayerCircuitType, - }, -}; -use zksync_config::configs::FriWitnessGeneratorConfig; -use zksync_object_store::ObjectStore; -use zksync_prover_dal::{ConnectionPool, Prover}; -use zksync_prover_fri_types::keys::ClosedFormInputKey; -use zksync_prover_keystore::{keystore::Keystore, utils::get_leaf_vk_params}; -use zksync_types::{ - basic_fri_types::AggregationRound, protocol_version::ProtocolSemanticVersion, L1BatchNumber, -}; - -use crate::{ - artifacts::ArtifactsManager, metrics::WITNESS_GENERATOR_METRICS, utils::ClosedFormInputWrapper, -}; - -mod artifacts; -mod job_processor; - -#[derive(Clone)] -pub struct RecursionTipWitnessGeneratorJob { - block_number: L1BatchNumber, - recursion_tip_witness: RecursionTipInstanceWitness< - GoldilocksField, - CircuitGoldilocksPoseidon2Sponge, - GoldilocksExt2, - >, - node_vk: ZkSyncRecursionLayerVerificationKey, -} - -#[derive(Clone)] -pub struct RecursionTipArtifacts { - pub recursion_tip_circuit: ZkSyncRecursiveLayerCircuit, -} - -#[derive(Debug)] -pub struct RecursionTipWitnessGenerator { - config: FriWitnessGeneratorConfig, - object_store: Arc, - prover_connection_pool: ConnectionPool, - protocol_version: ProtocolSemanticVersion, - keystore: Keystore, -} - -impl RecursionTipWitnessGenerator { - pub fn new( - config: FriWitnessGeneratorConfig, - object_store: Arc, - prover_connection_pool: ConnectionPool, - protocol_version: ProtocolSemanticVersion, - keystore: Keystore, - ) -> Self { - Self { - config, - object_store, - prover_connection_pool, - protocol_version, - keystore, - } - } - - #[tracing::instrument( - skip_all, - fields(l1_batch = %job.block_number) - )] - pub fn process_job_sync( - job: RecursionTipWitnessGeneratorJob, - started_at: Instant, - ) -> RecursionTipArtifacts { - tracing::info!( - "Starting fri witness generation of type {:?} for block {}", - AggregationRound::RecursionTip, - job.block_number.0 - ); - let config = RecursionTipConfig { - proof_config: recursion_layer_proof_config(), - vk_fixed_parameters: job.node_vk.clone().into_inner().fixed_parameters, - _marker: std::marker::PhantomData, - }; - - let recursive_tip_circuit = RecursionTipCircuit { - witness: job.recursion_tip_witness, - config, - transcript_params: (), - _marker: std::marker::PhantomData, - }; - - WITNESS_GENERATOR_METRICS.witness_generation_time[&AggregationRound::RecursionTip.into()] - .observe(started_at.elapsed()); - - tracing::info!( - "Recursion tip generation for block {} is complete in {:?}", - job.block_number.0, - started_at.elapsed() - ); - - RecursionTipArtifacts { - recursion_tip_circuit: ZkSyncRecursiveLayerCircuit::RecursionTipCircuit( - recursive_tip_circuit, - ), - } - } -} - -#[tracing::instrument( - skip_all, - fields(l1_batch 
= %l1_batch_number) -)] -pub async fn prepare_job( - l1_batch_number: L1BatchNumber, - final_node_proof_job_ids: Vec<(u8, u32)>, - object_store: &dyn ObjectStore, - keystore: Keystore, -) -> anyhow::Result<RecursionTipWitnessGeneratorJob> { - let started_at = Instant::now(); - let recursion_tip_proofs = - RecursionTipWitnessGenerator::get_artifacts(&final_node_proof_job_ids, object_store) - .await?; - WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::RecursionTip.into()] - .observe(started_at.elapsed()); - - let node_vk = keystore - .load_recursive_layer_verification_key( - ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8, - ) - .context("get_recursive_layer_vk_for_circuit_type()")?; - - let node_layer_vk_commitment = compute_node_vk_commitment(node_vk.clone()); - - let mut recursion_queues = vec![]; - for circuit_id in BaseLayerCircuitType::as_iter_u8() { - let key = ClosedFormInputKey { - block_number: l1_batch_number, - circuit_id, - }; - let ClosedFormInputWrapper(_, recursion_queue) = object_store.get(key).await?; - recursion_queues.push((circuit_id, recursion_queue)); - } - - // RECURSION_TIP_ARITY is the maximum number of proofs that a single recursion tip can support. - // Since the recursion tip has at most one proof per circuit, we can't add more circuit types without bumping the arity. - assert!( - RECURSION_TIP_ARITY >= recursion_queues.len(), - "recursion tip received more circuits ({}) than supported ({})", - recursion_queues.len(), - RECURSION_TIP_ARITY - ); - let mut branch_circuit_type_set = [GoldilocksField::ZERO; RECURSION_TIP_ARITY]; - let mut queue_set: [_; RECURSION_TIP_ARITY] = - std::array::from_fn(|_| QueueState::placeholder_witness()); - - for (index, (circuit_id, recursion_queue)) in recursion_queues.iter().enumerate() { - branch_circuit_type_set[index] = GoldilocksField::from_u64_unchecked(*circuit_id as u64); - queue_set[index] = take_sponge_like_queue_state_from_simulator(recursion_queue); - } - - let leaf_vk_commits = get_leaf_vk_params(&keystore).context("get_leaf_vk_params()")?; - assert_eq!( - leaf_vk_commits.len(), - 16, - "expected 16 leaf vk commits, which corresponds to the number of circuits, got {}", - leaf_vk_commits.len() - ); - let leaf_layer_parameters: [RecursionLeafParametersWitness<GoldilocksField>; 16] = - leaf_vk_commits - .iter() - .map(|el| el.1.clone()) - .collect::<Vec<_>>() - .try_into() - .unwrap(); - - let input = RecursionTipInputWitness { - leaf_layer_parameters, - node_layer_vk_commitment, - branch_circuit_type_set, - queue_set, - }; - - let recursion_tip_witness = RecursionTipInstanceWitness { - input, - vk_witness: node_vk.clone().into_inner(), - proof_witnesses: recursion_tip_proofs.into(), - }; - - WITNESS_GENERATOR_METRICS.prepare_job_time[&AggregationRound::RecursionTip.into()] - .observe(started_at.elapsed()); - - Ok(RecursionTipWitnessGeneratorJob { - block_number: l1_batch_number, - recursion_tip_witness, - node_vk, - }) -} diff --git a/prover/crates/bin/witness_generator/src/basic_circuits/artifacts.rs b/prover/crates/bin/witness_generator/src/rounds/basic_circuits/artifacts.rs similarity index 72% rename from prover/crates/bin/witness_generator/src/basic_circuits/artifacts.rs rename to prover/crates/bin/witness_generator/src/rounds/basic_circuits/artifacts.rs index 3447659f829..2936634fc97 100644 --- a/prover/crates/bin/witness_generator/src/basic_circuits/artifacts.rs +++ b/prover/crates/bin/witness_generator/src/rounds/basic_circuits/artifacts.rs @@ -1,4 +1,4 @@ -use std::time::Instant; +use std::{sync::Arc, time::Instant}; use async_trait::async_trait; 
use zksync_object_store::ObjectStore; @@ -8,16 +8,17 @@ use zksync_prover_fri_utils::get_recursive_layer_circuit_id_for_base_layer; use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber}; use crate::{ - artifacts::{ArtifactsManager, BlobUrls}, - basic_circuits::{BasicCircuitArtifacts, BasicWitnessGenerator, BasicWitnessGeneratorJob}, + artifacts::ArtifactsManager, + rounds::basic_circuits::{BasicCircuitArtifacts, BasicCircuits, BasicWitnessGeneratorJob}, utils::SchedulerPartialInputWrapper, }; #[async_trait] -impl ArtifactsManager for BasicWitnessGenerator { +impl ArtifactsManager for BasicCircuits { type InputMetadata = L1BatchNumber; type InputArtifacts = BasicWitnessGeneratorJob; type OutputArtifacts = BasicCircuitArtifacts; + type BlobUrls = String; async fn get_artifacts( metadata: &Self::InputMetadata, @@ -31,38 +32,42 @@ impl ArtifactsManager for BasicWitnessGenerator { }) } - async fn save_artifacts( + async fn save_to_bucket( job_id: u32, artifacts: Self::OutputArtifacts, object_store: &dyn ObjectStore, - ) -> BlobUrls { - let aux_output_witness_wrapper = AuxOutputWitnessWrapper(artifacts.aux_output_witness); + shall_save_to_public_bucket: bool, + public_blob_store: Option<Arc<dyn ObjectStore>>, + ) -> String { + let aux_output_witness_wrapper = + AuxOutputWitnessWrapper(artifacts.aux_output_witness.clone()); + if shall_save_to_public_bucket { + public_blob_store.as_deref() + .expect("public_object_store shall not be empty while running with shall_save_to_public_bucket config") + .put(L1BatchNumber(job_id), &aux_output_witness_wrapper) + .await + .unwrap(); + } + object_store .put(L1BatchNumber(job_id), &aux_output_witness_wrapper) .await .unwrap(); let wrapper = SchedulerPartialInputWrapper(artifacts.scheduler_witness); - let url = object_store + object_store .put(L1BatchNumber(job_id), &wrapper) .await - .unwrap(); - - BlobUrls::Url(url) + .unwrap() } #[tracing::instrument(skip_all, fields(l1_batch = %job_id))] - async fn update_database( + async fn save_to_database( connection_pool: &ConnectionPool<Prover>, job_id: u32, started_at: Instant, - blob_urls: BlobUrls, - _artifacts: Self::OutputArtifacts, + blob_urls: String, + artifacts: Self::OutputArtifacts, ) -> anyhow::Result<()> { - let blob_urls = match blob_urls { - BlobUrls::Scheduler(blobs) => blobs, - _ => unreachable!(), - }; - let mut connection = connection_pool .connection() .await @@ -79,7 +84,7 @@ impl ArtifactsManager for BasicWitnessGenerator { .fri_prover_jobs_dal() .insert_prover_jobs( L1BatchNumber(job_id), - blob_urls.circuit_ids_and_urls, + artifacts.circuit_urls, AggregationRound::BasicCircuits, 0, protocol_version_id, @@ -89,8 +94,8 @@ impl ArtifactsManager for BasicWitnessGenerator { .fri_witness_generator_dal() .create_aggregation_jobs( L1BatchNumber(job_id), - &blob_urls.closed_form_inputs_and_urls, - &blob_urls.scheduler_witness_url, + &artifacts.queue_urls, + &blob_urls, get_recursive_layer_circuit_id_for_base_layer, protocol_version_id, ) diff --git a/prover/crates/bin/witness_generator/src/rounds/basic_circuits/mod.rs b/prover/crates/bin/witness_generator/src/rounds/basic_circuits/mod.rs new file mode 100644 index 00000000000..adb2bf72d04 --- /dev/null +++ b/prover/crates/bin/witness_generator/src/rounds/basic_circuits/mod.rs @@ -0,0 +1,136 @@ +use std::{sync::Arc, time::Instant}; + +use async_trait::async_trait; +use circuit_definitions::zkevm_circuits::scheduler::{ + block_header::BlockAuxilaryOutputWitness, input::SchedulerCircuitInstanceWitness, +}; +use zksync_multivm::circuit_sequencer_api_latest::boojum::{ + 
field::goldilocks::{GoldilocksExt2, GoldilocksField}, + gadgets::recursion::recursive_tree_hasher::CircuitGoldilocksPoseidon2Sponge, +}; +use zksync_object_store::ObjectStore; +use zksync_prover_dal::{ConnectionPool, Prover, ProverDal}; +use zksync_prover_fri_types::get_current_pod_name; +use zksync_prover_interface::inputs::WitnessInputData; +use zksync_prover_keystore::keystore::Keystore; +use zksync_types::{ + basic_fri_types::AggregationRound, protocol_version::ProtocolSemanticVersion, L1BatchNumber, +}; + +use crate::{ + artifacts::ArtifactsManager, + metrics::WITNESS_GENERATOR_METRICS, + rounds::{basic_circuits::utils::generate_witness, JobManager}, +}; + +mod artifacts; +mod utils; + +#[derive(Clone)] +pub struct BasicCircuitArtifacts { + pub(super) circuit_urls: Vec<(u8, String)>, + pub(super) queue_urls: Vec<(u8, String, usize)>, + pub(super) scheduler_witness: SchedulerCircuitInstanceWitness< + GoldilocksField, + CircuitGoldilocksPoseidon2Sponge, + GoldilocksExt2, + >, + pub(super) aux_output_witness: BlockAuxilaryOutputWitness<GoldilocksField>, +} + +#[derive(Clone)] +pub struct BasicWitnessGeneratorJob { + pub(super) block_number: L1BatchNumber, + pub(super) data: WitnessInputData, +} + +type Witness = ( + Vec<(u8, String)>, + Vec<(u8, String, usize)>, + SchedulerCircuitInstanceWitness< + GoldilocksField, + CircuitGoldilocksPoseidon2Sponge, + GoldilocksExt2, + >, + BlockAuxilaryOutputWitness<GoldilocksField>, +); + +pub struct BasicCircuits; + +#[async_trait] +impl JobManager for BasicCircuits { + type Job = BasicWitnessGeneratorJob; + type Metadata = L1BatchNumber; + + const ROUND: AggregationRound = AggregationRound::BasicCircuits; + const SERVICE_NAME: &'static str = "fri_basic_circuit_witness_generator"; + + async fn process_job( + job: BasicWitnessGeneratorJob, + object_store: Arc<dyn ObjectStore>, + max_circuits_in_flight: usize, + started_at: Instant, + ) -> anyhow::Result<BasicCircuitArtifacts> { + let BasicWitnessGeneratorJob { + block_number, + data: job, + } = job; + + tracing::info!( + "Starting witness generation of type {:?} for block {}", + AggregationRound::BasicCircuits, + block_number.0 + ); + + let (circuit_urls, queue_urls, scheduler_witness, aux_output_witness) = + generate_witness(block_number, object_store, job, max_circuits_in_flight).await; + WITNESS_GENERATOR_METRICS.witness_generation_time[&AggregationRound::BasicCircuits.into()] + .observe(started_at.elapsed()); + tracing::info!( + "Witness generation for block {} is complete in {:?}", + block_number.0, + started_at.elapsed() + ); + + Ok(BasicCircuitArtifacts { + circuit_urls, + queue_urls, + scheduler_witness, + aux_output_witness, + }) + } + + async fn prepare_job( + metadata: L1BatchNumber, + object_store: &dyn ObjectStore, + _keystore: Keystore, + ) -> anyhow::Result<Self::Job> { + tracing::info!("Processing FRI basic witness-gen for block {}", metadata.0); + let started_at = Instant::now(); + let job = Self::get_artifacts(&metadata, object_store).await?; + + WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::BasicCircuits.into()] + .observe(started_at.elapsed()); + + Ok(job) + } + + async fn get_metadata( + connection_pool: ConnectionPool<Prover>, + protocol_version: ProtocolSemanticVersion, + ) -> anyhow::Result<Option<(u32, Self::Metadata)>> { + let pod_name = get_current_pod_name(); + if let Some(l1_batch_number) = connection_pool + .connection() + .await + .unwrap() + .fri_witness_generator_dal() + .get_next_basic_circuit_witness_job(protocol_version, &pod_name) + .await + { + Ok(Some((l1_batch_number.0, l1_batch_number))) + } else { + Ok(None) + } + } +} diff --git 
a/prover/crates/bin/witness_generator/src/basic_circuits/mod.rs b/prover/crates/bin/witness_generator/src/rounds/basic_circuits/utils.rs similarity index 76% rename from prover/crates/bin/witness_generator/src/basic_circuits/mod.rs rename to prover/crates/bin/witness_generator/src/rounds/basic_circuits/utils.rs index c9755c333da..23ae1b0f2af 100644 --- a/prover/crates/bin/witness_generator/src/basic_circuits/mod.rs +++ b/prover/crates/bin/witness_generator/src/rounds/basic_circuits/utils.rs @@ -2,47 +2,33 @@ use std::{ collections::HashSet, hash::{DefaultHasher, Hash, Hasher}, sync::Arc, - time::Instant, }; use circuit_definitions::{ circuit_definitions::base_layer::{ZkSyncBaseLayerCircuit, ZkSyncBaseLayerStorage}, encodings::recursion_request::RecursionQueueSimulator, - zkevm_circuits::{ - fsm_input_output::ClosedFormInputCompactFormWitness, - scheduler::{ - block_header::BlockAuxilaryOutputWitness, input::SchedulerCircuitInstanceWitness, - }, - }, + zkevm_circuits::fsm_input_output::ClosedFormInputCompactFormWitness, }; use tokio::sync::Semaphore; use tracing::Instrument; use zkevm_test_harness::witness::oracle::WitnessGenerationArtifact; -use zksync_config::configs::FriWitnessGeneratorConfig; use zksync_multivm::{ circuit_sequencer_api_latest::{ - boojum::{ - field::goldilocks::{GoldilocksExt2, GoldilocksField}, - gadgets::recursion::recursive_tree_hasher::CircuitGoldilocksPoseidon2Sponge, - }, - geometry_config::get_geometry_config, + boojum::field::goldilocks::GoldilocksField, geometry_config::get_geometry_config, }, interface::storage::StorageView, vm_latest::{constants::MAX_CYCLES_FOR_TX, HistoryDisabled, StorageOracle as VmStorageOracle}, zk_evm_latest::ethereum_types::Address, }; use zksync_object_store::ObjectStore; -use zksync_prover_dal::{ConnectionPool, Prover}; use zksync_prover_fri_types::{keys::ClosedFormInputKey, CircuitAuxData}; use zksync_prover_interface::inputs::WitnessInputData; use zksync_system_constants::BOOTLOADER_ADDRESS; -use zksync_types::{ - basic_fri_types::AggregationRound, protocol_version::ProtocolSemanticVersion, L1BatchNumber, -}; +use zksync_types::L1BatchNumber; use crate::{ - metrics::WITNESS_GENERATOR_METRICS, precalculated_merkle_paths_provider::PrecalculatedMerklePathsProvider, + rounds::basic_circuits::Witness, storage_oracle::StorageOracle, utils::{ expand_bootloader_contents, save_circuit, save_ram_premutation_queue_witness, @@ -51,144 +37,8 @@ use crate::{ witness::WitnessStorage, }; -mod artifacts; -pub mod job_processor; - -#[derive(Clone)] -pub struct BasicCircuitArtifacts { - pub(super) circuit_urls: Vec<(u8, String)>, - pub(super) queue_urls: Vec<(u8, String, usize)>, - pub(super) scheduler_witness: SchedulerCircuitInstanceWitness< - GoldilocksField, - CircuitGoldilocksPoseidon2Sponge, - GoldilocksExt2, - >, - pub(super) aux_output_witness: BlockAuxilaryOutputWitness<GoldilocksField>, -} - -#[derive(Clone)] -pub struct BasicWitnessGeneratorJob { - pub(super) block_number: L1BatchNumber, - pub(super) data: WitnessInputData, -} - -#[derive(Debug)] -pub struct BasicWitnessGenerator { - config: Arc<FriWitnessGeneratorConfig>, - object_store: Arc<dyn ObjectStore>, - public_blob_store: Option<Arc<dyn ObjectStore>>, - prover_connection_pool: ConnectionPool<Prover>, - protocol_version: ProtocolSemanticVersion, -} - -type Witness = ( - Vec<(u8, String)>, - Vec<(u8, String, usize)>, - SchedulerCircuitInstanceWitness< - GoldilocksField, - CircuitGoldilocksPoseidon2Sponge, - GoldilocksExt2, - >, - BlockAuxilaryOutputWitness<GoldilocksField>, -); - -impl BasicWitnessGenerator { - pub fn new( - config: FriWitnessGeneratorConfig, - object_store: Arc<dyn ObjectStore>, - 
public_blob_store: Option<Arc<dyn ObjectStore>>, - prover_connection_pool: ConnectionPool<Prover>, - protocol_version: ProtocolSemanticVersion, - ) -> Self { - Self { - config: Arc::new(config), - object_store, - public_blob_store, - prover_connection_pool, - protocol_version, - } - } - - async fn process_job_impl( - object_store: Arc<dyn ObjectStore>, - basic_job: BasicWitnessGeneratorJob, - started_at: Instant, - max_circuits_in_flight: usize, - ) -> Option<BasicCircuitArtifacts> { - let BasicWitnessGeneratorJob { - block_number, - data: job, - } = basic_job; - - tracing::info!( - "Starting witness generation of type {:?} for block {}", - AggregationRound::BasicCircuits, - block_number.0 - ); - - Some( - process_basic_circuits_job( - object_store, - started_at, - block_number, - job, - max_circuits_in_flight, - ) - .await, - ) - } -} - #[tracing::instrument(skip_all, fields(l1_batch = %block_number))] -pub(super) async fn process_basic_circuits_job( - object_store: Arc<dyn ObjectStore>, - started_at: Instant, - block_number: L1BatchNumber, - job: WitnessInputData, - max_circuits_in_flight: usize, -) -> BasicCircuitArtifacts { - let (circuit_urls, queue_urls, scheduler_witness, aux_output_witness) = - generate_witness(block_number, object_store, job, max_circuits_in_flight).await; - WITNESS_GENERATOR_METRICS.witness_generation_time[&AggregationRound::BasicCircuits.into()] - .observe(started_at.elapsed()); - tracing::info!( - "Witness generation for block {} is complete in {:?}", - block_number.0, - started_at.elapsed() - ); - - BasicCircuitArtifacts { - circuit_urls, - queue_urls, - scheduler_witness, - aux_output_witness, - } -} - -#[tracing::instrument(skip_all, fields(l1_batch = %block_number, circuit_id = %circuit_id))] -async fn save_recursion_queue( - block_number: L1BatchNumber, - circuit_id: u8, - recursion_queue_simulator: RecursionQueueSimulator<GoldilocksField>, - closed_form_inputs: Vec<ClosedFormInputCompactFormWitness<GoldilocksField>>, - object_store: Arc<dyn ObjectStore>, -) -> (u8, String, usize) { - let key = ClosedFormInputKey { - block_number, - circuit_id, - }; - let basic_circuit_count = closed_form_inputs.len(); - let closed_form_inputs = closed_form_inputs - .iter() - .map(|x| ZkSyncBaseLayerStorage::from_inner(circuit_id, x.clone())) - .collect(); - let wrapper = ClosedFormInputWrapper(closed_form_inputs, recursion_queue_simulator); - let blob_url = object_store.put(key, &wrapper).await.unwrap(); - (circuit_id, blob_url, basic_circuit_count) -} - -#[tracing::instrument(skip_all, fields(l1_batch = %block_number))] -async fn generate_witness( +pub(super) async fn generate_witness( block_number: L1BatchNumber, object_store: Arc<dyn ObjectStore>, input: WitnessInputData, @@ -464,3 +314,25 @@ async fn generate_witness( block_aux_witness, ) } + +#[tracing::instrument(skip_all, fields(l1_batch = %block_number, circuit_id = %circuit_id))] +async fn save_recursion_queue( + block_number: L1BatchNumber, + circuit_id: u8, + recursion_queue_simulator: RecursionQueueSimulator<GoldilocksField>, + closed_form_inputs: Vec<ClosedFormInputCompactFormWitness<GoldilocksField>>, + object_store: Arc<dyn ObjectStore>, +) -> (u8, String, usize) { + let key = ClosedFormInputKey { + block_number, + circuit_id, + }; + let basic_circuit_count = closed_form_inputs.len(); + let closed_form_inputs = closed_form_inputs + .iter() + .map(|x| ZkSyncBaseLayerStorage::from_inner(circuit_id, x.clone())) + .collect(); + let wrapper = ClosedFormInputWrapper(closed_form_inputs, recursion_queue_simulator); + let blob_url = object_store.put(key, &wrapper).await.unwrap(); + (circuit_id, blob_url, basic_circuit_count) +} diff --git a/prover/crates/bin/witness_generator/src/leaf_aggregation/artifacts.rs 
b/prover/crates/bin/witness_generator/src/rounds/leaf_aggregation/artifacts.rs similarity index 79% rename from prover/crates/bin/witness_generator/src/leaf_aggregation/artifacts.rs rename to prover/crates/bin/witness_generator/src/rounds/leaf_aggregation/artifacts.rs index a94587d00ec..e3c97dd257c 100644 --- a/prover/crates/bin/witness_generator/src/leaf_aggregation/artifacts.rs +++ b/prover/crates/bin/witness_generator/src/rounds/leaf_aggregation/artifacts.rs @@ -3,22 +3,23 @@ use std::time::Instant; use async_trait::async_trait; use zksync_object_store::ObjectStore; use zksync_prover_dal::{ConnectionPool, Prover, ProverDal}; -use zksync_prover_fri_types::keys::ClosedFormInputKey; +use zksync_prover_fri_types::keys::{AggregationsKey, ClosedFormInputKey}; use zksync_prover_fri_utils::get_recursive_layer_circuit_id_for_base_layer; use zksync_types::{basic_fri_types::AggregationRound, prover_dal::LeafAggregationJobMetadata}; use crate::{ - artifacts::{AggregationBlobUrls, ArtifactsManager, BlobUrls}, - leaf_aggregation::{LeafAggregationArtifacts, LeafAggregationWitnessGenerator}, + artifacts::{AggregationBlobUrls, ArtifactsManager}, metrics::WITNESS_GENERATOR_METRICS, - utils::{save_node_aggregations_artifacts, ClosedFormInputWrapper}, + rounds::leaf_aggregation::{LeafAggregation, LeafAggregationArtifacts}, + utils::{AggregationWrapper, ClosedFormInputWrapper}, }; #[async_trait] -impl ArtifactsManager for LeafAggregationWitnessGenerator { +impl ArtifactsManager for LeafAggregation { type InputMetadata = LeafAggregationJobMetadata; type InputArtifacts = ClosedFormInputWrapper; type OutputArtifacts = LeafAggregationArtifacts; + type BlobUrls = AggregationBlobUrls; async fn get_artifacts( metadata: &Self::InputMetadata, @@ -41,38 +42,42 @@ impl ArtifactsManager for LeafAggregationWitnessGenerator { skip_all, fields(l1_batch = %artifacts.block_number, circuit_id = %artifacts.circuit_id) )] - async fn save_artifacts( + async fn save_to_bucket( _job_id: u32, artifacts: Self::OutputArtifacts, object_store: &dyn ObjectStore, - ) -> BlobUrls { + _shall_save_to_public_bucket: bool, + _public_blob_store: Option<Arc<dyn ObjectStore>>, + ) -> AggregationBlobUrls { let started_at = Instant::now(); - let aggregations_urls = save_node_aggregations_artifacts( - artifacts.block_number, - get_recursive_layer_circuit_id_for_base_layer(artifacts.circuit_id), - 0, - artifacts.aggregations, - object_store, - ) - .await; + let key = AggregationsKey { + block_number: artifacts.block_number, + circuit_id: get_recursive_layer_circuit_id_for_base_layer(artifacts.circuit_id), + depth: 0, + }; + let aggregation_urls = object_store + .put(key, &AggregationWrapper(artifacts.aggregations)) + .await + .unwrap(); + WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::LeafAggregation.into()] .observe(started_at.elapsed()); - BlobUrls::Aggregation(AggregationBlobUrls { - aggregations_urls, + AggregationBlobUrls { + aggregation_urls, circuit_ids_and_urls: artifacts.circuit_ids_and_urls, - }) + } } #[tracing::instrument( skip_all, fields(l1_batch = %job_id) )] - async fn update_database( + async fn save_to_database( connection_pool: &ConnectionPool<Prover>, job_id: u32, started_at: Instant, - blob_urls: BlobUrls, + blob_urls: AggregationBlobUrls, artifacts: Self::OutputArtifacts, ) -> anyhow::Result<()> { tracing::info!( @@ -82,11 +87,6 @@ impl ArtifactsManager for LeafAggregationWitnessGenerator { artifacts.circuit_id, ); - let blob_urls = match blob_urls { - BlobUrls::Aggregation(blob_urls) => blob_urls, - _ => panic!("Unexpected blob urls type"), 
- }; - let mut prover_connection = connection_pool.connection().await.unwrap(); let mut transaction = prover_connection.start_transaction().await.unwrap(); let number_of_dependent_jobs = blob_urls.circuit_ids_and_urls.len(); @@ -124,7 +124,7 @@ impl ArtifactsManager for LeafAggregationWitnessGenerator { get_recursive_layer_circuit_id_for_base_layer(artifacts.circuit_id), number_of_dependent_jobs, 0, - blob_urls.aggregations_urls, + blob_urls.aggregation_urls, ) .await; tracing::info!( diff --git a/prover/crates/bin/witness_generator/src/rounds/leaf_aggregation/mod.rs b/prover/crates/bin/witness_generator/src/rounds/leaf_aggregation/mod.rs new file mode 100644 index 00000000000..451ceee390d --- /dev/null +++ b/prover/crates/bin/witness_generator/src/rounds/leaf_aggregation/mod.rs @@ -0,0 +1,250 @@ +use std::{sync::Arc, time::Instant}; + +use anyhow::Context as _; +use async_trait::async_trait; +use circuit_definitions::circuit_definitions::recursion_layer::base_circuit_type_into_recursive_leaf_circuit_type; +use tokio::sync::Semaphore; +use zkevm_test_harness::{ + witness::recursive_aggregation::{ + compute_leaf_params, create_leaf_witness, split_recursion_queue, + }, + zkevm_circuits::scheduler::aux::BaseLayerCircuitType, +}; +use zksync_object_store::ObjectStore; +use zksync_prover_dal::{ConnectionPool, Prover, ProverDal}; +use zksync_prover_fri_types::{ + circuit_definitions::{ + boojum::field::goldilocks::GoldilocksField, + circuit_definitions::base_layer::{ + ZkSyncBaseLayerClosedFormInput, ZkSyncBaseLayerVerificationKey, + }, + encodings::recursion_request::RecursionQueueSimulator, + zkevm_circuits::recursion::leaf_layer::input::RecursionLeafParametersWitness, + }, + get_current_pod_name, FriProofWrapper, +}; +use zksync_prover_keystore::keystore::Keystore; +use zksync_types::{ + basic_fri_types::AggregationRound, protocol_version::ProtocolSemanticVersion, + prover_dal::LeafAggregationJobMetadata, L1BatchNumber, +}; + +use crate::{ + artifacts::ArtifactsManager, + metrics::WITNESS_GENERATOR_METRICS, + rounds::JobManager, + utils::{ + load_proofs_for_job_ids, save_recursive_layer_prover_input_artifacts, + ClosedFormInputWrapper, + }, +}; + +mod artifacts; + +pub struct LeafAggregationWitnessGeneratorJob { + pub(crate) circuit_id: u8, + pub(crate) block_number: L1BatchNumber, + pub(crate) closed_form_inputs: ClosedFormInputWrapper, + pub(crate) proofs_ids: Vec<u32>, + pub(crate) base_vk: ZkSyncBaseLayerVerificationKey, + pub(crate) leaf_params: RecursionLeafParametersWitness<GoldilocksField>, +} + +#[derive(Clone)] +pub struct LeafAggregationArtifacts { + circuit_id: u8, + block_number: L1BatchNumber, + pub aggregations: Vec<(u64, RecursionQueueSimulator<GoldilocksField>)>, + pub circuit_ids_and_urls: Vec<(u8, String)>, + #[allow(dead_code)] + closed_form_inputs: Vec<ZkSyncBaseLayerClosedFormInput<GoldilocksField>>, +} + +pub struct LeafAggregation; + +#[async_trait] +impl JobManager for LeafAggregation { + type Job = LeafAggregationWitnessGeneratorJob; + type Metadata = LeafAggregationJobMetadata; + + const ROUND: AggregationRound = AggregationRound::LeafAggregation; + const SERVICE_NAME: &'static str = "fri_leaf_aggregation_witness_generator"; + + #[tracing::instrument( + skip_all, + fields(l1_batch = %job.block_number, circuit_id = %job.circuit_id) + )] + async fn process_job( + job: LeafAggregationWitnessGeneratorJob, + object_store: Arc<dyn ObjectStore>, + max_circuits_in_flight: usize, + started_at: Instant, + ) -> anyhow::Result<LeafAggregationArtifacts> { + tracing::info!( + "Starting witness generation of type {:?} for block {} with circuit {}", + AggregationRound::LeafAggregation, + job.block_number.0, + job.circuit_id, + );
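+ // split_recursion_queue chunks the closed-form-inputs queue; each chunk below is proven by one leaf-aggregation circuit instance.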
+ let circuit_id = job.circuit_id; + let queues = split_recursion_queue(job.closed_form_inputs.1); + + assert_eq!(circuit_id, job.base_vk.numeric_circuit_type()); + + let aggregations = queues + .iter() + .cloned() + .map(|queue| (circuit_id as u64, queue)) + .collect(); + + let mut proof_ids_iter = job.proofs_ids.into_iter(); + let mut proofs_ids = vec![]; + for queue in queues.iter() { + let proofs_ids_for_queue: Vec<_> = (&mut proof_ids_iter) + .take(queue.num_items as usize) + .collect(); + assert_eq!(queue.num_items as usize, proofs_ids_for_queue.len()); + proofs_ids.push(proofs_ids_for_queue); + } + + let semaphore = Arc::new(Semaphore::new(max_circuits_in_flight)); + + let mut handles = vec![]; + for (circuit_idx, (queue, proofs_ids_for_queue)) in + queues.into_iter().zip(proofs_ids).enumerate() + { + let semaphore = semaphore.clone(); + + let object_store = object_store.clone(); + let queue = queue.clone(); + let base_vk = job.base_vk.clone(); + let leaf_params = (circuit_id, job.leaf_params.clone()); + + let handle = tokio::task::spawn(async move { + let _permit = semaphore + .acquire() + .await + .expect("failed to get permit to process queues chunk"); + + let proofs = load_proofs_for_job_ids(&proofs_ids_for_queue, &*object_store).await; + let base_proofs = proofs + .into_iter() + .map(|wrapper| match wrapper { + FriProofWrapper::Base(base_proof) => base_proof, + FriProofWrapper::Recursive(_) => { + panic!( + "Expected only base proofs for leaf agg {} {}", + job.circuit_id, job.block_number + ); + } + }) + .collect(); + + let (_, circuit) = create_leaf_witness( + circuit_id.into(), + queue, + base_proofs, + &base_vk, + &leaf_params, + ); + + save_recursive_layer_prover_input_artifacts( + job.block_number, + circuit_idx, + vec![circuit], + AggregationRound::LeafAggregation, + 0, + &*object_store, + None, + ) + .await + }); + + handles.push(handle); + } + + let circuit_ids_and_urls_results = futures::future::join_all(handles).await; + let circuit_ids_and_urls = circuit_ids_and_urls_results + .into_iter() + .flat_map(|x| x.unwrap()) + .collect(); + + WITNESS_GENERATOR_METRICS.witness_generation_time + [&AggregationRound::LeafAggregation.into()] + .observe(started_at.elapsed()); + + tracing::info!( + "Leaf witness generation for block {} with circuit id {} is complete in {:?}.", + job.block_number.0, + circuit_id, + started_at.elapsed(), + ); + + Ok(LeafAggregationArtifacts { + circuit_id, + block_number: job.block_number, + aggregations, + circuit_ids_and_urls, + closed_form_inputs: job.closed_form_inputs.0, + }) + } + + #[tracing::instrument( + skip_all, + fields(l1_batch = %metadata.block_number, circuit_id = %metadata.circuit_id) + )] + async fn prepare_job( + metadata: LeafAggregationJobMetadata, + object_store: &dyn ObjectStore, + keystore: Keystore, + ) -> anyhow::Result<Self::Job> { + let started_at = Instant::now(); + let closed_form_input = Self::get_artifacts(&metadata, object_store).await?; + + WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::LeafAggregation.into()] + .observe(started_at.elapsed()); + + let started_at = Instant::now(); + let base_vk = keystore + .load_base_layer_verification_key(metadata.circuit_id) + .context("get_base_layer_vk_for_circuit_type()")?; + + let leaf_circuit_id = base_circuit_type_into_recursive_leaf_circuit_type( + BaseLayerCircuitType::from_numeric_value(metadata.circuit_id), + ) as u8; +
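+ // Note: compute_leaf_params below combines the base-layer VK with the leaf recursion VK loaded next; the result is embedded into the job.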
+ let leaf_vk = keystore + .load_recursive_layer_verification_key(leaf_circuit_id) + .context("get_recursive_layer_vk_for_circuit_type()")?; + let leaf_params = compute_leaf_params(metadata.circuit_id, base_vk.clone(), leaf_vk); + + WITNESS_GENERATOR_METRICS.prepare_job_time[&AggregationRound::LeafAggregation.into()] + .observe(started_at.elapsed()); + + Ok(LeafAggregationWitnessGeneratorJob { + circuit_id: metadata.circuit_id, + block_number: metadata.block_number, + closed_form_inputs: closed_form_input, + proofs_ids: metadata.prover_job_ids_for_proofs, + base_vk, + leaf_params, + }) + } + + async fn get_metadata( + connection_pool: ConnectionPool<Prover>, + protocol_version: ProtocolSemanticVersion, + ) -> anyhow::Result<Option<(u32, Self::Metadata)>> { + let pod_name = get_current_pod_name(); + let Some(metadata) = connection_pool + .connection() + .await? + .fri_witness_generator_dal() + .get_next_leaf_aggregation_job(protocol_version, &pod_name) + .await + else { + return Ok(None); + }; + Ok(Some((metadata.id, metadata))) + } +} diff --git a/prover/crates/bin/witness_generator/src/rounds/mod.rs b/prover/crates/bin/witness_generator/src/rounds/mod.rs new file mode 100644 index 00000000000..6fd72c96869 --- /dev/null +++ b/prover/crates/bin/witness_generator/src/rounds/mod.rs @@ -0,0 +1,195 @@ +use std::{marker::PhantomData, sync::Arc, time::Instant}; + +use anyhow::Context; +use async_trait::async_trait; +use tokio::task::JoinHandle; +use zksync_config::configs::FriWitnessGeneratorConfig; +use zksync_object_store::ObjectStore; +use zksync_prover_dal::{ConnectionPool, Prover, ProverDal}; +use zksync_prover_keystore::keystore::Keystore; +use zksync_queued_job_processor::JobProcessor; +use zksync_types::protocol_version::ProtocolSemanticVersion; + +use crate::artifacts::ArtifactsManager; + +mod basic_circuits; +mod leaf_aggregation; +mod node_aggregation; +mod recursion_tip; +mod scheduler; + +pub use basic_circuits::BasicCircuits; +pub use leaf_aggregation::LeafAggregation; +pub use node_aggregation::NodeAggregation; +pub use recursion_tip::RecursionTip; +pub use scheduler::Scheduler; +use zksync_types::basic_fri_types::AggregationRound; + +use crate::metrics::WITNESS_GENERATOR_METRICS; + +#[async_trait] +pub trait JobManager: ArtifactsManager { + type Job: Send + 'static; + type Metadata: Send + 'static; + + const ROUND: AggregationRound; + const SERVICE_NAME: &'static str; + + async fn process_job( + job: Self::Job, + object_store: Arc<dyn ObjectStore>, + max_circuits_in_flight: usize, + started_at: Instant, + ) -> anyhow::Result<Self::OutputArtifacts>; + + async fn prepare_job( + metadata: Self::Metadata, + object_store: &dyn ObjectStore, + keystore: Keystore, + ) -> anyhow::Result<Self::Job>; + + async fn get_metadata( + connection_pool: ConnectionPool<Prover>, + protocol_version: ProtocolSemanticVersion, + ) -> anyhow::Result<Option<(u32, Self::Metadata)>>; +} + +#[derive(Debug)] +pub struct WitnessGenerator<R> { + pub config: FriWitnessGeneratorConfig, + pub object_store: Arc<dyn ObjectStore>, + pub public_blob_store: Option<Arc<dyn ObjectStore>>, + pub connection_pool: ConnectionPool<Prover>, + pub protocol_version: ProtocolSemanticVersion, + pub keystore: Keystore, + _round: PhantomData<R>, +} + +impl<R> WitnessGenerator<R> +where + R: JobManager + ArtifactsManager, +{ + pub fn new( + config: FriWitnessGeneratorConfig, + object_store: Arc<dyn ObjectStore>, + public_blob_store: Option<Arc<dyn ObjectStore>>, + connection_pool: ConnectionPool<Prover>, + protocol_version: ProtocolSemanticVersion, + keystore: Keystore, + ) -> Self { + Self { + config, + object_store, + public_blob_store, + connection_pool, + protocol_version, + keystore, + _round: Default::default(), + } + } +} + +#[async_trait] +impl<R> JobProcessor for WitnessGenerator<R> +where + R: JobManager + ArtifactsManager + Send + Sync, +{ 
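+ // This JobProcessor implementation is round-agnostic: job selection, preparation, processing, and persistence are all delegated to R's JobManager and ArtifactsManager implementations.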
+ type Job = R::Job; + type JobId = u32; + type JobArtifacts = R::OutputArtifacts; + + const SERVICE_NAME: &'static str = R::SERVICE_NAME; + + async fn get_next_job(&self) -> anyhow::Result<Option<(Self::JobId, Self::Job)>> { + if let Some((id, metadata)) = + R::get_metadata(self.connection_pool.clone(), self.protocol_version) + .await + .context("get_metadata()")? + { + tracing::info!("Processing {:?} job {:?}", R::ROUND, id); + Ok(Some(( + id, + R::prepare_job(metadata, &*self.object_store, self.keystore.clone()) + .await + .context("prepare_job()")?, + ))) + } else { + Ok(None) + } + } + + async fn save_failure(&self, job_id: Self::JobId, _started_at: Instant, error: String) { + self.connection_pool + .connection() + .await + .unwrap() + .fri_witness_generator_dal() + .mark_witness_job_failed(&error, job_id, R::ROUND) + .await; + } + + async fn process_job( + &self, + _job_id: &Self::JobId, + job: Self::Job, + started_at: Instant, + ) -> JoinHandle<anyhow::Result<Self::JobArtifacts>> { + let object_store = self.object_store.clone(); + let max_circuits_in_flight = self.config.max_circuits_in_flight; + tokio::spawn(async move { + R::process_job(job, object_store, max_circuits_in_flight, started_at).await + }) + } + + #[tracing::instrument(skip_all, fields(job_id = %job_id))] + async fn save_result( + &self, + job_id: Self::JobId, + started_at: Instant, + artifacts: Self::JobArtifacts, + ) -> anyhow::Result<()> { + tracing::info!("Saving {:?} artifacts for job {:?}", R::ROUND, job_id); + + let blob_save_started_at = Instant::now(); + + let blob_urls = R::save_to_bucket( + job_id, + artifacts.clone(), + &*self.object_store, + self.config.shall_save_to_public_bucket, + self.public_blob_store.clone(), + ) + .await; + + WITNESS_GENERATOR_METRICS.blob_save_time[&R::ROUND.into()] + .observe(blob_save_started_at.elapsed()); + + tracing::info!("Saved {:?} artifacts for job {:?}", R::ROUND, job_id); + R::save_to_database( + &self.connection_pool, + job_id, + started_at, + blob_urls, + artifacts, + ) + .await?; + Ok(()) + } + + fn max_attempts(&self) -> u32 { + self.config.max_attempts + } + + async fn get_job_attempts(&self, job_id: &Self::JobId) -> anyhow::Result<u32> { + let mut prover_storage = self.connection_pool.connection().await.context(format!( + "failed to acquire DB connection for {:?}", + R::ROUND + ))?; + prover_storage + .fri_witness_generator_dal() + .get_witness_job_attempts(*job_id, R::ROUND) + .await + .map(|attempts| attempts.unwrap_or(0)) + .context(format!("failed to get job attempts for {:?}", R::ROUND)) + } +} diff --git a/prover/crates/bin/witness_generator/src/node_aggregation/artifacts.rs b/prover/crates/bin/witness_generator/src/rounds/node_aggregation/artifacts.rs similarity index 81% rename from prover/crates/bin/witness_generator/src/node_aggregation/artifacts.rs rename to prover/crates/bin/witness_generator/src/rounds/node_aggregation/artifacts.rs index 245027f0d67..e4f5c90080d 100644 --- a/prover/crates/bin/witness_generator/src/node_aggregation/artifacts.rs +++ b/prover/crates/bin/witness_generator/src/rounds/node_aggregation/artifacts.rs @@ -7,17 +7,18 @@ use zksync_prover_fri_types::keys::AggregationsKey; use zksync_types::{basic_fri_types::AggregationRound, prover_dal::NodeAggregationJobMetadata}; use crate::{ - artifacts::{AggregationBlobUrls, ArtifactsManager, BlobUrls}, + artifacts::{AggregationBlobUrls, ArtifactsManager}, metrics::WITNESS_GENERATOR_METRICS, - node_aggregation::{NodeAggregationArtifacts, NodeAggregationWitnessGenerator}, - utils::{save_node_aggregations_artifacts, AggregationWrapper}, + 
rounds::node_aggregation::{NodeAggregation, NodeAggregationArtifacts}, + utils::AggregationWrapper, }; #[async_trait] -impl ArtifactsManager for NodeAggregationWitnessGenerator { +impl ArtifactsManager for NodeAggregation { type InputMetadata = NodeAggregationJobMetadata; type InputArtifacts = AggregationWrapper; type OutputArtifacts = NodeAggregationArtifacts; + type BlobUrls = AggregationBlobUrls; #[tracing::instrument( skip_all, @@ -46,46 +47,45 @@ impl ArtifactsManager for NodeAggregationWitnessGenerator { skip_all, fields(l1_batch = %artifacts.block_number, circuit_id = %artifacts.circuit_id) )] - async fn save_artifacts( + async fn save_to_bucket( _job_id: u32, artifacts: Self::OutputArtifacts, object_store: &dyn ObjectStore, - ) -> BlobUrls { + _shall_save_to_public_bucket: bool, + _public_blob_store: Option<Arc<dyn ObjectStore>>, + ) -> AggregationBlobUrls { let started_at = Instant::now(); - let aggregations_urls = save_node_aggregations_artifacts( - artifacts.block_number, - artifacts.circuit_id, - artifacts.depth, - artifacts.next_aggregations, - object_store, - ) - .await; + let key = AggregationsKey { + block_number: artifacts.block_number, + circuit_id: artifacts.circuit_id, + depth: artifacts.depth, + }; + let aggregation_urls = object_store + .put(key, &AggregationWrapper(artifacts.next_aggregations)) + .await + .unwrap(); WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::NodeAggregation.into()] .observe(started_at.elapsed()); - BlobUrls::Aggregation(AggregationBlobUrls { - aggregations_urls, + AggregationBlobUrls { + aggregation_urls, circuit_ids_and_urls: artifacts.recursive_circuit_ids_and_urls, - }) + } } #[tracing::instrument( skip_all, fields(l1_batch = % job_id) )] - async fn update_database( + async fn save_to_database( connection_pool: &ConnectionPool<Prover>, job_id: u32, started_at: Instant, - blob_urls: BlobUrls, + blob_urls: AggregationBlobUrls, artifacts: Self::OutputArtifacts, ) -> anyhow::Result<()> { let mut prover_connection = connection_pool.connection().await.unwrap(); - let blob_urls = match blob_urls { - BlobUrls::Aggregation(blobs) => blobs, - _ => unreachable!(), - }; let mut transaction = prover_connection.start_transaction().await.unwrap(); let dependent_jobs = blob_urls.circuit_ids_and_urls.len(); let protocol_version_id = transaction @@ -111,7 +111,7 @@ impl ArtifactsManager for NodeAggregationWitnessGenerator { artifacts.circuit_id, Some(dependent_jobs as i32), artifacts.depth, - &blob_urls.aggregations_urls, + &blob_urls.aggregation_urls, protocol_version_id, ) .await; diff --git a/prover/crates/bin/witness_generator/src/node_aggregation/mod.rs b/prover/crates/bin/witness_generator/src/rounds/node_aggregation/mod.rs similarity index 70% rename from prover/crates/bin/witness_generator/src/node_aggregation/mod.rs rename to prover/crates/bin/witness_generator/src/rounds/node_aggregation/mod.rs index 047caa363a8..e891d313ffc 100644 --- a/prover/crates/bin/witness_generator/src/node_aggregation/mod.rs +++ b/prover/crates/bin/witness_generator/src/rounds/node_aggregation/mod.rs @@ -1,14 +1,14 @@ use std::{sync::Arc, time::Instant}; use anyhow::Context as _; +use async_trait::async_trait; use circuit_definitions::circuit_definitions::recursion_layer::RECURSION_ARITY; use tokio::sync::Semaphore; use zkevm_test_harness::witness::recursive_aggregation::{ compute_node_vk_commitment, create_node_witness, }; -use zksync_config::configs::FriWitnessGeneratorConfig; use zksync_object_store::ObjectStore; -use zksync_prover_dal::{ConnectionPool, Prover}; +use 
zksync_prover_dal::{ConnectionPool, Prover, ProverDal}; use zksync_prover_fri_types::{ circuit_definitions::{ boojum::field::goldilocks::GoldilocksField, @@ -18,7 +18,7 @@ use zksync_prover_fri_types::{ encodings::recursion_request::RecursionQueueSimulator, zkevm_circuits::recursion::leaf_layer::input::RecursionLeafParametersWitness, }, - FriProofWrapper, + get_current_pod_name, FriProofWrapper, }; use zksync_prover_keystore::{keystore::Keystore, utils::get_leaf_vk_params}; use zksync_types::{ @@ -29,11 +29,10 @@ use zksync_types::{ use crate::{ artifacts::ArtifactsManager, metrics::WITNESS_GENERATOR_METRICS, + rounds::JobManager, utils::{load_proofs_for_job_ids, save_recursive_layer_prover_input_artifacts}, }; - mod artifacts; -mod job_processor; #[derive(Clone)] pub struct NodeAggregationArtifacts { @@ -56,42 +55,26 @@ pub struct NodeAggregationWitnessGeneratorJob { all_leafs_layer_params: Vec<(u8, RecursionLeafParametersWitness<GoldilocksField>)>, } -#[derive(Debug)] -pub struct NodeAggregationWitnessGenerator { - config: FriWitnessGeneratorConfig, - object_store: Arc<dyn ObjectStore>, - prover_connection_pool: ConnectionPool<Prover>, - protocol_version: ProtocolSemanticVersion, - keystore: Keystore, -} +pub struct NodeAggregation; -impl NodeAggregationWitnessGenerator { - pub fn new( - config: FriWitnessGeneratorConfig, - object_store: Arc<dyn ObjectStore>, - prover_connection_pool: ConnectionPool<Prover>, - protocol_version: ProtocolSemanticVersion, - keystore: Keystore, - ) -> Self { - Self { - config, - object_store, - prover_connection_pool, - protocol_version, - keystore, - } - } +#[async_trait] +impl JobManager for NodeAggregation { + type Job = NodeAggregationWitnessGeneratorJob; + type Metadata = NodeAggregationJobMetadata; + + const ROUND: AggregationRound = AggregationRound::NodeAggregation; + const SERVICE_NAME: &'static str = "fri_node_aggregation_witness_generator"; #[tracing::instrument( skip_all, fields(l1_batch = % job.block_number, circuit_id = % job.circuit_id) )] - pub async fn process_job_impl( + async fn process_job( job: NodeAggregationWitnessGeneratorJob, - started_at: Instant, object_store: Arc<dyn ObjectStore>, max_circuits_in_flight: usize, - ) -> NodeAggregationArtifacts { + started_at: Instant, + ) -> anyhow::Result<NodeAggregationArtifacts> { let node_vk_commitment = compute_node_vk_commitment(job.node_vk.clone()); tracing::info!( "Starting witness generation of type {:?} for block {} circuit id {} depth {}", @@ -205,52 +188,71 @@ impl NodeAggregationWitnessGenerator { started_at.elapsed(), ); - NodeAggregationArtifacts { + Ok(NodeAggregationArtifacts { circuit_id: job.circuit_id, block_number: job.block_number, depth: job.depth + 1, next_aggregations, recursive_circuit_ids_and_urls, - } + }) } -} -#[tracing::instrument( - skip_all, - fields(l1_batch = % metadata.block_number, circuit_id = % metadata.circuit_id) -)] -pub async fn prepare_job( - metadata: NodeAggregationJobMetadata, - object_store: &dyn ObjectStore, - keystore: Keystore, -) -> anyhow::Result<NodeAggregationWitnessGeneratorJob> { - let started_at = Instant::now(); - let artifacts = NodeAggregationWitnessGenerator::get_artifacts(&metadata, object_store).await?; + #[tracing::instrument( + skip_all, + fields(l1_batch = % metadata.block_number, circuit_id = % metadata.circuit_id) + )] + async fn prepare_job( + metadata: Self::Metadata, + object_store: &dyn ObjectStore, + keystore: Keystore, + ) -> anyhow::Result<Self::Job> { + let started_at = Instant::now(); + let artifacts = Self::get_artifacts(&metadata, object_store).await?; + + WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::NodeAggregation.into()] + .observe(started_at.elapsed()); 
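+ // Load the leaf VK for this circuit id and the node-layer VK; both are required to assemble the node-aggregation job.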
- WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::NodeAggregation.into()] - .observe(started_at.elapsed()); + let started_at = Instant::now(); + let leaf_vk = keystore + .load_recursive_layer_verification_key(metadata.circuit_id) + .context("get_recursive_layer_vk_for_circuit_type")?; + let node_vk = keystore + .load_recursive_layer_verification_key( + ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8, + ) + .context("get_recursive_layer_vk_for_circuit_type()")?; - let started_at = Instant::now(); - let leaf_vk = keystore - .load_recursive_layer_verification_key(metadata.circuit_id) - .context("get_recursive_layer_vk_for_circuit_type")?; - let node_vk = keystore - .load_recursive_layer_verification_key( - ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8, - ) - .context("get_recursive_layer_vk_for_circuit_type()")?; + WITNESS_GENERATOR_METRICS.prepare_job_time[&AggregationRound::NodeAggregation.into()] + .observe(started_at.elapsed()); + + Ok(NodeAggregationWitnessGeneratorJob { + circuit_id: metadata.circuit_id, + block_number: metadata.block_number, + depth: metadata.depth, + aggregations: artifacts.0, + proofs_ids: metadata.prover_job_ids_for_proofs, + leaf_vk, + node_vk, + all_leafs_layer_params: get_leaf_vk_params(&keystore) + .context("get_leaf_vk_params()")?, + }) + } - WITNESS_GENERATOR_METRICS.prepare_job_time[&AggregationRound::NodeAggregation.into()] - .observe(started_at.elapsed()); + async fn get_metadata( + connection_pool: ConnectionPool<Prover>, + protocol_version: ProtocolSemanticVersion, + ) -> anyhow::Result<Option<(u32, Self::Metadata)>> { + let pod_name = get_current_pod_name(); + let Some(metadata) = connection_pool + .connection() + .await? + .fri_witness_generator_dal() + .get_next_node_aggregation_job(protocol_version, &pod_name) + .await + else { + return Ok(None); + }; - Ok(NodeAggregationWitnessGeneratorJob { - circuit_id: metadata.circuit_id, - block_number: metadata.block_number, - depth: metadata.depth, - aggregations: artifacts.0, - proofs_ids: metadata.prover_job_ids_for_proofs, - leaf_vk, - node_vk, - all_leafs_layer_params: get_leaf_vk_params(&keystore).context("get_leaf_vk_params()")?, - }) + Ok(Some((metadata.id, metadata))) + } } diff --git a/prover/crates/bin/witness_generator/src/recursion_tip/artifacts.rs b/prover/crates/bin/witness_generator/src/rounds/recursion_tip/artifacts.rs similarity index 89% rename from prover/crates/bin/witness_generator/src/recursion_tip/artifacts.rs rename to prover/crates/bin/witness_generator/src/rounds/recursion_tip/artifacts.rs index 8379fcf9f93..6d18795c2b3 100644 --- a/prover/crates/bin/witness_generator/src/recursion_tip/artifacts.rs +++ b/prover/crates/bin/witness_generator/src/rounds/recursion_tip/artifacts.rs @@ -12,15 +12,16 @@ use zksync_prover_fri_types::{keys::FriCircuitKey, CircuitWrapper, FriProofWrapp use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber}; use crate::{ - artifacts::{ArtifactsManager, BlobUrls}, - recursion_tip::{RecursionTipArtifacts, RecursionTipWitnessGenerator}, + artifacts::ArtifactsManager, + rounds::recursion_tip::{RecursionTip, RecursionTipArtifacts}, }; #[async_trait] -impl ArtifactsManager for RecursionTipWitnessGenerator { +impl ArtifactsManager for RecursionTip { type InputMetadata = Vec<(u8, u32)>; type InputArtifacts = Vec<ZkSyncRecursionLayerProof>; type OutputArtifacts = RecursionTipArtifacts; + type BlobUrls = String; /// Loads all proofs for a given recursion tip's job ids. /// Note that recursion tip may not have proofs for some specific circuits (because the batch didn't contain them). 
@@ -73,11 +74,13 @@ impl ArtifactsManager for RecursionTipWitnessGenerator { Ok(proofs) } - async fn save_artifacts( + async fn save_to_bucket( job_id: u32, artifacts: Self::OutputArtifacts, object_store: &dyn ObjectStore, - ) -> BlobUrls { + _shall_save_to_public_bucket: bool, + _public_blob_store: Option<Arc<dyn ObjectStore>>, + ) -> String { let key = FriCircuitKey { block_number: L1BatchNumber(job_id), circuit_id: 255, @@ -86,29 +89,22 @@ impl ArtifactsManager for RecursionTipWitnessGenerator { aggregation_round: AggregationRound::RecursionTip, }; - let blob_url = object_store + object_store .put( key, &CircuitWrapper::Recursive(artifacts.recursion_tip_circuit.clone()), ) .await - .unwrap(); - - BlobUrls::Url(blob_url) + .unwrap() } - async fn update_database( + async fn save_to_database( connection_pool: &ConnectionPool<Prover>, job_id: u32, started_at: Instant, - blob_urls: BlobUrls, + blob_urls: String, _artifacts: Self::OutputArtifacts, ) -> anyhow::Result<()> { - let blob_url = match blob_urls { - BlobUrls::Url(url) => url, - _ => panic!("Unexpected blob urls type"), - }; - let mut prover_connection = connection_pool.connection().await?; let mut transaction = prover_connection.start_transaction().await?; let protocol_version_id = transaction @@ -123,7 +119,7 @@ impl ArtifactsManager for RecursionTipWitnessGenerator { 0, 0, AggregationRound::RecursionTip, - &blob_url, + &blob_urls, false, protocol_version_id, ) diff --git a/prover/crates/bin/witness_generator/src/rounds/recursion_tip/mod.rs b/prover/crates/bin/witness_generator/src/rounds/recursion_tip/mod.rs new file mode 100644 index 00000000000..873f6798481 --- /dev/null +++ b/prover/crates/bin/witness_generator/src/rounds/recursion_tip/mod.rs @@ -0,0 +1,254 @@ +use std::{sync::Arc, time::Instant}; + +use anyhow::Context; +use async_trait::async_trait; +use circuit_definitions::{ + circuit_definitions::recursion_layer::{ + recursion_tip::RecursionTipCircuit, ZkSyncRecursionLayerStorageType, + ZkSyncRecursionLayerVerificationKey, ZkSyncRecursiveLayerCircuit, RECURSION_TIP_ARITY, + }, + recursion_layer_proof_config, +}; +use zkevm_test_harness::{ + boojum::{ + field::{ + goldilocks::{GoldilocksExt2, GoldilocksField}, + Field, U64Representable, + }, + gadgets::{ + queue::QueueState, recursion::recursive_tree_hasher::CircuitGoldilocksPoseidon2Sponge, + traits::allocatable::CSAllocatable, + }, + }, + witness::{ + recursive_aggregation::compute_node_vk_commitment, + utils::take_sponge_like_queue_state_from_simulator, + }, + zkevm_circuits::{ + recursion::{ + leaf_layer::input::RecursionLeafParametersWitness, + recursion_tip::{ + input::{RecursionTipInputWitness, RecursionTipInstanceWitness}, + RecursionTipConfig, + }, + }, + scheduler::aux::BaseLayerCircuitType, + }, +}; +use zksync_object_store::ObjectStore; +use zksync_prover_dal::{ConnectionPool, Prover, ProverDal}; +use zksync_prover_fri_types::{get_current_pod_name, keys::ClosedFormInputKey}; +use zksync_prover_keystore::{keystore::Keystore, utils::get_leaf_vk_params}; +use zksync_types::{ + basic_fri_types::AggregationRound, protocol_version::ProtocolSemanticVersion, L1BatchNumber, +}; + +use crate::{ + artifacts::ArtifactsManager, metrics::WITNESS_GENERATOR_METRICS, rounds::JobManager, + utils::ClosedFormInputWrapper, +}; + +mod artifacts; + +#[derive(Clone)] +pub struct RecursionTipWitnessGeneratorJob { + block_number: L1BatchNumber, + recursion_tip_witness: RecursionTipInstanceWitness< + GoldilocksField, + CircuitGoldilocksPoseidon2Sponge, + GoldilocksExt2, + >, + node_vk: 
ZkSyncRecursionLayerVerificationKey, +} + +#[derive(Clone)] +pub struct RecursionTipArtifacts { + pub recursion_tip_circuit: ZkSyncRecursiveLayerCircuit, +} + +pub struct RecursionTipJobMetadata { + pub l1_batch_number: L1BatchNumber, + pub final_node_proof_job_ids: Vec<(u8, u32)>, +} + +pub struct RecursionTip; + +#[async_trait] +impl JobManager for RecursionTip { + type Job = RecursionTipWitnessGeneratorJob; + type Metadata = RecursionTipJobMetadata; + + const ROUND: AggregationRound = AggregationRound::RecursionTip; + const SERVICE_NAME: &'static str = "recursion_tip_witness_generator"; + + #[tracing::instrument( + skip_all, + fields(l1_batch = %job.block_number) + )] + async fn process_job( + job: Self::Job, + _object_store: Arc<dyn ObjectStore>, + _max_circuits_in_flight: usize, + started_at: Instant, + ) -> anyhow::Result<RecursionTipArtifacts> { + tracing::info!( + "Starting fri witness generation of type {:?} for block {}", + AggregationRound::RecursionTip, + job.block_number.0 + ); + let config = RecursionTipConfig { + proof_config: recursion_layer_proof_config(), + vk_fixed_parameters: job.node_vk.clone().into_inner().fixed_parameters, + _marker: std::marker::PhantomData, + }; + + let recursive_tip_circuit = RecursionTipCircuit { + witness: job.recursion_tip_witness, + config, + transcript_params: (), + _marker: std::marker::PhantomData, + }; + + WITNESS_GENERATOR_METRICS.witness_generation_time[&AggregationRound::RecursionTip.into()] + .observe(started_at.elapsed()); + + tracing::info!( + "Recursion tip generation for block {} is complete in {:?}", + job.block_number.0, + started_at.elapsed() + ); + + Ok(RecursionTipArtifacts { + recursion_tip_circuit: ZkSyncRecursiveLayerCircuit::RecursionTipCircuit( + recursive_tip_circuit, + ), + }) + } + + #[tracing::instrument( + skip_all, + fields(l1_batch = %metadata.l1_batch_number) + )] + async fn prepare_job( + metadata: RecursionTipJobMetadata, + object_store: &dyn ObjectStore, + keystore: Keystore, + ) -> anyhow::Result<Self::Job> { + let started_at = Instant::now(); + let recursion_tip_proofs = + Self::get_artifacts(&metadata.final_node_proof_job_ids, object_store).await?; + WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::RecursionTip.into()] + .observe(started_at.elapsed()); + + let node_vk = keystore + .load_recursive_layer_verification_key( + ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8, + ) + .context("get_recursive_layer_vk_for_circuit_type()")?; + + let node_layer_vk_commitment = compute_node_vk_commitment(node_vk.clone()); + + let mut recursion_queues = vec![]; + for circuit_id in BaseLayerCircuitType::as_iter_u8() { + let key = ClosedFormInputKey { + block_number: metadata.l1_batch_number, + circuit_id, + }; + let ClosedFormInputWrapper(_, recursion_queue) = object_store.get(key).await?; + recursion_queues.push((circuit_id, recursion_queue)); + } + + // RECURSION_TIP_ARITY is the maximum number of proofs that a single recursion tip can support. + // Since the recursion tip has at most one proof per circuit, we can't add more circuit types without bumping the arity. 
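+ // One queue was collected above for every base-layer circuit type; the assert below enforces that bound.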
+ assert!( + RECURSION_TIP_ARITY >= recursion_queues.len(), + "recursion tip received more circuits ({}) than supported ({})", + recursion_queues.len(), + RECURSION_TIP_ARITY + ); + let mut branch_circuit_type_set = [GoldilocksField::ZERO; RECURSION_TIP_ARITY]; + let mut queue_set: [_; RECURSION_TIP_ARITY] = + std::array::from_fn(|_| QueueState::placeholder_witness()); + + for (index, (circuit_id, recursion_queue)) in recursion_queues.iter().enumerate() { + branch_circuit_type_set[index] = + GoldilocksField::from_u64_unchecked(*circuit_id as u64); + queue_set[index] = take_sponge_like_queue_state_from_simulator(recursion_queue); + } + + let leaf_vk_commits = get_leaf_vk_params(&keystore).context("get_leaf_vk_params()")?; + assert_eq!( + leaf_vk_commits.len(), + 16, + "expected 16 leaf vk commits, which corresponds to the number of circuits, got {}", + leaf_vk_commits.len() + ); + let leaf_layer_parameters: [RecursionLeafParametersWitness<GoldilocksField>; 16] = + leaf_vk_commits + .iter() + .map(|el| el.1.clone()) + .collect::<Vec<_>>() + .try_into() + .unwrap(); + + let input = RecursionTipInputWitness { + leaf_layer_parameters, + node_layer_vk_commitment, + branch_circuit_type_set, + queue_set, + }; + + let recursion_tip_witness = RecursionTipInstanceWitness { + input, + vk_witness: node_vk.clone().into_inner(), + proof_witnesses: recursion_tip_proofs.into(), + }; + + WITNESS_GENERATOR_METRICS.prepare_job_time[&AggregationRound::RecursionTip.into()] + .observe(started_at.elapsed()); + + Ok(RecursionTipWitnessGeneratorJob { + block_number: metadata.l1_batch_number, + recursion_tip_witness, + node_vk, + }) + } + + async fn get_metadata( + connection_pool: ConnectionPool<Prover>, + protocol_version: ProtocolSemanticVersion, + ) -> anyhow::Result<Option<(u32, Self::Metadata)>> { + let pod_name = get_current_pod_name(); + let Some((l1_batch_number, number_of_final_node_jobs)) = connection_pool + .connection() + .await? + .fri_witness_generator_dal() + .get_next_recursion_tip_witness_job(protocol_version, &pod_name) + .await + else { + return Ok(None); + }; 
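+ // Fetch the final node proof job ids and cross-check them against the count recorded for this batch.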
+ let final_node_proof_job_ids = connection_pool + .connection() + .await? + .fri_prover_jobs_dal() + .get_final_node_proof_job_ids_for(l1_batch_number) + .await; + + assert_eq!( + final_node_proof_job_ids.len(), + number_of_final_node_jobs as usize, + "recursion tip witness job was scheduled without all final node jobs being completed; expected {}, got {}", + number_of_final_node_jobs, final_node_proof_job_ids.len() + ); + + Ok(Some(( + l1_batch_number.0, + RecursionTipJobMetadata { + l1_batch_number, + final_node_proof_job_ids, + }, + ))) + } +} diff --git a/prover/crates/bin/witness_generator/src/scheduler/artifacts.rs b/prover/crates/bin/witness_generator/src/rounds/scheduler/artifacts.rs similarity index 81% rename from prover/crates/bin/witness_generator/src/scheduler/artifacts.rs rename to prover/crates/bin/witness_generator/src/rounds/scheduler/artifacts.rs index b20a9764188..ce9b32559b2 100644 --- a/prover/crates/bin/witness_generator/src/scheduler/artifacts.rs +++ b/prover/crates/bin/witness_generator/src/rounds/scheduler/artifacts.rs @@ -8,15 +8,16 @@ use zksync_prover_fri_types::{keys::FriCircuitKey, CircuitWrapper, FriProofWrapp use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber}; use crate::{ - artifacts::{ArtifactsManager, BlobUrls}, - scheduler::{SchedulerArtifacts, SchedulerWitnessGenerator}, + artifacts::ArtifactsManager, + rounds::scheduler::{Scheduler, SchedulerArtifacts}, }; #[async_trait] -impl ArtifactsManager for SchedulerWitnessGenerator { +impl ArtifactsManager for Scheduler { type InputMetadata = u32; type InputArtifacts = FriProofWrapper; type OutputArtifacts = SchedulerArtifacts; + type BlobUrls = String; async fn get_artifacts( metadata: &Self::InputMetadata, @@ -27,11 +28,13 @@ impl ArtifactsManager for SchedulerWitnessGenerator { Ok(artifacts) } - async fn save_artifacts( + async fn save_to_bucket( job_id: u32, artifacts: Self::OutputArtifacts, object_store: &dyn ObjectStore, - ) -> BlobUrls { + _shall_save_to_public_bucket: bool, + _public_blob_store: Option<Arc<dyn ObjectStore>>, + ) -> String { let key = FriCircuitKey { block_number: L1BatchNumber(job_id), circuit_id: 1, @@ -40,29 +43,22 @@ impl ArtifactsManager for SchedulerWitnessGenerator { aggregation_round: AggregationRound::Scheduler, }; - let blob_url = object_store + object_store .put( key, &CircuitWrapper::Recursive(artifacts.scheduler_circuit.clone()), ) .await - .unwrap(); - - BlobUrls::Url(blob_url) + .unwrap() } - async fn update_database( + async fn save_to_database( connection_pool: &ConnectionPool<Prover>, job_id: u32, started_at: Instant, - blob_urls: BlobUrls, + blob_urls: String, _artifacts: Self::OutputArtifacts, ) -> anyhow::Result<()> { - let blob_url = match blob_urls { - BlobUrls::Url(url) => url, - _ => panic!("Unexpected blob urls type"), - }; - let mut prover_connection = connection_pool.connection().await?; let mut transaction = prover_connection.start_transaction().await?; let protocol_version_id = transaction @@ -77,7 +73,7 @@ impl ArtifactsManager for SchedulerWitnessGenerator { 0, 0, AggregationRound::Scheduler, - &blob_url, + &blob_urls, false, protocol_version_id, ) diff --git a/prover/crates/bin/witness_generator/src/rounds/scheduler/mod.rs b/prover/crates/bin/witness_generator/src/rounds/scheduler/mod.rs new file mode 100644 index 00000000000..fc7dfa2accb --- /dev/null +++ b/prover/crates/bin/witness_generator/src/rounds/scheduler/mod.rs @@ -0,0 +1,206 @@ +use std::{convert::TryInto, sync::Arc, time::Instant}; + +use anyhow::Context as _; +use async_trait::async_trait; +use zkevm_test_harness::zkevm_circuits::recursion::{ + 
leaf_layer::input::RecursionLeafParametersWitness, NUM_BASE_LAYER_CIRCUITS, +}; +use zksync_object_store::ObjectStore; +use zksync_prover_dal::{ConnectionPool, Prover, ProverDal}; +use zksync_prover_fri_types::{ + circuit_definitions::{ + boojum::{ + field::goldilocks::{GoldilocksExt2, GoldilocksField}, + gadgets::recursion::recursive_tree_hasher::CircuitGoldilocksPoseidon2Sponge, + }, + circuit_definitions::recursion_layer::{ + scheduler::SchedulerCircuit, ZkSyncRecursionLayerStorageType, + ZkSyncRecursionLayerVerificationKey, ZkSyncRecursiveLayerCircuit, SCHEDULER_CAPACITY, + }, + recursion_layer_proof_config, + zkevm_circuits::scheduler::{input::SchedulerCircuitInstanceWitness, SchedulerConfig}, + }, + get_current_pod_name, FriProofWrapper, +}; +use zksync_prover_keystore::{keystore::Keystore, utils::get_leaf_vk_params}; +use zksync_types::{ + basic_fri_types::AggregationRound, protocol_version::ProtocolSemanticVersion, L1BatchNumber, +}; + +use crate::{ + artifacts::ArtifactsManager, metrics::WITNESS_GENERATOR_METRICS, rounds::JobManager, + utils::SchedulerPartialInputWrapper, +}; + +mod artifacts; + +#[derive(Clone)] +pub struct SchedulerArtifacts { + pub scheduler_circuit: ZkSyncRecursiveLayerCircuit, +} + +#[derive(Clone)] +pub struct SchedulerWitnessGeneratorJob { + block_number: L1BatchNumber, + scheduler_witness: SchedulerCircuitInstanceWitness< + GoldilocksField, + CircuitGoldilocksPoseidon2Sponge, + GoldilocksExt2, + >, + node_vk: ZkSyncRecursionLayerVerificationKey, + recursion_tip_vk: ZkSyncRecursionLayerVerificationKey, + leaf_layer_parameters: + [RecursionLeafParametersWitness<GoldilocksField>; NUM_BASE_LAYER_CIRCUITS], +} + +pub struct SchedulerWitnessJobMetadata { + pub l1_batch_number: L1BatchNumber, + pub recursion_tip_job_id: u32, +} + +pub struct Scheduler; + +#[async_trait] +impl JobManager for Scheduler { + type Job = SchedulerWitnessGeneratorJob; + type Metadata = SchedulerWitnessJobMetadata; + + const ROUND: AggregationRound = AggregationRound::Scheduler; + const SERVICE_NAME: &'static str = "fri_scheduler_witness_generator"; + + #[tracing::instrument( + skip_all, + fields(l1_batch = %job.block_number) + )] + async fn process_job( + job: SchedulerWitnessGeneratorJob, + _object_store: Arc<dyn ObjectStore>, + _max_circuits_in_flight: usize, + started_at: Instant, + ) -> anyhow::Result<SchedulerArtifacts> { + tracing::info!( + "Starting fri witness generation of type {:?} for block {}", + AggregationRound::Scheduler, + job.block_number.0 + ); + let config = SchedulerConfig { + proof_config: recursion_layer_proof_config(), + vk_fixed_parameters: job.recursion_tip_vk.clone().into_inner().fixed_parameters, + capacity: SCHEDULER_CAPACITY, + _marker: std::marker::PhantomData, + recursion_tip_vk: job.recursion_tip_vk.into_inner(), + node_layer_vk: job.node_vk.into_inner(), + leaf_layer_parameters: job.leaf_layer_parameters, + }; + + let scheduler_circuit = SchedulerCircuit { + witness: job.scheduler_witness, + config, + transcript_params: (), + _marker: std::marker::PhantomData, + }; + WITNESS_GENERATOR_METRICS.witness_generation_time[&AggregationRound::Scheduler.into()] + .observe(started_at.elapsed()); + + tracing::info!( + "Scheduler generation for block {} is complete in {:?}", + job.block_number.0, + started_at.elapsed() + ); + + Ok(SchedulerArtifacts { + scheduler_circuit: ZkSyncRecursiveLayerCircuit::SchedulerCircuit(scheduler_circuit), + }) + } + + #[tracing::instrument( + skip_all, + fields(l1_batch = %metadata.l1_batch_number) + )] + async fn prepare_job( + metadata: SchedulerWitnessJobMetadata, + object_store: &dyn 
ObjectStore, + keystore: Keystore, + ) -> anyhow::Result<SchedulerWitnessGeneratorJob> { + let started_at = Instant::now(); + let wrapper = Self::get_artifacts(&metadata.recursion_tip_job_id, object_store).await?; + let recursion_tip_proof = match wrapper { + FriProofWrapper::Base(_) => Err(anyhow::anyhow!( + "Expected only recursive proofs for scheduler l1 batch {}, got Base", + metadata.l1_batch_number + )), + FriProofWrapper::Recursive(recursive_proof) => Ok(recursive_proof.into_inner()), + }?; + WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::Scheduler.into()] + .observe(started_at.elapsed()); + + let started_at = Instant::now(); + let node_vk = keystore + .load_recursive_layer_verification_key( + ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8, + ) + .context("get_recursive_layer_vk_for_circuit_type()")?; + let SchedulerPartialInputWrapper(mut scheduler_witness) = + object_store.get(metadata.l1_batch_number).await?; + + let recursion_tip_vk = keystore + .load_recursive_layer_verification_key( + ZkSyncRecursionLayerStorageType::RecursionTipCircuit as u8, + ) + .context("get_recursion_tip_vk()")?; + scheduler_witness.proof_witnesses = vec![recursion_tip_proof].into(); + + let leaf_vk_commits = get_leaf_vk_params(&keystore).context("get_leaf_vk_params()")?; + let leaf_layer_parameters = leaf_vk_commits + .iter() + .map(|el| el.1.clone()) + .collect::<Vec<_>>() + .try_into() + .unwrap(); + + WITNESS_GENERATOR_METRICS.prepare_job_time[&AggregationRound::Scheduler.into()] + .observe(started_at.elapsed()); + + Ok(SchedulerWitnessGeneratorJob { + block_number: metadata.l1_batch_number, + scheduler_witness, + node_vk, + leaf_layer_parameters, + recursion_tip_vk, + }) + } + + async fn get_metadata( + connection_pool: ConnectionPool<Prover>, + protocol_version: ProtocolSemanticVersion, + ) -> anyhow::Result<Option<(u32, Self::Metadata)>> { + let pod_name = get_current_pod_name(); + let Some(l1_batch_number) = connection_pool + .connection() + .await? + .fri_witness_generator_dal() + .get_next_scheduler_witness_job(protocol_version, &pod_name) + .await + else { + return Ok(None); + }; + let recursion_tip_job_id = connection_pool + .connection() + .await?
+ .fri_prover_jobs_dal() + .get_recursion_tip_proof_job_id(l1_batch_number) + .await + .context(format!( + "could not find recursion tip proof for l1 batch {}", + l1_batch_number + ))?; + + Ok(Some(( + l1_batch_number.0, + SchedulerWitnessJobMetadata { + l1_batch_number, + recursion_tip_job_id, + }, + ))) + } +} diff --git a/prover/crates/bin/witness_generator/src/scheduler/job_processor.rs b/prover/crates/bin/witness_generator/src/scheduler/job_processor.rs deleted file mode 100644 index fe4f2db4090..00000000000 --- a/prover/crates/bin/witness_generator/src/scheduler/job_processor.rs +++ /dev/null @@ -1,129 +0,0 @@ -use std::time::Instant; - -use anyhow::Context as _; -use async_trait::async_trait; -use zksync_prover_dal::ProverDal; -use zksync_prover_fri_types::get_current_pod_name; -use zksync_queued_job_processor::JobProcessor; -use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber}; - -use crate::{ - artifacts::ArtifactsManager, - metrics::WITNESS_GENERATOR_METRICS, - scheduler::{ - prepare_job, SchedulerArtifacts, SchedulerWitnessGenerator, SchedulerWitnessGeneratorJob, - }, -}; - -#[async_trait] -impl JobProcessor for SchedulerWitnessGenerator { - type Job = SchedulerWitnessGeneratorJob; - type JobId = L1BatchNumber; - type JobArtifacts = SchedulerArtifacts; - - const SERVICE_NAME: &'static str = "fri_scheduler_witness_generator"; - - async fn get_next_job(&self) -> anyhow::Result<Option<(Self::JobId, Self::Job)>> { - let mut prover_connection = self.prover_connection_pool.connection().await?; - let pod_name = get_current_pod_name(); - let Some(l1_batch_number) = prover_connection - .fri_witness_generator_dal() - .get_next_scheduler_witness_job(self.protocol_version, &pod_name) - .await - else { - return Ok(None); - }; - let recursion_tip_job_id = prover_connection - .fri_prover_jobs_dal() - .get_recursion_tip_proof_job_id(l1_batch_number) - .await - .context(format!( - "could not find recursion tip proof for l1 batch {}", - l1_batch_number - ))?; - - Ok(Some(( - l1_batch_number, - prepare_job( - l1_batch_number, - recursion_tip_job_id, - &*self.object_store, - self.keystore.clone(), - ) - .await - .context("prepare_job()")?, - ))) - } - - async fn save_failure(&self, job_id: L1BatchNumber, _started_at: Instant, error: String) -> () { - self.prover_connection_pool - .connection() - .await - .unwrap() - .fri_witness_generator_dal() - .mark_scheduler_job_failed(&error, job_id) - .await; - } - - #[allow(clippy::async_yields_async)] - async fn process_job( - &self, - _job_id: &Self::JobId, - job: SchedulerWitnessGeneratorJob, - started_at: Instant, - ) -> tokio::task::JoinHandle<anyhow::Result<SchedulerArtifacts>> { - tokio::task::spawn_blocking(move || { - let block_number = job.block_number; - let _span = tracing::info_span!("scheduler", %block_number).entered(); - Ok(Self::process_job_sync(job, started_at)) - }) - } - - #[tracing::instrument( - skip_all, - fields(l1_batch = %job_id) - )] - async fn save_result( - &self, - job_id: L1BatchNumber, - started_at: Instant, - artifacts: SchedulerArtifacts, - ) -> anyhow::Result<()> { - let blob_save_started_at = Instant::now(); - - let blob_urls = - Self::save_artifacts(job_id.0, artifacts.clone(), &*self.object_store).await; - - WITNESS_GENERATOR_METRICS.blob_save_time[&AggregationRound::Scheduler.into()] - .observe(blob_save_started_at.elapsed()); - - Self::update_database( - &self.prover_connection_pool, - job_id.0, - started_at, - blob_urls, - artifacts, - ) - .await?; - - Ok(()) - } - - fn max_attempts(&self) -> u32 { - self.config.max_attempts - } - - async fn get_job_attempts(&self, 
job_id: &L1BatchNumber) -> anyhow::Result<u32> { - let mut prover_storage = self - .prover_connection_pool - .connection() - .await - .context("failed to acquire DB connection for SchedulerWitnessGenerator")?; - prover_storage - .fri_witness_generator_dal() - .get_scheduler_witness_job_attempts(*job_id) - .await - .map(|attempts| attempts.unwrap_or(0)) - .context("failed to get job attempts for SchedulerWitnessGenerator") - } -} diff --git a/prover/crates/bin/witness_generator/src/scheduler/mod.rs b/prover/crates/bin/witness_generator/src/scheduler/mod.rs deleted file mode 100644 index 10230b35c4f..00000000000 --- a/prover/crates/bin/witness_generator/src/scheduler/mod.rs +++ /dev/null @@ -1,183 +0,0 @@ -use std::{convert::TryInto, sync::Arc, time::Instant}; - -use anyhow::Context as _; -use zkevm_test_harness::zkevm_circuits::recursion::{ - leaf_layer::input::RecursionLeafParametersWitness, NUM_BASE_LAYER_CIRCUITS, -}; -use zksync_config::configs::FriWitnessGeneratorConfig; -use zksync_object_store::ObjectStore; -use zksync_prover_dal::{ConnectionPool, Prover}; -use zksync_prover_fri_types::{ - circuit_definitions::{ - boojum::{ - field::goldilocks::{GoldilocksExt2, GoldilocksField}, - gadgets::recursion::recursive_tree_hasher::CircuitGoldilocksPoseidon2Sponge, - }, - circuit_definitions::recursion_layer::{ - scheduler::SchedulerCircuit, ZkSyncRecursionLayerStorageType, - ZkSyncRecursionLayerVerificationKey, ZkSyncRecursiveLayerCircuit, SCHEDULER_CAPACITY, - }, - recursion_layer_proof_config, - zkevm_circuits::scheduler::{input::SchedulerCircuitInstanceWitness, SchedulerConfig}, - }, - FriProofWrapper, -}; -use zksync_prover_keystore::{keystore::Keystore, utils::get_leaf_vk_params}; -use zksync_types::{ - basic_fri_types::AggregationRound, protocol_version::ProtocolSemanticVersion, L1BatchNumber, -}; - -use crate::{ - artifacts::ArtifactsManager, metrics::WITNESS_GENERATOR_METRICS, - utils::SchedulerPartialInputWrapper, -}; - -mod artifacts; -mod job_processor; - -#[derive(Clone)] -pub struct SchedulerArtifacts { - pub scheduler_circuit: ZkSyncRecursiveLayerCircuit, -} - -#[derive(Clone)] -pub struct SchedulerWitnessGeneratorJob { - block_number: L1BatchNumber, - scheduler_witness: SchedulerCircuitInstanceWitness< - GoldilocksField, - CircuitGoldilocksPoseidon2Sponge, - GoldilocksExt2, - >, - node_vk: ZkSyncRecursionLayerVerificationKey, - recursion_tip_vk: ZkSyncRecursionLayerVerificationKey, - leaf_layer_parameters: - [RecursionLeafParametersWitness<GoldilocksField>; NUM_BASE_LAYER_CIRCUITS], -} - -#[derive(Debug)] -pub struct SchedulerWitnessGenerator { - config: FriWitnessGeneratorConfig, - object_store: Arc<dyn ObjectStore>, - prover_connection_pool: ConnectionPool<Prover>, - protocol_version: ProtocolSemanticVersion, - keystore: Keystore, -} - -impl SchedulerWitnessGenerator { - pub fn new( - config: FriWitnessGeneratorConfig, - object_store: Arc<dyn ObjectStore>, - prover_connection_pool: ConnectionPool<Prover>, - protocol_version: ProtocolSemanticVersion, - keystore: Keystore, - ) -> Self { - Self { - config, - object_store, - prover_connection_pool, - protocol_version, - keystore, - } - } - - #[tracing::instrument( - skip_all, - fields(l1_batch = %job.block_number) - )] - pub fn process_job_sync( - job: SchedulerWitnessGeneratorJob, - started_at: Instant, - ) -> SchedulerArtifacts { - tracing::info!( - "Starting fri witness generation of type {:?} for block {}", - AggregationRound::Scheduler, - job.block_number.0 - ); - let config = SchedulerConfig { - proof_config: recursion_layer_proof_config(), - vk_fixed_parameters: 
job.recursion_tip_vk.clone().into_inner().fixed_parameters, - capacity: SCHEDULER_CAPACITY, - _marker: std::marker::PhantomData, - recursion_tip_vk: job.recursion_tip_vk.into_inner(), - node_layer_vk: job.node_vk.into_inner(), - leaf_layer_parameters: job.leaf_layer_parameters, - }; - - let scheduler_circuit = SchedulerCircuit { - witness: job.scheduler_witness, - config, - transcript_params: (), - _marker: std::marker::PhantomData, - }; - WITNESS_GENERATOR_METRICS.witness_generation_time[&AggregationRound::Scheduler.into()] - .observe(started_at.elapsed()); - - tracing::info!( - "Scheduler generation for block {} is complete in {:?}", - job.block_number.0, - started_at.elapsed() - ); - - SchedulerArtifacts { - scheduler_circuit: ZkSyncRecursiveLayerCircuit::SchedulerCircuit(scheduler_circuit), - } - } -} - -#[tracing::instrument( - skip_all, - fields(l1_batch = %l1_batch_number) -)] -pub async fn prepare_job( - l1_batch_number: L1BatchNumber, - recursion_tip_job_id: u32, - object_store: &dyn ObjectStore, - keystore: Keystore, -) -> anyhow::Result<SchedulerWitnessGeneratorJob> { - let started_at = Instant::now(); - let wrapper = - SchedulerWitnessGenerator::get_artifacts(&recursion_tip_job_id, object_store).await?; - let recursion_tip_proof = match wrapper { - FriProofWrapper::Base(_) => Err(anyhow::anyhow!( - "Expected only recursive proofs for scheduler l1 batch {l1_batch_number}, got Base" - )), - FriProofWrapper::Recursive(recursive_proof) => Ok(recursive_proof.into_inner()), - }?; - WITNESS_GENERATOR_METRICS.blob_fetch_time[&AggregationRound::Scheduler.into()] - .observe(started_at.elapsed()); - - let started_at = Instant::now(); - let node_vk = keystore - .load_recursive_layer_verification_key( - ZkSyncRecursionLayerStorageType::NodeLayerCircuit as u8, - ) - .context("get_recursive_layer_vk_for_circuit_type()")?; - let SchedulerPartialInputWrapper(mut scheduler_witness) = - object_store.get(l1_batch_number).await?; - - let recursion_tip_vk = keystore - .load_recursive_layer_verification_key( - ZkSyncRecursionLayerStorageType::RecursionTipCircuit as u8, - ) - .context("get_recursion_tip_vk()")?; - scheduler_witness.proof_witnesses = vec![recursion_tip_proof].into(); - - let leaf_vk_commits = get_leaf_vk_params(&keystore).context("get_leaf_vk_params()")?; - let leaf_layer_parameters = leaf_vk_commits - .iter() - .map(|el| el.1.clone()) - .collect::<Vec<_>>() - .try_into() - .unwrap(); - - WITNESS_GENERATOR_METRICS.prepare_job_time[&AggregationRound::Scheduler.into()] - .observe(started_at.elapsed()); - - Ok(SchedulerWitnessGeneratorJob { - block_number: l1_batch_number, - scheduler_witness, - node_vk, - leaf_layer_parameters, - recursion_tip_vk, - }) -} diff --git a/prover/crates/bin/witness_generator/src/utils.rs b/prover/crates/bin/witness_generator/src/utils.rs index 3ea2b539773..8524bdae9ff 100644 --- a/prover/crates/bin/witness_generator/src/utils.rs +++ b/prover/crates/bin/witness_generator/src/utils.rs @@ -204,28 +204,6 @@ pub async fn save_recursive_layer_prover_input_artifacts( ids_and_urls } -#[tracing::instrument( - skip_all, - fields(l1_batch = %block_number, circuit_id = %circuit_id) -)] -pub async fn save_node_aggregations_artifacts( - block_number: L1BatchNumber, - circuit_id: u8, - depth: u16, - aggregations: Vec<(u64, RecursionQueueSimulator<GoldilocksField>)>, - object_store: &dyn ObjectStore, -) -> String { - let key = AggregationsKey { - block_number, - circuit_id, - depth, - }; - object_store - .put(key, &AggregationWrapper(aggregations)) - .await - .unwrap() -} - #[tracing::instrument(skip_all)] pub async fn 
load_proofs_for_job_ids( job_ids: &[u32], diff --git a/prover/crates/bin/witness_generator/tests/basic_test.rs b/prover/crates/bin/witness_generator/tests/basic_test.rs index 3323e3c681e..be6452dfc7d 100644 --- a/prover/crates/bin/witness_generator/tests/basic_test.rs +++ b/prover/crates/bin/witness_generator/tests/basic_test.rs @@ -15,8 +15,7 @@ use zksync_types::{ L1BatchNumber, }; use zksync_witness_generator::{ - leaf_aggregation::{prepare_leaf_aggregation_job, LeafAggregationWitnessGenerator}, - node_aggregation::{self, NodeAggregationWitnessGenerator}, + rounds::{JobManager, LeafAggregation, NodeAggregation}, utils::AggregationWrapper, }; @@ -52,17 +51,13 @@ async fn test_leaf_witness_gen() { .unwrap(); let keystore = Keystore::locate(); - let job = prepare_leaf_aggregation_job(leaf_aggregation_job_metadata, &*object_store, keystore) + let job = LeafAggregation::prepare_job(leaf_aggregation_job_metadata, &*object_store, keystore) .await .unwrap(); - let artifacts = LeafAggregationWitnessGenerator::process_job_impl( - job, - Instant::now(), - object_store.clone(), - 500, - ) - .await; + let artifacts = LeafAggregation::process_job(job, object_store.clone(), 500, Instant::now()) + .await + .unwrap(); let aggregations = AggregationWrapper(artifacts.aggregations); @@ -142,18 +137,14 @@ async fn test_node_witness_gen() { }; let keystore = Keystore::locate(); - let job = - node_aggregation::prepare_job(node_aggregation_job_metadata, &*object_store, keystore) - .await - .unwrap(); - - let artifacts = NodeAggregationWitnessGenerator::process_job_impl( - job, - Instant::now(), - object_store.clone(), - 500, - ) - .await; + let job = NodeAggregation::prepare_job(node_aggregation_job_metadata, &*object_store, keystore) + .await + .unwrap(); + + let artifacts = NodeAggregation::process_job(job, object_store.clone(), 500, Instant::now()) + .await + .unwrap(); + let aggregations = AggregationWrapper(artifacts.next_aggregations); let expected_results_object_store_config = ObjectStoreConfig { diff --git a/prover/crates/bin/witness_vector_generator/src/generator.rs b/prover/crates/bin/witness_vector_generator/src/generator.rs index 6695905c07e..646dd8ffda7 100644 --- a/prover/crates/bin/witness_vector_generator/src/generator.rs +++ b/prover/crates/bin/witness_vector_generator/src/generator.rs @@ -70,7 +70,7 @@ impl WitnessVectorGenerator { keystore: &Keystore, ) -> anyhow::Result { let finalization_hints = keystore - .load_finalization_hints(job.setup_data_key.clone()) + .load_finalization_hints(job.setup_data_key) .context("get_finalization_hints()")?; let cs = match job.circuit_wrapper.clone() { CircuitWrapper::Base(base_circuit) => { diff --git a/prover/crates/lib/keystore/Cargo.toml b/prover/crates/lib/keystore/Cargo.toml index 617030754f8..4d9addc26bc 100644 --- a/prover/crates/lib/keystore/Cargo.toml +++ b/prover/crates/lib/keystore/Cargo.toml @@ -27,6 +27,8 @@ once_cell.workspace = true md5.workspace = true sha3.workspace = true hex.workspace = true +tokio.workspace = true +futures = { workspace = true, features = ["compat"] } [features] default = [] diff --git a/prover/crates/lib/keystore/src/keystore.rs b/prover/crates/lib/keystore/src/keystore.rs index 28ce989287c..6225943e3cd 100644 --- a/prover/crates/lib/keystore/src/keystore.rs +++ b/prover/crates/lib/keystore/src/keystore.rs @@ -1,7 +1,9 @@ use std::{ + collections::HashMap, fs::{self, File}, io::Read, path::{Path, PathBuf}, + sync::Arc, }; use anyhow::Context as _; @@ -14,7 +16,7 @@ use circuit_definitions::{ }, 
zkevm_circuits::scheduler::aux::BaseLayerCircuitType, }; -use serde::{Deserialize, Serialize}; +use serde::{de::DeserializeOwned, Deserialize, Serialize}; use zkevm_test_harness::data_source::{in_memory_data_source::InMemoryDataSource, SetupDataSource}; use zksync_basic_types::basic_fri_types::AggregationRound; use zksync_prover_fri_types::ProverServiceDataKey; @@ -24,6 +26,7 @@ use zksync_utils::env::Workspace; use crate::GoldilocksGpuProverSetupData; use crate::{GoldilocksProverSetupData, VkCommitments}; +#[derive(Debug, Clone, Copy)] pub enum ProverServiceDataType { VerificationKey, SetupData, @@ -209,7 +212,7 @@ impl Keystore { key: ProverServiceDataKey, hint: &FinalizationHintsForProver, ) -> anyhow::Result<()> { - let filepath = self.get_file_path(key.clone(), ProverServiceDataType::FinalizationHints); + let filepath = self.get_file_path(key, ProverServiceDataType::FinalizationHints); tracing::info!("saving finalization hints for {:?} to: {:?}", key, filepath); let serialized = @@ -267,7 +270,7 @@ impl Keystore { &self, key: ProverServiceDataKey, ) -> anyhow::Result<GoldilocksProverSetupData> { - let filepath = self.get_file_path(key.clone(), ProverServiceDataType::SetupData); + let filepath = self.get_file_path(key, ProverServiceDataType::SetupData); let mut file = File::open(filepath.clone()) .with_context(|| format!("Failed reading setup-data from path: {filepath:?}"))?; @@ -286,7 +289,7 @@ impl Keystore { &self, key: ProverServiceDataKey, ) -> anyhow::Result<GoldilocksGpuProverSetupData> { - let filepath = self.get_file_path(key.clone(), ProverServiceDataType::SetupData); + let filepath = self.get_file_path(key, ProverServiceDataType::SetupData); let mut file = File::open(filepath.clone()) .with_context(|| format!("Failed reading setup-data from path: {filepath:?}"))?; @@ -301,7 +304,7 @@ impl Keystore { } pub fn is_setup_data_present(&self, key: &ProverServiceDataKey) -> bool { - Path::new(&self.get_file_path(key.clone(), ProverServiceDataType::SetupData)).exists() + Path::new(&self.get_file_path(*key, ProverServiceDataType::SetupData)).exists() } pub fn save_setup_data_for_circuit_type( @@ -309,7 +312,7 @@ impl Keystore { key: ProverServiceDataKey, serialized_setup_data: &Vec<u8>, ) -> anyhow::Result<()> { - let filepath = self.get_file_path(key.clone(), ProverServiceDataType::SetupData); + let filepath = self.get_file_path(key, ProverServiceDataType::SetupData); tracing::info!("saving {:?} setup data to: {:?}", key, filepath); std::fs::write(filepath.clone(), serialized_setup_data) .with_context(|| format!("Failed saving setup-data at path: {filepath:?}")) @@ -465,4 +468,49 @@ impl Keystore { pub fn save_commitments(&self, commitments: &VkCommitments) -> anyhow::Result<()> { Self::save_json_pretty(self.get_base_path().join("commitments.json"), &commitments) } + + /// Async loads mapping of all circuits to setup key, if successful + pub async fn load_all_setup_key_mapping( + &self, + ) -> anyhow::Result<HashMap<ProverServiceDataKey, Arc<GoldilocksGpuProverSetupData>>> { + self.load_key_mapping(ProverServiceDataType::SetupData) + .await + } + + /// Async loads mapping of all circuits to finalization hints, if successful + pub async fn load_all_finalization_hints_mapping( + &self, + ) -> anyhow::Result<HashMap<ProverServiceDataKey, Arc<FinalizationHintsForProver>>> { + self.load_key_mapping(ProverServiceDataType::FinalizationHints) + .await + } + + /// Async function that loads mapping from disk. + /// Whilst IO is not parallelizable, ser/de is. + async fn load_key_mapping<T: DeserializeOwned + Send + 'static>( + &self, + data_type: ProverServiceDataType, + ) -> anyhow::Result<HashMap<ProverServiceDataKey, Arc<T>>> { + let mut mapping: HashMap<ProverServiceDataKey, Arc<T>> = HashMap::new(); + + // Load each file in parallel. 
Note that FS access is not necessarily parallel, but + // deserialization is. For larger files, it makes a big difference. + // Note: `collect` is important, because iterators are lazy, and otherwise we won't actually + // spawn threads. + let handles: Vec<_> = ProverServiceDataKey::all() + .into_iter() + .map(|key| { + let filepath = self.get_file_path(key, data_type); + tokio::task::spawn_blocking(move || { + let data = Self::load_bincode_from_file(filepath)?; + anyhow::Ok((key, Arc::new(data))) + }) + }) + .collect(); + for handle in futures::future::join_all(handles).await { + let (key, setup_data) = handle.context("future loading key panicked")??; + mapping.insert(key, setup_data); + } + Ok(mapping) + } } diff --git a/prover/crates/lib/keystore/src/setup_data_generator.rs b/prover/crates/lib/keystore/src/setup_data_generator.rs index e69184ee936..c4790d67fea 100644 --- a/prover/crates/lib/keystore/src/setup_data_generator.rs +++ b/prover/crates/lib/keystore/src/setup_data_generator.rs @@ -33,7 +33,7 @@ pub fn generate_setup_data_common( let (finalization, vk) = if circuit.is_base_layer() { ( - Some(keystore.load_finalization_hints(circuit.clone())?), + Some(keystore.load_finalization_hints(circuit)?), data_source .get_base_layer_vk(circuit.circuit_id) .unwrap() @@ -41,7 +41,7 @@ pub fn generate_setup_data_common( ) } else { ( - Some(keystore.load_finalization_hints(circuit.clone())?), + Some(keystore.load_finalization_hints(circuit)?), data_source .get_recursion_layer_vk(circuit.circuit_id) .unwrap() @@ -86,7 +86,7 @@ pub trait SetupDataGenerator { ); return Ok("Skipped".to_string()); } - let serialized = self.generate_setup_data(circuit.clone())?; + let serialized = self.generate_setup_data(circuit)?; let digest = md5::compute(&serialized); if !dry_run { @@ -109,7 +109,7 @@ pub trait SetupDataGenerator { .iter() .map(|circuit| { let digest = self - .generate_and_write_setup_data(circuit.clone(), dry_run, recompute_if_missing) + .generate_and_write_setup_data(*circuit, dry_run, recompute_if_missing) .context(circuit.name()) .unwrap(); (circuit.name(), digest) diff --git a/prover/crates/lib/prover_dal/.sqlx/query-1297f0977132185d6bd4501f490f1cdac8b194f09926c133985479c533a651f2.json b/prover/crates/lib/prover_dal/.sqlx/query-1297f0977132185d6bd4501f490f1cdac8b194f09926c133985479c533a651f2.json new file mode 100644 index 00000000000..c99572bcc8e --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-1297f0977132185d6bd4501f490f1cdac8b194f09926c133985479c533a651f2.json @@ -0,0 +1,18 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE prover_jobs_fri\n SET\n status = $1\n WHERE\n l1_batch_number = $2\n AND sequence_number = $3\n AND aggregation_round = $4\n AND circuit_id = $5\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Int8", + "Int4", + "Int2", + "Int2" + ] + }, + "nullable": [] + }, + "hash": "1297f0977132185d6bd4501f490f1cdac8b194f09926c133985479c533a651f2" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-16548daf69e9ff0528904be2e142254a457665179d9cf0a3c0b18c3fe09e4838.json b/prover/crates/lib/prover_dal/.sqlx/query-16548daf69e9ff0528904be2e142254a457665179d9cf0a3c0b18c3fe09e4838.json deleted file mode 100644 index 5fe5032746e..00000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-16548daf69e9ff0528904be2e142254a457665179d9cf0a3c0b18c3fe09e4838.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE node_aggregation_witness_jobs_fri\n SET\n status = 'failed',\n error = $1,\n updated_at = NOW()\n 
WHERE\n id = $2\n AND status != 'successful'\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Text", - "Int8" - ] - }, - "nullable": [] - }, - "hash": "16548daf69e9ff0528904be2e142254a457665179d9cf0a3c0b18c3fe09e4838" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-fcddeb96dcd1611dedb2091c1be304e8a35fd65bf37e976b7106f57c57e70b9b.json b/prover/crates/lib/prover_dal/.sqlx/query-1a1b8595ccf645981050fdeb87c572f6c620f86bf6c53334878490c7004331f4.json similarity index 77% rename from prover/crates/lib/prover_dal/.sqlx/query-fcddeb96dcd1611dedb2091c1be304e8a35fd65bf37e976b7106f57c57e70b9b.json rename to prover/crates/lib/prover_dal/.sqlx/query-1a1b8595ccf645981050fdeb87c572f6c620f86bf6c53334878490c7004331f4.json index effc22d6a43..ec61ec67e36 100644 --- a/prover/crates/lib/prover_dal/.sqlx/query-fcddeb96dcd1611dedb2091c1be304e8a35fd65bf37e976b7106f57c57e70b9b.json +++ b/prover/crates/lib/prover_dal/.sqlx/query-1a1b8595ccf645981050fdeb87c572f6c620f86bf6c53334878490c7004331f4.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE gpu_prover_queue_fri\n SET\n instance_status = 'available',\n updated_at = NOW()\n WHERE\n instance_host = $1::TEXT::inet\n AND instance_port = $2\n AND instance_status = 'full'\n AND zone = $3\n ", + "query": "\n UPDATE gpu_prover_queue_fri\n SET\n instance_status = 'available',\n updated_at = NOW()\n WHERE\n instance_host = $1::TEXT::INET\n AND instance_port = $2\n AND instance_status = 'full'\n AND zone = $3\n ", "describe": { "columns": [], "parameters": { @@ -12,5 +12,5 @@ }, "nullable": [] }, - "hash": "fcddeb96dcd1611dedb2091c1be304e8a35fd65bf37e976b7106f57c57e70b9b" + "hash": "1a1b8595ccf645981050fdeb87c572f6c620f86bf6c53334878490c7004331f4" } diff --git a/prover/crates/lib/prover_dal/.sqlx/query-29f7a564a8373f7e44840e8e9e7d0cd5c6b1122c35d7ffdbbba30327ca3fb5a8.json b/prover/crates/lib/prover_dal/.sqlx/query-29f7a564a8373f7e44840e8e9e7d0cd5c6b1122c35d7ffdbbba30327ca3fb5a8.json new file mode 100644 index 00000000000..05163dcfa2e --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-29f7a564a8373f7e44840e8e9e7d0cd5c6b1122c35d7ffdbbba30327ca3fb5a8.json @@ -0,0 +1,38 @@ +{ + "db_name": "PostgreSQL", + "query": "\n SELECT\n protocol_version AS \"protocol_version!\",\n protocol_version_patch AS \"protocol_version_patch!\",\n COUNT(*) FILTER (\n WHERE\n status = 'queued'\n ) AS queued,\n COUNT(*) FILTER (\n WHERE\n status = 'in_progress'\n ) AS in_progress\n FROM\n prover_jobs_fri\n WHERE\n status IN ('queued', 'in_progress')\n AND protocol_version IS NOT NULL\n GROUP BY\n protocol_version,\n protocol_version_patch\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "protocol_version!", + "type_info": "Int4" + }, + { + "ordinal": 1, + "name": "protocol_version_patch!", + "type_info": "Int4" + }, + { + "ordinal": 2, + "name": "queued", + "type_info": "Int8" + }, + { + "ordinal": 3, + "name": "in_progress", + "type_info": "Int8" + } + ], + "parameters": { + "Left": [] + }, + "nullable": [ + true, + false, + null, + null + ] + }, + "hash": "29f7a564a8373f7e44840e8e9e7d0cd5c6b1122c35d7ffdbbba30327ca3fb5a8" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-2d1461e068e43fd16714610b383cf8c93630d529ec96e67aac078f18196f61a5.json b/prover/crates/lib/prover_dal/.sqlx/query-2d1461e068e43fd16714610b383cf8c93630d529ec96e67aac078f18196f61a5.json new file mode 100644 index 00000000000..50d121213fb --- /dev/null +++ 
b/prover/crates/lib/prover_dal/.sqlx/query-2d1461e068e43fd16714610b383cf8c93630d529ec96e67aac078f18196f61a5.json @@ -0,0 +1,19 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE prover_jobs_fri\n SET\n status = $1,\n attempts = $2\n WHERE\n l1_batch_number = $3\n AND sequence_number = $4\n AND aggregation_round = $5\n AND circuit_id = $6\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Int2", + "Int8", + "Int4", + "Int2", + "Int2" + ] + }, + "nullable": [] + }, + "hash": "2d1461e068e43fd16714610b383cf8c93630d529ec96e67aac078f18196f61a5" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-35a76415cb746d03da31481edc65adefab0bf3abf6853a6d36123c8adcaf813b.json b/prover/crates/lib/prover_dal/.sqlx/query-35a76415cb746d03da31481edc65adefab0bf3abf6853a6d36123c8adcaf813b.json new file mode 100644 index 00000000000..bf8db798e7d --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-35a76415cb746d03da31481edc65adefab0bf3abf6853a6d36123c8adcaf813b.json @@ -0,0 +1,47 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE scheduler_witness_jobs_fri\n SET\n status = 'queued',\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n l1_batch_number = $1\n AND attempts >= $2\n AND (\n status = 'in_progress'\n OR status = 'failed'\n )\n RETURNING\n l1_batch_number,\n status,\n attempts,\n error,\n picked_by\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "status", + "type_info": "Text" + }, + { + "ordinal": 2, + "name": "attempts", + "type_info": "Int2" + }, + { + "ordinal": 3, + "name": "error", + "type_info": "Text" + }, + { + "ordinal": 4, + "name": "picked_by", + "type_info": "Text" + } + ], + "parameters": { + "Left": [ + "Int8", + "Int2" + ] + }, + "nullable": [ + false, + false, + false, + true, + true + ] + }, + "hash": "35a76415cb746d03da31481edc65adefab0bf3abf6853a6d36123c8adcaf813b" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-3727d5614d2fe2a4d96f880eb72cd48c95ca5b4520dde415a2b5ff32ece47c86.json b/prover/crates/lib/prover_dal/.sqlx/query-3727d5614d2fe2a4d96f880eb72cd48c95ca5b4520dde415a2b5ff32ece47c86.json new file mode 100644 index 00000000000..d7eb6a32b42 --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-3727d5614d2fe2a4d96f880eb72cd48c95ca5b4520dde415a2b5ff32ece47c86.json @@ -0,0 +1,47 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE witness_inputs_fri\n SET\n status = 'queued',\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n l1_batch_number = $1\n AND attempts >= $2\n AND (\n status = 'in_progress'\n OR status = 'failed'\n )\n RETURNING\n l1_batch_number,\n status,\n attempts,\n error,\n picked_by\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "status", + "type_info": "Text" + }, + { + "ordinal": 2, + "name": "attempts", + "type_info": "Int2" + }, + { + "ordinal": 3, + "name": "error", + "type_info": "Text" + }, + { + "ordinal": 4, + "name": "picked_by", + "type_info": "Text" + } + ], + "parameters": { + "Left": [ + "Int8", + "Int2" + ] + }, + "nullable": [ + false, + false, + false, + true, + true + ] + }, + "hash": "3727d5614d2fe2a4d96f880eb72cd48c95ca5b4520dde415a2b5ff32ece47c86" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-37ad15f54f4a6f4f79c71a857f3a8d4cc59246dda91b19526e73f27a17c8e3da.json 
b/prover/crates/lib/prover_dal/.sqlx/query-37ad15f54f4a6f4f79c71a857f3a8d4cc59246dda91b19526e73f27a17c8e3da.json new file mode 100644 index 00000000000..c97fe7f4042 --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-37ad15f54f4a6f4f79c71a857f3a8d4cc59246dda91b19526e73f27a17c8e3da.json @@ -0,0 +1,47 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE recursion_tip_witness_jobs_fri\n SET\n status = 'queued',\n updated_at = NOW(),\n processing_started_at = NOW()\n WHERE\n l1_batch_number = $1\n AND attempts >= $2\n AND (\n status = 'in_progress'\n OR status = 'failed'\n )\n RETURNING\n l1_batch_number,\n status,\n attempts,\n error,\n picked_by\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "l1_batch_number", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "status", + "type_info": "Text" + }, + { + "ordinal": 2, + "name": "attempts", + "type_info": "Int2" + }, + { + "ordinal": 3, + "name": "error", + "type_info": "Text" + }, + { + "ordinal": 4, + "name": "picked_by", + "type_info": "Text" + } + ], + "parameters": { + "Left": [ + "Int8", + "Int2" + ] + }, + "nullable": [ + false, + false, + false, + true, + true + ] + }, + "hash": "37ad15f54f4a6f4f79c71a857f3a8d4cc59246dda91b19526e73f27a17c8e3da" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-53f78fdee39b113d2f55f6f951bd94f28b7b2b60d551d552a9b0bab1f1791e39.json b/prover/crates/lib/prover_dal/.sqlx/query-53f78fdee39b113d2f55f6f951bd94f28b7b2b60d551d552a9b0bab1f1791e39.json deleted file mode 100644 index 15a10f7ce3c..00000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-53f78fdee39b113d2f55f6f951bd94f28b7b2b60d551d552a9b0bab1f1791e39.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n attempts\n FROM\n leaf_aggregation_witness_jobs_fri\n WHERE\n id = $1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "attempts", - "type_info": "Int2" - } - ], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [ - false - ] - }, - "hash": "53f78fdee39b113d2f55f6f951bd94f28b7b2b60d551d552a9b0bab1f1791e39" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-5db868e03dc6901a0afa06f82a37a1a04821495487a80595cc9b523dac6ac8e9.json b/prover/crates/lib/prover_dal/.sqlx/query-5db868e03dc6901a0afa06f82a37a1a04821495487a80595cc9b523dac6ac8e9.json deleted file mode 100644 index 94dbaa80a10..00000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-5db868e03dc6901a0afa06f82a37a1a04821495487a80595cc9b523dac6ac8e9.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE scheduler_witness_jobs_fri\n SET\n status = 'failed',\n error = $1,\n updated_at = NOW()\n WHERE\n l1_batch_number = $2\n AND status != 'successful'\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Text", - "Int8" - ] - }, - "nullable": [] - }, - "hash": "5db868e03dc6901a0afa06f82a37a1a04821495487a80595cc9b523dac6ac8e9" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-67f5f3a015dc478f02f4f701c90d0fc9ac9a7f3dce2ba48c2d0e6f38b6ba455a.json b/prover/crates/lib/prover_dal/.sqlx/query-67f5f3a015dc478f02f4f701c90d0fc9ac9a7f3dce2ba48c2d0e6f38b6ba455a.json deleted file mode 100644 index 29838881a52..00000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-67f5f3a015dc478f02f4f701c90d0fc9ac9a7f3dce2ba48c2d0e6f38b6ba455a.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE leaf_aggregation_witness_jobs_fri\n SET\n status = 'failed',\n error = $1,\n updated_at = NOW()\n WHERE\n id = $2\n AND status != 
'successful'\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Text", - "Int8" - ] - }, - "nullable": [] - }, - "hash": "67f5f3a015dc478f02f4f701c90d0fc9ac9a7f3dce2ba48c2d0e6f38b6ba455a" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-6f20d468efe916f8e92cbf259b37ac83cd32a628d3e01e5cd1949c519683a352.json b/prover/crates/lib/prover_dal/.sqlx/query-6f20d468efe916f8e92cbf259b37ac83cd32a628d3e01e5cd1949c519683a352.json deleted file mode 100644 index 9053a0f5abb..00000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-6f20d468efe916f8e92cbf259b37ac83cd32a628d3e01e5cd1949c519683a352.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n attempts\n FROM\n recursion_tip_witness_jobs_fri\n WHERE\n l1_batch_number = $1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "attempts", - "type_info": "Int2" - } - ], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [ - false - ] - }, - "hash": "6f20d468efe916f8e92cbf259b37ac83cd32a628d3e01e5cd1949c519683a352" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-73266a8526c6adc315900e2e95441976a264759c4060c1a38e466ee2052fc17d.json b/prover/crates/lib/prover_dal/.sqlx/query-73266a8526c6adc315900e2e95441976a264759c4060c1a38e466ee2052fc17d.json new file mode 100644 index 00000000000..f8b141a8dac --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-73266a8526c6adc315900e2e95441976a264759c4060c1a38e466ee2052fc17d.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n recursion_tip_witness_jobs_fri (l1_batch_number, status, number_of_final_node_jobs, created_at, updated_at)\n VALUES\n ($1, 'waiting_for_proofs', 1, NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO\n UPDATE\n SET\n status = $2\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Text" + ] + }, + "nullable": [] + }, + "hash": "73266a8526c6adc315900e2e95441976a264759c4060c1a38e466ee2052fc17d" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-2e6e2b68efc28114f44616b68fcf1d4d9a4b83a8b42846d8373ea13b96d612cf.json b/prover/crates/lib/prover_dal/.sqlx/query-776ef00deb25b0453c1eb38c5eaa81aef0d77e0a4b02307f59e289c0e61717c5.json similarity index 88% rename from prover/crates/lib/prover_dal/.sqlx/query-2e6e2b68efc28114f44616b68fcf1d4d9a4b83a8b42846d8373ea13b96d612cf.json rename to prover/crates/lib/prover_dal/.sqlx/query-776ef00deb25b0453c1eb38c5eaa81aef0d77e0a4b02307f59e289c0e61717c5.json index 41428e91fee..c39b660fa2e 100644 --- a/prover/crates/lib/prover_dal/.sqlx/query-2e6e2b68efc28114f44616b68fcf1d4d9a4b83a8b42846d8373ea13b96d612cf.json +++ b/prover/crates/lib/prover_dal/.sqlx/query-776ef00deb25b0453c1eb38c5eaa81aef0d77e0a4b02307f59e289c0e61717c5.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n INSERT INTO\n gpu_prover_queue_fri (\n instance_host,\n instance_port,\n instance_status,\n specialized_prover_group_id,\n zone,\n created_at,\n updated_at,\n protocol_version,\n protocol_version_patch\n )\n VALUES\n (CAST($1::TEXT AS inet), $2, 'available', $3, $4, NOW(), NOW(), $5, $6)\n ON CONFLICT (instance_host, instance_port, zone) DO\n UPDATE\n SET\n instance_status = 'available',\n specialized_prover_group_id = $3,\n zone = $4,\n updated_at = NOW(),\n protocol_version = $5,\n protocol_version_patch = $6\n ", + "query": "\n INSERT INTO\n gpu_prover_queue_fri (\n instance_host,\n instance_port,\n instance_status,\n specialized_prover_group_id,\n zone,\n created_at,\n updated_at,\n protocol_version,\n protocol_version_patch\n )\n 
VALUES\n (CAST($1::TEXT AS INET), $2, 'available', $3, $4, NOW(), NOW(), $5, $6)\n ON CONFLICT (instance_host, instance_port, zone) DO\n UPDATE\n SET\n instance_status = 'available',\n specialized_prover_group_id = $3,\n zone = $4,\n updated_at = NOW(),\n protocol_version = $5,\n protocol_version_patch = $6\n ", "describe": { "columns": [], "parameters": { @@ -15,5 +15,5 @@ }, "nullable": [] }, - "hash": "2e6e2b68efc28114f44616b68fcf1d4d9a4b83a8b42846d8373ea13b96d612cf" + "hash": "776ef00deb25b0453c1eb38c5eaa81aef0d77e0a4b02307f59e289c0e61717c5" } diff --git a/prover/crates/lib/prover_dal/.sqlx/query-7d20c0bf35625185c1f6c675aa8fcddbb47c5e9965443f118f8edd7d562734a2.json b/prover/crates/lib/prover_dal/.sqlx/query-7d20c0bf35625185c1f6c675aa8fcddbb47c5e9965443f118f8edd7d562734a2.json new file mode 100644 index 00000000000..140b8f12675 --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-7d20c0bf35625185c1f6c675aa8fcddbb47c5e9965443f118f8edd7d562734a2.json @@ -0,0 +1,60 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE prover_jobs_fri\n SET\n status = 'in_progress',\n attempts = attempts + 1,\n updated_at = NOW(),\n processing_started_at = NOW(),\n picked_by = $3\n WHERE\n id = (\n SELECT\n id\n FROM\n prover_jobs_fri\n WHERE\n status = 'queued'\n AND protocol_version = $1\n AND protocol_version_patch = $2\n ORDER BY\n l1_batch_number ASC,\n aggregation_round ASC,\n circuit_id ASC,\n id ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n prover_jobs_fri.id,\n prover_jobs_fri.l1_batch_number,\n prover_jobs_fri.circuit_id,\n prover_jobs_fri.aggregation_round,\n prover_jobs_fri.sequence_number,\n prover_jobs_fri.depth,\n prover_jobs_fri.is_node_final_proof\n ", + "describe": { + "columns": [ + { + "ordinal": 0, + "name": "id", + "type_info": "Int8" + }, + { + "ordinal": 1, + "name": "l1_batch_number", + "type_info": "Int8" + }, + { + "ordinal": 2, + "name": "circuit_id", + "type_info": "Int2" + }, + { + "ordinal": 3, + "name": "aggregation_round", + "type_info": "Int2" + }, + { + "ordinal": 4, + "name": "sequence_number", + "type_info": "Int4" + }, + { + "ordinal": 5, + "name": "depth", + "type_info": "Int4" + }, + { + "ordinal": 6, + "name": "is_node_final_proof", + "type_info": "Bool" + } + ], + "parameters": { + "Left": [ + "Int4", + "Int4", + "Text" + ] + }, + "nullable": [ + false, + false, + false, + false, + false, + false, + false + ] + }, + "hash": "7d20c0bf35625185c1f6c675aa8fcddbb47c5e9965443f118f8edd7d562734a2" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-806b82a9effd885ba537a2a1c7d7227120a8279db1875d26ccae5ee0785f46a9.json b/prover/crates/lib/prover_dal/.sqlx/query-806b82a9effd885ba537a2a1c7d7227120a8279db1875d26ccae5ee0785f46a9.json deleted file mode 100644 index c8e8a7aa603..00000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-806b82a9effd885ba537a2a1c7d7227120a8279db1875d26ccae5ee0785f46a9.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n attempts\n FROM\n node_aggregation_witness_jobs_fri\n WHERE\n id = $1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "attempts", - "type_info": "Int2" - } - ], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [ - false - ] - }, - "hash": "806b82a9effd885ba537a2a1c7d7227120a8279db1875d26ccae5ee0785f46a9" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-b4794e6a0c2366d5d95ab373c310103263af3ff5cb6c9dc5df59d3cd2a5e56b4.json 
b/prover/crates/lib/prover_dal/.sqlx/query-88b83c02ef8e3129ff6a416f5d71c765b3be3958cab0f3bde03f51e03777afd4.json similarity index 75% rename from prover/crates/lib/prover_dal/.sqlx/query-b4794e6a0c2366d5d95ab373c310103263af3ff5cb6c9dc5df59d3cd2a5e56b4.json rename to prover/crates/lib/prover_dal/.sqlx/query-88b83c02ef8e3129ff6a416f5d71c765b3be3958cab0f3bde03f51e03777afd4.json index 14b4115b30e..63df4b26c81 100644 --- a/prover/crates/lib/prover_dal/.sqlx/query-b4794e6a0c2366d5d95ab373c310103263af3ff5cb6c9dc5df59d3cd2a5e56b4.json +++ b/prover/crates/lib/prover_dal/.sqlx/query-88b83c02ef8e3129ff6a416f5d71c765b3be3958cab0f3bde03f51e03777afd4.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE gpu_prover_queue_fri\n SET\n instance_status = $1,\n updated_at = NOW()\n WHERE\n instance_host = $2::TEXT::inet\n AND instance_port = $3\n AND zone = $4\n ", + "query": "\n UPDATE gpu_prover_queue_fri\n SET\n instance_status = $1,\n updated_at = NOW()\n WHERE\n instance_host = $2::TEXT::INET\n AND instance_port = $3\n AND zone = $4\n ", "describe": { "columns": [], "parameters": { @@ -13,5 +13,5 @@ }, "nullable": [] }, - "hash": "b4794e6a0c2366d5d95ab373c310103263af3ff5cb6c9dc5df59d3cd2a5e56b4" + "hash": "88b83c02ef8e3129ff6a416f5d71c765b3be3958cab0f3bde03f51e03777afd4" } diff --git a/prover/crates/lib/prover_dal/.sqlx/query-9730c8225ff2cf3111185e81f602a4a98ec63eb942c73ce4448d0957346047cd.json b/prover/crates/lib/prover_dal/.sqlx/query-9730c8225ff2cf3111185e81f602a4a98ec63eb942c73ce4448d0957346047cd.json new file mode 100644 index 00000000000..d23ed8d9fc8 --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-9730c8225ff2cf3111185e81f602a4a98ec63eb942c73ce4448d0957346047cd.json @@ -0,0 +1,17 @@ +{ + "db_name": "PostgreSQL", + "query": "\n UPDATE leaf_aggregation_witness_jobs_fri\n SET\n status = $1,\n attempts = $2\n WHERE\n l1_batch_number = $3\n AND circuit_id = $4\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Text", + "Int2", + "Int8", + "Int2" + ] + }, + "nullable": [] + }, + "hash": "9730c8225ff2cf3111185e81f602a4a98ec63eb942c73ce4448d0957346047cd" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-2095e5646c382ccbc6e3bafdeddaae31358088e142dff51c9f0bde8f386900d3.json b/prover/crates/lib/prover_dal/.sqlx/query-a471aeb02911569bc201db0aa017c0d0f55630a33b69c9a0fefd9cb8be9356ee.json similarity index 78% rename from prover/crates/lib/prover_dal/.sqlx/query-2095e5646c382ccbc6e3bafdeddaae31358088e142dff51c9f0bde8f386900d3.json rename to prover/crates/lib/prover_dal/.sqlx/query-a471aeb02911569bc201db0aa017c0d0f55630a33b69c9a0fefd9cb8be9356ee.json index fd7c7c7874d..d21f1f18570 100644 --- a/prover/crates/lib/prover_dal/.sqlx/query-2095e5646c382ccbc6e3bafdeddaae31358088e142dff51c9f0bde8f386900d3.json +++ b/prover/crates/lib/prover_dal/.sqlx/query-a471aeb02911569bc201db0aa017c0d0f55630a33b69c9a0fefd9cb8be9356ee.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n SELECT\n instance_status\n FROM\n gpu_prover_queue_fri\n WHERE\n instance_host = $1::TEXT::inet\n AND instance_port = $2\n AND zone = $3\n ", + "query": "\n SELECT\n instance_status\n FROM\n gpu_prover_queue_fri\n WHERE\n instance_host = $1::TEXT::INET\n AND instance_port = $2\n AND zone = $3\n ", "describe": { "columns": [ { @@ -20,5 +20,5 @@ false ] }, - "hash": "2095e5646c382ccbc6e3bafdeddaae31358088e142dff51c9f0bde8f386900d3" + "hash": "a471aeb02911569bc201db0aa017c0d0f55630a33b69c9a0fefd9cb8be9356ee" } diff --git 
a/prover/crates/lib/prover_dal/.sqlx/query-a817f0fec85388b3e2510ce259208a01b63ae4aa03c983c3a52c802d585e5a80.json b/prover/crates/lib/prover_dal/.sqlx/query-a817f0fec85388b3e2510ce259208a01b63ae4aa03c983c3a52c802d585e5a80.json new file mode 100644 index 00000000000..93532150f7f --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-a817f0fec85388b3e2510ce259208a01b63ae4aa03c983c3a52c802d585e5a80.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n proof_compression_jobs_fri (l1_batch_number, status, created_at, updated_at)\n VALUES\n ($1, $2, NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO\n UPDATE\n SET\n status = $2\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Text" + ] + }, + "nullable": [] + }, + "hash": "a817f0fec85388b3e2510ce259208a01b63ae4aa03c983c3a52c802d585e5a80" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-b321c5ba22358cbb1fd9c627f1e7b56187686173327498ac75424593547c19c5.json b/prover/crates/lib/prover_dal/.sqlx/query-b321c5ba22358cbb1fd9c627f1e7b56187686173327498ac75424593547c19c5.json deleted file mode 100644 index bdd22927d38..00000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-b321c5ba22358cbb1fd9c627f1e7b56187686173327498ac75424593547c19c5.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n attempts\n FROM\n scheduler_witness_jobs_fri\n WHERE\n l1_batch_number = $1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "attempts", - "type_info": "Int2" - } - ], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [ - false - ] - }, - "hash": "b321c5ba22358cbb1fd9c627f1e7b56187686173327498ac75424593547c19c5" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-b3d71dbe14bcd94131b29b64dcb49b6370c211a7fc24ad03a5f0e327f9d18040.json b/prover/crates/lib/prover_dal/.sqlx/query-b3d71dbe14bcd94131b29b64dcb49b6370c211a7fc24ad03a5f0e327f9d18040.json deleted file mode 100644 index 0ca284a3f57..00000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-b3d71dbe14bcd94131b29b64dcb49b6370c211a7fc24ad03a5f0e327f9d18040.json +++ /dev/null @@ -1,22 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n SELECT\n attempts\n FROM\n witness_inputs_fri\n WHERE\n l1_batch_number = $1\n ", - "describe": { - "columns": [ - { - "ordinal": 0, - "name": "attempts", - "type_info": "Int2" - } - ], - "parameters": { - "Left": [ - "Int8" - ] - }, - "nullable": [ - false - ] - }, - "hash": "b3d71dbe14bcd94131b29b64dcb49b6370c211a7fc24ad03a5f0e327f9d18040" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-bf06bd08d8ccf67fc00bbc364715263556f258565f79cbb40f5ecc1a4f6402f5.json b/prover/crates/lib/prover_dal/.sqlx/query-bf06bd08d8ccf67fc00bbc364715263556f258565f79cbb40f5ecc1a4f6402f5.json deleted file mode 100644 index c1f9806625d..00000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-bf06bd08d8ccf67fc00bbc364715263556f258565f79cbb40f5ecc1a4f6402f5.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE recursion_tip_witness_jobs_fri\n SET\n status = 'failed',\n error = $1,\n updated_at = NOW()\n WHERE\n l1_batch_number = $2\n AND status != 'successful'\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Text", - "Int8" - ] - }, - "nullable": [] - }, - "hash": "bf06bd08d8ccf67fc00bbc364715263556f258565f79cbb40f5ecc1a4f6402f5" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-c8daa62b3835c15fafb3f83deabb5a4672ad50a9de92c84d939ac4c69842e355.json 
b/prover/crates/lib/prover_dal/.sqlx/query-c8daa62b3835c15fafb3f83deabb5a4672ad50a9de92c84d939ac4c69842e355.json new file mode 100644 index 00000000000..cadc931fa1c --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-c8daa62b3835c15fafb3f83deabb5a4672ad50a9de92c84d939ac4c69842e355.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n node_aggregation_witness_jobs_fri (l1_batch_number, circuit_id, status, created_at, updated_at)\n VALUES\n ($1, $2, 'waiting_for_proofs', NOW(), NOW())\n ON CONFLICT (l1_batch_number, circuit_id, depth) DO\n UPDATE\n SET\n status = $3\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int2", + "Text" + ] + }, + "nullable": [] + }, + "hash": "c8daa62b3835c15fafb3f83deabb5a4672ad50a9de92c84d939ac4c69842e355" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-d4949debfe0dc5112204cd196c68b02c44b099e27e3c45c5c810cd5fcd8884ed.json b/prover/crates/lib/prover_dal/.sqlx/query-d4949debfe0dc5112204cd196c68b02c44b099e27e3c45c5c810cd5fcd8884ed.json deleted file mode 100644 index 9121539b317..00000000000 --- a/prover/crates/lib/prover_dal/.sqlx/query-d4949debfe0dc5112204cd196c68b02c44b099e27e3c45c5c810cd5fcd8884ed.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "db_name": "PostgreSQL", - "query": "\n UPDATE witness_inputs_fri\n SET\n status = 'failed',\n error = $1,\n updated_at = NOW()\n WHERE\n l1_batch_number = $2\n AND status != 'successful'\n ", - "describe": { - "columns": [], - "parameters": { - "Left": [ - "Text", - "Int8" - ] - }, - "nullable": [] - }, - "hash": "d4949debfe0dc5112204cd196c68b02c44b099e27e3c45c5c810cd5fcd8884ed" -} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-d91c931e2a14cf1183a608d041fc6fadb8e12a9218399d189b4d95e2ca4fcc48.json b/prover/crates/lib/prover_dal/.sqlx/query-e438a4f0c705fcb39e017912ce8e1bb675a86ae14a863fa31eb513af65d606ed.json similarity index 54% rename from prover/crates/lib/prover_dal/.sqlx/query-d91c931e2a14cf1183a608d041fc6fadb8e12a9218399d189b4d95e2ca4fcc48.json rename to prover/crates/lib/prover_dal/.sqlx/query-e438a4f0c705fcb39e017912ce8e1bb675a86ae14a863fa31eb513af65d606ed.json index c353ecf1bad..cf9ff8396ef 100644 --- a/prover/crates/lib/prover_dal/.sqlx/query-d91c931e2a14cf1183a608d041fc6fadb8e12a9218399d189b4d95e2ca4fcc48.json +++ b/prover/crates/lib/prover_dal/.sqlx/query-e438a4f0c705fcb39e017912ce8e1bb675a86ae14a863fa31eb513af65d606ed.json @@ -1,6 +1,6 @@ { "db_name": "PostgreSQL", - "query": "\n UPDATE witness_inputs_fri\n SET\n status = 'in_progress',\n attempts = attempts + 1,\n updated_at = NOW(),\n processing_started_at = NOW(),\n picked_by = $3\n WHERE\n l1_batch_number = (\n SELECT\n l1_batch_number\n FROM\n witness_inputs_fri\n WHERE\n l1_batch_number <= $1\n AND status = 'queued'\n AND protocol_version = $2\n AND protocol_version_patch = $4\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n witness_inputs_fri.l1_batch_number\n ", + "query": "\n UPDATE witness_inputs_fri\n SET\n status = 'in_progress',\n attempts = attempts + 1,\n updated_at = NOW(),\n processing_started_at = NOW(),\n picked_by = $2\n WHERE\n l1_batch_number = (\n SELECT\n l1_batch_number\n FROM\n witness_inputs_fri\n WHERE\n status = 'queued'\n AND protocol_version = $1\n AND protocol_version_patch = $3\n ORDER BY\n l1_batch_number ASC\n LIMIT\n 1\n FOR UPDATE\n SKIP LOCKED\n )\n RETURNING\n witness_inputs_fri.l1_batch_number\n ", "describe": { "columns": [ { @@ -11,7 +11,6 @@ ], "parameters": { "Left": [ - "Int8", "Int4", "Text", "Int4" 
@@ -21,5 +20,5 @@ false ] }, - "hash": "d91c931e2a14cf1183a608d041fc6fadb8e12a9218399d189b4d95e2ca4fcc48" + "hash": "e438a4f0c705fcb39e017912ce8e1bb675a86ae14a863fa31eb513af65d606ed" } diff --git a/prover/crates/lib/prover_dal/.sqlx/query-e875dcbbdaed6998dbea45d4eab5d005d8760c4809b7aef902155196873da66e.json b/prover/crates/lib/prover_dal/.sqlx/query-e875dcbbdaed6998dbea45d4eab5d005d8760c4809b7aef902155196873da66e.json new file mode 100644 index 00000000000..4ee9278fe42 --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-e875dcbbdaed6998dbea45d4eab5d005d8760c4809b7aef902155196873da66e.json @@ -0,0 +1,15 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n scheduler_witness_jobs_fri (\n l1_batch_number,\n scheduler_partial_input_blob_url,\n status,\n created_at,\n updated_at\n )\n VALUES\n ($1, '', 'waiting_for_proofs', NOW(), NOW())\n ON CONFLICT (l1_batch_number) DO\n UPDATE\n SET\n status = $2\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Text" + ] + }, + "nullable": [] + }, + "hash": "e875dcbbdaed6998dbea45d4eab5d005d8760c4809b7aef902155196873da66e" +} diff --git a/prover/crates/lib/prover_dal/.sqlx/query-eec29cbff034818f4fb5ec1e6ad38e1010d7389457b3c97e9b238a3a0291a54e.json b/prover/crates/lib/prover_dal/.sqlx/query-eec29cbff034818f4fb5ec1e6ad38e1010d7389457b3c97e9b238a3a0291a54e.json new file mode 100644 index 00000000000..f8e92b1ad66 --- /dev/null +++ b/prover/crates/lib/prover_dal/.sqlx/query-eec29cbff034818f4fb5ec1e6ad38e1010d7389457b3c97e9b238a3a0291a54e.json @@ -0,0 +1,16 @@ +{ + "db_name": "PostgreSQL", + "query": "\n INSERT INTO\n leaf_aggregation_witness_jobs_fri (\n l1_batch_number,\n circuit_id,\n status,\n number_of_basic_circuits,\n created_at,\n updated_at\n )\n VALUES\n ($1, $2, 'waiting_for_proofs', 2, NOW(), NOW())\n ON CONFLICT (l1_batch_number, circuit_id) DO\n UPDATE\n SET\n status = $3\n ", + "describe": { + "columns": [], + "parameters": { + "Left": [ + "Int8", + "Int2", + "Text" + ] + }, + "nullable": [] + }, + "hash": "eec29cbff034818f4fb5ec1e6ad38e1010d7389457b3c97e9b238a3a0291a54e" +} diff --git a/prover/crates/lib/prover_dal/src/cli_test_dal.rs b/prover/crates/lib/prover_dal/src/cli_test_dal.rs index 069fa9c6a41..d0841820337 100644 --- a/prover/crates/lib/prover_dal/src/cli_test_dal.rs +++ b/prover/crates/lib/prover_dal/src/cli_test_dal.rs @@ -20,14 +20,23 @@ impl CliTestDal<'_, '_> { batch_number: L1BatchNumber, sequence_number: usize, ) { - sqlx::query(&format!( - "UPDATE prover_jobs_fri SET status = '{}' - WHERE l1_batch_number = {} - AND sequence_number = {} - AND aggregation_round = {} - AND circuit_id = {}", - status, batch_number.0, sequence_number, aggregation_round, circuit_id, - )) + sqlx::query!( + r#" + UPDATE prover_jobs_fri + SET + status = $1 + WHERE + l1_batch_number = $2 + AND sequence_number = $3 + AND aggregation_round = $4 + AND circuit_id = $5 + "#, + status.to_string(), + batch_number.0 as i64, + sequence_number as i64, + aggregation_round as i16, + circuit_id as i64, + ) .execute(self.storage.conn()) .await .unwrap(); @@ -39,8 +48,8 @@ impl CliTestDal<'_, '_> { batch_number: L1BatchNumber, circuit_id: u8, ) { - sqlx::query(&format!( - " + sqlx::query!( + r#" INSERT INTO leaf_aggregation_witness_jobs_fri ( l1_batch_number, @@ -51,13 +60,16 @@ impl CliTestDal<'_, '_> { updated_at ) VALUES - ({}, {}, 'waiting_for_proofs', 2, NOW(), NOW()) + ($1, $2, 'waiting_for_proofs', 2, NOW(), NOW()) ON CONFLICT (l1_batch_number, circuit_id) DO UPDATE - SET status = '{}' - ", - batch_number.0, 
circuit_id, status - )) + SET + status = $3 + "#, + batch_number.0 as i64, + circuit_id as i16, + status.to_string() + ) .execute(self.storage.conn()) .await .unwrap(); @@ -69,48 +81,41 @@ impl CliTestDal<'_, '_> { batch_number: L1BatchNumber, circuit_id: u8, ) { - sqlx::query(&format!( - " + sqlx::query!( + r#" INSERT INTO - node_aggregation_witness_jobs_fri ( - l1_batch_number, - circuit_id, - status, - created_at, - updated_at - ) + node_aggregation_witness_jobs_fri (l1_batch_number, circuit_id, status, created_at, updated_at) VALUES - ({}, {}, 'waiting_for_proofs', NOW(), NOW()) + ($1, $2, 'waiting_for_proofs', NOW(), NOW()) ON CONFLICT (l1_batch_number, circuit_id, depth) DO UPDATE - SET status = '{}' - ", - batch_number.0, circuit_id, status, - )) + SET + status = $3 + "#, + batch_number.0 as i64, + circuit_id as i16, + status.to_string(), + ) .execute(self.storage.conn()) .await .unwrap(); } pub async fn insert_rt_job(&mut self, status: WitnessJobStatus, batch_number: L1BatchNumber) { - sqlx::query(&format!( - " + sqlx::query!( + r#" INSERT INTO - recursion_tip_witness_jobs_fri ( - l1_batch_number, - status, - number_of_final_node_jobs, - created_at, - updated_at - ) + recursion_tip_witness_jobs_fri (l1_batch_number, status, number_of_final_node_jobs, created_at, updated_at) VALUES - ({}, 'waiting_for_proofs',1, NOW(), NOW()) + ($1, 'waiting_for_proofs', 1, NOW(), NOW()) ON CONFLICT (l1_batch_number) DO UPDATE - SET status = '{}' - ", - batch_number.0, status, - )) + SET + status = $2 + "#, + batch_number.0 as i64, + status.to_string(), + ) .execute(self.storage.conn()) .await .unwrap(); @@ -121,8 +126,8 @@ impl CliTestDal<'_, '_> { status: WitnessJobStatus, batch_number: L1BatchNumber, ) { - sqlx::query(&format!( - " + sqlx::query!( + r#" INSERT INTO scheduler_witness_jobs_fri ( l1_batch_number, @@ -132,13 +137,15 @@ impl CliTestDal<'_, '_> { updated_at ) VALUES - ({}, '', 'waiting_for_proofs', NOW(), NOW()) + ($1, '', 'waiting_for_proofs', NOW(), NOW()) ON CONFLICT (l1_batch_number) DO UPDATE - SET status = '{}' - ", - batch_number.0, status, - )) + SET + status = $2 + "#, + batch_number.0 as i64, + status.to_string(), + ) .execute(self.storage.conn()) .await .unwrap(); @@ -149,23 +156,20 @@ impl CliTestDal<'_, '_> { status: ProofCompressionJobStatus, batch_number: L1BatchNumber, ) { - sqlx::query(&format!( - " + sqlx::query!( + r#" INSERT INTO - proof_compression_jobs_fri ( - l1_batch_number, - status, - created_at, - updated_at - ) + proof_compression_jobs_fri (l1_batch_number, status, created_at, updated_at) VALUES - ({}, '{}', NOW(), NOW()) + ($1, $2, NOW(), NOW()) ON CONFLICT (l1_batch_number) DO UPDATE - SET status = '{}' - ", - batch_number.0, status, status, - )) + SET + status = $2 + "#, + batch_number.0 as i64, + status.to_string(), + ) .execute(self.storage.conn()) .await .unwrap(); @@ -180,15 +184,25 @@ impl CliTestDal<'_, '_> { batch_number: L1BatchNumber, sequence_number: usize, ) { - sqlx::query(&format!( - "UPDATE prover_jobs_fri - SET status = '{}', attempts = {} - WHERE l1_batch_number = {} - AND sequence_number = {} - AND aggregation_round = {} - AND circuit_id = {}", - status, attempts, batch_number.0, sequence_number, aggregation_round, circuit_id, - )) + sqlx::query!( + r#" + UPDATE prover_jobs_fri + SET + status = $1, + attempts = $2 + WHERE + l1_batch_number = $3 + AND sequence_number = $4 + AND aggregation_round = $5 + AND circuit_id = $6 + "#, + status.to_string(), + attempts as i64, + batch_number.0 as i64, + sequence_number as i64, + aggregation_round 
as i64, + circuit_id as i16, + ) .execute(self.storage.conn()) .await .unwrap(); @@ -201,13 +215,21 @@ impl CliTestDal<'_, '_> { circuit_id: u8, batch_number: L1BatchNumber, ) { - sqlx::query(&format!( - "UPDATE leaf_aggregation_witness_jobs_fri - SET status = '{}', attempts = {} - WHERE l1_batch_number = {} - AND circuit_id = {}", - status, attempts, batch_number.0, circuit_id, - )) + sqlx::query!( + r#" + UPDATE leaf_aggregation_witness_jobs_fri + SET + status = $1, + attempts = $2 + WHERE + l1_batch_number = $3 + AND circuit_id = $4 + "#, + status.to_string(), + attempts as i64, + batch_number.0 as i64, + circuit_id as i16, + ) .execute(self.storage.conn()) .await .unwrap(); diff --git a/prover/crates/lib/prover_dal/src/fri_gpu_prover_queue_dal.rs b/prover/crates/lib/prover_dal/src/fri_gpu_prover_queue_dal.rs index aa4810ad2f6..12f719d6401 100644 --- a/prover/crates/lib/prover_dal/src/fri_gpu_prover_queue_dal.rs +++ b/prover/crates/lib/prover_dal/src/fri_gpu_prover_queue_dal.rs @@ -96,7 +96,7 @@ impl FriGpuProverQueueDal<'_, '_> { protocol_version_patch ) VALUES - (CAST($1::TEXT AS inet), $2, 'available', $3, $4, NOW(), NOW(), $5, $6) + (CAST($1::TEXT AS INET), $2, 'available', $3, $4, NOW(), NOW(), $5, $6) ON CONFLICT (instance_host, instance_port, zone) DO UPDATE SET @@ -132,7 +132,7 @@ impl FriGpuProverQueueDal<'_, '_> { instance_status = $1, updated_at = NOW() WHERE - instance_host = $2::TEXT::inet + instance_host = $2::TEXT::INET AND instance_port = $3 AND zone = $4 "#, @@ -158,7 +158,7 @@ impl FriGpuProverQueueDal<'_, '_> { instance_status = 'available', updated_at = NOW() WHERE - instance_host = $1::TEXT::inet + instance_host = $1::TEXT::INET AND instance_port = $2 AND instance_status = 'full' AND zone = $3 @@ -184,7 +184,7 @@ impl FriGpuProverQueueDal<'_, '_> { FROM gpu_prover_queue_fri WHERE - instance_host = $1::TEXT::inet + instance_host = $1::TEXT::INET AND instance_port = $2 AND zone = $3 "#, diff --git a/prover/crates/lib/prover_dal/src/fri_protocol_versions_dal.rs b/prover/crates/lib/prover_dal/src/fri_protocol_versions_dal.rs index 50df1046e67..8c1c497eede 100644 --- a/prover/crates/lib/prover_dal/src/fri_protocol_versions_dal.rs +++ b/prover/crates/lib/prover_dal/src/fri_protocol_versions_dal.rs @@ -26,9 +26,7 @@ impl FriProtocolVersionsDal<'_, '_> { ON CONFLICT (id, protocol_version_patch) DO NOTHING "#, id.minor as i32, - l1_verifier_config - .snark_wrapper_vk_hash - .as_bytes(), + l1_verifier_config.snark_wrapper_vk_hash.as_bytes(), id.patch.0 as i32 ) .execute(self.storage.conn()) diff --git a/prover/crates/lib/prover_dal/src/fri_prover_dal.rs b/prover/crates/lib/prover_dal/src/fri_prover_dal.rs index 4e68154290d..71d0c11728b 100644 --- a/prover/crates/lib/prover_dal/src/fri_prover_dal.rs +++ b/prover/crates/lib/prover_dal/src/fri_prover_dal.rs @@ -6,8 +6,10 @@ use zksync_basic_types::{ AggregationRound, CircuitIdRoundTuple, CircuitProverStatsEntry, ProtocolVersionedCircuitProverStats, }, - protocol_version::{ProtocolSemanticVersion, ProtocolVersionId}, - prover_dal::{FriProverJobMetadata, ProverJobFriInfo, ProverJobStatus, StuckJobs}, + protocol_version::{ProtocolSemanticVersion, ProtocolVersionId, VersionPatch}, + prover_dal::{ + FriProverJobMetadata, JobCountStatistics, ProverJobFriInfo, ProverJobStatus, StuckJobs, + }, L1BatchNumber, }; use zksync_db_connection::{ @@ -49,6 +51,78 @@ impl FriProverDal<'_, '_> { drop(latency); } + /// Retrieves the next prover job to be proven. Called by WVGs. + /// + /// Prover jobs must be thought of as ordered. 
+ /// The prover must prioritize jobs that move the chain forward the fastest. + /// Current ordering: + /// - pick the lowest batch + /// - within the lowest batch, look at the lowest aggregation level (move up the proof tree) + /// - pick the same type of circuit for as long as possible; this maximizes GPU cache reuse + /// + /// NOTE: Most of this function is a duplicate of `get_next_job()`, which will be deleted together with the old prover. + pub async fn get_job( + &mut self, + protocol_version: ProtocolSemanticVersion, + picked_by: &str, + ) -> Option<FriProverJobMetadata> { + sqlx::query!( + r#" + UPDATE prover_jobs_fri + SET + status = 'in_progress', + attempts = attempts + 1, + updated_at = NOW(), + processing_started_at = NOW(), + picked_by = $3 + WHERE + id = ( + SELECT + id + FROM + prover_jobs_fri + WHERE + status = 'queued' + AND protocol_version = $1 + AND protocol_version_patch = $2 + ORDER BY + l1_batch_number ASC, + aggregation_round ASC, + circuit_id ASC, + id ASC + LIMIT + 1 + FOR UPDATE + SKIP LOCKED + ) + RETURNING + prover_jobs_fri.id, + prover_jobs_fri.l1_batch_number, + prover_jobs_fri.circuit_id, + prover_jobs_fri.aggregation_round, + prover_jobs_fri.sequence_number, + prover_jobs_fri.depth, + prover_jobs_fri.is_node_final_proof + "#, + protocol_version.minor as i32, + protocol_version.patch.0 as i32, + picked_by, + ) + .fetch_optional(self.storage.conn()) + .await + .expect("failed to get prover job") + .map(|row| FriProverJobMetadata { + id: row.id as u32, + block_number: L1BatchNumber(row.l1_batch_number as u32), + circuit_id: row.circuit_id as u8, + aggregation_round: AggregationRound::try_from(i32::from(row.aggregation_round)) + .unwrap(), + sequence_number: row.sequence_number as usize, + depth: row.depth as u16, + is_node_final_proof: row.is_node_final_proof, + }) + } + pub async fn get_next_job( &mut self, protocol_version: ProtocolSemanticVersion, @@ -445,6 +519,53 @@ impl FriProverDal<'_, '_> { } } + pub async fn get_generic_prover_jobs_stats( + &mut self, + ) -> HashMap<ProtocolSemanticVersion, JobCountStatistics> { + { + sqlx::query!( + r#" + SELECT + protocol_version AS "protocol_version!", + protocol_version_patch AS "protocol_version_patch!", + COUNT(*) FILTER ( + WHERE + status = 'queued' + ) AS queued, + COUNT(*) FILTER ( + WHERE + status = 'in_progress' + ) AS in_progress + FROM + prover_jobs_fri + WHERE + status IN ('queued', 'in_progress') + AND protocol_version IS NOT NULL + GROUP BY + protocol_version, + protocol_version_patch + "# + ) + .fetch_all(self.storage.conn()) + .await + .unwrap() + .into_iter() + .map(|row| { + let protocol_semantic_version = ProtocolSemanticVersion::new( + ProtocolVersionId::try_from(row.protocol_version as u16).unwrap(), + VersionPatch(row.protocol_version_patch as u32), + ); + let key = protocol_semantic_version; + let value = JobCountStatistics { + queued: row.queued.unwrap() as usize, + in_progress: row.in_progress.unwrap() as usize, + }; + (key, value) + }) + .collect() + } + } + pub async fn min_unproved_l1_batch_number(&mut self) -> HashMap<(u8, u8), L1BatchNumber> { { sqlx::query!( diff --git a/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs b/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs index 9958527a98b..2040b444044 100644 --- a/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs +++ b/prover/crates/lib/prover_dal/src/fri_witness_generator_dal.rs @@ -76,7 +76,6 @@ impl FriWitnessGeneratorDal<'_, '_> { /// The blobs arrive from core via the prover gateway as pubdata; this method loads the blobs.
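As a rough illustration of the ordering policy documented on `get_job` above, the `ORDER BY l1_batch_number, aggregation_round, circuit_id, id` clause corresponds to a lexicographic sort over a key like the following sketch (the `JobKey` struct and the sample values are hypothetical, not part of this change):

```rust
// Hypothetical mirror of get_job's ORDER BY: lowest batch first, then the
// lowest aggregation round within it, then grouped by circuit for GPU cache
// reuse, with the job id as a stable tie-breaker.
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
struct JobKey {
    l1_batch_number: u32,
    aggregation_round: u8,
    circuit_id: u8,
    id: u64,
}

fn main() {
    let mut queued = vec![
        JobKey { l1_batch_number: 2, aggregation_round: 0, circuit_id: 1, id: 7 },
        JobKey { l1_batch_number: 1, aggregation_round: 1, circuit_id: 3, id: 5 },
        JobKey { l1_batch_number: 1, aggregation_round: 0, circuit_id: 3, id: 4 },
        JobKey { l1_batch_number: 1, aggregation_round: 0, circuit_id: 1, id: 9 },
    ];
    // The derived Ord compares fields in declaration order, matching the SQL.
    queued.sort();
    assert_eq!(queued[0].id, 9); // batch 1, round 0, circuit 1 is picked first
}
```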
pub async fn get_next_basic_circuit_witness_job( &mut self, - last_l1_batch_to_process: u32, protocol_version: ProtocolSemanticVersion, picked_by: &str, ) -> Option<L1BatchNumber> { @@ -88,7 +87,7 @@ impl FriWitnessGeneratorDal<'_, '_> { attempts = attempts + 1, updated_at = NOW(), processing_started_at = NOW(), - picked_by = $3 + picked_by = $2 WHERE l1_batch_number = ( SELECT @@ -96,10 +95,9 @@ impl FriWitnessGeneratorDal<'_, '_> { FROM witness_inputs_fri WHERE - l1_batch_number <= $1 - AND status = 'queued' - AND protocol_version = $2 - AND protocol_version_patch = $4 + status = 'queued' + AND protocol_version = $1 + AND protocol_version_patch = $3 ORDER BY l1_batch_number ASC LIMIT @@ -110,7 +108,6 @@ impl FriWitnessGeneratorDal<'_, '_> { RETURNING witness_inputs_fri.l1_batch_number "#, - i64::from(last_l1_batch_to_process), protocol_version.minor as i32, picked_by, protocol_version.patch.0 as i32, @@ -121,28 +118,6 @@ impl FriWitnessGeneratorDal<'_, '_> { .map(|row| L1BatchNumber(row.l1_batch_number as u32)) } - pub async fn get_basic_circuit_witness_job_attempts( - &mut self, - l1_batch_number: L1BatchNumber, - ) -> sqlx::Result<Option<u32>> { - let attempts = sqlx::query!( - r#" - SELECT - attempts - FROM - witness_inputs_fri - WHERE - l1_batch_number = $1 - "#, - i64::from(l1_batch_number.0) - ) - .fetch_optional(self.storage.conn()) - .await? - .map(|row| row.attempts as u32); - - Ok(attempts) - } - pub async fn mark_witness_job( &mut self, status: FriWitnessJobStatus, @@ -189,46 +164,6 @@ impl FriWitnessGeneratorDal<'_, '_> { .unwrap(); } - pub async fn mark_witness_job_failed(&mut self, error: &str, block_number: L1BatchNumber) { - sqlx::query!( - r#" - UPDATE witness_inputs_fri - SET - status = 'failed', - error = $1, - updated_at = NOW() - WHERE - l1_batch_number = $2 - AND status != 'successful' - "#, - error, - i64::from(block_number.0) - ) - .execute(self.storage.conn()) - .await - .unwrap(); - } - - pub async fn mark_leaf_aggregation_job_failed(&mut self, error: &str, id: u32) { - sqlx::query!( - r#" - UPDATE leaf_aggregation_witness_jobs_fri - SET - status = 'failed', - error = $1, - updated_at = NOW() - WHERE - id = $2 - AND status != 'successful' - "#, - error, - i64::from(id) - ) - .execute(self.storage.conn()) - .await - .unwrap(); - } - pub async fn mark_leaf_aggregation_as_successful(&mut self, id: u32, time_taken: Duration) { sqlx::query!( r#" @@ -481,29 +416,6 @@ impl FriWitnessGeneratorDal<'_, '_> { }) } - pub async fn get_leaf_aggregation_job_attempts( - &mut self, - id: u32, - ) -> sqlx::Result<Option<u32>> { - let attempts = sqlx::query!( - r#" - SELECT - attempts - FROM - leaf_aggregation_witness_jobs_fri - WHERE - id = $1 - "#, - i64::from(id) - ) - .fetch_optional(self.storage.conn()) - .await - .unwrap() - .map(|row| row.attempts as u32); - - Ok(attempts) - } - async fn prover_job_ids_for( &mut self, block_number: L1BatchNumber, @@ -674,49 +586,6 @@ impl FriWitnessGeneratorDal<'_, '_> { }) } - pub async fn get_node_aggregation_job_attempts( - &mut self, - id: u32, - ) -> sqlx::Result<Option<u32>> { - let attempts = sqlx::query!( - r#" - SELECT - attempts - FROM - node_aggregation_witness_jobs_fri - WHERE - id = $1 - "#, - i64::from(id) - ) - .fetch_optional(self.storage.conn()) - .await - .unwrap() - .map(|row| row.attempts as u32); - - Ok(attempts) - } - - pub async fn mark_node_aggregation_job_failed(&mut self, error: &str, id: u32) { - sqlx::query!( - r#" - UPDATE node_aggregation_witness_jobs_fri - SET - status = 'failed', - error = $1, - updated_at = NOW() - WHERE - id = $2 - AND status != 
'successful' - "#, - error, - i64::from(id) - ) - .execute(self.storage.conn()) - .await - .unwrap(); - } - pub async fn mark_node_aggregation_as_successful(&mut self, id: u32, time_taken: Duration) { sqlx::query!( r#" @@ -1241,46 +1110,42 @@ impl FriWitnessGeneratorDal<'_, '_> { .map(|row| L1BatchNumber(row.l1_batch_number as u32)) } - pub async fn get_recursion_tip_witness_job_attempts( + pub async fn get_witness_job_attempts( &mut self, - l1_batch_number: L1BatchNumber, + job_id: u32, + aggregation_round: AggregationRound, ) -> sqlx::Result<Option<u32>> { - let attempts = sqlx::query!( - r#" - SELECT - attempts - FROM - recursion_tip_witness_jobs_fri - WHERE - l1_batch_number = $1 - "#, - l1_batch_number.0 as i64 - ) - .fetch_optional(self.storage.conn()) - .await? - .map(|row| row.attempts as u32); + let table = match aggregation_round { + AggregationRound::BasicCircuits => "witness_inputs_fri", + AggregationRound::LeafAggregation => "leaf_aggregation_witness_jobs_fri", + AggregationRound::NodeAggregation => "node_aggregation_witness_jobs_fri", + AggregationRound::RecursionTip => "recursion_tip_witness_jobs_fri", + AggregationRound::Scheduler => "scheduler_witness_jobs_fri", + }; - Ok(attempts) - } + let job_id_column = match aggregation_round { + AggregationRound::BasicCircuits => "l1_batch_number", + AggregationRound::LeafAggregation => "id", + AggregationRound::NodeAggregation => "id", + AggregationRound::RecursionTip => "l1_batch_number", + AggregationRound::Scheduler => "l1_batch_number", + }; - pub async fn get_scheduler_witness_job_attempts( - &mut self, - l1_batch_number: L1BatchNumber, - ) -> sqlx::Result<Option<u32>> { - let attempts = sqlx::query!( + let query = format!( r#" SELECT attempts FROM - scheduler_witness_jobs_fri + {table} WHERE - l1_batch_number = $1 + {job_id_column} = {job_id} "#, - i64::from(l1_batch_number.0) - ) - .fetch_optional(self.storage.conn()) - .await? - .map(|row| row.attempts as u32); + ); + + let attempts = sqlx::query(&query) + .fetch_optional(self.storage.conn()) + .await?
+ .map(|row| row.get::<i16, _>("attempts") as u32); Ok(attempts) } @@ -1331,54 +1196,51 @@ impl FriWitnessGeneratorDal<'_, '_> { .unwrap(); } - pub async fn mark_recursion_tip_job_failed( + pub async fn mark_witness_job_failed( &mut self, error: &str, - l1_batch_number: L1BatchNumber, + job_id: u32, + aggregation_round: AggregationRound, ) { - sqlx::query!( - r#" - UPDATE recursion_tip_witness_jobs_fri - SET - status = 'failed', - error = $1, - updated_at = NOW() - WHERE - l1_batch_number = $2 - AND status != 'successful' - "#, - error, - l1_batch_number.0 as i64 - ) - .execute(self.storage.conn()) - .await - .unwrap(); - } + let table = match aggregation_round { + AggregationRound::BasicCircuits => "witness_inputs_fri", + AggregationRound::LeafAggregation => "leaf_aggregation_witness_jobs_fri", + AggregationRound::NodeAggregation => "node_aggregation_witness_jobs_fri", + AggregationRound::RecursionTip => "recursion_tip_witness_jobs_fri", + AggregationRound::Scheduler => "scheduler_witness_jobs_fri", + }; - pub async fn mark_scheduler_job_failed(&mut self, error: &str, block_number: L1BatchNumber) { - sqlx::query!( + let job_id_column = match aggregation_round { + AggregationRound::BasicCircuits => "l1_batch_number", + AggregationRound::LeafAggregation => "id", + AggregationRound::NodeAggregation => "id", + AggregationRound::RecursionTip => "l1_batch_number", + AggregationRound::Scheduler => "l1_batch_number", + }; + + let query = format!( r#" - UPDATE scheduler_witness_jobs_fri + UPDATE {table} SET status = 'failed', - error = $1, + error = $1, updated_at = NOW() WHERE - l1_batch_number = $2 - AND status != 'successful' + {job_id_column} = {job_id} + AND status != 'successful' "#, - error, - i64::from(block_number.0) - ) - .execute(self.storage.conn()) - .await - .unwrap(); + ); + + sqlx::query(&query) + .bind(error) + .execute(self.storage.conn()) + .await + .unwrap(); } pub async fn get_witness_jobs_stats( &mut self, aggregation_round: AggregationRound, - ) -> HashMap<(AggregationRound, ProtocolSemanticVersion), JobCountStatistics> { + ) -> HashMap<ProtocolSemanticVersion, JobCountStatistics> { let table_name = Self::input_table_name_for(aggregation_round); let sql = format!( r#" @@ -1407,7 +1269,7 @@ impl FriWitnessGeneratorDal<'_, '_> { .unwrap(), VersionPatch(row.get::<i32, _>("protocol_version_patch") as u32), ); - let key = (aggregation_round, protocol_semantic_version); + let key = protocol_semantic_version; let value = JobCountStatistics { queued: row.get::<i64, _>("queued") as usize, in_progress: row.get::<i64, _>("in_progress") as usize, @@ -1709,7 +1571,7 @@ impl FriWitnessGeneratorDal<'_, '_> { block_number: L1BatchNumber, max_attempts: u32, ) -> Vec<StuckJobs> { - let query = format!( + sqlx::query!( r#" UPDATE witness_inputs_fri SET @@ -1717,9 +1579,12 @@ impl FriWitnessGeneratorDal<'_, '_> { updated_at = NOW(), processing_started_at = NOW() WHERE - l1_batch_number = {} - AND attempts >= {} - AND (status = 'in_progress' OR status = 'failed') + l1_batch_number = $1 + AND attempts >= $2 + AND ( + status = 'in_progress' + OR status = 'failed' + ) RETURNING l1_batch_number, status, @@ -1728,22 +1593,21 @@ impl FriWitnessGeneratorDal<'_, '_> { picked_by "#, i64::from(block_number.0), - max_attempts - ); - sqlx::query(&query) - .fetch_all(self.storage.conn()) - .await - .unwrap() - .into_iter() - .map(|row| StuckJobs { - id: row.get::<i64, _>("l1_batch_number") as u64, - status: row.get("status"), - attempts: row.get::<i16, _>("attempts") as u64, - circuit_id: None, - error: row.get("error"), - picked_by: row.get("picked_by"), - }) - .collect() + max_attempts as i64 + ) + 
.fetch_all(self.storage.conn()) + .await + .unwrap() + .into_iter() + .map(|row| StuckJobs { + id: row.l1_batch_number as u64, + status: row.status, + attempts: row.attempts as u64, + circuit_id: None, + error: row.error, + picked_by: row.picked_by, + }) + .collect() } pub async fn requeue_stuck_leaf_aggregation_jobs_for_batch( @@ -1777,7 +1641,7 @@ impl FriWitnessGeneratorDal<'_, '_> { block_number: L1BatchNumber, max_attempts: u32, ) -> Vec<StuckJobs> { - let query = format!( + sqlx::query!( r#" UPDATE recursion_tip_witness_jobs_fri SET @@ -1785,9 +1649,12 @@ impl FriWitnessGeneratorDal<'_, '_> { updated_at = NOW(), processing_started_at = NOW() WHERE - l1_batch_number = {} - AND attempts >= {} - AND (status = 'in_progress' OR status = 'failed') + l1_batch_number = $1 + AND attempts >= $2 + AND ( + status = 'in_progress' + OR status = 'failed' + ) RETURNING l1_batch_number, status, @@ -1796,22 +1663,21 @@ impl FriWitnessGeneratorDal<'_, '_> { picked_by "#, i64::from(block_number.0), - max_attempts - ); - sqlx::query(&query) - .fetch_all(self.storage.conn()) - .await - .unwrap() - .into_iter() - .map(|row| StuckJobs { - id: row.get::<i64, _>("l1_batch_number") as u64, - status: row.get("status"), - attempts: row.get::<i16, _>("attempts") as u64, - circuit_id: None, - error: row.get("error"), - picked_by: row.get("picked_by"), - }) - .collect() + max_attempts as i64 + ) + .fetch_all(self.storage.conn()) + .await + .unwrap() + .into_iter() + .map(|row| StuckJobs { + id: row.l1_batch_number as u64, + status: row.status, + attempts: row.attempts as u64, + circuit_id: None, + error: row.error, + picked_by: row.picked_by, + }) + .collect() } pub async fn requeue_stuck_scheduler_jobs_for_batch( @@ -1819,7 +1685,7 @@ impl FriWitnessGeneratorDal<'_, '_> { block_number: L1BatchNumber, max_attempts: u32, ) -> Vec<StuckJobs> { - let query = format!( + sqlx::query!( r#" UPDATE scheduler_witness_jobs_fri SET @@ -1827,9 +1693,12 @@ impl FriWitnessGeneratorDal<'_, '_> { updated_at = NOW(), processing_started_at = NOW() WHERE - l1_batch_number = {} - AND attempts >= {} - AND (status = 'in_progress' OR status = 'failed') + l1_batch_number = $1 + AND attempts >= $2 + AND ( + status = 'in_progress' + OR status = 'failed' + ) RETURNING l1_batch_number, status, @@ -1838,22 +1707,21 @@ impl FriWitnessGeneratorDal<'_, '_> { picked_by "#, i64::from(block_number.0), - max_attempts - ); - sqlx::query(&query) - .fetch_all(self.storage.conn()) - .await - .unwrap() - .into_iter() - .map(|row| StuckJobs { - id: row.get::<i64, _>("l1_batch_number") as u64, - status: row.get("status"), - attempts: row.get::<i16, _>("attempts") as u64, - circuit_id: None, - error: row.get("error"), - picked_by: row.get("picked_by"), - }) - .collect() + max_attempts as i64 + ) + .fetch_all(self.storage.conn()) + .await + .unwrap() + .into_iter() + .map(|row| StuckJobs { + id: row.l1_batch_number as u64, + status: row.status, + attempts: row.attempts as u64, + circuit_id: None, + error: row.error, + picked_by: row.picked_by, + }) + .collect() } async fn requeue_stuck_jobs_for_batch_in_aggregation_round( diff --git a/prover/crates/lib/prover_fri_types/src/keys.rs b/prover/crates/lib/prover_fri_types/src/keys.rs index 2948fc5f84e..26aa679b4a9 100644 --- a/prover/crates/lib/prover_fri_types/src/keys.rs +++ b/prover/crates/lib/prover_fri_types/src/keys.rs @@ -1,6 +1,8 @@ //! Different key types for object store.
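The generic witness-job helpers introduced above (`get_witness_job_attempts` and `mark_witness_job_failed`) share one round-to-table dispatch. The mapping they encode can be summarized by this standalone sketch; the `witness_job_table` helper is hypothetical, shown only to make the dispatch explicit, and assumes `AggregationRound` from `zksync_types::basic_fri_types`:

```rust
use zksync_types::basic_fri_types::AggregationRound;

// Hypothetical helper making the AggregationRound -> (table, id column)
// dispatch explicit. Batch-scoped rounds are keyed by l1_batch_number,
// while per-circuit rounds (leaf/node aggregation) are keyed by id.
fn witness_job_table(round: AggregationRound) -> (&'static str, &'static str) {
    match round {
        AggregationRound::BasicCircuits => ("witness_inputs_fri", "l1_batch_number"),
        AggregationRound::LeafAggregation => ("leaf_aggregation_witness_jobs_fri", "id"),
        AggregationRound::NodeAggregation => ("node_aggregation_witness_jobs_fri", "id"),
        AggregationRound::RecursionTip => ("recursion_tip_witness_jobs_fri", "l1_batch_number"),
        AggregationRound::Scheduler => ("scheduler_witness_jobs_fri", "l1_batch_number"),
    }
}

fn main() {
    assert_eq!(
        witness_job_table(AggregationRound::Scheduler).0,
        "scheduler_witness_jobs_fri"
    );
}
```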
-use zksync_types::{basic_fri_types::AggregationRound, L1BatchNumber}; +use zksync_types::{ + basic_fri_types::AggregationRound, prover_dal::FriProverJobMetadata, L1BatchNumber, +}; /// Storage key for an [`AggregationWrapper`]. #[derive(Debug, Clone, Copy)] @@ -27,6 +29,18 @@ pub struct FriCircuitKey { pub depth: u16, } +impl From<FriProverJobMetadata> for FriCircuitKey { + fn from(prover_job_metadata: FriProverJobMetadata) -> Self { + FriCircuitKey { + block_number: prover_job_metadata.block_number, + sequence_number: prover_job_metadata.sequence_number, + circuit_id: prover_job_metadata.circuit_id, + aggregation_round: prover_job_metadata.aggregation_round, + depth: prover_job_metadata.depth, + } + } +} + /// Storage key for a [`ZkSyncCircuit`]. #[derive(Debug, Clone, Copy)] pub struct CircuitKey<'a> { diff --git a/prover/crates/lib/prover_fri_types/src/lib.rs b/prover/crates/lib/prover_fri_types/src/lib.rs index c14bc190563..4a8a1b3e406 100644 --- a/prover/crates/lib/prover_fri_types/src/lib.rs +++ b/prover/crates/lib/prover_fri_types/src/lib.rs @@ -1,4 +1,4 @@ -use std::env; +use std::{env, time::Instant}; pub use circuit_definitions; use circuit_definitions::{ @@ -66,7 +66,7 @@ impl StoredObject for CircuitWrapper { serialize_using_bincode!(); } -#[derive(Clone, serde::Serialize, serde::Deserialize)] +#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] pub enum FriProofWrapper { Base(ZkSyncBaseLayerProof), Recursive(ZkSyncRecursionLayerProof), @@ -98,6 +98,45 @@ impl WitnessVectorArtifacts { } } +/// This structure exists for the transition period between the old prover and the new prover. +/// We want the two codebases to coexist without impacting each other. +/// Once the old prover is deleted, this struct will be renamed to `WitnessVectorArtifacts`. +pub struct WitnessVectorArtifactsTemp { + pub witness_vector: WitnessVec<GoldilocksField>, + pub prover_job: ProverJob, + pub time: Instant, +} + +impl WitnessVectorArtifactsTemp { + pub fn new( + witness_vector: WitnessVec<GoldilocksField>, + prover_job: ProverJob, + time: Instant, + ) -> Self { + Self { + witness_vector, + prover_job, + time, + } + } +} + +/// Data structure containing the proof generated by the circuit prover. +#[derive(Debug)] +pub struct ProverArtifacts { + pub block_number: L1BatchNumber, + pub proof_wrapper: FriProofWrapper, +} + +impl ProverArtifacts { + pub fn new(block_number: L1BatchNumber, proof_wrapper: FriProofWrapper) -> Self { + Self { + block_number, + proof_wrapper, + } + } +} + #[derive(Clone, serde::Serialize, serde::Deserialize)] pub struct ProverJob { pub block_number: L1BatchNumber, @@ -122,12 +161,30 @@ impl ProverJob { } } -#[derive(Debug, Clone, Eq, PartialEq, Hash, serde::Serialize, serde::Deserialize)] +#[derive(Debug, Clone, Copy, Eq, PartialEq, Hash, serde::Serialize, serde::Deserialize)] pub struct ProverServiceDataKey { pub circuit_id: u8, pub round: AggregationRound, } +impl ProverServiceDataKey { + /// Returns the crypto version of the setup key. + /// + /// The setup key is overloaded in our system. On one hand, it is used as an identifier for figuring out which type of proofs are ready. + /// On the other hand, it is also a setup key from the prover's perspective. + /// The two overlap on all aggregation rounds except NodeAggregation. + /// There is only one node key, and it belongs to circuit 2.
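The normalization described in the doc comment above (implemented by `crypto_setup_key` just below) collapses every NodeAggregation key onto circuit 2 and leaves keys for all other rounds untouched. A minimal usage sketch, assuming the crate paths `zksync_prover_fri_types` and `zksync_types::basic_fri_types`:

```rust
use zksync_prover_fri_types::ProverServiceDataKey;
use zksync_types::basic_fri_types::AggregationRound;

fn main() {
    // Any node-aggregation circuit maps to the single node setup key (circuit 2).
    let node = ProverServiceDataKey { circuit_id: 7, round: AggregationRound::NodeAggregation };
    assert_eq!(node.crypto_setup_key().circuit_id, 2);

    // Keys for every other round pass through unchanged.
    let leaf = ProverServiceDataKey { circuit_id: 7, round: AggregationRound::LeafAggregation };
    assert_eq!(leaf.crypto_setup_key().circuit_id, 7);
}
```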
+ pub fn crypto_setup_key(self) -> Self { + if let AggregationRound::NodeAggregation = self.round { + return Self { + circuit_id: 2, + round: self.round, + }; + } + self + } +} + fn get_round_for_recursive_circuit_type(circuit_type: u8) -> AggregationRound { match circuit_type { circuit_type if circuit_type == ZkSyncRecursionLayerStorageType::SchedulerCircuit as u8 => { @@ -186,6 +243,12 @@ impl ProverServiceDataKey { } } + pub fn all() -> Vec<ProverServiceDataKey> { + let mut keys = Self::all_boojum(); + keys.push(Self::snark()); + keys + } + pub fn is_base_layer(&self) -> bool { self.round == AggregationRound::BasicCircuits } diff --git a/yarn.lock b/yarn.lock index b70e64f148a..531f49abc00 100644 --- a/yarn.lock +++ b/yarn.lock @@ -6903,7 +6903,7 @@ jest-each@^29.7.0: jest-util "^29.7.0" pretty-format "^29.7.0" -jest-environment-node@^29.7.0: +jest-environment-node@^29.0.3, jest-environment-node@^29.7.0: version "29.7.0" resolved "https://registry.yarnpkg.com/jest-environment-node/-/jest-environment-node-29.7.0.tgz#0b93e111dda8ec120bc8300e6d1fb9576e164376" integrity sha512-DOSwCRqXirTOyheM+4d5YZOrWcdu0LNZ87ewUoywbcb2XR4wKgqiG8vNeYwhjFMbEkfju7wx2GYH0P2gevGvFw== @@ -9816,7 +9816,7 @@ string-length@^4.0.1: char-regex "^1.0.2" strip-ansi "^6.0.0" -"string-width-cjs@npm:string-width@^4.2.0": +"string-width-cjs@npm:string-width@^4.2.0", string-width@^4.0.0, string-width@^4.1.0, string-width@^4.2.0, string-width@^4.2.2, string-width@^4.2.3: version "4.2.3" resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.3.tgz#269c7117d27b05ad2e536830a8ec895ef9c6d010" integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g== @@ -9833,15 +9833,6 @@ string-width@^2.1.0, string-width@^2.1.1: is-fullwidth-code-point "^2.0.0" strip-ansi "^4.0.0" -string-width@^4.0.0, string-width@^4.1.0, string-width@^4.2.0, string-width@^4.2.2, string-width@^4.2.3: - version "4.2.3" - resolved "https://registry.yarnpkg.com/string-width/-/string-width-4.2.3.tgz#269c7117d27b05ad2e536830a8ec895ef9c6d010" - integrity sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g== - dependencies: - emoji-regex "^8.0.0" - is-fullwidth-code-point "^3.0.0" - strip-ansi "^6.0.1" - string-width@^5.0.1, string-width@^5.1.2: version "5.1.2" resolved "https://registry.yarnpkg.com/string-width/-/string-width-5.1.2.tgz#14f8daec6d81e7221d2a357e668cab73bdbca794" @@ -9908,7 +9899,7 @@ string_decoder@~1.1.1: dependencies: safe-buffer "~5.1.0" -"strip-ansi-cjs@npm:strip-ansi@^6.0.1": +"strip-ansi-cjs@npm:strip-ansi@^6.0.1", strip-ansi@^6.0.0, strip-ansi@^6.0.1: version "6.0.1" resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.1.tgz#9e26c63d30f53443e9489495b2105d37b67a85d9" integrity sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A== @@ -9929,13 +9920,6 @@ strip-ansi@^5.1.0: dependencies: ansi-regex "^4.1.0" -strip-ansi@^6.0.0, strip-ansi@^6.0.1: - version "6.0.1" - resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-6.0.1.tgz#9e26c63d30f53443e9489495b2105d37b67a85d9" - integrity sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A== - dependencies: - ansi-regex "^5.0.1" - strip-ansi@^7.0.1: version "7.1.0" resolved "https://registry.yarnpkg.com/strip-ansi/-/strip-ansi-7.1.0.tgz#d5b6568ca689d8561370b0707685d22434faff45" @@ -10781,16 +10765,7 @@ workerpool@6.2.1: resolved 
"https://registry.yarnpkg.com/workerpool/-/workerpool-6.2.1.tgz#46fc150c17d826b86a008e5a4508656777e9c343" integrity sha512-ILEIE97kDZvF9Wb9f6h5aXK4swSlKGUcOEGiIYb2OOu/IrDU9iwj0fD//SsA6E5ibwJxpEvhullJY4Sl4GcpAw== -"wrap-ansi-cjs@npm:wrap-ansi@^7.0.0": - version "7.0.0" - resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-7.0.0.tgz#67e145cff510a6a6984bdf1152911d69d2eb9e43" - integrity sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q== - dependencies: - ansi-styles "^4.0.0" - string-width "^4.1.0" - strip-ansi "^6.0.0" - -wrap-ansi@^7.0.0: +"wrap-ansi-cjs@npm:wrap-ansi@^7.0.0", wrap-ansi@^7.0.0: version "7.0.0" resolved "https://registry.yarnpkg.com/wrap-ansi/-/wrap-ansi-7.0.0.tgz#67e145cff510a6a6984bdf1152911d69d2eb9e43" integrity sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q== diff --git a/zk_toolbox/Cargo.lock b/zk_toolbox/Cargo.lock index fd524865d56..297ef404698 100644 --- a/zk_toolbox/Cargo.lock +++ b/zk_toolbox/Cargo.lock @@ -14,18 +14,18 @@ dependencies = [ [[package]] name = "addr2line" -version = "0.22.0" +version = "0.24.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e4503c46a5c0c7844e948c9a4d6acd9f50cccb4de1c48eb9e291ea17470c678" +checksum = "f5fb1d8e4442bd405fdfd1dacb42792696b0cf9cb15882e5d097b742a676d375" dependencies = [ "gimli", ] [[package]] -name = "adler" -version = "1.0.2" +name = "adler2" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" +checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" [[package]] name = "aes" @@ -45,7 +45,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if", + "const-random", + "getrandom", "once_cell", + "serde", "version_check", "zerocopy", ] @@ -82,9 +85,9 @@ dependencies = [ [[package]] name = "anstream" -version = "0.6.14" +version = "0.6.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "418c75fa768af9c03be99d17643f93f79bbba589895012a80e3452a19ddda15b" +checksum = "64e15c1ab1f89faffbf04a634d5e1962e9074f2741eef6d97f3c4e322426d526" dependencies = [ "anstyle", "anstyle-parse", @@ -97,33 +100,33 @@ dependencies = [ [[package]] name = "anstyle" -version = "1.0.7" +version = "1.0.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "038dfcf04a5feb68e9c60b21c9625a54c2c0616e79b72b0fd87075a056ae1d1b" +checksum = "1bec1de6f59aedf83baf9ff929c98f2ad654b97c9510f4e70cf6f661d49fd5b1" [[package]] name = "anstyle-parse" -version = "0.2.4" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c03a11a9034d92058ceb6ee011ce58af4a9bf61491aa7e1e59ecd24bd40d22d4" +checksum = "eb47de1e80c2b463c735db5b217a0ddc39d612e7ac9e2e96a5aed1f57616c1cb" dependencies = [ "utf8parse", ] [[package]] name = "anstyle-query" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad186efb764318d35165f1758e7dcef3b10628e26d41a44bc5550652e6804391" +checksum = "6d36fc52c7f6c869915e99412912f22093507da8d9e942ceaf66fe4b7c14422a" dependencies = [ "windows-sys 0.52.0", ] [[package]] name = "anstyle-wincon" -version = "3.0.3" +version = "3.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"61a38449feb7068f52bb06c12759005cf459ee52bb4adc1d5a7c4322d716fb19" +checksum = "5bf74e1b6e971609db8ca7a9ce79fd5768ab6ae46441c572e46cf596f59e57f8" dependencies = [ "anstyle", "windows-sys 0.52.0", @@ -131,15 +134,21 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.86" +version = "1.0.89" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "86fdf8605db99b54d3cd748a44c6d04df638eb5dafb219b135d0149bd0db01f6" + +[[package]] +name = "append-only-vec" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da" +checksum = "2bb8633986ce6db557d8a568b0b874d840db9946e589a3de4cf84fb02130745f" [[package]] name = "arrayvec" -version = "0.7.4" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" +checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" [[package]] name = "ascii-canvas" @@ -169,18 +178,18 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.79", ] [[package]] name = "async-trait" -version = "0.1.80" +version = "0.1.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca" +checksum = "721cae7de5c34fbb2acd27e21e6d2cf7b886dce0c27388d46c4e6c47ea4318dd" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.79", ] [[package]] @@ -217,27 +226,27 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.79", ] [[package]] name = "autocfg" -version = "1.3.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0" +checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" [[package]] name = "axum" -version = "0.7.5" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3a6c9af12842a67734c9a2e355436e5d03b22383ed60cf13cd0c18fbfe3dcbcf" +checksum = "504e3947307ac8326a5437504c517c4b56716c9d98fac0028c2acc7ca47d70ae" dependencies = [ "async-trait", "axum-core", "bytes", "futures-util", "http 1.1.0", - "http-body 1.0.0", + "http-body 1.0.1", "http-body-util", "itoa", "matchit", @@ -248,44 +257,44 @@ dependencies = [ "rustversion", "serde", "sync_wrapper 1.0.1", - "tower", + "tower 0.5.1", "tower-layer", "tower-service", ] [[package]] name = "axum-core" -version = "0.4.3" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a15c63fd72d41492dc4f497196f5da1fb04fb7529e631d73630d1b491e47a2e3" +checksum = "09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199" dependencies = [ "async-trait", "bytes", "futures-util", "http 1.1.0", - "http-body 1.0.0", + "http-body 1.0.1", "http-body-util", "mime", "pin-project-lite", "rustversion", - "sync_wrapper 0.1.2", + "sync_wrapper 1.0.1", "tower-layer", "tower-service", ] [[package]] name = "backtrace" -version = "0.3.73" +version = "0.3.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5cc23269a4f8976d0a4d2e7109211a419fe30e8d88d677cd60b6bc79c5732e0a" +checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" dependencies = [ "addr2line", - 
"cc", "cfg-if", "libc", "miniz_oxide", "object", "rustc-demangle", + "windows-targets 0.52.6", ] [[package]] @@ -421,6 +430,15 @@ dependencies = [ "zeroize", ] +[[package]] +name = "borsh" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6362ed55def622cddc70a4746a68554d7b687713770de539e59a739b249f8ed" +dependencies = [ + "cfg_aliases", +] + [[package]] name = "bs58" version = "0.5.1" @@ -451,9 +469,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.6.0" +version = "1.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" +checksum = "428d9aa8fbc0670b7b8d6030a7fadd0f86151cae55e4dbbece15f3780a3dfaf3" dependencies = [ "serde", ] @@ -481,9 +499,9 @@ dependencies = [ [[package]] name = "camino" -version = "1.1.7" +version = "1.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0ec6b951b160caa93cc0c7b209e5a3bff7aae9062213451ac99493cd844c239" +checksum = "8b96ec4966b5813e2c0507c1f86115c8c5abaadc3980879c3424042a02fd1ad3" dependencies = [ "serde", ] @@ -513,9 +531,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.1.14" +version = "1.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50d2eb3cd3d1bf4529e31c215ee6f93ec5a3d536d9f578f93d9d33ee19562932" +checksum = "3bbb537bb4a30b90362caddba8f360c0a56bc13d3a5570028e7197204cb54a17" dependencies = [ "jobserver", "libc", @@ -528,6 +546,12 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +[[package]] +name = "cfg_aliases" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + [[package]] name = "chrono" version = "0.4.38" @@ -540,7 +564,7 @@ dependencies = [ "num-traits", "serde", "wasm-bindgen", - "windows-targets 0.52.5", + "windows-targets 0.52.6", ] [[package]] @@ -555,9 +579,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.8" +version = "4.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84b3edb18336f4df585bc9aa31dd99c036dfa5dc5e9a2939a722a188f3a8970d" +checksum = "b0956a43b323ac1afaffc053ed5c4b7c1f1800bacd1683c353aabbb752515dd3" dependencies = [ "clap_builder", "clap_derive", @@ -574,9 +598,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.8" +version = "4.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1c09dd5ada6c6c78075d6fd0da3f90d8080651e2d6cc8eb2f1aaa4034ced708" +checksum = "4d72166dd41634086d5803a47eb71ae740e61d84709c36f3c34110173db3961b" dependencies = [ "anstream", "anstyle", @@ -587,21 +611,21 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.8" +version = "4.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bac35c6dafb060fd4d275d9a4ffae97917c13a6327903a8be2153cd964f7085" +checksum = "4ac6a0c7b1a9e9a5186361f67dfa1b88213572f427fb9ab038efb2bd8c582dab" dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.79", ] [[package]] name = "clap_lex" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b82cf0babdbd58558212896d1a4272303a57bdb245c2bf1147185fb45640e70" +checksum = 
"1462739cb27611015575c0c11df5df7601141071f07518d56fcc1be504cbec97" [[package]] name = "cliclack" @@ -626,7 +650,7 @@ dependencies = [ "coins-core", "digest", "hmac", - "k256 0.13.3", + "k256 0.13.4", "serde", "sha2", "thiserror", @@ -643,7 +667,7 @@ dependencies = [ "hmac", "once_cell", "pbkdf2 0.12.2", - "rand 0.8.5", + "rand", "sha2", "thiserror", ] @@ -670,9 +694,9 @@ dependencies = [ [[package]] name = "colorchoice" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b6a852b24ab71dffc585bcb46eaf7959d175cb865a7152e35b348d1b2960422" +checksum = "d3fd119d74b830634cea2a0f58bbd0d54540518a14397557951e79340abc28c0" [[package]] name = "common" @@ -699,6 +723,12 @@ dependencies = [ "xshell", ] +[[package]] +name = "common-path" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2382f75942f4b3be3690fe4f86365e9c853c1587d6ee58212cebf6e2a9ccd101" + [[package]] name = "compile-fmt" version = "0.1.0" @@ -722,7 +752,7 @@ dependencies = [ "clap", "common", "ethers", - "rand 0.8.5", + "rand", "serde", "serde_json", "serde_yaml", @@ -737,6 +767,12 @@ dependencies = [ "zksync_protobuf_config", ] +[[package]] +name = "configparser" +version = "3.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e57e3272f0190c3f1584272d613719ba5fc7df7f4942fe542e63d949cf3a649b" + [[package]] name = "console" version = "0.15.8" @@ -752,9 +788,9 @@ dependencies = [ [[package]] name = "const-hex" -version = "1.12.0" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94fb8a24a26d37e1ffd45343323dc9fe6654ceea44c12f2fcb3d7ac29e610bc6" +checksum = "0121754e84117e65f9d90648ee6aa4882a6e63110307ab73967a4c5e7e69e586" dependencies = [ "cfg-if", "cpufeatures", @@ -769,6 +805,26 @@ version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" +[[package]] +name = "const-random" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87e00182fe74b066627d63b85fd550ac2998d4b0bd86bfed477a0ae4c7c71359" +dependencies = [ + "const-random-macro", +] + +[[package]] +name = "const-random-macro" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9d839f2a20b0aee515dc581a6172f2321f96cab76c1a38a4c584a194955390e" +dependencies = [ + "getrandom", + "once_cell", + "tiny-keccak", +] + [[package]] name = "constant_time_eq" version = "0.1.5" @@ -787,15 +843,15 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.6" +version = "0.8.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "cpufeatures" -version = "0.2.12" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504" +checksum = "608697df725056feaccfa42cffdaeeec3fccc4ffc38358ecd19b243e716a78e0" dependencies = [ "libc", ] @@ -871,7 +927,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ef2b4b23cddf68b89b8f8069890e8c270d54e2d5fe1b143820234805e4cb17ef" dependencies = [ "generic-array", - "rand_core 0.6.4", + "rand_core", "subtle", "zeroize", ] @@ -883,7 +939,7 @@ source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" dependencies = [ "generic-array", - "rand_core 0.6.4", + "rand_core", "subtle", "zeroize", ] @@ -905,7 +961,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "edb49164822f3ee45b17acd4a208cfc1251410cf0cad9a833234c9890774dd9f" dependencies = [ "quote", - "syn 2.0.68", + "syn 2.0.79", ] [[package]] @@ -941,7 +997,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.79", ] [[package]] @@ -992,7 +1048,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bef552e6f588e446098f6ba40d89ac146c8c7b64aade83c051ee00bb5d2bc18d" dependencies = [ "serde", - "uuid 1.9.1", + "uuid 1.10.0", ] [[package]] @@ -1033,27 +1089,27 @@ checksum = "5f33878137e4dafd7fa914ad4e259e18a4e8e532b9617a2d0150262bf53abfce" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.79", ] [[package]] name = "derive_more" -version = "1.0.0-beta.6" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7abbfc297053be59290e3152f8cbcd52c8642e0728b69ee187d991d4c1af08d" +checksum = "4a9b99b9cbbe49445b21764dc0625032a89b145a2642e67603e1c936f5458d05" dependencies = [ "derive_more-impl", ] [[package]] name = "derive_more-impl" -version = "1.0.0-beta.6" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bba3e9872d7c58ce7ef0fcf1844fcc3e23ef2a58377b50df35dd98e42a5726e" +checksum = "cb7330aeadfbe296029522e6c40f315320aba36fc43a5b3632f3795348f3bd22" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.79", "unicode-xid", ] @@ -1063,6 +1119,12 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "339544cc9e2c4dc3fc7149fd630c5f22263a4fdf18a98afd0075784968b5cf00" +[[package]] +name = "diff" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "56254986775e3233ffa9c4d7d3faaf6d36a2c09d30b20687e9f88bc8bafc16c8" + [[package]] name = "digest" version = "0.10.7" @@ -1131,9 +1193,15 @@ checksum = "dcbb2bf8e87535c23f7a8a321e364ce21462d0ff10cb6407820e8e96dfff6653" [[package]] name = "dunce" -version = "1.0.4" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56ce8c6da7551ec6c462cbaf3bfbc75131ebbfa1c944aeaa9dab51ca1c5f0c3b" +checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" + +[[package]] +name = "dyn-clone" +version = "1.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d6ef0072f8a535281e4876be788938b528e9a1d43900b82c2569af7da799125" [[package]] name = "ecdsa" @@ -1179,7 +1247,7 @@ checksum = "4a3daa8e81a3963a60642bcc1f90a670680bd4a77535faa384e9d1c79d620871" dependencies = [ "curve25519-dalek", "ed25519", - "rand_core 0.6.4", + "rand_core", "serde", "sha2", "subtle", @@ -1209,7 +1277,7 @@ dependencies = [ "generic-array", "group 0.12.1", "pkcs8 0.9.0", - "rand_core 0.6.4", + "rand_core", "sec1 0.3.0", "subtle", "zeroize", @@ -1228,7 +1296,7 @@ dependencies = [ "generic-array", "group 0.13.0", "pkcs8 0.10.2", - "rand_core 0.6.4", + "rand_core", "sec1 0.7.3", "subtle", "zeroize", @@ -1276,15 +1344,27 @@ dependencies = [ "base64 0.21.7", "bytes", "hex", - "k256 0.13.3", + "k256 0.13.4", "log", - "rand 0.8.5", + "rand", "rlp", "serde", "sha3", "zeroize", ] 
+[[package]] +name = "enum_dispatch" +version = "0.3.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aa18ce2bc66555b3218614519ac839ddb759a7d6720732f979ef8d13be147ecd" +dependencies = [ + "once_cell", + "proc-macro2", + "quote", + "syn 2.0.79", +] + [[package]] name = "envy" version = "0.4.2" @@ -1333,7 +1413,7 @@ dependencies = [ "hex", "hmac", "pbkdf2 0.11.0", - "rand 0.8.5", + "rand", "scrypt", "serde", "serde_json", @@ -1457,7 +1537,7 @@ dependencies = [ "reqwest 0.11.27", "serde", "serde_json", - "syn 2.0.68", + "syn 2.0.79", "toml", "walkdir", ] @@ -1475,7 +1555,7 @@ dependencies = [ "proc-macro2", "quote", "serde_json", - "syn 2.0.68", + "syn 2.0.79", ] [[package]] @@ -1492,16 +1572,16 @@ dependencies = [ "elliptic-curve 0.13.8", "ethabi", "generic-array", - "k256 0.13.3", - "num_enum 0.7.2", + "k256 0.13.4", + "num_enum 0.7.3", "once_cell", "open-fastrlp", - "rand 0.8.5", + "rand", "rlp", "serde", "serde_json", "strum", - "syn 2.0.68", + "syn 2.0.79", "tempfile", "thiserror", "tiny-keccak", @@ -1601,7 +1681,7 @@ dependencies = [ "elliptic-curve 0.13.8", "eth-keystore", "ethers-core", - "rand 0.8.5", + "rand", "sha2", "thiserror", "tracing", @@ -1636,7 +1716,7 @@ dependencies = [ "tokio", "tracing", "walkdir", - "yansi", + "yansi 0.5.1", ] [[package]] @@ -1660,11 +1740,22 @@ dependencies = [ "once_cell", ] +[[package]] +name = "fancy-regex" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "531e46835a22af56d1e3b66f04844bed63158bc094a628bec1d321d9b4c44bf2" +dependencies = [ + "bit-set", + "regex-automata 0.4.8", + "regex-syntax 0.8.5", +] + [[package]] name = "fastrand" -version = "2.1.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a" +checksum = "e8c02a5121d4ea3eb16a80748c74f5549a5665e4c21333c6098f283870fbdea6" [[package]] name = "ff" @@ -1672,7 +1763,7 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160" dependencies = [ - "rand_core 0.6.4", + "rand_core", "subtle", ] @@ -1682,22 +1773,10 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" dependencies = [ - "rand_core 0.6.4", + "rand_core", "subtle", ] -[[package]] -name = "ff_ce" -version = "0.14.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b538e4231443a5b9c507caee3356f016d832cf7393d2d90f03ea3180d4e3fbc" -dependencies = [ - "byteorder", - "hex", - "rand 0.4.6", - "serde", -] - [[package]] name = "fiat-crypto" version = "0.2.9" @@ -1723,7 +1802,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534" dependencies = [ "byteorder", - "rand 0.8.5", + "rand", "rustc-hex", "static_assertions", ] @@ -1736,9 +1815,9 @@ checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" [[package]] name = "flate2" -version = "1.0.30" +version = "1.0.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f54427cfd1c7829e2a139fcefea601bf088ebca651d2bf53ebc600eac295dae" +checksum = "a1b589b4dc103969ad3cf85c950899926ec64300a1a46d76c03a6072957036f0" dependencies = [ "crc32fast", "miniz_oxide", @@ -1795,12 +1874,6 @@ dependencies = [ "winapi", ] -[[package]] -name = 
"fuchsia-cprng" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a06f77d526c1a601b7c4cdd98f54b5eaabffc14d5f2f0296febdc7f357c6d3ba" - [[package]] name = "funty" version = "2.0.0" @@ -1884,7 +1957,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.79", ] [[package]] @@ -1954,15 +2027,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" dependencies = [ "cfg-if", + "js-sys", "libc", "wasi", + "wasm-bindgen", ] [[package]] name = "gimli" -version = "0.29.0" +version = "0.31.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40ecd4077b5ae9fd2e9e169b102c6c330d0605168eb0e8bf79952b256dbefffd" +checksum = "32085ea23f3234fc7846555e85283ba4de91e21016dc0455a16286d87a292d64" [[package]] name = "git_version_macro" @@ -1996,7 +2071,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" dependencies = [ "ff 0.12.1", - "rand_core 0.6.4", + "rand_core", "subtle", ] @@ -2007,7 +2082,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" dependencies = [ "ff 0.13.0", - "rand_core 0.6.4", + "rand_core", "subtle", ] @@ -2023,7 +2098,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.12", - "indexmap 2.2.6", + "indexmap 2.5.0", "slab", "tokio", "tokio-util", @@ -2032,9 +2107,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa82e28a107a8cc405f0839610bdc9b15f1e25ec7d696aa5cf173edbcb1486ab" +checksum = "524e8ac6999421f49a846c2d4411f337e53497d8ec55d67753beffa43c5d9205" dependencies = [ "atomic-waker", "bytes", @@ -2042,7 +2117,7 @@ dependencies = [ "futures-core", "futures-sink", "http 1.1.0", - "indexmap 2.2.6", + "indexmap 2.5.0", "slab", "tokio", "tokio-util", @@ -2174,9 +2249,9 @@ dependencies = [ [[package]] name = "http-body" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643" +checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" dependencies = [ "bytes", "http 1.1.0", @@ -2191,7 +2266,7 @@ dependencies = [ "bytes", "futures-util", "http 1.1.0", - "http-body 1.0.0", + "http-body 1.0.1", "pin-project-lite", ] @@ -2209,9 +2284,9 @@ checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" [[package]] name = "human-panic" -version = "2.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4c5d0e9120f6bca6120d142c7ede1ba376dd6bf276d69dd3dbe6cbeb7824179" +checksum = "1c5a08ed290eac04006e21e63d32e90086b6182c7cd0452d10f4264def1fec9a" dependencies = [ "anstream", "anstyle", @@ -2220,14 +2295,14 @@ dependencies = [ "serde", "serde_derive", "toml", - "uuid 1.9.1", + "uuid 1.10.0", ] [[package]] name = "hyper" -version = "0.14.29" +version = "0.14.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f361cde2f109281a220d4307746cdfd5ee3f410da58a70377762396775634b33" +checksum = "a152ddd61dfaec7273fe8419ab357f33aee0d914c5f4efbf0d96fa749eea5ec9" dependencies = [ "bytes", "futures-channel", @@ -2249,16 
+2324,16 @@ dependencies = [ [[package]] name = "hyper" -version = "1.4.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4fe55fb7a772d59a5ff1dfbff4fe0258d19b89fec4b233e75d35d5d2316badc" +checksum = "50dfd22e0e76d0f662d429a5f80fcaf3855009297eab6a0a9f8543834744ba05" dependencies = [ "bytes", "futures-channel", "futures-util", - "h2 0.4.5", + "h2 0.4.6", "http 1.1.0", - "http-body 1.0.0", + "http-body 1.0.1", "httparse", "httpdate", "itoa", @@ -2276,7 +2351,7 @@ checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ "futures-util", "http 0.2.12", - "hyper 0.14.29", + "hyper 0.14.30", "rustls 0.21.12", "tokio", "tokio-rustls 0.24.1", @@ -2284,15 +2359,15 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.27.2" +version = "0.27.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ee4be2c948921a1a5320b629c4193916ed787a7f7f293fd3f7f5a6c9de74155" +checksum = "08afdbb5c31130e3034af566421053ab03787c640246a446327f550d11bcb333" dependencies = [ "futures-util", "http 1.1.0", - "hyper 1.4.0", + "hyper 1.4.1", "hyper-util", - "rustls 0.23.10", + "rustls 0.23.13", "rustls-pki-types", "tokio", "tokio-rustls 0.26.0", @@ -2305,7 +2380,7 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3203a961e5c83b6f5498933e78b6b263e208c197b63e9c6c53cc82ffd3f63793" dependencies = [ - "hyper 1.4.0", + "hyper 1.4.1", "hyper-util", "pin-project-lite", "tokio", @@ -2319,7 +2394,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6183ddfa99b85da61a140bea0efc93fdf56ceaa041b37d553518030827f9905" dependencies = [ "bytes", - "hyper 0.14.29", + "hyper 0.14.30", "native-tls", "tokio", "tokio-native-tls", @@ -2333,7 +2408,7 @@ checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" dependencies = [ "bytes", "http-body-util", - "hyper 1.4.0", + "hyper 1.4.1", "hyper-util", "native-tls", "tokio", @@ -2343,29 +2418,28 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.6" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ab92f4f49ee4fb4f997c784b7a2e0fa70050211e0b6a287f898c3c9785ca956" +checksum = "41296eb09f183ac68eec06e03cdbea2e759633d4067b2f6552fc2e009bcad08b" dependencies = [ "bytes", "futures-channel", "futures-util", "http 1.1.0", - "http-body 1.0.0", - "hyper 1.4.0", + "http-body 1.0.1", + "hyper 1.4.1", "pin-project-lite", "socket2", "tokio", - "tower", "tower-service", "tracing", ] [[package]] name = "iana-time-zone" -version = "0.1.60" +version = "0.1.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7ffbb5a1b541ea2561f8c41c087286cc091e21e556a4f09a8f6cbf17b69b141" +checksum = "235e081f3925a06703c2d0117ea8b91f042756fd6e7a6e5d901e8ca1a996b220" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -2456,9 +2530,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.2.6" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26" +checksum = "68b900aa2f7301e21c36462b170ee99994de34dff39a4a6a528e80e7376d07e5" dependencies = [ "equivalent", "hashbrown 0.14.5", @@ -2497,15 +2571,15 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.9.0" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" +checksum = "187674a687eed5fe42285b40c6291f9a01517d415fad1c3cbc6a9f778af7fcd4" [[package]] name = "is_terminal_polyfill" -version = "1.70.0" +version = "1.70.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8478577c03552c21db0e2724ffb8986a5ce7af88107e6be5d2ee6e158c12800" +checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" [[package]] name = "itertools" @@ -2534,6 +2608,15 @@ dependencies = [ "either", ] +[[package]] +name = "itertools" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" +dependencies = [ + "either", +] + [[package]] name = "itoa" version = "1.0.11" @@ -2542,18 +2625,18 @@ checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" [[package]] name = "jobserver" -version = "0.1.31" +version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2b099aaa34a9751c5bf0878add70444e1ed2dd73f347be99003d4577277de6e" +checksum = "48d1dbcbbeb6a7fec7e059840aa538bd62aaccf972c7346c4d9d2059312853d0" dependencies = [ "libc", ] [[package]] name = "js-sys" -version = "0.3.69" +version = "0.3.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" +checksum = "1868808506b929d7b0cfa8f75951347aa71bb21144b7791bae35d9bccfcfe37a" dependencies = [ "wasm-bindgen", ] @@ -2586,9 +2669,9 @@ dependencies = [ [[package]] name = "k256" -version = "0.13.3" +version = "0.13.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "956ff9b67e26e1a6a866cb758f12c6f8746208489e3e4a4b5580802f2f0a587b" +checksum = "f6e3919bbaa2945715f0bb6d3934a173d1e9a59ac23767fbaaef277265a7411b" dependencies = [ "cfg-if", "ecdsa 0.16.9", @@ -2620,7 +2703,7 @@ dependencies = [ "lalrpop-util", "petgraph", "regex", - "regex-syntax 0.8.4", + "regex-syntax 0.8.5", "string_cache", "term", "tiny-keccak", @@ -2634,7 +2717,30 @@ version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "507460a910eb7b32ee961886ff48539633b788a36b65692b95f225b844c82553" dependencies = [ - "regex-automata 0.4.7", + "regex-automata 0.4.8", +] + +[[package]] +name = "lazy-regex" +version = "3.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8d8e41c97e6bc7ecb552016274b99fbb5d035e8de288c582d9b933af6677bfda" +dependencies = [ + "lazy-regex-proc_macros", + "once_cell", + "regex", +] + +[[package]] +name = "lazy-regex-proc_macros" +version = "3.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76e1d8b05d672c53cb9c7b920bbba8783845ae4f0b076e02a3db1d02c81b4163" +dependencies = [ + "proc-macro2", + "quote", + "regex", + "syn 2.0.79", ] [[package]] @@ -2648,9 +2754,9 @@ dependencies = [ [[package]] name = "libc" -version = "0.2.155" +version = "0.2.159" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" +checksum = "561d97a539a36e26a9a5fad1ea11a3039a67714694aaa379433e580854bc3dc5" [[package]] name = "libm" @@ -2721,7 +2827,7 @@ dependencies = [ "proc-macro2", "quote", "regex-syntax 0.6.29", - "syn 2.0.68", + "syn 2.0.79", ] [[package]] @@ -2790,7 +2896,7 @@ checksum = "49e7bc1560b95a3c4a25d03de42fe76ca718ab92d1a22a55b9b4cf67b3ae635c" dependencies = [ "proc-macro2", "quote", 
- "syn 2.0.68", + "syn 2.0.79", ] [[package]] @@ -2807,22 +2913,23 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.7.4" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8a240ddb74feaf34a79a7add65a741f3167852fba007066dcac1ca548d89c08" +checksum = "e2d80299ef12ff69b16a84bb182e3b9df68b5a91574d3d4fa6e41b65deec4df1" dependencies = [ - "adler", + "adler2", ] [[package]] name = "mio" -version = "0.8.11" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" +checksum = "80e04d1dcff3aae0704555fe5fee3bcfaf3d1fdf8a7e521d5b9d2b42acb52cec" dependencies = [ + "hermit-abi", "libc", "wasi", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -2837,7 +2944,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3ffa00dec017b5b1a8b7cf5e2c008bfda1aa7e0697ac1508b491fdf2622fb4d8" dependencies = [ - "rand 0.8.5", + "rand", ] [[package]] @@ -2863,6 +2970,12 @@ version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "650eef8c711430f1a879fdd01d4745a7deea475becfb90269c06775983bbf086" +[[package]] +name = "nohash-hasher" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bf50223579dc7cdcfb3bfcacf7069ff68243f8c363f62ffa99cf000a6b9c451" + [[package]] name = "nom" version = "7.1.3" @@ -2920,7 +3033,7 @@ dependencies = [ "num-integer", "num-iter", "num-traits", - "rand 0.8.5", + "rand", "smallvec", "zeroize", ] @@ -3004,11 +3117,11 @@ dependencies = [ [[package]] name = "num_enum" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02339744ee7253741199f897151b38e72257d13802d4ee837285cc2990a90845" +checksum = "4e613fc340b2220f734a8595782c551f1250e969d87d3be1ae0579e8d4065179" dependencies = [ - "num_enum_derive 0.7.2", + "num_enum_derive 0.7.3", ] [[package]] @@ -3020,19 +3133,19 @@ dependencies = [ "proc-macro-crate 1.3.1", "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.79", ] [[package]] name = "num_enum_derive" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "681030a937600a36906c185595136d26abfebb4aa9c65701cefcaf8578bb982b" +checksum = "af1844ef2428cc3e1cb900be36181049ef3d3193c63e43026cfe202983b27a56" dependencies = [ - "proc-macro-crate 3.1.0", + "proc-macro-crate 3.2.0", "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.79", ] [[package]] @@ -3043,18 +3156,21 @@ checksum = "830b246a0e5f20af87141b25c173cd1b609bd7779a4617d6ec582abaf90870f3" [[package]] name = "object" -version = "0.36.1" +version = "0.36.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "081b846d1d56ddfc18fdf1a922e4f6e07a11768ea1b92dec44e42b72712ccfce" +checksum = "084f1a5821ac4c651660a94a7153d27ac9d8a53736203f58b31945ded098070a" dependencies = [ "memchr", ] [[package]] name = "once_cell" -version = "1.19.0" +version = "1.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" +checksum = "82881c4be219ab5faaf2ad5e5e5ecdff8c66bd7402ca3160975c93b24961afd1" +dependencies = [ + "portable-atomic", +] [[package]] name = "open-fastrlp" @@ -3104,7 +3220,7 @@ checksum = 
"a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.79", ] [[package]] @@ -3161,7 +3277,7 @@ dependencies = [ "bytes", "http 1.1.0", "opentelemetry", - "reqwest 0.12.5", + "reqwest 0.12.7", ] [[package]] @@ -3177,8 +3293,8 @@ dependencies = [ "opentelemetry-http", "opentelemetry-proto", "opentelemetry_sdk", - "prost 0.13.1", - "reqwest 0.12.5", + "prost 0.13.3", + "reqwest 0.12.7", "thiserror", "tokio", "tonic", @@ -3192,7 +3308,7 @@ checksum = "30ee9f20bff9c984511a02f082dc8ede839e4a9bf15cc2487c8d6fea5ad850d9" dependencies = [ "opentelemetry", "opentelemetry_sdk", - "prost 0.13.1", + "prost 0.13.3", "tonic", ] @@ -3216,7 +3332,7 @@ dependencies = [ "once_cell", "opentelemetry", "percent-encoding", - "rand 0.8.5", + "rand", "serde_json", "thiserror", "tokio", @@ -3275,7 +3391,7 @@ version = "3.6.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d830939c76d294956402033aee57a6da7b438f2294eb94864c37b0569053a42c" dependencies = [ - "proc-macro-crate 3.1.0", + "proc-macro-crate 3.2.0", "proc-macro2", "quote", "syn 1.0.109", @@ -3283,9 +3399,9 @@ dependencies = [ [[package]] name = "parking" -version = "2.2.0" +version = "2.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb813b8af86854136c6922af0598d719255ecb2179515e6e7730d468f05c9cae" +checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" [[package]] name = "parking_lot" @@ -3305,9 +3421,9 @@ checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" dependencies = [ "cfg-if", "libc", - "redox_syscall 0.5.2", + "redox_syscall", "smallvec", - "windows-targets 0.52.5", + "windows-targets 0.52.6", ] [[package]] @@ -3317,7 +3433,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7676374caaee8a325c9e7a2ae557f216c5563a171d6997b0ef8a65af35147700" dependencies = [ "base64ct", - "rand_core 0.6.4", + "rand_core", "subtle", ] @@ -3386,7 +3502,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" dependencies = [ "fixedbitset", - "indexmap 2.2.6", + "indexmap 2.5.0", ] [[package]] @@ -3416,7 +3532,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "48e4cc64c2ad9ebe670cb8fd69dd50ae301650392e81c05f9bfcb2d5bdbc24b0" dependencies = [ "phf_shared 0.11.2", - "rand 0.8.5", + "rand", ] [[package]] @@ -3429,7 +3545,8 @@ dependencies = [ "phf_shared 0.11.2", "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.79", + "unicase", ] [[package]] @@ -3448,6 +3565,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90fcb95eef784c2ac79119d1dd819e162b5da872ce6f3c3abe1e8ca1c082f72b" dependencies = [ "siphasher", + "unicase", ] [[package]] @@ -3467,7 +3585,7 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.79", ] [[package]] @@ -3515,15 +3633,15 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.30" +version = "0.3.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" +checksum = "953ec861398dccce10c670dfeaf3ec4911ca479e9c02154b3a215178c5f566f2" [[package]] name = "portable-atomic" -version = "1.6.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7170ef9988bc169ba16dd36a7fa041e5c4cbeb6a35b76d4c03daded371eae7c0" +checksum = "cc9c68a3f6da06753e9335d63e27f6b9754dd1920d941135b7ea8224f141adb2" [[package]] name = "powerfmt" @@ -3533,9 +3651,12 @@ checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" [[package]] name = "ppv-lite86" -version = "0.2.17" +version = "0.2.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" +checksum = "77957b295656769bb8ad2b6a6b09d897d94f05c41b069aede1fcdaa675eaea04" +dependencies = [ + "zerocopy", +] [[package]] name = "precomputed-hash" @@ -3543,14 +3664,24 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c" +[[package]] +name = "pretty_assertions" +version = "1.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ae130e2f271fbc2ac3a40fb1d07180839cdbbe443c7a27e1e3c13c5cac0116d" +dependencies = [ + "diff", + "yansi 1.0.1", +] + [[package]] name = "prettyplease" -version = "0.2.20" +version = "0.2.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f12335488a2f3b0a83b14edad48dca9879ce89b2edd10e80237e4e852dd645e" +checksum = "479cf940fbbb3426c32c5d5176f62ad57549a0bb84773423ba8be9d089f5faba" dependencies = [ "proc-macro2", - "syn 2.0.68", + "syn 2.0.79", ] [[package]] @@ -3579,11 +3710,11 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "3.1.0" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d37c51ca738a55da99dc0c4a34860fd675453b8b36209178c2249bb13651284" +checksum = "8ecf48c7ca261d60b74ab1a7b20da18bede46776b2e55535cb958eb595c5fa7b" dependencies = [ - "toml_edit 0.21.1", + "toml_edit 0.22.22", ] [[package]] @@ -3597,9 +3728,9 @@ dependencies = [ [[package]] name = "prometheus-client" -version = "0.22.2" +version = "0.22.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1ca959da22a332509f2a73ae9e5f23f9dcfc31fd3a54d71f159495bd5909baa" +checksum = "504ee9ff529add891127c4827eb481bd69dc0ebc72e9a682e187db4caa60c3ca" dependencies = [ "dtoa", "itoa", @@ -3615,7 +3746,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.79", ] [[package]] @@ -3627,10 +3758,10 @@ dependencies = [ "bitflags 2.6.0", "lazy_static", "num-traits", - "rand 0.8.5", + "rand", "rand_chacha", "rand_xorshift", - "regex-syntax 0.8.4", + "regex-syntax 0.8.5", "unarray", ] @@ -3646,12 +3777,12 @@ dependencies = [ [[package]] name = "prost" -version = "0.13.1" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e13db3d3fde688c61e2446b4d843bc27a7e8af269a69440c0308021dc92333cc" +checksum = "7b0487d90e047de87f984913713b85c601c05609aad5b0df4b4573fbf69aa13f" dependencies = [ "bytes", - "prost-derive 0.13.1", + "prost-derive 0.13.3", ] [[package]] @@ -3671,7 +3802,7 @@ dependencies = [ "prost 0.12.6", "prost-types", "regex", - "syn 2.0.68", + "syn 2.0.79", "tempfile", ] @@ -3685,20 +3816,20 @@ dependencies = [ "itertools 0.12.1", "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.79", ] [[package]] name = "prost-derive" -version = "0.13.1" +version = "0.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18bec9b0adc4eba778b33684b7ba3e7137789434769ee3ce3930463ef904cfca" +checksum = 
"e9552f850d5f0964a4e4d0bf306459ac29323ddfbae05e35a7c0d35cb0803cc5" dependencies = [ "anyhow", - "itertools 0.12.1", + "itertools 0.13.0", "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.79", ] [[package]] @@ -3764,9 +3895,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.36" +version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" +checksum = "b5b9d34b8991d19d98081b46eacdd8eb58c6f2b201139f7c5f643cc155a633af" dependencies = [ "proc-macro2", ] @@ -3777,19 +3908,6 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" -[[package]] -name = "rand" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "552840b97013b1a26992c11eac34bdd778e464601a4c2054b5f0bff7c6761293" -dependencies = [ - "fuchsia-cprng", - "libc", - "rand_core 0.3.1", - "rdrand", - "winapi", -] - [[package]] name = "rand" version = "0.8.5" @@ -3798,7 +3916,7 @@ checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", "rand_chacha", - "rand_core 0.6.4", + "rand_core", ] [[package]] @@ -3808,24 +3926,9 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core 0.6.4", -] - -[[package]] -name = "rand_core" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a6fdeb83b075e8266dcc8762c22776f6877a63111121f5f8c7411e5be7eed4b" -dependencies = [ - "rand_core 0.4.2", + "rand_core", ] -[[package]] -name = "rand_core" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c33a3c44ca05fa6f1807d8e6743f3824e8509beca625669633be0acbdf509dc" - [[package]] name = "rand_core" version = "0.6.4" @@ -3841,7 +3944,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d25bf25ec5ae4a3f1b92f929810509a2f53d7dca2f50b794ff57e3face536c8f" dependencies = [ - "rand_core 0.6.4", + "rand_core", ] [[package]] @@ -3864,38 +3967,20 @@ dependencies = [ "crossbeam-utils", ] -[[package]] -name = "rdrand" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2" -dependencies = [ - "rand_core 0.3.1", -] - [[package]] name = "redox_syscall" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4722d768eff46b75989dd134e5c353f0d6296e5aaa3132e776cbdb56be7731aa" -dependencies = [ - "bitflags 1.3.2", -] - -[[package]] -name = "redox_syscall" -version = "0.5.2" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c82cf8cff14456045f55ec4241383baeff27af886adb72ffb2162f99911de0fd" +checksum = "9b6dfecf2c74bce2466cabf93f6664d6998a69eb21e39f4207930065b27b771f" dependencies = [ "bitflags 2.6.0", ] [[package]] name = "redox_users" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd283d9651eeda4b2a83a43c1c91b266c40fd76ecd39a50a8c630ae69dc72891" +checksum = "ba009ff324d1fc1b900bd1fdb31564febe58a8ccc8a6fdbb93b543d33b13ca43" dependencies = [ "getrandom", "libredox", @@ -3904,14 +3989,14 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.5" 
+version = "1.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b91213439dad192326a0d7c6ee3955910425f441d7038e0d6933b0aec5c4517f" +checksum = "38200e5ee88914975b69f657f0801b6f6dccafd44fd9326302a4aaeecfacb1d8" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.7", - "regex-syntax 0.8.4", + "regex-automata 0.4.8", + "regex-syntax 0.8.5", ] [[package]] @@ -3925,13 +4010,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38caf58cc5ef2fed281f89292ef23f6365465ed9a41b7a7754eb4e26496c92df" +checksum = "368758f23274712b504848e9d5a6f010445cc8b87a7cdb4d7cbee666c1288da3" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.4", + "regex-syntax 0.8.5", ] [[package]] @@ -3942,9 +4027,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a66a03ae7c801facd77a29370b4faec201768915ac14a721ba36f20bc9c209b" +checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" [[package]] name = "reqwest" @@ -3960,7 +4045,7 @@ dependencies = [ "h2 0.3.26", "http 0.2.12", "http-body 0.4.6", - "hyper 0.14.29", + "hyper 0.14.30", "hyper-rustls 0.24.2", "hyper-tls 0.5.0", "ipnet", @@ -3977,7 +4062,7 @@ dependencies = [ "serde_json", "serde_urlencoded", "sync_wrapper 0.1.2", - "system-configuration", + "system-configuration 0.5.1", "tokio", "tokio-native-tls", "tokio-rustls 0.24.1", @@ -3987,14 +4072,14 @@ dependencies = [ "wasm-bindgen-futures", "web-sys", "webpki-roots", - "winreg 0.50.0", + "winreg", ] [[package]] name = "reqwest" -version = "0.12.5" +version = "0.12.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7d6d2a27d57148378eb5e111173f4276ad26340ecc5c49a4a2152167a2d6a37" +checksum = "f8f4955649ef5c38cc7f9e8aa41761d48fb9677197daea9984dc54f56aad5e63" dependencies = [ "base64 0.22.1", "bytes", @@ -4002,12 +4087,12 @@ dependencies = [ "futures-channel", "futures-core", "futures-util", - "h2 0.4.5", + "h2 0.4.6", "http 1.1.0", - "http-body 1.0.0", + "http-body 1.0.1", "http-body-util", - "hyper 1.4.0", - "hyper-rustls 0.27.2", + "hyper 1.4.1", + "hyper-rustls 0.27.3", "hyper-tls 0.6.0", "hyper-util", "ipnet", @@ -4018,12 +4103,12 @@ dependencies = [ "once_cell", "percent-encoding", "pin-project-lite", - "rustls-pemfile 2.1.2", + "rustls-pemfile 2.1.3", "serde", "serde_json", "serde_urlencoded", "sync_wrapper 1.0.1", - "system-configuration", + "system-configuration 0.6.1", "tokio", "tokio-native-tls", "tower-service", @@ -4031,7 +4116,7 @@ dependencies = [ "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "winreg 0.52.0", + "windows-registry", ] [[package]] @@ -4129,7 +4214,7 @@ dependencies = [ "num-traits", "pkcs1", "pkcs8 0.10.2", - "rand_core 0.6.4", + "rand_core", "signature 2.2.0", "spki 0.7.3", "subtle", @@ -4142,6 +4227,12 @@ version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" +[[package]] +name = "rustc-hash" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "583034fd73374156e66797ed8e5b0d5690409c9226b22d87cb7f19821c05d152" + [[package]] name = "rustc-hex" version = "2.1.0" @@ -4150,18 +4241,18 @@ checksum = 
"3e75f6a532d0fd9f7f13144f392b6ad56a32696bfcd9c78f797f16bbb6f072d6" [[package]] name = "rustc_version" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" dependencies = [ "semver", ] [[package]] name = "rustix" -version = "0.38.34" +version = "0.38.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" +checksum = "8acb788b847c24f28525660c4d7758620a7210875711f79e7f663cc152726811" dependencies = [ "bitflags 2.6.0", "errno", @@ -4184,13 +4275,13 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.10" +version = "0.23.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05cff451f60db80f490f3c182b77c35260baace73209e9cdbbe526bfe3a4d402" +checksum = "f2dabaac7466917e566adb06783a81ca48944c6898a1b08b9374106dd671f4c8" dependencies = [ "once_cell", "rustls-pki-types", - "rustls-webpki 0.102.4", + "rustls-webpki 0.102.8", "subtle", "zeroize", ] @@ -4206,9 +4297,9 @@ dependencies = [ [[package]] name = "rustls-pemfile" -version = "2.1.2" +version = "2.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29993a25686778eb88d4189742cd713c9bce943bc54251a33509dc63cbacf73d" +checksum = "196fe16b00e106300d3e45ecfcb764fa292a535d7326a29a5875c579c7417425" dependencies = [ "base64 0.22.1", "rustls-pki-types", @@ -4216,9 +4307,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.7.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "976295e77ce332211c0d24d92c0e83e50f5c5f046d11082cea19f3df13a3562d" +checksum = "0e696e35370c65c9c541198af4543ccd580cf17fc25d8e05c5a242b202488c55" [[package]] name = "rustls-webpki" @@ -4232,9 +4323,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.102.4" +version = "0.102.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff448f7e92e913c4b7d4c6d8e4540a1724b319b4152b8aef6d4cf8339712b33e" +checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" dependencies = [ "ring 0.17.8", "rustls-pki-types", @@ -4289,7 +4380,7 @@ version = "2.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d35494501194174bda522a32605929eefc9ecf7e0a326c26db1fdd85881eb62" dependencies = [ - "proc-macro-crate 3.1.0", + "proc-macro-crate 3.2.0", "proc-macro2", "quote", "syn 1.0.109", @@ -4297,11 +4388,11 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534" +checksum = "e9aaafd5a2b6e3d657ff009d82fbd630b6bd54dd4eb06f21693925cdf80f9b8b" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -4389,9 +4480,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.11.0" +version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c627723fd09706bacdb5cf41499e95098555af3c3c29d014dc3c458ef6be11c0" +checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ "bitflags 2.6.0", "core-foundation", @@ -4402,9 +4493,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.11.0" 
+version = "2.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "317936bbbd05227752583946b9e66d7ce3b489f84e11a94a510b4437fef407d7" +checksum = "ea4a292869320c0272d7bc55a5a6aafaff59b4f63404a003887b679a2e05b4b6" dependencies = [ "core-foundation-sys", "libc", @@ -4483,7 +4574,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "901f761681f97db3db836ef9e094acdd8756c40215326c194201941947164ef1" dependencies = [ "once_cell", - "rand 0.8.5", + "rand", "sentry-types", "serde", "serde_json", @@ -4530,20 +4621,20 @@ checksum = "da956cca56e0101998c8688bc65ce1a96f00673a0e58e663664023d4c7911e82" dependencies = [ "debugid", "hex", - "rand 0.8.5", + "rand", "serde", "serde_json", "thiserror", "time", "url", - "uuid 1.9.1", + "uuid 1.10.0", ] [[package]] name = "serde" -version = "1.0.203" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7253ab4de971e72fb7be983802300c30b5a7f0c2e56fab8abfc6a214307c0094" +checksum = "c8e3592472072e6e22e0a54d5904d9febf8508f65fb8552499a1abc7d1078c3a" dependencies = [ "serde_derive", ] @@ -4560,31 +4651,32 @@ dependencies = [ [[package]] name = "serde_derive" -version = "1.0.203" +version = "1.0.210" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "500cbc0ebeb6f46627f50f3f5811ccf6bf00643be300b4c3eabc0ef55dc5b5ba" +checksum = "243902eda00fad750862fc144cea25caca5e20d615af0a81bee94ca738f1df1f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.79", ] [[package]] name = "serde_json" -version = "1.0.120" +version = "1.0.128" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e0d21c9a8cae1235ad58a00c11cb40d4b1e5c784f1ef2c537876ed6ffd8b7c5" +checksum = "6ff5456707a1de34e7e37f2a6fd3d3f808c318259cbd01ab6377795054b483d8" dependencies = [ "itoa", + "memchr", "ryu", "serde", ] [[package]] name = "serde_spanned" -version = "0.6.6" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79e674e01f999af37c49f70a6ede167a8a60b2503e56c5599532a65baa5969a0" +checksum = "87607cb1398ed59d48732e575a4c28a7a8ebf2454b964fe3f224f2afc07909e1" dependencies = [ "serde", ] @@ -4607,6 +4699,7 @@ version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "678b5a069e50bf00ecd22d0cd8ddf7c236f68581b03db652061ed5eb13a312ff" dependencies = [ + "hex", "serde", "serde_with_macros", ] @@ -4629,7 +4722,7 @@ version = "0.9.34+deprecated" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" dependencies = [ - "indexmap 2.2.6", + "indexmap 2.5.0", "itoa", "ryu", "serde", @@ -4720,7 +4813,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" dependencies = [ "digest", - "rand_core 0.6.4", + "rand_core", ] [[package]] @@ -4730,7 +4823,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77549399552de45a898a580c1b41d445bf730df867cc44e6c0233bbc4b8329de" dependencies = [ "digest", - "rand_core 0.6.4", + "rand_core", ] [[package]] @@ -4770,6 +4863,12 @@ dependencies = [ "nanoid", ] +[[package]] +name = "slyce" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "046d1c67b37db818d93f1d04a924ddbd7396c96cf9cfaa593d038259ae484729" + [[package]] name = "smallvec" version = "1.13.2" @@ -4785,6 +4884,16 @@ version = 
"0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b7c388c1b5e93756d0c740965c41e8822f866621d41acbdf6336a6a168f8840c" +[[package]] +name = "smol_str" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66eaf762c5af19db3108300515c8aa7a50efc90ff745f4c62288052ebf9fdd25" +dependencies = [ + "borsh", + "serde", +] + [[package]] name = "socket2" version = "0.5.7" @@ -4844,11 +4953,17 @@ dependencies = [ "der 0.7.9", ] +[[package]] +name = "sptr" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b9b39299b249ad65f3b7e96443bad61c02ca5cd3589f46cb6d610a0fd6c0d6a" + [[package]] name = "sqlformat" -version = "0.2.4" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f895e3734318cc55f1fe66258926c9b910c124d47520339efecbb6c59cec7c1f" +checksum = "7bba3a93db0cc4f7bdece8bb09e77e2e785c20bfebf79eb8340ed80708048790" dependencies = [ "nom", "unicode_categories", @@ -4856,9 +4971,9 @@ dependencies = [ [[package]] name = "sqlx" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fcfa89bea9500db4a0d038513d7a060566bfc51d46d1c014847049a45cce85e8" +checksum = "93334716a037193fac19df402f8571269c84a00852f6a7066b5d2616dcd64d3e" dependencies = [ "sqlx-core", "sqlx-macros", @@ -4869,9 +4984,9 @@ dependencies = [ [[package]] name = "sqlx-core" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d06e2f2bd861719b1f3f0c7dbe1d80c30bf59e76cf019f07d9014ed7eefb8e08" +checksum = "d4d8060b456358185f7d50c55d9b5066ad956956fddec42ee2e8567134a8936e" dependencies = [ "atoi", "byteorder", @@ -4888,7 +5003,7 @@ dependencies = [ "hashbrown 0.14.5", "hashlink", "hex", - "indexmap 2.2.6", + "indexmap 2.5.0", "log", "memchr", "once_cell", @@ -4908,22 +5023,22 @@ dependencies = [ [[package]] name = "sqlx-macros" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f998a9defdbd48ed005a89362bd40dd2117502f15294f61c8d47034107dbbdc" +checksum = "cac0692bcc9de3b073e8d747391827297e075c7710ff6276d9f7a1f3d58c6657" dependencies = [ "proc-macro2", "quote", "sqlx-core", "sqlx-macros-core", - "syn 2.0.68", + "syn 2.0.79", ] [[package]] name = "sqlx-macros-core" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d100558134176a2629d46cec0c8891ba0be8910f7896abfdb75ef4ab6f4e7ce" +checksum = "1804e8a7c7865599c9c79be146dc8a9fd8cc86935fa641d3ea58e5f0688abaa5" dependencies = [ "dotenvy", "either", @@ -4939,7 +5054,7 @@ dependencies = [ "sqlx-mysql", "sqlx-postgres", "sqlx-sqlite", - "syn 2.0.68", + "syn 2.0.79", "tempfile", "tokio", "url", @@ -4947,9 +5062,9 @@ dependencies = [ [[package]] name = "sqlx-mysql" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "936cac0ab331b14cb3921c62156d913e4c15b74fb6ec0f3146bd4ef6e4fb3c12" +checksum = "64bb4714269afa44aef2755150a0fc19d756fb580a67db8885608cf02f47d06a" dependencies = [ "atoi", "base64 0.22.1", @@ -4974,7 +5089,7 @@ dependencies = [ "memchr", "once_cell", "percent-encoding", - "rand 0.8.5", + "rand", "rsa", "serde", "sha1", @@ -4989,9 +5104,9 @@ dependencies = [ [[package]] name = "sqlx-postgres" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9734dbce698c67ecf67c442f768a5e90a49b2a4d61a9f1d59f73874bd4cf0710" +checksum = "6fa91a732d854c5d7726349bb4bb879bb9478993ceb764247660aee25f67c2f8" dependencies = [ "atoi", "base64 0.22.1", @@ -5013,7 +5128,7 @@ dependencies = [ "md-5", "memchr", "once_cell", - "rand 0.8.5", + "rand", "serde", "serde_json", "sha2", @@ -5027,9 +5142,9 @@ dependencies = [ [[package]] name = "sqlx-sqlite" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a75b419c3c1b1697833dd927bdc4c6545a620bc1bbafabd44e1efbe9afcd337e" +checksum = "d5b2cf34a45953bfd3daaf3db0f7a7878ab9b7a6b91b422d24a7a9e4c857b680" dependencies = [ "atoi", "flume", @@ -5048,6 +5163,72 @@ dependencies = [ "url", ] +[[package]] +name = "sqruff-lib" +version = "0.18.2" +source = "git+https://github.com/quarylabs/sqruff#1ccf18a620b93438c0c6b4f9fc88f402f45a1b29" +dependencies = [ + "ahash", + "anstyle", + "append-only-vec", + "common-path", + "configparser", + "dyn-clone", + "enum_dispatch", + "fancy-regex", + "getrandom", + "indexmap 2.5.0", + "itertools 0.13.0", + "lazy-regex", + "nohash-hasher", + "phf", + "pretty_assertions", + "rayon", + "regex", + "rustc-hash", + "smol_str", + "sqruff-lib-core", + "sqruff-lib-dialects", + "strum", + "strum_macros", + "tikv-jemallocator", + "tracing", + "unicase", + "walkdir", +] + +[[package]] +name = "sqruff-lib-core" +version = "0.18.2" +source = "git+https://github.com/quarylabs/sqruff#1ccf18a620b93438c0c6b4f9fc88f402f45a1b29" +dependencies = [ + "ahash", + "enum_dispatch", + "fancy-regex", + "indexmap 2.5.0", + "itertools 0.13.0", + "nohash-hasher", + "pretty_assertions", + "rustc-hash", + "slyce", + "smol_str", + "sptr", + "strum", + "strum_macros", +] + +[[package]] +name = "sqruff-lib-dialects" +version = "0.18.2" +source = "git+https://github.com/quarylabs/sqruff#1ccf18a620b93438c0c6b4f9fc88f402f45a1b29" +dependencies = [ + "ahash", + "itertools 0.13.0", + "serde_yaml", + "sqruff-lib-core", + "strum", +] + [[package]] name = "stable_deref_trait" version = "1.2.0" @@ -5115,7 +5296,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.68", + "syn 2.0.79", ] [[package]] @@ -5157,9 +5338,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.68" +version = "2.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "901fa70d88b9d6c98022e23b4136f9f3e54e4662c3bc1bd1d84a42a9a0f0c1e9" +checksum = "89132cd0bf050864e1d38dc3bbc07a0eb8e7530af26344d3d2bbbef83499f590" dependencies = [ "proc-macro2", "quote", @@ -5177,6 +5358,9 @@ name = "sync_wrapper" version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a7065abeca94b6a8a577f9bd45aa0867a2238b74e8eb67cf10d492bc39351394" +dependencies = [ + "futures-core", +] [[package]] name = "system-configuration" @@ -5186,7 +5370,18 @@ checksum = "ba3a3adc5c275d719af8cb4272ea1c4a6d668a777f37e115f6d11ddbc1c8e0e7" dependencies = [ "bitflags 1.3.2", "core-foundation", - "system-configuration-sys", + "system-configuration-sys 0.5.0", +] + +[[package]] +name = "system-configuration" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" +dependencies = [ + "bitflags 2.6.0", + "core-foundation", + "system-configuration-sys 0.6.0", ] [[package]] @@ -5199,6 +5394,16 @@ dependencies = [ "libc", ] +[[package]] +name = "system-configuration-sys" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "8e1d1b10ced5ca923a1fcb8d03e96b8d3268065d724548c0211415ff6ac6bac4" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "tap" version = "1.0.1" @@ -5207,14 +5412,15 @@ checksum = "55937e1799185b12863d447f42597ed69d9928686b8d88a1df17376a097d8369" [[package]] name = "tempfile" -version = "3.10.1" +version = "3.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" +checksum = "f0f2c9fc62d0beef6951ccffd757e241266a2c833136efbe35af6cd2567dca5b" dependencies = [ "cfg-if", "fastrand", + "once_cell", "rustix", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -5251,22 +5457,22 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.61" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c546c80d6be4bc6a00c0f01730c08df82eaa7a7a61f11d656526506112cc1709" +checksum = "d50af8abc119fb8bb6dbabcfa89656f46f84aa0ac7688088608076ad2b459a84" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.61" +version = "1.0.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533" +checksum = "08904e7672f5eb876eaaf87e0ce17857500934f4981c4a0ab2b4aa98baac7fc3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.79", ] [[package]] @@ -5288,6 +5494,26 @@ dependencies = [ "num_cpus", ] +[[package]] +name = "tikv-jemalloc-sys" +version = "0.6.0+5.3.0-1-ge13ca993e8ccb9ba9847cc330696e02839f328f7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd3c60906412afa9c2b5b5a48ca6a5abe5736aec9eb48ad05037a677e52e4e2d" +dependencies = [ + "cc", + "libc", +] + +[[package]] +name = "tikv-jemallocator" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cec5ff18518d81584f477e9bfdf957f5bb0979b0bac3af4ca30b5b3ae2d2865" +dependencies = [ + "libc", + "tikv-jemalloc-sys", +] + [[package]] name = "time" version = "0.3.36" @@ -5330,9 +5556,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.6.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c55115c6fbe2d2bef26eb09ad74bde02d8255476fc0c7b515ef09fbb35742d82" +checksum = "445e881f4f6d382d5f27c034e25eb92edd7c784ceab92a0937db7f2e9471b938" dependencies = [ "tinyvec_macros", ] @@ -5345,32 +5571,31 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.38.0" +version = "1.40.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba4f4a02a7a80d6f274636f0aa95c7e383b912d41fe721a31f29e29698585a4a" +checksum = "e2b070231665d27ad9ec9b8df639893f46727666c6767db40317fbe920a5d998" dependencies = [ "backtrace", "bytes", "libc", "mio", - "num_cpus", "parking_lot", "pin-project-lite", "signal-hook-registry", "socket2", "tokio-macros", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] name = "tokio-macros" -version = "2.3.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f5ae998a069d4b5aba8ee9dad856af7d520c3699e6159b185c2acd48155d39a" +checksum = "693d596312e88961bc67d7f1f97af8a70227d9f90c31bba5806eec004978d752" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.79", ] [[package]] @@ -5399,16 +5624,16 @@ version = "0.26.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "0c7bc40d0e5a97695bb96e27995cd3a08538541b0a846f65bba7a359f36700d4" dependencies = [ - "rustls 0.23.10", + "rustls 0.23.13", "rustls-pki-types", "tokio", ] [[package]] name = "tokio-stream" -version = "0.1.15" +version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "267ac89e0bec6e691e5813911606935d77c476ff49024f98abcea3e7b15e37af" +checksum = "4f4e6ce100d0eb49a2734f8c0812bcd324cf357d21810932c5df6b96ef2b86f1" dependencies = [ "futures-core", "pin-project-lite", @@ -5432,9 +5657,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.11" +version = "0.7.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" +checksum = "61e7c3654c13bcd040d4a03abee2c75b1d14a37b423cf5a813ceae1cc903ec6a" dependencies = [ "bytes", "futures-core", @@ -5445,21 +5670,21 @@ dependencies = [ [[package]] name = "toml" -version = "0.8.14" +version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f49eb2ab21d2f26bd6db7bf383edc527a7ebaee412d17af4d40fdccd442f335" +checksum = "a1ed1f98e3fdc28d6d910e6737ae6ab1a93bf1985935a1193e68f93eeb68d24e" dependencies = [ "serde", "serde_spanned", "toml_datetime", - "toml_edit 0.22.14", + "toml_edit 0.22.22", ] [[package]] name = "toml_datetime" -version = "0.6.6" +version = "0.6.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4badfd56924ae69bcc9039335b2e017639ce3f9b001c393c1b2d1ef846ce2cbf" +checksum = "0dd7358ecb8fc2f8d014bf86f6f638ce72ba252a2c3a2572f2a795f1d23efb41" dependencies = [ "serde", ] @@ -5470,60 +5695,49 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.2.6", - "toml_datetime", - "winnow 0.5.40", -] - -[[package]] -name = "toml_edit" -version = "0.21.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a8534fd7f78b5405e860340ad6575217ce99f38d4d5c8f2442cb5ecb50090e1" -dependencies = [ - "indexmap 2.2.6", + "indexmap 2.5.0", "toml_datetime", "winnow 0.5.40", ] [[package]] name = "toml_edit" -version = "0.22.14" +version = "0.22.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f21c7aaf97f1bd9ca9d4f9e73b0a6c74bd5afef56f2bc931943a6e1c37e04e38" +checksum = "4ae48d6208a266e853d946088ed816055e556cc6028c5e8e2b84d9fa5dd7c7f5" dependencies = [ - "indexmap 2.2.6", + "indexmap 2.5.0", "serde", "serde_spanned", "toml_datetime", - "winnow 0.6.13", + "winnow 0.6.20", ] [[package]] name = "tonic" -version = "0.12.1" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38659f4a91aba8598d27821589f5db7dddd94601e7a01b1e485a50e5484c7401" +checksum = "877c5b330756d856ffcc4553ab34a5684481ade925ecc54bcd1bf02b1d0d4d52" dependencies = [ "async-stream", "async-trait", "axum", "base64 0.22.1", "bytes", - "h2 0.4.5", + "h2 0.4.6", "http 1.1.0", - "http-body 1.0.0", + "http-body 1.0.1", "http-body-util", - "hyper 1.4.0", + "hyper 1.4.1", "hyper-timeout", "hyper-util", "percent-encoding", "pin-project", - "prost 0.13.1", + "prost 0.13.3", "socket2", "tokio", "tokio-stream", - "tower", + "tower 0.4.13", "tower-layer", "tower-service", "tracing", @@ -5540,7 +5754,7 @@ dependencies = [ "indexmap 1.9.3", "pin-project", "pin-project-lite", - "rand 0.8.5", + "rand", "slab", 
"tokio", "tokio-util", @@ -5549,17 +5763,31 @@ dependencies = [ "tracing", ] +[[package]] +name = "tower" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2873938d487c3cfb9aed7546dc9f2711d867c9f90c46b889989a2cb84eba6b4f" +dependencies = [ + "futures-core", + "futures-util", + "pin-project-lite", + "sync_wrapper 0.1.2", + "tower-layer", + "tower-service", +] + [[package]] name = "tower-layer" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" [[package]] name = "tower-service" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" @@ -5581,7 +5809,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.79", ] [[package]] @@ -5683,7 +5911,7 @@ dependencies = [ "http 0.2.12", "httparse", "log", - "rand 0.8.5", + "rand", "rustls 0.21.12", "sha1", "thiserror", @@ -5736,6 +5964,15 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "eaea85b334db583fe3274d12b4cd1880032beab409c0d774be044d4480ab9a94" +[[package]] +name = "unicase" +version = "2.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f7d2d4dafb69621809a81864c9c1b864479e1235c0dd4e199924b9742439ed89" +dependencies = [ + "version_check", +] + [[package]] name = "unicode-bidi" version = "0.3.15" @@ -5744,9 +5981,9 @@ checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" [[package]] name = "unicode-ident" -version = "1.0.12" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" +checksum = "e91b56cd4cadaeb79bbf1a5645f6b4f8dc5bde8834ad5894a8db35fda9efa1fe" [[package]] name = "unicode-linebreak" @@ -5756,30 +5993,30 @@ checksum = "3b09c83c3c29d37506a3e260c08c03743a6bb66a9cd432c6934ab501a190571f" [[package]] name = "unicode-normalization" -version = "0.1.23" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" +checksum = "5033c97c4262335cded6d6fc3e5c18ab755e1a3dc96376350f3d8e9f009ad956" dependencies = [ "tinyvec", ] [[package]] name = "unicode-properties" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4259d9d4425d9f0661581b804cb85fe66a4c631cadd8f490d1c13a35d5d9291" +checksum = "52ea75f83c0137a9b98608359a5f1af8144876eb67bcb1ce837368e906a9f524" [[package]] name = "unicode-width" -version = "0.1.13" +version = "0.1.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0336d538f7abc86d282a4189614dfaa90810dfc2c6f6427eaf88e16311dd225d" +checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" [[package]] name = "unicode-xid" -version = "0.2.4" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" +checksum = 
"ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" [[package]] name = "unicode_categories" @@ -5807,9 +6044,9 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "ureq" -version = "2.9.7" +version = "2.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d11a831e3c0b56e438a28308e7c810799e3c118417f342d30ecec080105395cd" +checksum = "b74fc6b57825be3373f7054754755f03ac3a8f5d70015ccad699ba2029956f4a" dependencies = [ "base64 0.22.1", "log", @@ -5854,9 +6091,9 @@ dependencies = [ [[package]] name = "uuid" -version = "1.9.1" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5de17fd2f7da591098415cff336e12965a28061ddace43b59cb3c430179c9439" +checksum = "81dfa00651efa65069b0b6b651f4aaa31ba9e3c3ce0137aaad053604ee7e0314" dependencies = [ "getrandom", "serde", @@ -5876,9 +6113,9 @@ checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" [[package]] name = "version_check" -version = "0.9.4" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" [[package]] name = "vise" @@ -5900,7 +6137,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "671d3b894d5d0849f0a597f56bf071f42d4f2a1cbcf2f78ca21f870ab7c0cc2b" dependencies = [ - "hyper 0.14.29", + "hyper 0.14.30", "once_cell", "tokio", "tracing", @@ -5915,7 +6152,7 @@ checksum = "6a511871dc5de990a3b2a0e715facfbc5da848c0c0395597a1415029fb7c250a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.79", ] [[package]] @@ -5951,34 +6188,35 @@ checksum = "b8dad83b4f25e74f184f64c43b150b91efe7647395b42289f38e50566d82855b" [[package]] name = "wasm-bindgen" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" +checksum = "a82edfc16a6c469f5f44dc7b571814045d60404b55a0ee849f9bcfa2e63dd9b5" dependencies = [ "cfg-if", + "once_cell", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" +checksum = "9de396da306523044d3302746f1208fa71d7532227f15e347e2d93e4145dd77b" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.79", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.42" +version = "0.4.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76bc14366121efc8dbb487ab05bcc9d346b3b5ec0eaa76e46594cabbe51762c0" +checksum = "61e9300f63a621e96ed275155c108eb6f843b6a26d053f122ab69724559dc8ed" dependencies = [ "cfg-if", "js-sys", @@ -5988,9 +6226,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" +checksum = "585c4c91a46b072c92e908d99cb1dcdf95c5218eeb6f3bf1efa991ee7a68cccf" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -5998,28 +6236,28 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.92" +version = "0.2.93" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" +checksum = "afc340c74d9005395cf9dd098506f7f44e38f2b4a21c6aaacf9a105ea5e1e836" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.79", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.92" +version = "0.2.93" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" +checksum = "c62a0a307cb4a311d3a07867860911ca130c3494e8c2719593806c08bc5d0484" [[package]] name = "web-sys" -version = "0.3.69" +version = "0.3.70" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef" +checksum = "26fdeaafd9bd129f65e7c031593c24d62186301e0c72c8978fa1678be7d532c0" dependencies = [ "js-sys", "wasm-bindgen", @@ -6043,11 +6281,11 @@ checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" [[package]] name = "whoami" -version = "1.5.1" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a44ab49fad634e88f55bf8f9bb3abd2f27d7204172a112c7c9987e01c1c94ea9" +checksum = "372d5b87f58ec45c384ba03563b03544dc5fadc3983e434b286913f5b4a9bb6d" dependencies = [ - "redox_syscall 0.4.1", + "redox_syscall", "wasite", ] @@ -6069,11 +6307,11 @@ checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" [[package]] name = "winapi-util" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d4cc384e1e73b93bafa6fb4f1df8c41695c8a91cf9c4c64358067d15a7b6c6b" +checksum = "cf221c93e13a30d793f7645a0e7762c55d169dbb0a49671918a2319d289b10bb" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] @@ -6088,7 +6326,37 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-targets 0.52.5", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-registry" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e400001bb720a623c1c69032f8e3e4cf09984deec740f007dd2b03ec864804b0" +dependencies = [ + "windows-result", + "windows-strings", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-result" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d1043d8214f791817bab27572aaa8af63732e11bf84aa21a45a78d6c317ae0e" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-strings" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cd9b125c486025df0eabcb585e62173c6c9eddcec5d117d3b6e8c30e2ee4d10" +dependencies = [ + "windows-result", + "windows-targets 0.52.6", ] [[package]] @@ -6106,7 +6374,16 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.5", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets 0.52.6", ] [[package]] @@ -6126,18 
+6403,18 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ - "windows_aarch64_gnullvm 0.52.5", - "windows_aarch64_msvc 0.52.5", - "windows_i686_gnu 0.52.5", + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 0.52.6", "windows_i686_gnullvm", - "windows_i686_msvc 0.52.5", - "windows_x86_64_gnu 0.52.5", - "windows_x86_64_gnullvm 0.52.5", - "windows_x86_64_msvc 0.52.5", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", ] [[package]] @@ -6148,9 +6425,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" [[package]] name = "windows_aarch64_msvc" @@ -6160,9 +6437,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" [[package]] name = "windows_i686_gnu" @@ -6172,15 +6449,15 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" [[package]] name = "windows_i686_gnullvm" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] name = "windows_i686_msvc" @@ -6190,9 +6467,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] name = "windows_x86_64_gnu" @@ -6202,9 +6479,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] name = "windows_x86_64_gnullvm" @@ -6214,9 +6491,9 @@ checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.5" +version = "0.52.6" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] name = "windows_x86_64_msvc" @@ -6226,9 +6503,9 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.52.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "winnow" @@ -6241,9 +6518,9 @@ dependencies = [ [[package]] name = "winnow" -version = "0.6.13" +version = "0.6.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59b5e5f6c299a3c7890b876a2a587f3115162487e704907d9b6cd29473052ba1" +checksum = "36c1fec1a2bb5866f07c25f68c26e565c4c200aebb96d7e55710c19d3e8ac49b" dependencies = [ "memchr", ] @@ -6258,16 +6535,6 @@ dependencies = [ "windows-sys 0.48.0", ] -[[package]] -name = "winreg" -version = "0.52.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a277a57398d4bfa075df44f501a17cfdf8542d224f0d36095a2adc7aee4ef0a5" -dependencies = [ - "cfg-if", - "windows-sys 0.48.0", -] - [[package]] name = "ws_stream_wasm" version = "0.7.4" @@ -6317,24 +6584,31 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" +[[package]] +name = "yansi" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfe53a6657fd280eaa890a3bc59152892ffa3e30101319d168b781ed6529b049" + [[package]] name = "zerocopy" -version = "0.7.34" +version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae87e3fcd617500e5d106f0380cf7b77f3c6092aae37191433159dda23cfb087" +checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" dependencies = [ + "byteorder", "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.34" +version = "0.7.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15e934569e47891f7d9411f1a451d947a60e000ab3bd24fbb970f000387d1b3b" +checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.79", ] [[package]] @@ -6354,7 +6628,7 @@ checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.79", ] [[package]] @@ -6416,8 +6690,8 @@ dependencies = [ "cliclack", "common", "config", - "console", "ethers", + "eyre", "human-panic", "lazy_static", "secrecy", @@ -6454,6 +6728,7 @@ dependencies = [ "serde", "serde_json", "serde_yaml", + "sqruff-lib", "strum", "tokio", "types", @@ -6485,7 +6760,8 @@ dependencies = [ "chrono", "ethabi", "hex", - "num_enum 0.7.2", + "num_enum 0.7.3", + "secrecy", "serde", "serde_json", "serde_with", @@ -6497,14 +6773,14 @@ dependencies = [ [[package]] name = "zksync_concurrency" -version = "0.1.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1c8cf6c689ab5922b52d81b775cd2d9cffbfc8fb8da65985e11b06546dfb3bf" +checksum = "a4724d51934e475c846ba9e6ed169e25587385188b928a9ecfbbf616092a1c17" dependencies = [ "anyhow", "once_cell", "pin-project", - "rand 
0.8.5", + "rand", "sha3", "thiserror", "time", @@ -6519,7 +6795,7 @@ name = "zksync_config" version = "0.1.0" dependencies = [ "anyhow", - "rand 0.8.5", + "rand", "secrecy", "serde", "url", @@ -6531,21 +6807,19 @@ dependencies = [ [[package]] name = "zksync_consensus_crypto" -version = "0.1.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc7baced4e811015038322dad10239f2d631d9e339e8d6b7b6e6b146bee30f41" +checksum = "a7760e7a140f16f0435fbf2ad9a4b09feaad74568d05b553751d222f4803a42e" dependencies = [ "anyhow", "blst", "ed25519-dalek", "elliptic-curve 0.13.8", - "ff_ce", "hex", - "k256 0.13.3", + "k256 0.13.4", "num-bigint", "num-traits", - "rand 0.4.6", - "rand 0.8.5", + "rand", "sha3", "thiserror", "tracing", @@ -6554,16 +6828,16 @@ dependencies = [ [[package]] name = "zksync_consensus_roles" -version = "0.1.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0aab4ddf62f6001903c5fe9f65afb1bdc42464928c9d1c6ce52e4d7e9944f5dc" +checksum = "96f903187836210602beba27655e111e22efb229ef90bd2a95a3d6799b31685c" dependencies = [ "anyhow", "bit-vec", "hex", "num-bigint", "prost 0.12.6", - "rand 0.8.5", + "rand", "serde", "thiserror", "tracing", @@ -6576,12 +6850,12 @@ dependencies = [ [[package]] name = "zksync_consensus_utils" -version = "0.1.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29e69dffc0fbc7c096548c997f5ca157a490b34b3d49fd524fa3d51840f7fb22" +checksum = "1020308512c01ab80327fb874b5b61c6fd513a6b26c8a5fce3e077600da04e4b" dependencies = [ "anyhow", - "rand 0.8.5", + "rand", "thiserror", "zksync_concurrency", ] @@ -6606,7 +6880,7 @@ dependencies = [ "anyhow", "blake2", "hex", - "rand 0.8.5", + "rand", "secp256k1", "serde", "serde_json", @@ -6627,9 +6901,9 @@ dependencies = [ [[package]] name = "zksync_protobuf" -version = "0.1.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df5467dfe2f845ca1fd6ceec623bbd32187589793d3c4023dcd2f5172369d198" +checksum = "1d2d9ce9b9697daae6023c8da5cfe8764690a9d9c91ff32b8e1e54a7c8301fb3" dependencies = [ "anyhow", "bit-vec", @@ -6637,7 +6911,7 @@ dependencies = [ "prost 0.12.6", "prost-reflect", "quick-protobuf", - "rand 0.8.5", + "rand", "serde", "serde_json", "serde_yaml", @@ -6648,9 +6922,9 @@ dependencies = [ [[package]] name = "zksync_protobuf_build" -version = "0.1.1" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33d35280660b11be2a4ebdf531184eb729acebfdc3368d27176ec104f8bf9c5f" +checksum = "903c23a12e160a703f9b68d0dd961daa24156af912ca1bc9efb74969f3acc645" dependencies = [ "anyhow", "heck", @@ -6660,7 +6934,7 @@ dependencies = [ "prost-reflect", "protox", "quote", - "syn 2.0.68", + "syn 2.0.79", ] [[package]] @@ -6670,7 +6944,7 @@ dependencies = [ "anyhow", "hex", "prost 0.12.6", - "rand 0.8.5", + "rand", "secrecy", "serde_json", "serde_yaml", @@ -6699,17 +6973,18 @@ dependencies = [ "bigdecimal", "blake2", "chrono", - "derive_more 1.0.0-beta.6", + "derive_more 1.0.0", "hex", "itertools 0.10.5", "num", - "num_enum 0.7.2", + "num_enum 0.7.3", "once_cell", "prost 0.12.6", "rlp", "secp256k1", "serde", "serde_json", + "serde_with", "strum", "thiserror", "tracing", @@ -6734,7 +7009,7 @@ dependencies = [ "hex", "num", "once_cell", - "reqwest 0.12.5", + "reqwest 0.12.7", "serde", "serde_json", "thiserror", @@ -6791,9 +7066,9 @@ dependencies = [ [[package]] name = "zstd-sys" -version = "2.0.11+zstd.1.5.6" +version = 
"2.0.13+zstd.1.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75652c55c0b6f3e6f12eb786fe1bc960396bf05a1eb3bf1f3691c3610ac2e6d4" +checksum = "38ff0f21cfee8f97d94cef41359e0c89aa6113028ab0291aa8ca0038995a95aa" dependencies = [ "cc", "pkg-config", diff --git a/zk_toolbox/Cargo.toml b/zk_toolbox/Cargo.toml index d8b84f93add..e6aec659bd6 100644 --- a/zk_toolbox/Cargo.toml +++ b/zk_toolbox/Cargo.toml @@ -32,9 +32,9 @@ git_version_macro = { path = "crates/git_version_macro" } zksync_config = { path = "../core/lib/config" } zksync_protobuf_config = { path = "../core/lib/protobuf_config" } zksync_basic_types = { path = "../core/lib/basic_types" } -zksync_consensus_roles = "=0.1.1" -zksync_consensus_crypto = "=0.1.1" -zksync_protobuf = "=0.1.1" +zksync_consensus_roles = "=0.3.0" +zksync_consensus_crypto = "=0.3.0" +zksync_protobuf = "=0.3.0" # External dependencies anyhow = "1.0.82" @@ -43,6 +43,7 @@ slugify-rs = "0.0.3" cliclack = "0.2.5" console = "0.15.8" chrono = "0.4.38" +eyre = "0.6.12" ethers = "2.0" futures = "0.3.30" human-panic = "2.0" diff --git a/zk_toolbox/README.md b/zk_toolbox/README.md index a3b44fa98b3..6197a79eec9 100644 --- a/zk_toolbox/README.md +++ b/zk_toolbox/README.md @@ -183,12 +183,6 @@ Initialize the prover: zk_inception prover init ``` -Generate setup keys: - -```bash -zk_inception prover generate-sk -``` - Run the prover: ```bash diff --git a/zk_toolbox/crates/common/src/cmd.rs b/zk_toolbox/crates/common/src/cmd.rs index 7bf0147b69c..130a3b2c100 100644 --- a/zk_toolbox/crates/common/src/cmd.rs +++ b/zk_toolbox/crates/common/src/cmd.rs @@ -147,10 +147,7 @@ impl<'a> Cmd<'a> { fn check_output_status(command_text: &str, output: &std::process::Output) -> CmdResult<()> { if !output.status.success() { logger::new_line(); - logger::error_note( - &format!("Command failed to run: {}", command_text), - &log_output(output), - ); + logger::error_note("Command failed to run", &log_output(output)); return Err(CmdError { stderr: Some(String::from_utf8(output.stderr.clone())?), source: anyhow::anyhow!("Command failed to run: {}", command_text), diff --git a/zk_toolbox/crates/common/src/db.rs b/zk_toolbox/crates/common/src/db.rs index eda5471170d..bceeb000982 100644 --- a/zk_toolbox/crates/common/src/db.rs +++ b/zk_toolbox/crates/common/src/db.rs @@ -126,7 +126,7 @@ pub async fn migrate_db( let text = if skip { "Skipped" } else { "Applied" }; if global_config().verbose { - logger::step(&format!( + logger::step(format!( " {} {}/{} {} ({elapsed:?})", text, migration.version, diff --git a/zk_toolbox/crates/common/src/ethereum.rs b/zk_toolbox/crates/common/src/ethereum.rs index 4f000ed0fd5..33caaad9789 100644 --- a/zk_toolbox/crates/common/src/ethereum.rs +++ b/zk_toolbox/crates/common/src/ethereum.rs @@ -10,7 +10,7 @@ use ethers::{ }; use types::TokenInfo; -use crate::wallets::Wallet; +use crate::{logger, wallets::Wallet}; pub fn create_ethers_client( private_key: H256, @@ -103,13 +103,12 @@ pub async fn mint_token( let mut pending_txs = vec![]; for call in &pending_calls { - pending_txs.push( - call.send() - .await? 
- // It's safe to set such low number of confirmations and low interval for localhost - .confirmations(3) - .interval(Duration::from_millis(30)), - ); + let call = call.send().await; + match call { + // It's safe to set such a low number of confirmations and a low interval for localhost + Ok(call) => pending_txs.push(call.confirmations(3).interval(Duration::from_millis(30))), + Err(e) => logger::error(format!("Minting was not successful: {e}")), + } } futures::future::join_all(pending_txs).await; diff --git a/zk_toolbox/crates/common/src/term/logger.rs b/zk_toolbox/crates/common/src/term/logger.rs index 33a88bd961e..17e518d9ad9 100644 --- a/zk_toolbox/crates/common/src/term/logger.rs +++ b/zk_toolbox/crates/common/src/term/logger.rs @@ -56,10 +56,9 @@ pub fn note(msg: impl Display, content: impl Display) { } pub fn error_note(msg: &str, content: &str) { - let symbol = CliclackTheme.state_symbol(&ThemeState::Submit); - let note = CliclackTheme - .format_note(msg, content) - .replace(&symbol, &CliclackTheme.error_symbol()); + let note = CliclackTheme.format_log(msg, &CliclackTheme.error_symbol()); + term_write(note); + let note = CliclackTheme.format_log(content, &CliclackTheme.error_symbol()); term_write(note); } diff --git a/zk_toolbox/crates/config/src/ecosystem.rs b/zk_toolbox/crates/config/src/ecosystem.rs index 7ff65d4612d..7e7c5d4dae5 100644 --- a/zk_toolbox/crates/config/src/ecosystem.rs +++ b/zk_toolbox/crates/config/src/ecosystem.rs @@ -151,6 +151,10 @@ impl EcosystemConfig { self.load_chain_inner(&name) } + pub fn load_current_chain(&self) -> Option<ChainConfig> { + self.load_chain_inner(self.current_chain()) + } + fn load_chain_inner(&self, name: &str) -> Option<ChainConfig> { let path = self.chains.join(name).join(CONFIG_NAME); let config = ChainConfigInternal::read(self.get_shell(), path.clone()).ok()?; diff --git a/zk_toolbox/crates/config/src/general.rs b/zk_toolbox/crates/config/src/general.rs index 87eb3a7eb19..a8e7407edd0 100644 --- a/zk_toolbox/crates/config/src/general.rs +++ b/zk_toolbox/crates/config/src/general.rs @@ -4,14 +4,14 @@ use anyhow::Context; use common::yaml::merge_yaml; use url::Url; use xshell::Shell; +use zksync_config::configs::object_store::ObjectStoreMode; pub use zksync_config::configs::GeneralConfig; -use zksync_config::configs::{consensus::Host, object_store::ObjectStoreMode}; use zksync_protobuf_config::{decode_yaml_repr, encode_yaml_repr}; use crate::{ consts::GENERAL_FILE, traits::{ConfigWithL2RpcUrl, FileConfigWithDefaultName, ReadConfig, SaveConfig}, - ChainConfig, DEFAULT_CONSENSUS_PORT, + ChainConfig, }; pub struct RocksDbs { @@ -113,68 +113,6 @@ pub fn set_file_artifacts(config: &mut GeneralConfig, file_artifacts: FileArtifa set_artifact_path!(config.core_object_store, file_artifacts.core_object_store); } -pub fn ports_config(config: &GeneralConfig) -> Option<PortsConfig> { - let api = config.api_config.as_ref()?; - let contract_verifier = config.contract_verifier.as_ref()?; - let consensus_port = if let Some(consensus_config) = config.clone().consensus_config { - consensus_config.server_addr.port() - } else { - DEFAULT_CONSENSUS_PORT - }; - - Some(PortsConfig { - web3_json_rpc_http_port: api.web3_json_rpc.http_port, - web3_json_rpc_ws_port: api.web3_json_rpc.ws_port, - healthcheck_port: api.healthcheck.port, - merkle_tree_port: api.merkle_tree.port, - prometheus_listener_port: api.prometheus.listener_port, - contract_verifier_port: contract_verifier.port, - consensus_port, - }) -} - -pub fn update_ports(config: &mut GeneralConfig, ports_config: &PortsConfig) -> anyhow::Result<()> { -
let api = config - .api_config - .as_mut() - .context("Api config is not presented")?; - let contract_verifier = config - .contract_verifier - .as_mut() - .context("Contract Verifier config is not presented")?; - let prometheus = config - .prometheus_config - .as_mut() - .context("Prometheus config is not presented")?; - if let Some(consensus) = config.consensus_config.as_mut() { - consensus.server_addr.set_port(ports_config.consensus_port); - update_port_in_host(&mut consensus.public_addr, ports_config.consensus_port)?; - } - - api.web3_json_rpc.http_port = ports_config.web3_json_rpc_http_port; - update_port_in_url( - &mut api.web3_json_rpc.http_url, - ports_config.web3_json_rpc_http_port, - )?; - api.web3_json_rpc.ws_port = ports_config.web3_json_rpc_ws_port; - update_port_in_url( - &mut api.web3_json_rpc.ws_url, - ports_config.web3_json_rpc_ws_port, - )?; - contract_verifier.port = ports_config.contract_verifier_port; - update_port_in_url( - &mut contract_verifier.url, - ports_config.contract_verifier_port, - )?; - api.healthcheck.port = ports_config.healthcheck_port; - api.merkle_tree.port = ports_config.merkle_tree_port; - api.prometheus.listener_port = ports_config.prometheus_listener_port; - - prometheus.listener_port = ports_config.prometheus_listener_port; - - Ok(()) -} - pub fn override_config(shell: &Shell, path: PathBuf, chain: &ChainConfig) -> anyhow::Result<()> { let chain_config_path = chain.path_to_general_config(); let override_config = serde_yaml::from_str(&shell.read_file(path)?)?; @@ -184,60 +122,10 @@ pub fn override_config(shell: &Shell, path: PathBuf, chain: &ChainConfig) -> any Ok(()) } -fn update_port_in_url(http_url: &mut String, port: u16) -> anyhow::Result<()> { - let mut http_url_url = Url::parse(http_url)?; - if let Err(()) = http_url_url.set_port(Some(port)) { - anyhow::bail!("Wrong url, setting port is impossible"); - } - *http_url = http_url_url.to_string(); - Ok(()) -} - -fn update_port_in_host(host: &mut Host, port: u16) -> anyhow::Result<()> { - let url = Url::parse(&format!("http://{}", host.0))?; - let host_str = url.host_str().context("Failed to get host")?; - host.0 = format!("{host_str}:{port}"); - Ok(()) -} - impl FileConfigWithDefaultName for GeneralConfig { const FILE_NAME: &'static str = GENERAL_FILE; } -pub struct PortsConfig { - pub web3_json_rpc_http_port: u16, - pub web3_json_rpc_ws_port: u16, - pub healthcheck_port: u16, - pub merkle_tree_port: u16, - pub prometheus_listener_port: u16, - pub contract_verifier_port: u16, - pub consensus_port: u16, -} - -impl PortsConfig { - pub fn apply_offset(&mut self, offset: u16) { - self.web3_json_rpc_http_port += offset; - self.web3_json_rpc_ws_port += offset; - self.healthcheck_port += offset; - self.merkle_tree_port += offset; - self.prometheus_listener_port += offset; - self.contract_verifier_port += offset; - self.consensus_port += offset; - } - - pub fn next_empty_ports_config(&self) -> PortsConfig { - Self { - web3_json_rpc_http_port: self.web3_json_rpc_http_port + 100, - web3_json_rpc_ws_port: self.web3_json_rpc_ws_port + 100, - healthcheck_port: self.healthcheck_port + 100, - merkle_tree_port: self.merkle_tree_port + 100, - prometheus_listener_port: self.prometheus_listener_port + 100, - contract_verifier_port: self.contract_verifier_port + 100, - consensus_port: self.consensus_port + 100, - } - } -} - impl SaveConfig for GeneralConfig { fn save(&self, shell: &Shell, path: impl AsRef<Path>) -> anyhow::Result<()> { let bytes = diff --git a/zk_toolbox/crates/config/src/secrets.rs
b/zk_toolbox/crates/config/src/secrets.rs index f0a39148b03..02ace5da88e 100644 --- a/zk_toolbox/crates/config/src/secrets.rs +++ b/zk_toolbox/crates/config/src/secrets.rs @@ -12,17 +12,15 @@ use crate::{ traits::{FileConfigWithDefaultName, ReadConfig, SaveConfig}, }; -pub fn set_databases( +pub fn set_server_database( secrets: &mut SecretsConfig, server_db_config: &DatabaseConfig, - prover_db_config: &DatabaseConfig, ) -> anyhow::Result<()> { let database = secrets .database .as_mut() - .context("Databases must be presented")?; + .context("Server database must be present")?; database.server_url = Some(SensitiveUrl::from(server_db_config.full_url())); - database.prover_url = Some(SensitiveUrl::from(prover_db_config.full_url())); Ok(()) } @@ -33,7 +31,7 @@ pub fn set_prover_database( let database = secrets .database .as_mut() - .context("Databases must be presented")?; + .context("Prover database must be present")?; database.prover_url = Some(SensitiveUrl::from(prover_db_config.full_url())); Ok(()) } diff --git a/zk_toolbox/crates/zk_inception/Cargo.toml b/zk_toolbox/crates/zk_inception/Cargo.toml index 61983d59e6e..28b709c557b 100644 --- a/zk_toolbox/crates/zk_inception/Cargo.toml +++ b/zk_toolbox/crates/zk_inception/Cargo.toml @@ -15,7 +15,6 @@ anyhow.workspace = true clap.workspace = true cliclack.workspace = true config.workspace = true -console.workspace = true human-panic.workspace = true lazy_static.workspace = true serde_yaml.workspace = true @@ -37,3 +36,7 @@ clap-markdown.workspace = true zksync_consensus_roles.workspace = true zksync_consensus_crypto.workspace = true secrecy.workspace = true + +[build-dependencies] +eyre.workspace = true +ethers.workspace = true diff --git a/zk_toolbox/crates/zk_inception/README.md b/zk_toolbox/crates/zk_inception/README.md index 904b1421e3a..7923afe4e98 100644 --- a/zk_toolbox/crates/zk_inception/README.md +++ b/zk_toolbox/crates/zk_inception/README.md @@ -423,7 +423,6 @@ Prover related commands ###### **Subcommands:** - `init` — Initialize prover -`generate-sk` — Generate setup keys - `run` — Run prover - `init-bellman-cuda` — Initialize bellman-cuda diff --git a/zk_toolbox/crates/zk_inception/abi/ConsensusRegistry.json b/zk_toolbox/crates/zk_inception/abi/ConsensusRegistry.json new file mode 100644 index 00000000000..3826d258072 --- /dev/null +++ b/zk_toolbox/crates/zk_inception/abi/ConsensusRegistry.json @@ -0,0 +1,1135 @@ +{ + "_format": "hh-zksolc-artifact-1", + "contractName": "ConsensusRegistry", + "sourceName": "contracts/ConsensusRegistry.sol", + "abi": [ + { + "inputs": [], + "name": "AttesterPubKeyExists", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidInputBLS12_381PublicKey", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidInputBLS12_381Signature", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidInputNodeOwnerAddress", + "type": "error" + }, + { + "inputs": [], + "name": "InvalidInputSecp256k1PublicKey", + "type": "error" + }, + { + "inputs": [], + "name": "NodeOwnerDoesNotExist", + "type": "error" + }, + { + "inputs": [], + "name": "NodeOwnerExists", + "type": "error" + }, + { + "inputs": [], + "name": "NodeOwnerNotFound", + "type": "error" + }, + { + "inputs": [], + "name": "UnauthorizedOnlyOwnerOrNodeOwner", + "type": "error" + }, + { + "inputs": [], + "name": "ValidatorPubKeyExists", + "type": "error" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "uint32", + "name": "commit", + "type": "uint32" + } + ], + "name": "AttestersCommitted",
"type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "uint8", + "name": "version", + "type": "uint8" + } + ], + "name": "Initialized", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "nodeOwner", + "type": "address" + } + ], + "name": "NodeActivated", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "nodeOwner", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint32", + "name": "validatorWeight", + "type": "uint32" + }, + { + "components": [ + { + "internalType": "bytes32", + "name": "a", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "b", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "c", + "type": "bytes32" + } + ], + "indexed": false, + "internalType": "struct IConsensusRegistry.BLS12_381PublicKey", + "name": "validatorPubKey", + "type": "tuple" + }, + { + "components": [ + { + "internalType": "bytes32", + "name": "a", + "type": "bytes32" + }, + { + "internalType": "bytes16", + "name": "b", + "type": "bytes16" + } + ], + "indexed": false, + "internalType": "struct IConsensusRegistry.BLS12_381Signature", + "name": "validatorPoP", + "type": "tuple" + }, + { + "indexed": false, + "internalType": "uint32", + "name": "attesterWeight", + "type": "uint32" + }, + { + "components": [ + { + "internalType": "bytes1", + "name": "tag", + "type": "bytes1" + }, + { + "internalType": "bytes32", + "name": "x", + "type": "bytes32" + } + ], + "indexed": false, + "internalType": "struct IConsensusRegistry.Secp256k1PublicKey", + "name": "attesterPubKey", + "type": "tuple" + } + ], + "name": "NodeAdded", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "nodeOwner", + "type": "address" + }, + { + "components": [ + { + "internalType": "bytes1", + "name": "tag", + "type": "bytes1" + }, + { + "internalType": "bytes32", + "name": "x", + "type": "bytes32" + } + ], + "indexed": false, + "internalType": "struct IConsensusRegistry.Secp256k1PublicKey", + "name": "newPubKey", + "type": "tuple" + } + ], + "name": "NodeAttesterKeyChanged", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "nodeOwner", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint32", + "name": "newWeight", + "type": "uint32" + } + ], + "name": "NodeAttesterWeightChanged", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "nodeOwner", + "type": "address" + } + ], + "name": "NodeDeactivated", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "nodeOwner", + "type": "address" + } + ], + "name": "NodeDeleted", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "nodeOwner", + "type": "address" + } + ], + "name": "NodeRemoved", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "nodeOwner", + "type": "address" + }, + { + "components": [ + { + "internalType": "bytes32", + "name": "a", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "b", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "c", + 
"type": "bytes32" + } + ], + "indexed": false, + "internalType": "struct IConsensusRegistry.BLS12_381PublicKey", + "name": "newPubKey", + "type": "tuple" + }, + { + "components": [ + { + "internalType": "bytes32", + "name": "a", + "type": "bytes32" + }, + { + "internalType": "bytes16", + "name": "b", + "type": "bytes16" + } + ], + "indexed": false, + "internalType": "struct IConsensusRegistry.BLS12_381Signature", + "name": "newPoP", + "type": "tuple" + } + ], + "name": "NodeValidatorKeyChanged", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "nodeOwner", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint32", + "name": "newWeight", + "type": "uint32" + } + ], + "name": "NodeValidatorWeightChanged", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "previousOwner", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "newOwner", + "type": "address" + } + ], + "name": "OwnershipTransferStarted", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "previousOwner", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "newOwner", + "type": "address" + } + ], + "name": "OwnershipTransferred", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": "uint32", + "name": "commit", + "type": "uint32" + } + ], + "name": "ValidatorsCommitted", + "type": "event" + }, + { + "inputs": [], + "name": "acceptOwnership", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "_nodeOwner", + "type": "address" + } + ], + "name": "activate", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "_nodeOwner", + "type": "address" + }, + { + "internalType": "uint32", + "name": "_validatorWeight", + "type": "uint32" + }, + { + "components": [ + { + "internalType": "bytes32", + "name": "a", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "b", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "c", + "type": "bytes32" + } + ], + "internalType": "struct IConsensusRegistry.BLS12_381PublicKey", + "name": "_validatorPubKey", + "type": "tuple" + }, + { + "components": [ + { + "internalType": "bytes32", + "name": "a", + "type": "bytes32" + }, + { + "internalType": "bytes16", + "name": "b", + "type": "bytes16" + } + ], + "internalType": "struct IConsensusRegistry.BLS12_381Signature", + "name": "_validatorPoP", + "type": "tuple" + }, + { + "internalType": "uint32", + "name": "_attesterWeight", + "type": "uint32" + }, + { + "components": [ + { + "internalType": "bytes1", + "name": "tag", + "type": "bytes1" + }, + { + "internalType": "bytes32", + "name": "x", + "type": "bytes32" + } + ], + "internalType": "struct IConsensusRegistry.Secp256k1PublicKey", + "name": "_attesterPubKey", + "type": "tuple" + } + ], + "name": "add", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "", + "type": "bytes32" + } + ], + "name": "attesterPubKeyHashes", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + 
"inputs": [], + "name": "attestersCommit", + "outputs": [ + { + "internalType": "uint32", + "name": "", + "type": "uint32" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "_nodeOwner", + "type": "address" + }, + { + "components": [ + { + "internalType": "bytes1", + "name": "tag", + "type": "bytes1" + }, + { + "internalType": "bytes32", + "name": "x", + "type": "bytes32" + } + ], + "internalType": "struct IConsensusRegistry.Secp256k1PublicKey", + "name": "_pubKey", + "type": "tuple" + } + ], + "name": "changeAttesterKey", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "_nodeOwner", + "type": "address" + }, + { + "internalType": "uint32", + "name": "_weight", + "type": "uint32" + } + ], + "name": "changeAttesterWeight", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "_nodeOwner", + "type": "address" + }, + { + "components": [ + { + "internalType": "bytes32", + "name": "a", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "b", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "c", + "type": "bytes32" + } + ], + "internalType": "struct IConsensusRegistry.BLS12_381PublicKey", + "name": "_pubKey", + "type": "tuple" + }, + { + "components": [ + { + "internalType": "bytes32", + "name": "a", + "type": "bytes32" + }, + { + "internalType": "bytes16", + "name": "b", + "type": "bytes16" + } + ], + "internalType": "struct IConsensusRegistry.BLS12_381Signature", + "name": "_pop", + "type": "tuple" + } + ], + "name": "changeValidatorKey", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "_nodeOwner", + "type": "address" + }, + { + "internalType": "uint32", + "name": "_weight", + "type": "uint32" + } + ], + "name": "changeValidatorWeight", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "commitAttesterCommittee", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "commitValidatorCommittee", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "_nodeOwner", + "type": "address" + } + ], + "name": "deactivate", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "getAttesterCommittee", + "outputs": [ + { + "components": [ + { + "internalType": "uint32", + "name": "weight", + "type": "uint32" + }, + { + "components": [ + { + "internalType": "bytes1", + "name": "tag", + "type": "bytes1" + }, + { + "internalType": "bytes32", + "name": "x", + "type": "bytes32" + } + ], + "internalType": "struct IConsensusRegistry.Secp256k1PublicKey", + "name": "pubKey", + "type": "tuple" + } + ], + "internalType": "struct IConsensusRegistry.CommitteeAttester[]", + "name": "", + "type": "tuple[]" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "getValidatorCommittee", + "outputs": [ + { + "components": [ + { + "internalType": "uint32", + "name": "weight", + "type": "uint32" + }, + { + "components": [ + { + "internalType": "bytes32", + "name": "a", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "b", + "type": "bytes32" + }, + { + 
"internalType": "bytes32", + "name": "c", + "type": "bytes32" + } + ], + "internalType": "struct IConsensusRegistry.BLS12_381PublicKey", + "name": "pubKey", + "type": "tuple" + }, + { + "components": [ + { + "internalType": "bytes32", + "name": "a", + "type": "bytes32" + }, + { + "internalType": "bytes16", + "name": "b", + "type": "bytes16" + } + ], + "internalType": "struct IConsensusRegistry.BLS12_381Signature", + "name": "proofOfPossession", + "type": "tuple" + } + ], + "internalType": "struct IConsensusRegistry.CommitteeValidator[]", + "name": "", + "type": "tuple[]" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "_initialOwner", + "type": "address" + } + ], + "name": "initialize", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "name": "nodeOwners", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "name": "nodes", + "outputs": [ + { + "internalType": "uint32", + "name": "attesterLastUpdateCommit", + "type": "uint32" + }, + { + "internalType": "uint32", + "name": "validatorLastUpdateCommit", + "type": "uint32" + }, + { + "internalType": "uint32", + "name": "nodeOwnerIdx", + "type": "uint32" + }, + { + "components": [ + { + "internalType": "bool", + "name": "active", + "type": "bool" + }, + { + "internalType": "bool", + "name": "removed", + "type": "bool" + }, + { + "internalType": "uint32", + "name": "weight", + "type": "uint32" + }, + { + "components": [ + { + "internalType": "bytes1", + "name": "tag", + "type": "bytes1" + }, + { + "internalType": "bytes32", + "name": "x", + "type": "bytes32" + } + ], + "internalType": "struct IConsensusRegistry.Secp256k1PublicKey", + "name": "pubKey", + "type": "tuple" + } + ], + "internalType": "struct IConsensusRegistry.AttesterAttr", + "name": "attesterLatest", + "type": "tuple" + }, + { + "components": [ + { + "internalType": "bool", + "name": "active", + "type": "bool" + }, + { + "internalType": "bool", + "name": "removed", + "type": "bool" + }, + { + "internalType": "uint32", + "name": "weight", + "type": "uint32" + }, + { + "components": [ + { + "internalType": "bytes1", + "name": "tag", + "type": "bytes1" + }, + { + "internalType": "bytes32", + "name": "x", + "type": "bytes32" + } + ], + "internalType": "struct IConsensusRegistry.Secp256k1PublicKey", + "name": "pubKey", + "type": "tuple" + } + ], + "internalType": "struct IConsensusRegistry.AttesterAttr", + "name": "attesterSnapshot", + "type": "tuple" + }, + { + "components": [ + { + "internalType": "bool", + "name": "active", + "type": "bool" + }, + { + "internalType": "bool", + "name": "removed", + "type": "bool" + }, + { + "internalType": "uint32", + "name": "weight", + "type": "uint32" + }, + { + "components": [ + { + "internalType": "bytes32", + "name": "a", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "b", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "c", + "type": "bytes32" + } + ], + "internalType": "struct IConsensusRegistry.BLS12_381PublicKey", + "name": "pubKey", + "type": "tuple" + }, + { + "components": [ + { + "internalType": "bytes32", + "name": "a", + "type": "bytes32" + }, + { + "internalType": "bytes16", + "name": "b", + "type": "bytes16" 
+ } + ], + "internalType": "struct IConsensusRegistry.BLS12_381Signature", + "name": "proofOfPossession", + "type": "tuple" + } + ], + "internalType": "struct IConsensusRegistry.ValidatorAttr", + "name": "validatorLatest", + "type": "tuple" + }, + { + "components": [ + { + "internalType": "bool", + "name": "active", + "type": "bool" + }, + { + "internalType": "bool", + "name": "removed", + "type": "bool" + }, + { + "internalType": "uint32", + "name": "weight", + "type": "uint32" + }, + { + "components": [ + { + "internalType": "bytes32", + "name": "a", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "b", + "type": "bytes32" + }, + { + "internalType": "bytes32", + "name": "c", + "type": "bytes32" + } + ], + "internalType": "struct IConsensusRegistry.BLS12_381PublicKey", + "name": "pubKey", + "type": "tuple" + }, + { + "components": [ + { + "internalType": "bytes32", + "name": "a", + "type": "bytes32" + }, + { + "internalType": "bytes16", + "name": "b", + "type": "bytes16" + } + ], + "internalType": "struct IConsensusRegistry.BLS12_381Signature", + "name": "proofOfPossession", + "type": "tuple" + } + ], + "internalType": "struct IConsensusRegistry.ValidatorAttr", + "name": "validatorSnapshot", + "type": "tuple" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "numNodes", + "outputs": [ + { + "internalType": "uint256", + "name": "", + "type": "uint256" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "owner", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "pendingOwner", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "_nodeOwner", + "type": "address" + } + ], + "name": "remove", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "renounceOwnership", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "newOwner", + "type": "address" + } + ], + "name": "transferOwnership", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "bytes32", + "name": "", + "type": "bytes32" + } + ], + "name": "validatorPubKeyHashes", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "validatorsCommit", + "outputs": [ + { + "internalType": "uint32", + "name": "", + "type": "uint32" + } + ], + "stateMutability": "view", + "type": "function" + } + ], + "bytecode": 
"0x00020000000000020015000000000002000100000001035500000000030100190000006003300270000004ad0030019d0000008008000039000000400080043f000004ad033001970000000102200190000000200000c13d000000040230008c000011c30000413d000000000201043b000000e002200270000004af0420009c000000280000a13d000004b00420009c0000023f0000a13d000004b10420009c000002b60000a13d000004b20420009c000002e10000213d000004b50420009c000005590000613d000004b60120009c000011c30000c13d0000000001000416000000000101004b000011c30000c13d0000006501000039000004f40000013d0000000001000416000000000101004b000011c30000c13d000000200100003900000100001004430000012000000443000004ae01000041000012b00001042e000004c10420009c000001330000213d000004c90420009c0000024c0000213d000004cd0420009c000003970000613d000004ce0420009c000003040000613d000004cf0220009c000011c30000c13d0000000002000416000000000202004b000011c30000c13d000000040230008a000000200220008c000011c30000413d0000000401100370000000000101043b000d00000001001d000004d00110009c000011c30000213d0000003301000039000000000101041a000004d0021001970000000001000411000000000212004b000000450000613d0000000d0110006b000003b50000c13d0000000d0100002900000000001004350000009801000039000c00000001001d000000200010043f000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b0000000802100039000000000202041a000000000202004b000000610000c13d0000000902100039000000000202041a000000000202004b000000610000c13d0000000a01100039000000000101041a000000000101004b000009440000613d0000000d0100002900000000001004350000000c01000029000000200010043f000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d0000009b02000039000000000402041a0000002003400270000004ad03300197000000000701043b000000000107041a0000002005100270000004ad05500197000000000353004b0000000d030000390000000703002039000004ad04400197000004ad05100197000000000654004b000000040600003900000001060020390000000006760019000000000606041a0000ff000660019000000c540000613d0000000003730019000000000303041a0000ff000330019000000c540000613d0000009702000039000000000402041a000000000304004b0000054f0000613d000b00000007001d000004e203400041000000000303041a000004d0033001970000004001100270000004ad01100197000000000414004b00000e8e0000a13d000004e301100041000000000401041a000004d104400197000000000434019f000000000041041b000000000102041a000000000401004b000009cd0000613d000004e204100041000000000504041a000004d105500197000000000054041b000000010110008a000000000012041b0000000b01000029000000000101041a000a00000001001d00000000003004350000000c01000029000000200010043f000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d0000000a02000029000004e402200197000000000101043b000000000301041a000004e503300197000000000223019f000000000021041b0000000b020000290000000201200039000000000301041a0000000301200039000000000201041a000000400100043d0000004004100039000000000024043500000040020000390000000002210436000000f8033002100000000000320435000005180310009c0000102e0000813d0000006003100039000000400030043f000004ad04000041000004ad0320009c000000000204801900000040022002100000000001010433000004ad0310009c00000000010480190000006001100210000000000121019f0000000002000414000004ad0320009c0000000002048019000000c002200210000000000112019f000004d2011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b00000000001004350000009901000039
000000200010043f0000000001000414000004ad0210009c000004ad01008041000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000000000201041a000a0100000000920000000a0220017f000000000021041b0000000b040000290000000801400039000000000301041a0000000901400039000000000201041a0000000a01400039000000000401041a000000400100043d0000006005100039000000000045043500000040041000390000000000240435000000600200003900000000022104360000000000320435000004e70310009c0000102e0000213d0000008003100039000000400030043f000004ad04000041000004ad0320009c000000000204801900000040022002100000000001010433000004ad0310009c00000000010480190000006001100210000000000121019f0000000002000414000004ad0320009c0000000002048019000000c002200210000000000112019f000004d2011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b00000000001004350000009a01000039000000200010043f0000000001000414000004ad0210009c000004ad01008041000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000000000201041a0000000a0220017f000000000021041b0000000d0100002900000000001004350000000c01000029000000200010043f000004ad030000410000000001000414000004ad0210009c0000000001038019000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000004b70000013d000004c20420009c0000026f0000213d000004c60420009c000003a20000613d000004c70420009c0000034b0000613d000004c80220009c000011c30000c13d0000000002000416000000000202004b000011c30000c13d000000040230008a000000400220008c000011c30000413d0000000402100370000000000202043b000d00000002001d000004d00220009c000011c30000213d0000002401100370000000000101043b000c00000001001d000004ad0110009c000011c30000213d0000003301000039000000000101041a000004d0011001970000000002000411000000000121004b0000067a0000c13d0000000d0100002900000000001004350000009801000039000b00000001001d000000200010043f000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b0000000802100039000000000202041a000000000202004b0000016d0000c13d0000000902100039000000000202041a000000000202004b0000016d0000c13d0000000a01100039000000000101041a000000000101004b000009440000613d0000000d0100002900000000001004350000000b01000029000000200010043f000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d0000009b02000039000000000302041a0000002004300270000004ad04400197000000000701043b000000000107041a0000002005100270000004ad05500197000000000454004b0000000d050000390000000705002039000004ad03300197000004ad04100197000000000643004b000000040600003900000001060020390000000006760019000000000606041a0000ff0006600190000a00000007001d00000f8d0000613d0000000005750019000000000505041a0000ff000550019000000f8d0000613d0000009702000039000000000402041a000000000304004b0000054f0000613d000004e203400041000000000303041a000004d0033001970000004001100270000004ad01100197000000000414004b00000e8e0000a13d000004e301100041000000000401041a000004d104400197000000000434019f000000000041041b000000000102041a000000000401004b000009cd0000613d000004e204100041000000000504041a000004d105500197000000000054041b000000010110008a000000000012041b0000000a01000029000000000101041a000c00000001001d00000000003004350000000b01000029000000200010043f000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000
011c30000613d0000000c02000029000004e402200197000000000101043b000000000301041a000004e503300197000000000223019f000000000021041b0000000a020000290000000201200039000000000301041a0000000301200039000000000201041a000000400100043d0000004004100039000000000024043500000040020000390000000002210436000000f8033002100000000000320435000004e60310009c0000102e0000213d0000006003100039000000400030043f000004ad04000041000004ad0320009c000000000204801900000040022002100000000001010433000004ad0310009c00000000010480190000006001100210000000000121019f0000000002000414000004ad0320009c0000000002048019000000c002200210000000000112019f000004d2011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b00000000001004350000009901000039000000200010043f0000000001000414000004ad0210009c000004ad01008041000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000000000201041a000c0100000000920000000c0220017f000000000021041b0000000a040000290000000801400039000000000301041a0000000901400039000000000201041a0000000a01400039000000000401041a000000400100043d0000006005100039000000000045043500000040041000390000000000240435000000600200003900000000022104360000000000320435000004e70310009c0000102e0000213d0000008003100039000000400030043f000004ad04000041000004ad0320009c000000000204801900000040022002100000000001010433000004ad0310009c00000000010480190000006001100210000000000121019f0000000002000414000004ad0320009c0000000002048019000000c002200210000000000112019f000004d2011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b00000000001004350000009a01000039000000200010043f0000000001000414000004ad0210009c000004ad01008041000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000000000201041a0000000c0220017f000000000021041b0000000d0100002900000000001004350000000b01000029000000200010043f000004ad030000410000000001000414000004ad0210009c0000000001038019000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000011940000013d000004ba0420009c0000028f0000213d000004be0120009c000005400000613d000004bf0120009c000004f70000613d000004c00120009c000011c30000c13d0000000001000416000000000101004b000011c30000c13d0000003301000039000004f40000013d000004ca0420009c000003b90000613d000004cb0420009c000003510000613d000004cc0120009c000011c30000c13d0000000001000416000000000101004b000011c30000c13d0000003301000039000000000101041a000004d0011001970000000002000411000000000121004b0000067a0000c13d0000009b01000039000000000201041a000004ad03200197000004ad0430009c0000054f0000613d000004f6022001970000000103300039000000000223019f000000000021041b000000800030043f000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004fd011001c70000800d0200003900000001030000390000050404000041000010190000013d000004c30420009c000004e60000613d000004c40420009c000003760000613d000004c50120009c000011c30000c13d0000000001000416000000000101004b000011c30000c13d0000003301000039000000000201041a000004d0052001970000000003000411000000000335004b0000067a0000c13d0000006503000039000000000403041a000004d104400197000000000043041b000004d102200197000000000021041b000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004d2011001c70000800d020000390000000303000039000004dc040000410000000006000019000010190000013d000004bb0420009c000005530000613d000004bc0420009c000005030000613d000004bd0220009c000011c30000c13d0000000002000416000000000202004b000011c30000c13d000000040230008a000000600220008c000011
c30000413d0000000402100370000000000202043b000d00000002001d000004d00220009c000011c30000213d0000003302000039000000000202041a000004d0032001970000000002000411000000000323004b000002a80000613d0000000d0220006b000003b50000c13d0000002402100370000000000302043b000004f402300197000c00000003001d000000000232004b000011c30000c13d0000000c0200006b000009280000c13d0000004401100370000000000101043b000000000101004b000009280000c13d0000050e01000041000003b60000013d000004b70420009c000006830000613d000004b80420009c000005760000613d000004b90220009c000011c30000c13d0000000002000416000000000202004b000011c30000c13d000000040230008a000000200220008c000011c30000413d0000000401100370000000000101043b000d00000001001d000004d00110009c000011c30000213d000000000300041a000bff0000300194000007c80000c13d000000ff0130019000000000020000190000000102006039000e00000002001d00000000020004150000000e0220008a00000020022000c9000000000101004b000007cc0000c13d000001000100008a000000000113016f00000001011001bf000004db02000041000000000121016f00000100011001bf0000000003000019000000000010041b0000000d06000029000000000106004b000009650000c13d000000400100043d000004df02000041000009460000013d000004b30420009c0000056c0000613d000004b40220009c000011c30000c13d0000000002000416000000000202004b000011c30000c13d000000040230008a000000200220008c000011c30000413d0000000401100370000000000601043b000004d00160009c000011c30000213d0000003301000039000000000101041a000004d0051001970000000001000411000000000115004b0000067a0000c13d0000006501000039000000000201041a000004d102200197000000000262019f000000000021041b000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004d2011001c70000800d020000390000000303000039000004d304000041000010190000013d0000000002000416000000000202004b000011c30000c13d000000040230008a000000200220008c000011c30000413d0000000401100370000000000101043b000004d00210009c000011c30000213d00000000001004350000009801000039000000200010043f0000004002000039000000000100001912af128f0000040f0000000002010019000d00000002001d000000000102041a000900000001001d000000010120003912af11d30000040f000c00000001001d0000000d01000029000000040110003912af11d30000040f000b00000001001d0000000d01000029000000070110003912af11fc0000040f0000000d020000290000000d02200039000a00000001001d000000000102001912af11fc0000040f00000009050000290000002002500270000004ad02200197000000400400043d000d00000004001d000000200340003900000000002304350000004002500270000004ad0220019700000040034000390000000000230435000004ad025001970000000000240435000900000001001d00000060024000390000000c0100002912af12360000040f0000000d0100002900000100021000390000000b0100002912af12360000040f0000000d01000029000001a0021000390000000a0100002912af124f0000040f0000000d01000029000002a002100039000000090100002912af124f0000040f000004ad010000410000000d03000029000004ad0230009c000000000301801900000040013002100000051b011001c7000012b00001042e0000000001000416000000000101004b000011c30000c13d0000009b01000039000000000101041a000005720000013d0000000002000416000000000202004b000011c30000c13d000000040230008a000001400220008c000011c30000413d0000000402100370000000000202043b000d00000002001d000004d00220009c000011c30000213d0015000d0000002d0000002402100370000000000202043b000c00000002001d000004ad0220009c000011c30000213d0014000c0000002d001300440000003d001200a40000003d000000e402100370000000000202043b000b00000002001d000004ad0220009c000011c30000213d0011000b0000002d001001040000003d0000003302000039000000000202041a000004d0022001970000000003000411000000000232004b0000067a0000c13d0000000d0200006b000009d10000c13d000004df01000041000003b60000013d0000000002000416000000000202004b000011c30
000c13d000000040230008a000000c00220008c000011c30000413d0000000402100370000000000202043b000d00000002001d000004d00220009c000011c30000213d0000003302000039000000000202041a000004d0032001970000000002000411000000000323004b000003890000613d0000000d0220006b000003b50000c13d0000002402100370000000000202043b000000000202004b000007f00000c13d0000004402100370000000000202043b000000000202004b000007f00000c13d0000006402100370000000000202043b000000000202004b000007f00000c13d0000051101000041000003b60000013d0000000002000416000000000202004b000011c30000c13d000000040230008a000000200220008c000011c30000413d0000000401100370000000000101043b00000000001004350000009a01000039000005630000013d0000000002000416000000000202004b000011c30000c13d000000040230008a000000200220008c000011c30000413d0000000401100370000000000101043b000d00000001001d000004d00110009c000011c30000213d0000003301000039000000000101041a000004d0021001970000000001000411000000000212004b000006c80000613d0000000d0110006b000006c80000613d0000051501000041000000800010043f0000050f01000041000012b1000104300000000002000416000000000202004b000011c30000c13d000000040230008a000000200220008c000011c30000413d0000000401100370000000000101043b000d00000001001d000004d00110009c000011c30000213d0000003301000039000000000101041a000004d0011001970000000002000411000000000121004b0000067a0000c13d0000000d0100002900000000001004350000009801000039000c00000001001d000000200010043f000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b0000000802100039000000000202041a000000000202004b000003e60000c13d0000000902100039000000000202041a000000000202004b000003e60000c13d0000000a01100039000000000101041a000000000101004b000009440000613d0000000d0100002900000000001004350000000c01000029000000200010043f000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d0000009b02000039000000000402041a0000002003400270000004ad03300197000000000701043b000000000107041a0000002005100270000004ad05500197000000000353004b0000000d030000390000000703002039000004ad04400197000004ad05100197000000000654004b000000040600003900000001060020390000000006760019000000000606041a0000ff000660019000000d3a0000613d0000000003730019000000000303041a0000ff000330019000000d3a0000613d0000009702000039000000000402041a000000000304004b0000054f0000613d000b00000007001d000004e203400041000000000303041a000004d0033001970000004001100270000004ad01100197000000000414004b00000e8e0000a13d000004e301100041000000000401041a000004d104400197000000000434019f000000000041041b000000000102041a000000000401004b000009cd0000613d000004e204100041000000000504041a000004d105500197000000000054041b000000010110008a000000000012041b0000000b01000029000000000101041a000a00000001001d00000000003004350000000c01000029000000200010043f000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d0000000a02000029000004e402200197000000000101043b000000000301041a000004e503300197000000000223019f000000000021041b0000000b020000290000000201200039000000000301041a0000000301200039000000000201041a000000400100043d0000004004100039000000000024043500000040020000390000000002210436000000f8033002100000000000320435000004e60310009c0000102e0000213d0000006003100039000000400030043f000004ad04000041000004ad0320009c000000000204801900000040022002100000000001010433000004ad0310009c00000000010480190000006001100210000000000121019f000000000200
0414000004ad0320009c0000000002048019000000c002200210000000000112019f000004d2011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b00000000001004350000009901000039000000200010043f0000000001000414000004ad0210009c000004ad01008041000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000000000201041a000a0100000000920000000a0220017f000000000021041b0000000b040000290000000801400039000000000301041a0000000901400039000000000201041a0000000a01400039000000000401041a000000400100043d0000006005100039000000000045043500000040041000390000000000240435000000600200003900000000022104360000000000320435000004e70310009c0000102e0000213d0000008003100039000000400030043f000004ad04000041000004ad0320009c000000000204801900000040022002100000000001010433000004ad0310009c00000000010480190000006001100210000000000121019f0000000002000414000004ad0320009c0000000002048019000000c002200210000000000112019f000004d2011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b00000000001004350000009a01000039000000200010043f0000000001000414000004ad0210009c000004ad01008041000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000000000201041a0000000a0220017f000000000021041b0000000d0100002900000000001004350000000c01000029000000200010043f000004ad030000410000000001000414000004ad0210009c0000000001038019000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000000000001041b0000000102100039000000000002041b0000000202100039000000000002041b0000000302100039000000000002041b0000000402100039000000000002041b0000000502100039000000000002041b0000000602100039000000000002041b0000000702100039000000000002041b0000000802100039000000000002041b0000000902100039000000000002041b0000000a02100039000000000002041b0000000b02100039000000000002041b0000000c02100039000000000002041b0000000d02100039000000000002041b0000000e02100039000000000002041b0000000f02100039000000000002041b0000001002100039000000000002041b0000001102100039000000000002041b0000001201100039000000000001041b0000000001000414000004ad0210009c000004ad01008041000000c001100210000004d2011001c70000800d020000390000000203000039000004e80400004100000dae0000013d0000000002000416000000000202004b000011c30000c13d000000040230008a000000200220008c000011c30000413d0000000401100370000000000101043b0000009702000039000000000302041a000000000331004b000011c30000813d0000000000200435000004e301100041000000000101041a000004d001100197000005730000013d0000000001000416000000000101004b000011c30000c13d0000006501000039000000000101041a000004d0021001970000000001000411000000000212004b000006bc0000c13d12af12740000040f0000000001000019000012b00001042e0000000001000416000000000101004b000011c30000c13d0000009701000039000b00000001001d000000000101041a000c00000001001d000004f20110009c0000102e0000213d0000000c010000290000000502100210000000bf01200039000000200300008a000000000131016f000004f20310009c0000102e0000213d000000400010043f0000000c03000029000000800030043f000000000303004b0000000003000019000008ec0000c13d000000800030043f00000020020000390000000002210436000000800300043d00000000003204350000004002100039000000000403004b000006b20000613d00000080040000390000000005000019000000200440003900000000060404330000000087060434000004ad077001970000000007720436000000000808043300000000a9080434000000000097043500000000070a043300000040092000390000000000790435000000400780003900000000070704330000006008200039000000000078043500000040066000390000000006060433000000800720003
9000000008606043400000000006704350000000006080433000004ed06600197000000a0072000390000000000670435000000c0022000390000000105500039000000000635004b000005230000413d000006b20000013d0000000001000416000000000101004b000011c30000c13d0000003301000039000000000101041a000004d0011001970000000002000411000000000121004b0000067a0000c13d0000009b01000039000000000201041a0000002003200270000004ad03300197000004ad0430009c000007b60000c13d000005160100004100000000001004350000001101000039000010310000013d0000000001000416000000000101004b000011c30000c13d0000009701000039000000000101041a000005730000013d0000000002000416000000000202004b000011c30000c13d000000040230008a000000200220008c000011c30000413d0000000401100370000000000101043b00000000001004350000009901000039000000200010043f0000004002000039000000000100001912af128f0000040f000000000101041a000000ff011001900000000001000019000000010100c039000005730000013d0000000001000416000000000101004b000011c30000c13d0000009b01000039000000000101041a0000002001100270000004ad01100197000000800010043f000004d401000041000012b00001042e0000000002000416000000000202004b000011c30000c13d000000040230008a000000400220008c000011c30000413d0000000402100370000000000202043b000d00000002001d000004d00220009c000011c30000213d0000002401100370000000000101043b000c00000001001d000004ad0110009c000011c30000213d0000003301000039000000000101041a000004d0011001970000000002000411000000000121004b0000067a0000c13d0000000d0100002900000000001004350000009801000039000b00000001001d000000200010043f000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b0000000802100039000000000202041a000000000202004b000005a80000c13d0000000902100039000000000202041a000000000202004b000005a80000c13d0000000a01100039000000000101041a000000000101004b000009440000613d0000000d0100002900000000001004350000000b01000029000000200010043f000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000701043b0000009b01000039000000000301041a000004ad04300197000000000207041a000004ad05200197000000000454004b000000040500003900000001050020390000002003300270000004ad033001970000002004200270000004ad044001970000000006750019000000000543004b0000000d050000390000000705002039000000000606041a0000ff000660019000000fcd0000613d0000000005750019000000000505041a0000ff000550019000000fcd0000613d000c00000007001d0000009701000039000000000401041a000000000304004b0000054f0000613d000004e203400041000000000303041a000004d0033001970000004002200270000004ad02200197000000000424004b00000e8e0000a13d000004e302200041000000000402041a000004d104400197000000000434019f000000000042041b000000000201041a000000000402004b000009cd0000613d000004e204200041000000000504041a000004d105500197000000000054041b000000010220008a000000000021041b0000000c01000029000000000101041a000a00000001001d00000000003004350000000b01000029000000200010043f000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d0000000a02000029000004e402200197000000000101043b000000000301041a000004e503300197000000000223019f000000000021041b0000000c020000290000000201200039000000000301041a0000000301200039000000000201041a000000400100043d0000004004100039000000000024043500000040020000390000000002210436000000f8033002100000000000320435000004e60310009c0000102e0000213d0000006003100039000000400030043f000004ad04000041000004ad0320009c0000000002048019000000400220021000
00000001010433000004ad0310009c00000000010480190000006001100210000000000121019f0000000002000414000004ad0320009c0000000002048019000000c002200210000000000112019f000004d2011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b00000000001004350000009901000039000000200010043f0000000001000414000004ad0210009c000004ad01008041000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000000000201041a000a0100000000920000000a0220017f000000000021041b0000000c040000290000000801400039000000000301041a0000000901400039000000000201041a0000000a01400039000000000401041a000000400100043d0000006005100039000000000045043500000040041000390000000000240435000000600200003900000000022104360000000000320435000004e70310009c0000102e0000213d0000008003100039000000400030043f000004ad04000041000004ad0320009c000000000204801900000040022002100000000001010433000004ad0310009c00000000010480190000006001100210000000000121019f0000000002000414000004ad0320009c0000000002048019000000c002200210000000000112019f000004d2011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b00000000001004350000009a01000039000000200010043f0000000001000414000004ad0210009c000004ad01008041000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000000000201041a0000000a0220017f000000000021041b0000000d0100002900000000001004350000000b01000029000000200010043f000004ad030000410000000001000414000004ad0210009c0000000001038019000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000011940000013d000004d901000041000000800010043f0000002001000039000000840010043f000000a40010043f0000051201000041000000c40010043f0000051301000041000012b1000104300000000001000416000000000101004b000011c30000c13d0000009701000039000b00000001001d000000000101041a000c00000001001d000004f20110009c0000102e0000213d0000000c010000290000000502100210000000bf01200039000000200300008a000000000131016f000004f20310009c0000102e0000213d000000400010043f0000000c03000029000000800030043f000000000303004b0000000003000019000009100000c13d000000800030043f00000020020000390000000002210436000000800300043d00000000003204350000004002100039000000000403004b000006b20000613d0000000004000019000000200880003900000000050804330000000065050434000004ad05500197000000000552043600000000060604330000000076060434000004f406600197000000000065043500000000050704330000004006200039000000000056043500000060022000390000000104400039000000000534004b000006a20000413d0000000002120049000004ad03000041000004ad0420009c0000000002038019000004ad0410009c000000000103801900000040011002100000006002200210000000000112019f000012b00001042e000004d901000041000000800010043f0000002001000039000000840010043f0000002901000039000000a40010043f000004f901000041000000c40010043f000004fa01000041000000e40010043f000004fb01000041000012b1000104300000000d0100002900000000001004350000009801000039000c00000001001d000000200010043f000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b0000000802100039000000000202041a000000000202004b000006e40000c13d0000000902100039000000000202041a000000000202004b000006e40000c13d0000000a01100039000000000101041a000000000101004b000009440000613d0000000d0100002900000000001004350000000c01000029000000200010043f000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004e1011001c7000080100200003912af12aa0000040f000000010220019000001
1c30000613d0000009b02000039000000000402041a0000002003400270000004ad03300197000000000701043b000000000107041a0000002005100270000004ad05500197000000000353004b0000000d030000390000000703002039000004ad04400197000004ad05100197000000000654004b000000040600003900000001060020390000000006760019000000000606041a0000ff000660019000000cc80000613d0000000003730019000000000303041a0000ff000330019000000cc80000613d0000009702000039000000000402041a000000000304004b0000054f0000613d000b00000007001d000004e203400041000000000303041a000004d0033001970000004001100270000004ad01100197000000000414004b00000e8e0000a13d000004e301100041000000000401041a000004d104400197000000000434019f000000000041041b000000000102041a000000000401004b000009cd0000613d000004e204100041000000000504041a000004d105500197000000000054041b000000010110008a000000000012041b0000000b01000029000000000101041a000a00000001001d00000000003004350000000c01000029000000200010043f000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d0000000a02000029000004e402200197000000000101043b000000000301041a000004e503300197000000000223019f000000000021041b0000000b020000290000000201200039000000000301041a0000000301200039000000000201041a000000400100043d0000004004100039000000000024043500000040020000390000000002210436000000f8033002100000000000320435000004e60310009c0000102e0000213d0000006003100039000000400030043f000004ad04000041000004ad0320009c000000000204801900000040022002100000000001010433000004ad0310009c00000000010480190000006001100210000000000121019f0000000002000414000004ad0320009c0000000002048019000000c002200210000000000112019f000004d2011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b00000000001004350000009901000039000000200010043f0000000001000414000004ad0210009c000004ad01008041000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000000000201041a000a0100000000920000000a0220017f000000000021041b0000000b040000290000000801400039000000000301041a0000000901400039000000000201041a0000000a01400039000000000401041a000000400100043d0000006005100039000000000045043500000040041000390000000000240435000000600200003900000000022104360000000000320435000004e70310009c0000102e0000213d0000008003100039000000400030043f000004ad04000041000004ad0320009c000000000204801900000040022002100000000001010433000004ad0310009c00000000010480190000006001100210000000000121019f0000000002000414000004ad0320009c0000000002048019000000c002200210000000000112019f000004d2011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b00000000001004350000009a01000039000000200010043f0000000001000414000004ad0210009c000004ad01008041000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000000000201041a0000000a0220017f000000000021041b0000000d0100002900000000001004350000000c01000029000000200010043f000004ad030000410000000001000414000004ad0210009c0000000001038019000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000011940000013d000004ef022001970000002003300210000004fc03300041000004ee04300197000000000224019f000000000021041b0000002001300270000000800010043f000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004fd011001c70000800d020000390000000103000039000004fe04000041000010190000013d00000000020004150000000f0220008a00000020022000c9000f00000000001d000c00000002001d000a00000003001d000004d50100004100000000
0010043900000000010004100000000400100443000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004d6011001c7000080020200003912af12aa0000040f0000000102200190000007fd0000613d000000000101043b000000000101004b0000094d0000c13d0000000a01000029000000ff0110018f000000010110008c000000000100001900000001010060390000000c020000290000000502200270000000000201001f000009500000c13d000001000100008a000000000200041a000000000112016f000000010300003900000001011001bf0000000b0200006b000002da0000c13d000002d60000013d0000008402100370000000000202043b000000000202004b000007fe0000c13d000000a401100370000000000101043b000004ed02100197000000000212004b000011c30000c13d000004ff0110009c000007fe0000813d0000051001000041000003b60000013d000000000001042f0000000d0100002900000000001004350000009801000039000c00000001001d000000200010043f000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b0000000802100039000000000202041a000000000202004b0000081a0000c13d0000000902100039000000000202041a000000000202004b0000081a0000c13d0000000a01100039000000000101041a000000000101004b000009440000613d0000000d0100002900000000001004350000000c01000029000000200010043f000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d0000009b06000039000000000306041a0000002002300270000004ad02200197000000000501043b000000000105041a0000002004100270000004ad04400197000000000242004b0000000d020000390000000702002039000004ad03300197000004ad04100197000000000343004b000000040300003900000001030020390000000003530019000000000303041a0000ff0003300190000b00000005001d00000e920000613d0000000002520019000000000202041a0000ff000220019000000e920000613d0000009702000039000000000402041a000000000304004b0000054f0000613d000004e203400041000000000303041a000004d0033001970000004001100270000004ad01100197000000000414004b00000e8e0000a13d000004e301100041000000000401041a000004d104400197000000000434019f000000000041041b000000000102041a000000000401004b000009cd0000613d000004e204100041000000000504041a000004d105500197000000000054041b000000010110008a000000000012041b0000000b01000029000000000101041a000a00000001001d00000000003004350000000c01000029000000200010043f000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d0000000a02000029000004e402200197000000000101043b000000000301041a000004e503300197000000000223019f000000000021041b0000000b020000290000000201200039000000000301041a0000000301200039000000000201041a000000400100043d0000004004100039000000000024043500000040020000390000000002210436000000f8033002100000000000320435000004e60310009c0000102e0000213d0000006003100039000000400030043f000004ad04000041000004ad0320009c000000000204801900000040022002100000000001010433000004ad0310009c00000000010480190000006001100210000000000121019f0000000002000414000004ad0320009c0000000002048019000000c002200210000000000112019f000004d2011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b00000000001004350000009901000039000000200010043f0000000001000414000004ad0210009c000004ad01008041000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000000000201041a000a0100000000920000000a0220017f000000000021041b0000000b040000290000000801400039000000000301041a0000000901400039000000000201041a0000000a01400039000000000401041a00000040010
0043d0000006005100039000000000045043500000040041000390000000000240435000000600200003900000000022104360000000000320435000004e70310009c0000102e0000213d0000008003100039000000400030043f000004ad04000041000004ad0320009c000000000204801900000040022002100000000001010433000004ad0310009c00000000010480190000006001100210000000000121019f0000000002000414000004ad0320009c0000000002048019000000c002200210000000000112019f000004d2011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b00000000001004350000009a01000039000000200010043f0000000001000414000004ad0210009c000004ad01008041000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000000000201041a0000000a0220017f000000000021041b0000000d0100002900000000001004350000000c01000029000000200010043f000004ad030000410000000001000414000004ad0210009c0000000001038019000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000011940000013d000004e60310009c0000102e0000213d00000000030000190000006004100039000000400040043f0000000004010436000000400500043d000004e60650009c0000102e0000213d0000006006500039000000400060043f000000400650003900000000000604350000002006500039000000000006043500000000000504350000000000540435000000400400043d000004f30540009c0000102e0000213d0000004005400039000000400050043f00000020054000390000000000050435000000000004043500000040051000390000000000450435000000a00430003900000000001404350000002003300039000000000123004b00000e150000813d000000400100043d000004e60410009c000008ef0000a13d0000102e0000013d000004f30310009c0000102e0000213d00000000030000190000004004100039000000400040043f0000000004010436000000400500043d000004f30650009c0000102e0000213d0000004006500039000000400060043f0000002006500039000000000006043500000000000504350000000000540435000000a00430003900000000001404350000002003300039000000000123004b00000daf0000813d000000400100043d000004f30410009c000009130000a13d0000102e0000013d0000000d0100002900000000001004350000009801000039000b00000001001d000000200010043f000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b0000000802100039000000000202041a000000000202004b000009920000c13d0000000902100039000000000202041a000000000202004b000009920000c13d0000000a01100039000000000101041a000000000101004b000009920000c13d000000400100043d0000051a020000410000000000210435000004ad02000041000004ad0310009c00000000010280190000004001100210000004e0011001c7000012b1000104300000000c010000290000000501100270000000000100001f000000400100043d0000006402100039000004d70300004100000000003204350000004402100039000004d803000041000000000032043500000024021000390000002e030000390000000000320435000004d9020000410000000000210435000000040210003900000020030000390000000000320435000004ad02000041000004ad0310009c00000000010280190000004001100210000004da011001c7000012b100010430000c00000003001d0000006501000039000000000201041a000004d102200197000000000021041b0000003301000039000000000201041a000004d103200197000000000363019f000000000031041b000004ad010000410000000003000414000004ad0430009c0000000003018019000000c001300210000004d2011001c7000004d0052001970000800d020000390000000303000039000004dc0400004112af12a50000040f0000000101200190000011c30000613d0000000c0100006b0000101c0000c13d000004db01000041000000000200041a000000000112016f000000000010041b0000000103000039000000400100043d0000000000310435000004ad020000410000000004000414000004ad0540009c0000000004028019000004ad0510009c000000000102801900000040011002
10000000c002400210000000000112019f000004dd011001c70000800d02000039000004de04000041000010190000013d0000000d0100002900000000001004350000000b01000029000000200010043f000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d0000009b02000039000900000002001d000000000302041a0000002002300270000004ad02200197000000000501043b000000000105041a0000002004100270000004ad04400197000000000242004b0000000d020000390000000702002039000004ad03300197000004ad04100197000000000343004b000000040300003900000001030020390000000003530019000000000303041a0000ff0003300190000a00000005001d0000101e0000613d0000000002520019000000000202041a0000ff00022001900000101e0000613d0000009702000039000000000402041a000000000304004b0000054f0000613d000004e203400041000000000303041a000004d0033001970000004001100270000004ad01100197000000000414004b00000e8e0000a13d000004e301100041000000000401041a000004d104400197000000000434019f000000000041041b000000000102041a000000000401004b000010fd0000c13d000005160100004100000000001004350000003101000039000010310000013d0000004402100370000000000202043b000000000202004b000009dd0000c13d0000006402100370000000000202043b000000000202004b000009dd0000c13d0000008402100370000000000202043b000000000202004b000003950000613d000000a402100370000000000202043b000000000202004b000009e80000c13d000000c402100370000000000202043b000004ed03200197000000000323004b000011c30000c13d000004ec0220009c000007fb0000a13d0000010402100370000000000202043b000004f403200197000000000323004b000011c30000c13d000005050220009c000009f30000213d0000012401100370000000000101043b000000000101004b000002b40000613d0000000d0100002900000000001004350000009801000039000a00000001001d000000200010043f000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b0000000802100039000000000202041a000000000202004b000010f70000c13d0000000902100039000000000202041a000000000202004b000010f70000c13d0000000a01100039000000000101041a000000000101004b000010f70000c13d00000001010003670000010402100370000000000302043b000004f402300197000000000232004b000011c30000c13d0000012401100370000000000201043b000000400100043d000000400410003900000000002404350000002002100039000000000032043500000040030000390000000000310435000004e60310009c0000102e0000213d0000006003100039000000400030043f000004ad04000041000004ad0320009c000000000204801900000040022002100000000001010433000004ad0310009c00000000010480190000006001100210000000000121019f0000000002000414000004ad0320009c0000000002048019000000c002200210000000000112019f000004d2011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000900000001001d00000000001004350000009901000039000800000001001d000000200010043f0000000001000414000004ad0210009c000004ad01008041000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000400200043d000000000101043b000000000101041a000000ff01100190000011cb0000c13d00000001010003670000004403100370000000000303043b0000006404100370000000000404043b0000008401100370000000000101043b00000060052000390000000000150435000000400120003900000000004104350000002001200039000000000031043500000060030000390000000000320435000004e70320009c0000102e0000213d0000008003200039000000400030043f000004ad04000041000004ad0310009c000000000104801900000040011002100000000002020433000004ad0320009c00000000020480190000006002200210000000000112019f0000000002000414000004ad0320009c0000000002048019000000c002200210000000000112019f0
00004d2011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000700000001001d00000000001004350000009a01000039000600000001001d000000200010043f0000000001000414000004ad0210009c000004ad01008041000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000000000101041a000000ff01100190000011c50000c13d0000009703000039000000000203041a000004ad01200197000004f20420009c0000102e0000213d0000000104200039000000000043041b0000000000300435000004e302200041000000000302041a000004d1033001970000000d033001af000000000032041b000000400200043d000004e70320009c0000102e0000213d0000008003200039000000400030043f00000040032000390000000b0400002900000000004304350000000107000039000000000372043600000000000304350000000008000031000001040380008a0000050804000041000000400530008c000000000500001900000000050440190000050803300197000000000603004b000000000400a019000005080330009c000000000405c019000000000304004b000011c30000c13d000000400300043d000004f30430009c0000102e0000213d0000004004300039000000400040043f00000001060003670000010404600370000000000404043b000004f405400197000000000545004b000011c30000c13d00000000044304360000012405600370000000000505043b000000000054043500000060042000390000000000340435000000400400043d000004f30340009c0000102e0000213d0000004003400039000000400030043f000000200340003900000000000304350000000000040435000000400300043d000004e70530009c0000102e0000213d0000008005300039000000400050043f00000060053000390000000000450435000000400430003900000000000404350000002004300039000000000004043500000000000304350000009b04000039000000000404041a000000400500043d000004f80950009c0000102e0000213d000000a009500039000000400090043f00000040095000390000000c0a0000290000000000a90435000000440880008a0000050809000041000000600a80008c000000000a000019000000000a0940190000050808800197000000000b08004b000000000900a019000005080880009c00000000090ac01900000000077504360000000000070435000000000709004b000011c30000c13d000000400700043d000004e60870009c0000102e0000213d0000006008700039000000400080043f0000004408600370000000000808043b00000000088704360000006409600370000000000909043b00000000009804350000008408600370000000000808043b0000004009700039000000000089043500000060085000390000000000780435000000400700043d000004f30870009c0000102e0000213d0000004008700039000000400080043f000000a408600370000000000808043b0000000008870436000000c406600370000000000606043b000004ed09600197000000000969004b000011c30000c13d000000000068043500000080065000390000000000760435000000400600043d000004e60760009c0000102e0000213d0000006007600039000000400070043f00000040076000390000000000070435000000200760003900000000000704350000000000060435000000400800043d000004f30780009c0000102e0000213d0000004007800039000000400070043f000000200780003900000000000704350000000000080435000000400700043d000004f80970009c0000102e0000213d000000a009700039000000400090043f000000800970003900000000008904350000006008700039000000000068043500000040067000390000000000060435000000200670003900000000000604350000000000070435000000400600043d000c00000006001d000005090660009c0000102e0000213d0000000c08000029000000e006800039000000400060043f000000c006800039000b00000006001d0000000000760435000000a006800039000500000006001d00000000005604350000008005800039000400000005001d00000000003504350000006003800039000300000003001d00000000002304350000004002800039000200000002001d0000000000120435000004ad0140019700000000021804360000002001400270000004ad01100197000100000002001d00000000001204350000000d0100002900000000001004350000000a01000029000000200010043f000004ad030000410000000001000414000004ad0210009c0000
000001038019000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d0000000c020000290000000002020433000004ad02200197000000000101043b000000000301041a0000050a03300197000000000223019f000000010300002900000000030304330000002003300210000004ee03300197000000000232019f000000020300002900000000030304330000004003300210000004e403300197000000000232019f000000000021041b0000000102100039000000000302041a0000050b03300197000000030400002900000000040404330000000065040434000000000505004b000000010330c1bf0000000005060433000000000505004b000001000330c1bf000000400540003900000000050504330000001005500210000004eb05500197000000000353019f000000000032041b00000060024000390000000203100039000d010000000092000000000403041a0000000d0440017f00000000020204330000000052020434000000f802200270000000000224019f000000000023041b00000000020504330000000303100039000000000023041b0000000402100039000000000302041a0000050b03300197000000040400002900000000040404330000000065040434000000000505004b000000010330c1bf0000000005060433000000000505004b000001000330c1bf000000400540003900000000050504330000001005500210000004eb05500197000000000353019f000000000032041b0000000502100039000000000302041a0000000d0330017f000000600440003900000000040404330000000054040434000000f804400270000000000343019f000000000032041b00000000020504330000000603100039000000000023041b0000000702100039000000000302041a0000050b03300197000000050400002900000000040404330000000065040434000000000505004b000000010330c1bf0000000005060433000000000505004b000001000330c1bf000000400540003900000000050504330000001005500210000004eb05500197000000000353019f000000000032041b0000006002400039000000000202043300000008031000390000000065020434000000000053041b00000000030604330000000905100039000000000035041b000000400220003900000000020204330000000a03100039000000000023041b000000800240003900000000020204330000000b031000390000000042020434000000000023041b000000000204043300000080022002700000000c03100039000000000403041a000004ed04400197000000000224019f000000000023041b0000000d02100039000000000302041a0000050b033001970000000b0400002900000000040404330000000065040434000000000505004b000000010330c1bf0000000005060433000000000505004b000001000330c1bf000000400540003900000000050504330000001005500210000004eb05500197000000000353019f000000000032041b000000600240003900000000020204330000000e031000390000000065020434000000000053041b00000000030604330000000f05100039000000000035041b000000400220003900000000020204330000001003100039000000000023041b0000008002400039000000000202043300000011031000390000000042020434000000000023041b000000120110003900000000020404330000008002200270000000000301041a000004ed03300197000000000223019f000000000021041b000000090100002900000000001004350000000801000029000000200010043f0000000001000414000004ad0210009c000004ad01008041000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000000000201041a0000000d0220017f00000001022001bf000000000021041b000000070100002900000000001004350000000601000029000000200010043f000004ad030000410000000001000414000004ad0210009c0000000001038019000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000000000201041a0000000d0220017f00000001022001bf000000000021041b000004d001000041000000150510017f000004ad01000041000000140310017f0000001104000029000000100200002900000012060000290000001307000029000000400100043d00000000083104360000000103000367000000000973034f000000000909043b00000000009804350000002008700039000000000883034f000000000808043b000000400910003900000000008904350000004
007700039000000000773034f000000000707043b00000060081000390000000000780435000000000763034f000000000707043b000000800810003900000000007804350000002006600039000000000663034f000000000606043b000004ed07600197000000000767004b000011c30000c13d000004ad04400197000000c0071000390000000000470435000000a0041000390000000000640435000000000423034f000000000404043b000004f406400197000000000646004b000011c30000c13d000000e00610003900000000004604350000002002200039000000000223034f000000000202043b00000100031000390000000000230435000004ad020000410000000003000414000004ad0430009c0000000003028019000004ad0410009c00000000010280190000004001100210000000c002300210000000000112019f0000050c011001c70000800d0200003900000002030000390000050d04000041000010190000013d0000000103700039000000000154004b00000c7c0000a13d0000000401700039000000000401041a000001000500008a000000000454016f000000000603041a000000ff06600190000000010440c1bf000000000041041b000004e906000041000000000464016f000000000603041a0000ff0006600190000001000440c1bf000000000041041b000004ea04400197000000000603041a000004eb06600197000000000464019f000000000041041b0000000501700039000000000401041a000000000454016f0000000205700039000000000505041a000000ff0550018f000000000454019f000000000041041b0000000301700039000000000101041a0000000604700039000000000014041b000000000102041a000004ad01100197000000000407041a000004f604400197000000000114019f000000000017041b000000000403041a000001000100008a000000000414016f00000001044001bf000000000043041b0000000703700039000000000402041a0000002004400270000004ad04400197000000000507041a0000002005500270000004ad05500197000000000445004b00000cba0000813d0000000d04700039000000000504041a000000000515016f000000000603041a000000ff06600190000000010550c1bf000000000054041b000004e906000041000000000565016f000000000603041a0000ff0006600190000001000550c1bf000000000054041b000004ea05500197000000000603041a000004eb06600197000000000565019f000000000054041b0000000804700039000000000404041a0000000e05700039000000000045041b0000000904700039000000000404041a0000000f05700039000000000045041b0000000a04700039000000000404041a0000001005700039000000000045041b0000000b04700039000000000404041a0000001105700039000000000045041b0000000c04700039000000000404041a000004ec044001970000001205700039000000000605041a000004ed06600197000000000446019f000000000045041b000000000202041a000004ee02200197000000000407041a000004ef04400197000000000224019f000000000027041b000000000203041a000000000112016f00000001011001bf000000000013041b000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004d2011001c70000800d0200003900000002030000390000051904000041000010180000013d0000000103700039000000000154004b00000cf00000a13d0000000401700039000000000401041a000001000500008a000000000454016f000000000603041a000000ff06600190000000010440c1bf000000000041041b000004e906000041000000000464016f000000000603041a0000ff0006600190000001000440c1bf000000000041041b000004ea04400197000000000603041a000004eb06600197000000000464019f000000000041041b0000000501700039000000000401041a000000000454016f0000000205700039000000000505041a000000ff0550018f000000000454019f000000000041041b0000000301700039000000000101041a0000000604700039000000000014041b000000000102041a000004ad01100197000000000407041a000004f604400197000000000114019f000000000017041b000000000403041a000001000100008a000000000414016f000000000043041b0000000703700039000000000402041a0000002004400270000004ad04400197000000000507041a0000002005500270000004ad05500197000000000445004b00000d2d0000813d0000000d04700039000000000504041a000000000515016f000000000603041a000000ff06600190000000010550c1bf000000000054041b000004e906
000041000000000565016f000000000603041a0000ff0006600190000001000550c1bf000000000054041b000004ea05500197000000000603041a000004eb06600197000000000565019f000000000054041b0000000804700039000000000404041a0000000e05700039000000000045041b0000000904700039000000000404041a0000000f05700039000000000045041b0000000a04700039000000000404041a0000001005700039000000000045041b0000000b04700039000000000404041a0000001105700039000000000045041b0000000c04700039000000000404041a000004ec044001970000001205700039000000000605041a000004ed06600197000000000446019f000000000045041b000000000202041a000004ee02200197000000000407041a000004ef04400197000000000224019f000000000027041b000000000203041a000000000112016f000000000013041b000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004d2011001c70000800d0200003900000002030000390000050304000041000010180000013d0000000103700039000000000154004b00000d620000a13d0000000401700039000000000401041a000001000500008a000000000454016f000000000603041a000000ff06600190000000010440c1bf000000000041041b000004e906000041000000000464016f000000000603041a0000ff0006600190000001000440c1bf000000000041041b000004ea04400197000000000603041a000004eb06600197000000000464019f000000000041041b0000000501700039000000000401041a000000000454016f0000000205700039000000000505041a000000ff0550018f000000000454019f000000000041041b0000000301700039000000000101041a0000000604700039000000000014041b000000000102041a000004ad01100197000000000407041a000004f604400197000000000114019f000000000017041b000000000403041a000004db01000041000000000414016f00000100044001bf000000000043041b0000000703700039000000000402041a0000002004400270000004ad04400197000000000507041a0000002005500270000004ad05500197000000000445004b00000da10000813d0000000d04700039000000000504041a000001000600008a000000000565016f000000000603041a000000ff06600190000000010550c1bf000000000054041b000004e906000041000000000565016f000000000603041a0000ff0006600190000001000550c1bf000000000054041b000004ea05500197000000000603041a000004eb06600197000000000565019f000000000054041b0000000804700039000000000404041a0000000e05700039000000000045041b0000000904700039000000000404041a0000000f05700039000000000045041b0000000a04700039000000000404041a0000001005700039000000000045041b0000000b04700039000000000404041a0000001105700039000000000045041b0000000c04700039000000000404041a000004ec044001970000001205700039000000000605041a000004ed06600197000000000446019f000000000045041b000000000202041a000004ee02200197000000000407041a000004ef04400197000000000224019f000000000027041b000000000203041a000000000112016f00000100011001bf000000000013041b000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004d2011001c70000800d0200003900000002030000390000051404000041000010180000013d0000009b01000039000000000101041a000704ad0010019b000900980000003d000880100000003d0000000007000019000600000000001d000a00000008001d00000dbb0000013d00000001077000390000000c0170006c000010f40000813d0000000b01000029000000000101041a000000000171004b00000e8e0000a13d000d00000007001d000004e301700041000000000101041a000004d00110019700000000001004350000000901000029000000200010043f0000000001000414000004ad0210009c000004ad01008041000000c001100210000004e1011001c7000000080200002912af12aa0000040f0000000a080000290000000102200190000011c30000613d000000000101043b000000000201041a000004ad02200197000000070220006b00000004020000390000000102002039000000400300043d000004e70430009c0000000d070000290000102e0000213d00000000051200190000008001300039000000400010043f000000000205041a0000001001200270000004ad0410019700000040013000390000000000410435000000ff04200
1900000000004000019000000010400c03900000000044304360000ff00022001900000000002000019000000010200c0390000000000240435000000400200043d000004f30620009c0000102e0000213d0000004006200039000000400060043f0000000106500039000000000606041a000000f80660021000000000066204360000000205500039000000000505041a0000000000560435000000600530003900000000002504350000000003030433000000000303004b00000db80000613d0000000003040433000000000303004b00000db80000c13d000000400300043d000004f30430009c0000102e0000213d00000000010104330000004004300039000000400040043f00000020043000390000000000240435000004ad011001970000000000130435000000800100043d000000060110006c00000e8e0000a13d00000006020000290000000501200210000000a0011000390000000000310435000000800100043d000000000121004b00000e8e0000a13d0000000601000029000600010010003d00000db80000013d0000009b01000039000000000101041a0000002001100270000804ad0010019b000a00980000003d000980100000003d0000000008000019000700000000001d00000e230000013d0000000701000029000700010010003d00000001088000390000000c0180006c000010fa0000813d0000000b01000029000000000101041a000000000181004b00000e8e0000a13d000d00000008001d000004e301800041000000000101041a000004d00110019700000000001004350000000a01000029000000200010043f0000000001000414000004ad0210009c000004ad01008041000000c001100210000004e1011001c7000000090200002912af12aa0000040f0000000102200190000011c30000613d000000000101043b000000000201041a0000002002200270000004ad02200197000000080220006b0000000d030000390000000703002039000000400200043d000004f80420009c0000000d080000290000102e0000213d0000000004130019000000a001200039000000400010043f000000000504041a0000001001500270000004ad0310019700000040012000390000000000310435000000ff035001900000000003000019000000010300c03900000000033204360000ff00055001900000000005000019000000010500c0390000000000530435000000400500043d000004e60650009c0000102e0000213d0000006006500039000000400060043f0000000106400039000000000606041a00000000066504360000000207400039000000000707041a00000000007604350000000306400039000000000606041a0000004007500039000000000067043500000060062000390000000000560435000000400500043d000004f30750009c0000102e0000213d0000004007500039000000400070043f0000000407400039000000000707041a00000000077504360000000504400039000000000404041a00000080044002100000000000470435000000800420003900000000005404350000000002020433000000000202004b00000e200000613d0000000002030433000000000202004b00000e200000c13d000000400200043d000004e60320009c0000102e0000213d000000000101043300000000030604330000006004200039000000400040043f0000004004200039000000000054043500000020042000390000000000340435000004ad011001970000000000120435000000800100043d000000070110006c00000e8e0000a13d00000007030000290000000501300210000000a0011000390000000000210435000000800100043d000000000131004b00000e1e0000213d000005160100004100000000001004350000003201000039000010310000013d000a00000006001d0000000a0150003900000009025000390000000803500039000700000003001d000000000303041a000800000002001d000000000202041a000900000001001d000000000401041a000000400100043d0000006005100039000000000045043500000040041000390000000000240435000000200210003900000000003204350000006003000039000c00000003001d0000000000310435000004e70310009c0000102e0000213d0000008003100039000000400030043f000004ad04000041000004ad0320009c000000000204801900000040022002100000000001010433000004ad0310009c00000000010480190000006001100210000000000121019f0000000002000414000004ad0320009c0000000002048019000000c002200210000000000112019f000004d2011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b00000000001004350000009a01000039000600000001001d
000000200010043f0000000001000414000004ad0210009c000004ad01008041000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000000000201041a0005010000000092000000050220017f000000000021041b00000001010003670000002402100370000000000302043b0000004402100370000000000202043b0000006401100370000000000401043b000000400100043d0000006005100039000000000045043500000040041000390000000000240435000000200210003900000000003204350000000c030000290000000000310435000004e70310009c0000102e0000213d0000008003100039000000400030043f000004ad04000041000004ad0320009c000000000204801900000040022002100000000001010433000004ad0310009c00000000010480190000006001100210000000000121019f0000000002000414000004ad0320009c0000000002048019000000c002200210000000000112019f000004d2011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000c00000001001d00000000001004350000000601000029000000200010043f0000000001000414000004ad0210009c000004ad01008041000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000000000101041a000000ff01100190000011c50000c13d0000000c0100002900000000001004350000000601000029000000200010043f000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000000000201041a000000050220017f00000001022001bf000000000021041b0000000b030000290000000b013000390000000a02000029000000000202041a0000002002200270000004ad02200197000000000303041a0000002003300270000004ad03300197000000000223004b00000f590000813d0000000b060000290000000d02600039000000000302041a000000050330017f0000000704600039000000000504041a000000ff05500190000000010330c1bf000000000032041b000004e905000041000000000353016f000000000504041a0000ff0005500190000001000330c1bf000000000032041b000004ea03300197000000000404041a000004eb04400197000000000343019f000000000032041b0000000702000029000000000202041a0000000e03600039000000000023041b0000000802000029000000000202041a0000000f03600039000000000023041b0000000902000029000000000202041a0000001003600039000000000023041b000000000201041a0000001103600039000000000023041b0000000c02600039000000000202041a000004ec022001970000001203600039000000000403041a000004ed04400197000000000224019f000000000023041b0000000a02000029000000000202041a000004ee02200197000000000306041a000004ef03300197000000000223019f000000000026041b00000001060003670000002402600370000000000202043b0000000703000029000000000023041b0000004403600370000000000303043b0000000804000029000000000034041b0000006404600370000000000404043b0000000905000029000000000045041b0000008405600370000000000505043b000000000051041b000000a401600370000000000101043b000004ed06100197000000000616004b000011c30000c13d0000000b060000290000000c06600039000000000706041a000004ed077001970000008008100270000000000787019f000000000076041b000000400600043d000000800760003900000000001704350000006001600039000000000051043500000040016000390000000000410435000000200160003900000000003104350000000000260435000004ad010000410000000002000414000004ad0320009c0000000002018019000004ad0360009c00000000060180190000004001600210000000c002200210000000000112019f00000500011001c70000800d0200003900000002030000390000050104000041000010180000013d0000000101700039000000000343004b00000fb60000a13d0000000a070000290000000403700039000000000403041a000001000500008a000000000454016f000000000601041a000000ff06600190000000010440c1bf000000000043041b000004e906000041000000000464016f000000000601041a0000ff0006600190000001000440c1bf000000000043041b000
004ea04400197000000000601041a000004eb06600197000000000464019f000000000043041b0000000503700039000000000403041a000000000454016f0000000205700039000000000505041a000000ff0550018f000000000454019f000000000043041b0000000303700039000000000303041a0000000604700039000000000034041b000000000202041a000004ad02200197000000000307041a000004f603300197000000000223019f000000000027041b0000000c040000290000001002400210000004eb02200197000000000301041a000004f003300197000000000223019f000000000021041b000000400100043d0000000000410435000004ad020000410000000003000414000004ad0430009c0000000003028019000004ad0410009c00000000010280190000004001100210000000c002300210000000000112019f000004dd011001c70000800d0200003900000002030000390000050204000041000010180000013d00000000060700190000000702700039000000000343004b000010020000a13d0000000d03600039000000000403041a000001000500008a000000000454016f000000000502041a000000ff05500190000000010440c1bf000000000043041b000004e905000041000000000454016f000000000502041a0000ff0005500190000001000440c1bf000000000043041b000004ea04400197000000000502041a000004eb05500197000000000454019f000000000043041b0000000803600039000000000303041a0000000e04600039000000000034041b0000000903600039000000000303041a0000000f04600039000000000034041b0000000a03600039000000000303041a0000001004600039000000000034041b0000000b03600039000000000303041a0000001104600039000000000034041b0000000c03600039000000000303041a000004ec033001970000001204600039000000000504041a000004ed05500197000000000335019f000000000034041b000000000101041a000004ee01100197000000000306041a000004ef03300197000000000113019f000000000016041b0000000c040000290000001001400210000004eb01100197000000000302041a000004f003300197000000000113019f000000000012041b000000400100043d0000000000410435000004ad020000410000000003000414000004ad0430009c0000000003028019000004ad0410009c00000000010280190000004001100210000000c002300210000000000112019f000004dd011001c70000800d020000390000000203000039000004f1040000410000000d0500002912af12a50000040f0000000101200190000011c30000613d0000000001000019000012b00001042e00000003015000390000000202500039000700000002001d000000000302041a000800000001001d000000000201041a000000400100043d000000400410003900000000002404350000004002000039000b00000002001d0000000002210436000000f8033002100000000000320435000004e60310009c000010340000a13d000005160100004100000000001004350000004101000039000000040010043f0000051701000041000012b1000104300000006003100039000000400030043f000004ad04000041000004ad0320009c000000000204801900000040022002100000000001010433000004ad0310009c00000000010480190000006001100210000000000121019f0000000002000414000004ad0320009c0000000002048019000000c002200210000000000112019f000004d2011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b00000000001004350000009901000039000600000001001d000000200010043f0000000001000414000004ad0210009c000004ad01008041000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000000000201041a0005010000000092000000050220017f000000000021041b00000001010003670000002402100370000000000302043b000004f402300197000000000232004b000011c30000c13d0000004401100370000000000201043b000000400100043d00000040041000390000000000240435000000200210003900000000003204350000000b030000290000000000310435000004e60310009c0000102e0000213d0000006003100039000000400030043f000004ad04000041000004ad0320009c000000000204801900000040022002100000000001010433000004ad0310009c00000000010480190000006001100210000000000121019f0000000002000414000004ad0320009c0000000002048019000000c002200210000000000112019f000004
d2011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000b00000001001d00000000001004350000000601000029000000200010043f0000000001000414000004ad0210009c000004ad01008041000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000000000101041a000000ff01100190000011c80000c13d0000000b0100002900000000001004350000000601000029000000200010043f000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000000000201041a000000050220017f00000001022001bf000000000021041b0000000901000029000000000101041a000004ad011001970000000a02000029000000000202041a000004ad02200197000000000112004b000010d60000813d0000000a050000290000000401500039000000000201041a000000050220017f0000000103500039000000000403041a000000ff04400190000000010220c1bf000000000021041b000004e904000041000000000242016f000000000403041a0000ff0004400190000001000220c1bf000000000021041b000004ea02200197000000000303041a000004eb03300197000000000232019f000000000021041b0000000501500039000000000201041a000000050220017f0000000703000029000000000303041a000000ff0330018f000000000232019f000000000021041b0000000801000029000000000101041a0000000602500039000000000012041b0000000901000029000000000101041a000004ad01100197000000000205041a000004f602200197000000000112019f000000000015041b0000000703000029000000000103041a000000050110017f0000000c04000029000000f802400270000000000121019f000000000013041b00000044010000390000000101100367000000000101043b0000000802000029000000000012041b000000400200043d000000200320003900000000001304350000000000420435000004ad010000410000000003000414000004ad0430009c0000000003018019000004ad0420009c00000000020180190000004001200210000000c002300210000000000112019f000004e1011001c70000800d020000390000000203000039000004f704000041000010180000013d000000400100043d0000000603000029000006990000013d000000400100043d0000050602000041000009460000013d000000400100043d0000000703000029000005190000013d000004e204100041000000000504041a000004d105500197000000000054041b000000010110008a000000000012041b0000000a01000029000000000101041a000c00000001001d00000000003004350000000b01000029000000200010043f000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d0000000c02000029000004e402200197000000000101043b000000000301041a000004e503300197000000000223019f000000000021041b0000000a020000290000000201200039000000000301041a0000000301200039000000000201041a000000400100043d0000004004100039000000000024043500000040020000390000000002210436000000f8033002100000000000320435000004e60310009c0000102e0000213d0000006003100039000000400030043f000004ad04000041000004ad0320009c000000000204801900000040022002100000000001010433000004ad0310009c00000000010480190000006001100210000000000121019f0000000002000414000004ad0320009c0000000002048019000000c002200210000000000112019f000004d2011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b00000000001004350000009901000039000000200010043f0000000001000414000004ad0210009c000004ad01008041000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000000000201041a000c0100000000920000000c0220017f000000000021041b0000000a040000290000000801400039000000000301041a0000000901400039000000000201041a0000000a01400039000000000401041a000000400100043d0000006005100039000000000045043500000040041000390000000000240435000000600
200003900000000022104360000000000320435000004e70310009c0000102e0000213d0000008003100039000000400030043f000004ad04000041000004ad0320009c000000000204801900000040022002100000000001010433000004ad0310009c00000000010480190000006001100210000000000121019f0000000002000414000004ad0320009c0000000002048019000000c002200210000000000112019f000004d2011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b00000000001004350000009a01000039000000200010043f0000000001000414000004ad0210009c000004ad01008041000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000000000201041a0000000c0220017f000000000021041b0000000d0100002900000000001004350000000b01000029000000200010043f000004ad030000410000000001000414000004ad0210009c0000000001038019000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000000000001041b0000000102100039000000000002041b0000000202100039000000000002041b0000000302100039000000000002041b0000000402100039000000000002041b0000000502100039000000000002041b0000000602100039000000000002041b0000000702100039000000000002041b0000000802100039000000000002041b0000000902100039000000000002041b0000000a02100039000000000002041b0000000b02100039000000000002041b0000000c02100039000000000002041b0000000d02100039000000000002041b0000000e02100039000000000002041b0000000f02100039000000000002041b0000001002100039000000000002041b0000001102100039000000000002041b0000001201100039000000000001041b0000000001000414000004ad0210009c000004ad01008041000000c001100210000004d2011001c70000800d020000390000000203000039000004e804000041000010180000013d0000000001000019000012b100010430000000400100043d0000050702000041000009460000013d000000400100043d000004f502000041000009460000013d000004f5010000410000000000120435000004ad01000041000004ad0320009c00000000020180190000004001200210000004e0011001c7000012b1000104300000000002010019000000400100043d0000051c0310009c000011f60000813d0000008003100039000000400030043f000000000302041a0000001004300270000004ad04400197000000400510003900000000004504350000ff00043001900000000004000019000000010400c03900000020051000390000000000450435000000ff033001900000000003000019000000010300c0390000000000310435000000400300043d000004f30430009c000011f60000213d0000004004300039000000400040043f0000000104200039000000000404041a000000f80440021000000000044304360000000202200039000000000202041a000000000024043500000060021000390000000000320435000000000001042d000005160100004100000000001004350000004101000039000000040010043f0000051701000041000012b1000104300000000002010019000000400100043d0000051d0310009c000012300000813d000000a003100039000000400030043f000000000302041a0000001004300270000004ad04400197000000400510003900000000004504350000ff00043001900000000004000019000000010400c03900000020051000390000000000450435000000ff033001900000000003000019000000010300c0390000000000310435000000400300043d000004e60430009c000012300000213d0000006004300039000000400040043f0000000104200039000000000404041a00000000044304360000000205200039000000000505041a00000000005404350000000304200039000000000404041a0000004005300039000000000045043500000060041000390000000000340435000000400300043d000004f30430009c000012300000213d0000004004300039000000400040043f0000000404200039000000000404041a00000000044304360000000502200039000000000202041a0000008002200210000000000024043500000080021000390000000000320435000000000001042d000005160100004100000000001004350000004101000039000000040010043f0000051701000041000012b1000104300000000043010434000000000303004b0000000003000019000000010300
c03900000000033204360000000004040433000000000404004b0000000004000019000000010400c039000000000043043500000040031000390000000003030433000004ad0330019700000040042000390000000000340435000000600110003900000000010104330000000031010434000004f40110019700000060042000390000000000140435000000800120003900000000020304330000000000210435000000000001042d0000000043010434000000000303004b0000000003000019000000010300c03900000000033204360000000004040433000000000404004b0000000004000019000000010400c039000000000043043500000040031000390000000003030433000004ad03300197000000400420003900000000003404350000006003100039000000000303043300000060042000390000000065030434000000000054043500000000040604330000008005200039000000000045043500000040033000390000000003030433000000a004200039000000000034043500000080011000390000000001010433000000c00320003900000000410104340000000000130435000000e0012000390000000002040433000004ed022001970000000000210435000000000001042d0000006502000039000000000302041a000004d103300197000000000032041b000004d0061001970000003301000039000000000201041a000004d103200197000000000363019f000000000031041b000004ad010000410000000003000414000004ad0430009c0000000003018019000000c001300210000004d2011001c7000004d0052001970000800d020000390000000303000039000004dc0400004112af12a50000040f00000001012001900000128c0000613d000000000001042d0000000001000019000012b100010430000000000001042f000004ad03000041000004ad0410009c00000000010380190000004001100210000004ad0420009c00000000020380190000006002200210000000000112019f0000000002000414000004ad0420009c0000000002038019000000c002200210000000000112019f000004d2011001c7000080100200003912af12aa0000040f0000000102200190000012a30000613d000000000101043b000000000001042d0000000001000019000012b100010430000012a8002104210000000102000039000000000001042d0000000002000019000000000001042d000012ad002104230000000102000039000000000001042d0000000002000019000000000001042d000012af00000432000012b00001042e000012b1000104300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffff000000020000000000000000000000000000004000000100000000000000000000000000000000000000000000000000000000000000000000000000740211cd0000000000000000000000000000000000000000000000000000000099624cb500000000000000000000000000000000000000000000000000000000d113233100000000000000000000000000000000000000000000000000000000e8de12de00000000000000000000000000000000000000000000000000000000e8de12df00000000000000000000000000000000000000000000000000000000f2fde38b00000000000000000000000000000000000000000000000000000000d113233200000000000000000000000000000000000000000000000000000000e30c39780000000000000000000000000000000000000000000000000000000099624cb6000000000000000000000000000000000000000000000000000000009ec3f92700000000000000000000000000000000000000000000000000000000c4d66de80000000000000000000000000000000000000000000000000000000094ca304a0000000000000000000000000000000000000000000000000000000094ca304b0000000000000000000000000000000000000000000000000000000095570d1200000000000000000000000000000000000000000000000000000000994057ef00000000000000000000000000000000000000000000000000000000740211ce0000000000000000000000000000000000000000000000000000000079ba5097000000000000000000000000000000000000000000000000000000008da5cb5b000000000000000000000000000000000000000000000000000000003ea053ea000000000000000000000000000000000000000000000000000000005139839b000000000000000000000000000000000000000000000000000000005139839c000000000000000000000000000000000000000000000000000000005875da2b00000000000000000000000000000000000000000000000000000000715018a
6000000000000000000000000000000000000000000000000000000003ea053eb00000000000000000000000000000000000000000000000000000000426cb7660000000000000000000000000000000000000000000000000000000047b4a7a60000000000000000000000000000000000000000000000000000000029092d0d0000000000000000000000000000000000000000000000000000000029092d0e000000000000000000000000000000000000000000000000000000002f9c8f0d000000000000000000000000000000000000000000000000000000003d1f16d40000000000000000000000000000000000000000000000000000000008dc336000000000000000000000000000000000000000000000000000000000189a5a17000000000000000000000000000000000000000000000000000000001c5a9d9c000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000038d16b8cac22d99fc7c124b9cd0de2d3fa1faef420bfe791d8c362d765e2270000000000000000000000000000000000000000200000008000000000000000001806aa1896bbf26568e884a7374b41e002500962caba6a15023a8d90e8508b830200000200000000000000000000000000000024000000000000000000000000647920696e697469616c697a6564000000000000000000000000000000000000496e697469616c697a61626c653a20636f6e747261637420697320616c72656108c379a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000084000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e002000000000000000000000000000000000000200000000000000000000000007f26b83ff96e1f2b6a682f133852f6798a09c465da95921460cefb3847402498c7eabfa90000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040000000000000000000000000200000000000000000000000000000000000040000000000000000000000000354a83ed9988f79f6038d4c7a7dadbad8af32f4ad6df893e0e5807a1b1944ff8354a83ed9988f79f6038d4c7a7dadbad8af32f4ad6df893e0e5807a1b1944ff90000000000000000000000000000000000000000ffffffff0000000000000000ffffffffffffffffffffffffffffffffffffffff00000000ffffffffffffffff000000000000000000000000000000000000000000000000ffffffffffffff9f000000000000000000000000000000000000000000000000ffffffffffffff7f1629bfc36423a1b4749d3fe1d6970b9d32d42bbee47dd5540670696ab6b9a4adffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0001ffffffffffffffffffffffffffffffffffffffffffffffffffff0000000001010000000000000000000000000000000000000000000000000000ffffffff000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffff00000000ffffffffffffffffffffffffffffffffffffffffffffffff00000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00000000ffffc810fe33ecde832d7a0b696cf1738e8ec84938965b214bd4e1195ef8d6e8dd26000000000000000000000000000000000000000000000000ffffffffffffffff000000000000000000000000000000000000000000000000ffffffffffffffbfff00000000000000000000000000000000000000000000000000000000000000564da7df00000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffff00000000205a2493904fd7ff8685b368c0a13560ddcc577aac288385c5834c57996c9ad2000000000000000000000000000000000000000000000000ffffffffffffff5f4f776e61626c6532537465703a2063616c6c6572206973206e6f7420746865206e6577206f776e65720000000000000000000000000000000000000000000000000000000000000000000000000000000000008400000080000000000000000000000000000000000000000000000000000000000000000000000001000000000200000000000000000000000000000000
0000200000008000000000000000003d2e97fa99e681fa81a4ec393f21f6b0fcccca1463d6bf43cb3a80299968091c000000000000000000000000000000010000000000000000000000000000000002000000000000000000000000000000000000a0000000000000000000000000a88f0bfec4ed62be2eb084417ce4a44e7289b24faec9f1ae553fce55c475c3d25c0844614cb8580962e0ce8f75301a64eccd757945ce5620b7d84b65e232a673d9957750e6343405c319eb99a4ec67fa11cfd66969318cbc71aa2d45fa53a349164b707307f2b1481ff9787806cdcc4d8b12e532a6a84dd0a3f4c4507c1808e700ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff6b0949d8000000000000000000000000000000000000000000000000000000001a956ace000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffff1fffffffffffffffffffffffffffffffffffffffff000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffff000000000000020000000000000000000000000000000000012000000000000000000000000034a1749086dfd00e126255be02329843f1c05be61db5c1fc80ceefc38ccef67683ee5ccc0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040000008000000000000000001088e0c600000000000000000000000000000000000000000000000000000000c5cbf870000000000000000000000000000000000000000000000000000000004f776e61626c653a2063616c6c6572206973206e6f7420746865206f776e65720000000000000000000000000000000000000064000000800000000000000000cfc24166db4bb677e857cacabd1541fb2b30645021b27c5130419589b84db52b2fc842bf000000000000000000000000000000000000000000000000000000004e487b71000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000024000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffffa07dc8b937d2916b130743c447af3d771fa55e66b7393105150e2e635ac3e87260bdc213f90000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003a0000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffff80000000000000000000000000000000000000000000000000ffffffffffffff605c3973df1c57590f450df32f693e00b74a06572949ddc108dd33c91827cf42f0", + "deployedBytecode": 
"0x00020000000000020015000000000002000100000001035500000000030100190000006003300270000004ad0030019d0000008008000039000000400080043f000004ad033001970000000102200190000000200000c13d000000040230008c000011c30000413d000000000201043b000000e002200270000004af0420009c000000280000a13d000004b00420009c0000023f0000a13d000004b10420009c000002b60000a13d000004b20420009c000002e10000213d000004b50420009c000005590000613d000004b60120009c000011c30000c13d0000000001000416000000000101004b000011c30000c13d0000006501000039000004f40000013d0000000001000416000000000101004b000011c30000c13d000000200100003900000100001004430000012000000443000004ae01000041000012b00001042e000004c10420009c000001330000213d000004c90420009c0000024c0000213d000004cd0420009c000003970000613d000004ce0420009c000003040000613d000004cf0220009c000011c30000c13d0000000002000416000000000202004b000011c30000c13d000000040230008a000000200220008c000011c30000413d0000000401100370000000000101043b000d00000001001d000004d00110009c000011c30000213d0000003301000039000000000101041a000004d0021001970000000001000411000000000212004b000000450000613d0000000d0110006b000003b50000c13d0000000d0100002900000000001004350000009801000039000c00000001001d000000200010043f000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b0000000802100039000000000202041a000000000202004b000000610000c13d0000000902100039000000000202041a000000000202004b000000610000c13d0000000a01100039000000000101041a000000000101004b000009440000613d0000000d0100002900000000001004350000000c01000029000000200010043f000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d0000009b02000039000000000402041a0000002003400270000004ad03300197000000000701043b000000000107041a0000002005100270000004ad05500197000000000353004b0000000d030000390000000703002039000004ad04400197000004ad05100197000000000654004b000000040600003900000001060020390000000006760019000000000606041a0000ff000660019000000c540000613d0000000003730019000000000303041a0000ff000330019000000c540000613d0000009702000039000000000402041a000000000304004b0000054f0000613d000b00000007001d000004e203400041000000000303041a000004d0033001970000004001100270000004ad01100197000000000414004b00000e8e0000a13d000004e301100041000000000401041a000004d104400197000000000434019f000000000041041b000000000102041a000000000401004b000009cd0000613d000004e204100041000000000504041a000004d105500197000000000054041b000000010110008a000000000012041b0000000b01000029000000000101041a000a00000001001d00000000003004350000000c01000029000000200010043f000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d0000000a02000029000004e402200197000000000101043b000000000301041a000004e503300197000000000223019f000000000021041b0000000b020000290000000201200039000000000301041a0000000301200039000000000201041a000000400100043d0000004004100039000000000024043500000040020000390000000002210436000000f8033002100000000000320435000005180310009c0000102e0000813d0000006003100039000000400030043f000004ad04000041000004ad0320009c000000000204801900000040022002100000000001010433000004ad0310009c00000000010480190000006001100210000000000121019f0000000002000414000004ad0320009c0000000002048019000000c002200210000000000112019f000004d2011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b00000000001004350000009901000039
000000200010043f0000000001000414000004ad0210009c000004ad01008041000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000000000201041a000a0100000000920000000a0220017f000000000021041b0000000b040000290000000801400039000000000301041a0000000901400039000000000201041a0000000a01400039000000000401041a000000400100043d0000006005100039000000000045043500000040041000390000000000240435000000600200003900000000022104360000000000320435000004e70310009c0000102e0000213d0000008003100039000000400030043f000004ad04000041000004ad0320009c000000000204801900000040022002100000000001010433000004ad0310009c00000000010480190000006001100210000000000121019f0000000002000414000004ad0320009c0000000002048019000000c002200210000000000112019f000004d2011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b00000000001004350000009a01000039000000200010043f0000000001000414000004ad0210009c000004ad01008041000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000000000201041a0000000a0220017f000000000021041b0000000d0100002900000000001004350000000c01000029000000200010043f000004ad030000410000000001000414000004ad0210009c0000000001038019000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000004b70000013d000004c20420009c0000026f0000213d000004c60420009c000003a20000613d000004c70420009c0000034b0000613d000004c80220009c000011c30000c13d0000000002000416000000000202004b000011c30000c13d000000040230008a000000400220008c000011c30000413d0000000402100370000000000202043b000d00000002001d000004d00220009c000011c30000213d0000002401100370000000000101043b000c00000001001d000004ad0110009c000011c30000213d0000003301000039000000000101041a000004d0011001970000000002000411000000000121004b0000067a0000c13d0000000d0100002900000000001004350000009801000039000b00000001001d000000200010043f000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b0000000802100039000000000202041a000000000202004b0000016d0000c13d0000000902100039000000000202041a000000000202004b0000016d0000c13d0000000a01100039000000000101041a000000000101004b000009440000613d0000000d0100002900000000001004350000000b01000029000000200010043f000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d0000009b02000039000000000302041a0000002004300270000004ad04400197000000000701043b000000000107041a0000002005100270000004ad05500197000000000454004b0000000d050000390000000705002039000004ad03300197000004ad04100197000000000643004b000000040600003900000001060020390000000006760019000000000606041a0000ff0006600190000a00000007001d00000f8d0000613d0000000005750019000000000505041a0000ff000550019000000f8d0000613d0000009702000039000000000402041a000000000304004b0000054f0000613d000004e203400041000000000303041a000004d0033001970000004001100270000004ad01100197000000000414004b00000e8e0000a13d000004e301100041000000000401041a000004d104400197000000000434019f000000000041041b000000000102041a000000000401004b000009cd0000613d000004e204100041000000000504041a000004d105500197000000000054041b000000010110008a000000000012041b0000000a01000029000000000101041a000c00000001001d00000000003004350000000b01000029000000200010043f000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000
011c30000613d0000000c02000029000004e402200197000000000101043b000000000301041a000004e503300197000000000223019f000000000021041b0000000a020000290000000201200039000000000301041a0000000301200039000000000201041a000000400100043d0000004004100039000000000024043500000040020000390000000002210436000000f8033002100000000000320435000004e60310009c0000102e0000213d0000006003100039000000400030043f000004ad04000041000004ad0320009c000000000204801900000040022002100000000001010433000004ad0310009c00000000010480190000006001100210000000000121019f0000000002000414000004ad0320009c0000000002048019000000c002200210000000000112019f000004d2011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b00000000001004350000009901000039000000200010043f0000000001000414000004ad0210009c000004ad01008041000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000000000201041a000c0100000000920000000c0220017f000000000021041b0000000a040000290000000801400039000000000301041a0000000901400039000000000201041a0000000a01400039000000000401041a000000400100043d0000006005100039000000000045043500000040041000390000000000240435000000600200003900000000022104360000000000320435000004e70310009c0000102e0000213d0000008003100039000000400030043f000004ad04000041000004ad0320009c000000000204801900000040022002100000000001010433000004ad0310009c00000000010480190000006001100210000000000121019f0000000002000414000004ad0320009c0000000002048019000000c002200210000000000112019f000004d2011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b00000000001004350000009a01000039000000200010043f0000000001000414000004ad0210009c000004ad01008041000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000000000201041a0000000c0220017f000000000021041b0000000d0100002900000000001004350000000b01000029000000200010043f000004ad030000410000000001000414000004ad0210009c0000000001038019000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000011940000013d000004ba0420009c0000028f0000213d000004be0120009c000005400000613d000004bf0120009c000004f70000613d000004c00120009c000011c30000c13d0000000001000416000000000101004b000011c30000c13d0000003301000039000004f40000013d000004ca0420009c000003b90000613d000004cb0420009c000003510000613d000004cc0120009c000011c30000c13d0000000001000416000000000101004b000011c30000c13d0000003301000039000000000101041a000004d0011001970000000002000411000000000121004b0000067a0000c13d0000009b01000039000000000201041a000004ad03200197000004ad0430009c0000054f0000613d000004f6022001970000000103300039000000000223019f000000000021041b000000800030043f000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004fd011001c70000800d0200003900000001030000390000050404000041000010190000013d000004c30420009c000004e60000613d000004c40420009c000003760000613d000004c50120009c000011c30000c13d0000000001000416000000000101004b000011c30000c13d0000003301000039000000000201041a000004d0052001970000000003000411000000000335004b0000067a0000c13d0000006503000039000000000403041a000004d104400197000000000043041b000004d102200197000000000021041b000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004d2011001c70000800d020000390000000303000039000004dc040000410000000006000019000010190000013d000004bb0420009c000005530000613d000004bc0420009c000005030000613d000004bd0220009c000011c30000c13d0000000002000416000000000202004b000011c30000c13d000000040230008a000000600220008c000011
c30000413d0000000402100370000000000202043b000d00000002001d000004d00220009c000011c30000213d0000003302000039000000000202041a000004d0032001970000000002000411000000000323004b000002a80000613d0000000d0220006b000003b50000c13d0000002402100370000000000302043b000004f402300197000c00000003001d000000000232004b000011c30000c13d0000000c0200006b000009280000c13d0000004401100370000000000101043b000000000101004b000009280000c13d0000050e01000041000003b60000013d000004b70420009c000006830000613d000004b80420009c000005760000613d000004b90220009c000011c30000c13d0000000002000416000000000202004b000011c30000c13d000000040230008a000000200220008c000011c30000413d0000000401100370000000000101043b000d00000001001d000004d00110009c000011c30000213d000000000300041a000bff0000300194000007c80000c13d000000ff0130019000000000020000190000000102006039000e00000002001d00000000020004150000000e0220008a00000020022000c9000000000101004b000007cc0000c13d000001000100008a000000000113016f00000001011001bf000004db02000041000000000121016f00000100011001bf0000000003000019000000000010041b0000000d06000029000000000106004b000009650000c13d000000400100043d000004df02000041000009460000013d000004b30420009c0000056c0000613d000004b40220009c000011c30000c13d0000000002000416000000000202004b000011c30000c13d000000040230008a000000200220008c000011c30000413d0000000401100370000000000601043b000004d00160009c000011c30000213d0000003301000039000000000101041a000004d0051001970000000001000411000000000115004b0000067a0000c13d0000006501000039000000000201041a000004d102200197000000000262019f000000000021041b000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004d2011001c70000800d020000390000000303000039000004d304000041000010190000013d0000000002000416000000000202004b000011c30000c13d000000040230008a000000200220008c000011c30000413d0000000401100370000000000101043b000004d00210009c000011c30000213d00000000001004350000009801000039000000200010043f0000004002000039000000000100001912af128f0000040f0000000002010019000d00000002001d000000000102041a000900000001001d000000010120003912af11d30000040f000c00000001001d0000000d01000029000000040110003912af11d30000040f000b00000001001d0000000d01000029000000070110003912af11fc0000040f0000000d020000290000000d02200039000a00000001001d000000000102001912af11fc0000040f00000009050000290000002002500270000004ad02200197000000400400043d000d00000004001d000000200340003900000000002304350000004002500270000004ad0220019700000040034000390000000000230435000004ad025001970000000000240435000900000001001d00000060024000390000000c0100002912af12360000040f0000000d0100002900000100021000390000000b0100002912af12360000040f0000000d01000029000001a0021000390000000a0100002912af124f0000040f0000000d01000029000002a002100039000000090100002912af124f0000040f000004ad010000410000000d03000029000004ad0230009c000000000301801900000040013002100000051b011001c7000012b00001042e0000000001000416000000000101004b000011c30000c13d0000009b01000039000000000101041a000005720000013d0000000002000416000000000202004b000011c30000c13d000000040230008a000001400220008c000011c30000413d0000000402100370000000000202043b000d00000002001d000004d00220009c000011c30000213d0015000d0000002d0000002402100370000000000202043b000c00000002001d000004ad0220009c000011c30000213d0014000c0000002d001300440000003d001200a40000003d000000e402100370000000000202043b000b00000002001d000004ad0220009c000011c30000213d0011000b0000002d001001040000003d0000003302000039000000000202041a000004d0022001970000000003000411000000000232004b0000067a0000c13d0000000d0200006b000009d10000c13d000004df01000041000003b60000013d0000000002000416000000000202004b000011c30
000c13d000000040230008a000000c00220008c000011c30000413d0000000402100370000000000202043b000d00000002001d000004d00220009c000011c30000213d0000003302000039000000000202041a000004d0032001970000000002000411000000000323004b000003890000613d0000000d0220006b000003b50000c13d0000002402100370000000000202043b000000000202004b000007f00000c13d0000004402100370000000000202043b000000000202004b000007f00000c13d0000006402100370000000000202043b000000000202004b000007f00000c13d0000051101000041000003b60000013d0000000002000416000000000202004b000011c30000c13d000000040230008a000000200220008c000011c30000413d0000000401100370000000000101043b00000000001004350000009a01000039000005630000013d0000000002000416000000000202004b000011c30000c13d000000040230008a000000200220008c000011c30000413d0000000401100370000000000101043b000d00000001001d000004d00110009c000011c30000213d0000003301000039000000000101041a000004d0021001970000000001000411000000000212004b000006c80000613d0000000d0110006b000006c80000613d0000051501000041000000800010043f0000050f01000041000012b1000104300000000002000416000000000202004b000011c30000c13d000000040230008a000000200220008c000011c30000413d0000000401100370000000000101043b000d00000001001d000004d00110009c000011c30000213d0000003301000039000000000101041a000004d0011001970000000002000411000000000121004b0000067a0000c13d0000000d0100002900000000001004350000009801000039000c00000001001d000000200010043f000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b0000000802100039000000000202041a000000000202004b000003e60000c13d0000000902100039000000000202041a000000000202004b000003e60000c13d0000000a01100039000000000101041a000000000101004b000009440000613d0000000d0100002900000000001004350000000c01000029000000200010043f000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d0000009b02000039000000000402041a0000002003400270000004ad03300197000000000701043b000000000107041a0000002005100270000004ad05500197000000000353004b0000000d030000390000000703002039000004ad04400197000004ad05100197000000000654004b000000040600003900000001060020390000000006760019000000000606041a0000ff000660019000000d3a0000613d0000000003730019000000000303041a0000ff000330019000000d3a0000613d0000009702000039000000000402041a000000000304004b0000054f0000613d000b00000007001d000004e203400041000000000303041a000004d0033001970000004001100270000004ad01100197000000000414004b00000e8e0000a13d000004e301100041000000000401041a000004d104400197000000000434019f000000000041041b000000000102041a000000000401004b000009cd0000613d000004e204100041000000000504041a000004d105500197000000000054041b000000010110008a000000000012041b0000000b01000029000000000101041a000a00000001001d00000000003004350000000c01000029000000200010043f000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d0000000a02000029000004e402200197000000000101043b000000000301041a000004e503300197000000000223019f000000000021041b0000000b020000290000000201200039000000000301041a0000000301200039000000000201041a000000400100043d0000004004100039000000000024043500000040020000390000000002210436000000f8033002100000000000320435000004e60310009c0000102e0000213d0000006003100039000000400030043f000004ad04000041000004ad0320009c000000000204801900000040022002100000000001010433000004ad0310009c00000000010480190000006001100210000000000121019f000000000200
0414000004ad0320009c0000000002048019000000c002200210000000000112019f000004d2011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b00000000001004350000009901000039000000200010043f0000000001000414000004ad0210009c000004ad01008041000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000000000201041a000a0100000000920000000a0220017f000000000021041b0000000b040000290000000801400039000000000301041a0000000901400039000000000201041a0000000a01400039000000000401041a000000400100043d0000006005100039000000000045043500000040041000390000000000240435000000600200003900000000022104360000000000320435000004e70310009c0000102e0000213d0000008003100039000000400030043f000004ad04000041000004ad0320009c000000000204801900000040022002100000000001010433000004ad0310009c00000000010480190000006001100210000000000121019f0000000002000414000004ad0320009c0000000002048019000000c002200210000000000112019f000004d2011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b00000000001004350000009a01000039000000200010043f0000000001000414000004ad0210009c000004ad01008041000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000000000201041a0000000a0220017f000000000021041b0000000d0100002900000000001004350000000c01000029000000200010043f000004ad030000410000000001000414000004ad0210009c0000000001038019000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000000000001041b0000000102100039000000000002041b0000000202100039000000000002041b0000000302100039000000000002041b0000000402100039000000000002041b0000000502100039000000000002041b0000000602100039000000000002041b0000000702100039000000000002041b0000000802100039000000000002041b0000000902100039000000000002041b0000000a02100039000000000002041b0000000b02100039000000000002041b0000000c02100039000000000002041b0000000d02100039000000000002041b0000000e02100039000000000002041b0000000f02100039000000000002041b0000001002100039000000000002041b0000001102100039000000000002041b0000001201100039000000000001041b0000000001000414000004ad0210009c000004ad01008041000000c001100210000004d2011001c70000800d020000390000000203000039000004e80400004100000dae0000013d0000000002000416000000000202004b000011c30000c13d000000040230008a000000200220008c000011c30000413d0000000401100370000000000101043b0000009702000039000000000302041a000000000331004b000011c30000813d0000000000200435000004e301100041000000000101041a000004d001100197000005730000013d0000000001000416000000000101004b000011c30000c13d0000006501000039000000000101041a000004d0021001970000000001000411000000000212004b000006bc0000c13d12af12740000040f0000000001000019000012b00001042e0000000001000416000000000101004b000011c30000c13d0000009701000039000b00000001001d000000000101041a000c00000001001d000004f20110009c0000102e0000213d0000000c010000290000000502100210000000bf01200039000000200300008a000000000131016f000004f20310009c0000102e0000213d000000400010043f0000000c03000029000000800030043f000000000303004b0000000003000019000008ec0000c13d000000800030043f00000020020000390000000002210436000000800300043d00000000003204350000004002100039000000000403004b000006b20000613d00000080040000390000000005000019000000200440003900000000060404330000000087060434000004ad077001970000000007720436000000000808043300000000a9080434000000000097043500000000070a043300000040092000390000000000790435000000400780003900000000070704330000006008200039000000000078043500000040066000390000000006060433000000800720003
9000000008606043400000000006704350000000006080433000004ed06600197000000a0072000390000000000670435000000c0022000390000000105500039000000000635004b000005230000413d000006b20000013d0000000001000416000000000101004b000011c30000c13d0000003301000039000000000101041a000004d0011001970000000002000411000000000121004b0000067a0000c13d0000009b01000039000000000201041a0000002003200270000004ad03300197000004ad0430009c000007b60000c13d000005160100004100000000001004350000001101000039000010310000013d0000000001000416000000000101004b000011c30000c13d0000009701000039000000000101041a000005730000013d0000000002000416000000000202004b000011c30000c13d000000040230008a000000200220008c000011c30000413d0000000401100370000000000101043b00000000001004350000009901000039000000200010043f0000004002000039000000000100001912af128f0000040f000000000101041a000000ff011001900000000001000019000000010100c039000005730000013d0000000001000416000000000101004b000011c30000c13d0000009b01000039000000000101041a0000002001100270000004ad01100197000000800010043f000004d401000041000012b00001042e0000000002000416000000000202004b000011c30000c13d000000040230008a000000400220008c000011c30000413d0000000402100370000000000202043b000d00000002001d000004d00220009c000011c30000213d0000002401100370000000000101043b000c00000001001d000004ad0110009c000011c30000213d0000003301000039000000000101041a000004d0011001970000000002000411000000000121004b0000067a0000c13d0000000d0100002900000000001004350000009801000039000b00000001001d000000200010043f000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b0000000802100039000000000202041a000000000202004b000005a80000c13d0000000902100039000000000202041a000000000202004b000005a80000c13d0000000a01100039000000000101041a000000000101004b000009440000613d0000000d0100002900000000001004350000000b01000029000000200010043f000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000701043b0000009b01000039000000000301041a000004ad04300197000000000207041a000004ad05200197000000000454004b000000040500003900000001050020390000002003300270000004ad033001970000002004200270000004ad044001970000000006750019000000000543004b0000000d050000390000000705002039000000000606041a0000ff000660019000000fcd0000613d0000000005750019000000000505041a0000ff000550019000000fcd0000613d000c00000007001d0000009701000039000000000401041a000000000304004b0000054f0000613d000004e203400041000000000303041a000004d0033001970000004002200270000004ad02200197000000000424004b00000e8e0000a13d000004e302200041000000000402041a000004d104400197000000000434019f000000000042041b000000000201041a000000000402004b000009cd0000613d000004e204200041000000000504041a000004d105500197000000000054041b000000010220008a000000000021041b0000000c01000029000000000101041a000a00000001001d00000000003004350000000b01000029000000200010043f000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d0000000a02000029000004e402200197000000000101043b000000000301041a000004e503300197000000000223019f000000000021041b0000000c020000290000000201200039000000000301041a0000000301200039000000000201041a000000400100043d0000004004100039000000000024043500000040020000390000000002210436000000f8033002100000000000320435000004e60310009c0000102e0000213d0000006003100039000000400030043f000004ad04000041000004ad0320009c0000000002048019000000400220021000
00000001010433000004ad0310009c00000000010480190000006001100210000000000121019f0000000002000414000004ad0320009c0000000002048019000000c002200210000000000112019f000004d2011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b00000000001004350000009901000039000000200010043f0000000001000414000004ad0210009c000004ad01008041000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000000000201041a000a0100000000920000000a0220017f000000000021041b0000000c040000290000000801400039000000000301041a0000000901400039000000000201041a0000000a01400039000000000401041a000000400100043d0000006005100039000000000045043500000040041000390000000000240435000000600200003900000000022104360000000000320435000004e70310009c0000102e0000213d0000008003100039000000400030043f000004ad04000041000004ad0320009c000000000204801900000040022002100000000001010433000004ad0310009c00000000010480190000006001100210000000000121019f0000000002000414000004ad0320009c0000000002048019000000c002200210000000000112019f000004d2011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b00000000001004350000009a01000039000000200010043f0000000001000414000004ad0210009c000004ad01008041000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000000000201041a0000000a0220017f000000000021041b0000000d0100002900000000001004350000000b01000029000000200010043f000004ad030000410000000001000414000004ad0210009c0000000001038019000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000011940000013d000004d901000041000000800010043f0000002001000039000000840010043f000000a40010043f0000051201000041000000c40010043f0000051301000041000012b1000104300000000001000416000000000101004b000011c30000c13d0000009701000039000b00000001001d000000000101041a000c00000001001d000004f20110009c0000102e0000213d0000000c010000290000000502100210000000bf01200039000000200300008a000000000131016f000004f20310009c0000102e0000213d000000400010043f0000000c03000029000000800030043f000000000303004b0000000003000019000009100000c13d000000800030043f00000020020000390000000002210436000000800300043d00000000003204350000004002100039000000000403004b000006b20000613d0000000004000019000000200880003900000000050804330000000065050434000004ad05500197000000000552043600000000060604330000000076060434000004f406600197000000000065043500000000050704330000004006200039000000000056043500000060022000390000000104400039000000000534004b000006a20000413d0000000002120049000004ad03000041000004ad0420009c0000000002038019000004ad0410009c000000000103801900000040011002100000006002200210000000000112019f000012b00001042e000004d901000041000000800010043f0000002001000039000000840010043f0000002901000039000000a40010043f000004f901000041000000c40010043f000004fa01000041000000e40010043f000004fb01000041000012b1000104300000000d0100002900000000001004350000009801000039000c00000001001d000000200010043f000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b0000000802100039000000000202041a000000000202004b000006e40000c13d0000000902100039000000000202041a000000000202004b000006e40000c13d0000000a01100039000000000101041a000000000101004b000009440000613d0000000d0100002900000000001004350000000c01000029000000200010043f000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004e1011001c7000080100200003912af12aa0000040f000000010220019000001
1c30000613d0000009b02000039000000000402041a0000002003400270000004ad03300197000000000701043b000000000107041a0000002005100270000004ad05500197000000000353004b0000000d030000390000000703002039000004ad04400197000004ad05100197000000000654004b000000040600003900000001060020390000000006760019000000000606041a0000ff000660019000000cc80000613d0000000003730019000000000303041a0000ff000330019000000cc80000613d0000009702000039000000000402041a000000000304004b0000054f0000613d000b00000007001d000004e203400041000000000303041a000004d0033001970000004001100270000004ad01100197000000000414004b00000e8e0000a13d000004e301100041000000000401041a000004d104400197000000000434019f000000000041041b000000000102041a000000000401004b000009cd0000613d000004e204100041000000000504041a000004d105500197000000000054041b000000010110008a000000000012041b0000000b01000029000000000101041a000a00000001001d00000000003004350000000c01000029000000200010043f000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d0000000a02000029000004e402200197000000000101043b000000000301041a000004e503300197000000000223019f000000000021041b0000000b020000290000000201200039000000000301041a0000000301200039000000000201041a000000400100043d0000004004100039000000000024043500000040020000390000000002210436000000f8033002100000000000320435000004e60310009c0000102e0000213d0000006003100039000000400030043f000004ad04000041000004ad0320009c000000000204801900000040022002100000000001010433000004ad0310009c00000000010480190000006001100210000000000121019f0000000002000414000004ad0320009c0000000002048019000000c002200210000000000112019f000004d2011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b00000000001004350000009901000039000000200010043f0000000001000414000004ad0210009c000004ad01008041000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000000000201041a000a0100000000920000000a0220017f000000000021041b0000000b040000290000000801400039000000000301041a0000000901400039000000000201041a0000000a01400039000000000401041a000000400100043d0000006005100039000000000045043500000040041000390000000000240435000000600200003900000000022104360000000000320435000004e70310009c0000102e0000213d0000008003100039000000400030043f000004ad04000041000004ad0320009c000000000204801900000040022002100000000001010433000004ad0310009c00000000010480190000006001100210000000000121019f0000000002000414000004ad0320009c0000000002048019000000c002200210000000000112019f000004d2011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b00000000001004350000009a01000039000000200010043f0000000001000414000004ad0210009c000004ad01008041000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000000000201041a0000000a0220017f000000000021041b0000000d0100002900000000001004350000000c01000029000000200010043f000004ad030000410000000001000414000004ad0210009c0000000001038019000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000011940000013d000004ef022001970000002003300210000004fc03300041000004ee04300197000000000224019f000000000021041b0000002001300270000000800010043f000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004fd011001c70000800d020000390000000103000039000004fe04000041000010190000013d00000000020004150000000f0220008a00000020022000c9000f00000000001d000c00000002001d000a00000003001d000004d50100004100000000
0010043900000000010004100000000400100443000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004d6011001c7000080020200003912af12aa0000040f0000000102200190000007fd0000613d000000000101043b000000000101004b0000094d0000c13d0000000a01000029000000ff0110018f000000010110008c000000000100001900000001010060390000000c020000290000000502200270000000000201001f000009500000c13d000001000100008a000000000200041a000000000112016f000000010300003900000001011001bf0000000b0200006b000002da0000c13d000002d60000013d0000008402100370000000000202043b000000000202004b000007fe0000c13d000000a401100370000000000101043b000004ed02100197000000000212004b000011c30000c13d000004ff0110009c000007fe0000813d0000051001000041000003b60000013d000000000001042f0000000d0100002900000000001004350000009801000039000c00000001001d000000200010043f000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b0000000802100039000000000202041a000000000202004b0000081a0000c13d0000000902100039000000000202041a000000000202004b0000081a0000c13d0000000a01100039000000000101041a000000000101004b000009440000613d0000000d0100002900000000001004350000000c01000029000000200010043f000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d0000009b06000039000000000306041a0000002002300270000004ad02200197000000000501043b000000000105041a0000002004100270000004ad04400197000000000242004b0000000d020000390000000702002039000004ad03300197000004ad04100197000000000343004b000000040300003900000001030020390000000003530019000000000303041a0000ff0003300190000b00000005001d00000e920000613d0000000002520019000000000202041a0000ff000220019000000e920000613d0000009702000039000000000402041a000000000304004b0000054f0000613d000004e203400041000000000303041a000004d0033001970000004001100270000004ad01100197000000000414004b00000e8e0000a13d000004e301100041000000000401041a000004d104400197000000000434019f000000000041041b000000000102041a000000000401004b000009cd0000613d000004e204100041000000000504041a000004d105500197000000000054041b000000010110008a000000000012041b0000000b01000029000000000101041a000a00000001001d00000000003004350000000c01000029000000200010043f000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d0000000a02000029000004e402200197000000000101043b000000000301041a000004e503300197000000000223019f000000000021041b0000000b020000290000000201200039000000000301041a0000000301200039000000000201041a000000400100043d0000004004100039000000000024043500000040020000390000000002210436000000f8033002100000000000320435000004e60310009c0000102e0000213d0000006003100039000000400030043f000004ad04000041000004ad0320009c000000000204801900000040022002100000000001010433000004ad0310009c00000000010480190000006001100210000000000121019f0000000002000414000004ad0320009c0000000002048019000000c002200210000000000112019f000004d2011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b00000000001004350000009901000039000000200010043f0000000001000414000004ad0210009c000004ad01008041000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000000000201041a000a0100000000920000000a0220017f000000000021041b0000000b040000290000000801400039000000000301041a0000000901400039000000000201041a0000000a01400039000000000401041a00000040010
0043d0000006005100039000000000045043500000040041000390000000000240435000000600200003900000000022104360000000000320435000004e70310009c0000102e0000213d0000008003100039000000400030043f000004ad04000041000004ad0320009c000000000204801900000040022002100000000001010433000004ad0310009c00000000010480190000006001100210000000000121019f0000000002000414000004ad0320009c0000000002048019000000c002200210000000000112019f000004d2011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b00000000001004350000009a01000039000000200010043f0000000001000414000004ad0210009c000004ad01008041000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000000000201041a0000000a0220017f000000000021041b0000000d0100002900000000001004350000000c01000029000000200010043f000004ad030000410000000001000414000004ad0210009c0000000001038019000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000011940000013d000004e60310009c0000102e0000213d00000000030000190000006004100039000000400040043f0000000004010436000000400500043d000004e60650009c0000102e0000213d0000006006500039000000400060043f000000400650003900000000000604350000002006500039000000000006043500000000000504350000000000540435000000400400043d000004f30540009c0000102e0000213d0000004005400039000000400050043f00000020054000390000000000050435000000000004043500000040051000390000000000450435000000a00430003900000000001404350000002003300039000000000123004b00000e150000813d000000400100043d000004e60410009c000008ef0000a13d0000102e0000013d000004f30310009c0000102e0000213d00000000030000190000004004100039000000400040043f0000000004010436000000400500043d000004f30650009c0000102e0000213d0000004006500039000000400060043f0000002006500039000000000006043500000000000504350000000000540435000000a00430003900000000001404350000002003300039000000000123004b00000daf0000813d000000400100043d000004f30410009c000009130000a13d0000102e0000013d0000000d0100002900000000001004350000009801000039000b00000001001d000000200010043f000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b0000000802100039000000000202041a000000000202004b000009920000c13d0000000902100039000000000202041a000000000202004b000009920000c13d0000000a01100039000000000101041a000000000101004b000009920000c13d000000400100043d0000051a020000410000000000210435000004ad02000041000004ad0310009c00000000010280190000004001100210000004e0011001c7000012b1000104300000000c010000290000000501100270000000000100001f000000400100043d0000006402100039000004d70300004100000000003204350000004402100039000004d803000041000000000032043500000024021000390000002e030000390000000000320435000004d9020000410000000000210435000000040210003900000020030000390000000000320435000004ad02000041000004ad0310009c00000000010280190000004001100210000004da011001c7000012b100010430000c00000003001d0000006501000039000000000201041a000004d102200197000000000021041b0000003301000039000000000201041a000004d103200197000000000363019f000000000031041b000004ad010000410000000003000414000004ad0430009c0000000003018019000000c001300210000004d2011001c7000004d0052001970000800d020000390000000303000039000004dc0400004112af12a50000040f0000000101200190000011c30000613d0000000c0100006b0000101c0000c13d000004db01000041000000000200041a000000000112016f000000000010041b0000000103000039000000400100043d0000000000310435000004ad020000410000000004000414000004ad0540009c0000000004028019000004ad0510009c000000000102801900000040011002
10000000c002400210000000000112019f000004dd011001c70000800d02000039000004de04000041000010190000013d0000000d0100002900000000001004350000000b01000029000000200010043f000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d0000009b02000039000900000002001d000000000302041a0000002002300270000004ad02200197000000000501043b000000000105041a0000002004100270000004ad04400197000000000242004b0000000d020000390000000702002039000004ad03300197000004ad04100197000000000343004b000000040300003900000001030020390000000003530019000000000303041a0000ff0003300190000a00000005001d0000101e0000613d0000000002520019000000000202041a0000ff00022001900000101e0000613d0000009702000039000000000402041a000000000304004b0000054f0000613d000004e203400041000000000303041a000004d0033001970000004001100270000004ad01100197000000000414004b00000e8e0000a13d000004e301100041000000000401041a000004d104400197000000000434019f000000000041041b000000000102041a000000000401004b000010fd0000c13d000005160100004100000000001004350000003101000039000010310000013d0000004402100370000000000202043b000000000202004b000009dd0000c13d0000006402100370000000000202043b000000000202004b000009dd0000c13d0000008402100370000000000202043b000000000202004b000003950000613d000000a402100370000000000202043b000000000202004b000009e80000c13d000000c402100370000000000202043b000004ed03200197000000000323004b000011c30000c13d000004ec0220009c000007fb0000a13d0000010402100370000000000202043b000004f403200197000000000323004b000011c30000c13d000005050220009c000009f30000213d0000012401100370000000000101043b000000000101004b000002b40000613d0000000d0100002900000000001004350000009801000039000a00000001001d000000200010043f000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b0000000802100039000000000202041a000000000202004b000010f70000c13d0000000902100039000000000202041a000000000202004b000010f70000c13d0000000a01100039000000000101041a000000000101004b000010f70000c13d00000001010003670000010402100370000000000302043b000004f402300197000000000232004b000011c30000c13d0000012401100370000000000201043b000000400100043d000000400410003900000000002404350000002002100039000000000032043500000040030000390000000000310435000004e60310009c0000102e0000213d0000006003100039000000400030043f000004ad04000041000004ad0320009c000000000204801900000040022002100000000001010433000004ad0310009c00000000010480190000006001100210000000000121019f0000000002000414000004ad0320009c0000000002048019000000c002200210000000000112019f000004d2011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000900000001001d00000000001004350000009901000039000800000001001d000000200010043f0000000001000414000004ad0210009c000004ad01008041000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000400200043d000000000101043b000000000101041a000000ff01100190000011cb0000c13d00000001010003670000004403100370000000000303043b0000006404100370000000000404043b0000008401100370000000000101043b00000060052000390000000000150435000000400120003900000000004104350000002001200039000000000031043500000060030000390000000000320435000004e70320009c0000102e0000213d0000008003200039000000400030043f000004ad04000041000004ad0310009c000000000104801900000040011002100000000002020433000004ad0320009c00000000020480190000006002200210000000000112019f0000000002000414000004ad0320009c0000000002048019000000c002200210000000000112019f0
00004d2011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000700000001001d00000000001004350000009a01000039000600000001001d000000200010043f0000000001000414000004ad0210009c000004ad01008041000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000000000101041a000000ff01100190000011c50000c13d0000009703000039000000000203041a000004ad01200197000004f20420009c0000102e0000213d0000000104200039000000000043041b0000000000300435000004e302200041000000000302041a000004d1033001970000000d033001af000000000032041b000000400200043d000004e70320009c0000102e0000213d0000008003200039000000400030043f00000040032000390000000b0400002900000000004304350000000107000039000000000372043600000000000304350000000008000031000001040380008a0000050804000041000000400530008c000000000500001900000000050440190000050803300197000000000603004b000000000400a019000005080330009c000000000405c019000000000304004b000011c30000c13d000000400300043d000004f30430009c0000102e0000213d0000004004300039000000400040043f00000001060003670000010404600370000000000404043b000004f405400197000000000545004b000011c30000c13d00000000044304360000012405600370000000000505043b000000000054043500000060042000390000000000340435000000400400043d000004f30340009c0000102e0000213d0000004003400039000000400030043f000000200340003900000000000304350000000000040435000000400300043d000004e70530009c0000102e0000213d0000008005300039000000400050043f00000060053000390000000000450435000000400430003900000000000404350000002004300039000000000004043500000000000304350000009b04000039000000000404041a000000400500043d000004f80950009c0000102e0000213d000000a009500039000000400090043f00000040095000390000000c0a0000290000000000a90435000000440880008a0000050809000041000000600a80008c000000000a000019000000000a0940190000050808800197000000000b08004b000000000900a019000005080880009c00000000090ac01900000000077504360000000000070435000000000709004b000011c30000c13d000000400700043d000004e60870009c0000102e0000213d0000006008700039000000400080043f0000004408600370000000000808043b00000000088704360000006409600370000000000909043b00000000009804350000008408600370000000000808043b0000004009700039000000000089043500000060085000390000000000780435000000400700043d000004f30870009c0000102e0000213d0000004008700039000000400080043f000000a408600370000000000808043b0000000008870436000000c406600370000000000606043b000004ed09600197000000000969004b000011c30000c13d000000000068043500000080065000390000000000760435000000400600043d000004e60760009c0000102e0000213d0000006007600039000000400070043f00000040076000390000000000070435000000200760003900000000000704350000000000060435000000400800043d000004f30780009c0000102e0000213d0000004007800039000000400070043f000000200780003900000000000704350000000000080435000000400700043d000004f80970009c0000102e0000213d000000a009700039000000400090043f000000800970003900000000008904350000006008700039000000000068043500000040067000390000000000060435000000200670003900000000000604350000000000070435000000400600043d000c00000006001d000005090660009c0000102e0000213d0000000c08000029000000e006800039000000400060043f000000c006800039000b00000006001d0000000000760435000000a006800039000500000006001d00000000005604350000008005800039000400000005001d00000000003504350000006003800039000300000003001d00000000002304350000004002800039000200000002001d0000000000120435000004ad0140019700000000021804360000002001400270000004ad01100197000100000002001d00000000001204350000000d0100002900000000001004350000000a01000029000000200010043f000004ad030000410000000001000414000004ad0210009c0000
000001038019000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d0000000c020000290000000002020433000004ad02200197000000000101043b000000000301041a0000050a03300197000000000223019f000000010300002900000000030304330000002003300210000004ee03300197000000000232019f000000020300002900000000030304330000004003300210000004e403300197000000000232019f000000000021041b0000000102100039000000000302041a0000050b03300197000000030400002900000000040404330000000065040434000000000505004b000000010330c1bf0000000005060433000000000505004b000001000330c1bf000000400540003900000000050504330000001005500210000004eb05500197000000000353019f000000000032041b00000060024000390000000203100039000d010000000092000000000403041a0000000d0440017f00000000020204330000000052020434000000f802200270000000000224019f000000000023041b00000000020504330000000303100039000000000023041b0000000402100039000000000302041a0000050b03300197000000040400002900000000040404330000000065040434000000000505004b000000010330c1bf0000000005060433000000000505004b000001000330c1bf000000400540003900000000050504330000001005500210000004eb05500197000000000353019f000000000032041b0000000502100039000000000302041a0000000d0330017f000000600440003900000000040404330000000054040434000000f804400270000000000343019f000000000032041b00000000020504330000000603100039000000000023041b0000000702100039000000000302041a0000050b03300197000000050400002900000000040404330000000065040434000000000505004b000000010330c1bf0000000005060433000000000505004b000001000330c1bf000000400540003900000000050504330000001005500210000004eb05500197000000000353019f000000000032041b0000006002400039000000000202043300000008031000390000000065020434000000000053041b00000000030604330000000905100039000000000035041b000000400220003900000000020204330000000a03100039000000000023041b000000800240003900000000020204330000000b031000390000000042020434000000000023041b000000000204043300000080022002700000000c03100039000000000403041a000004ed04400197000000000224019f000000000023041b0000000d02100039000000000302041a0000050b033001970000000b0400002900000000040404330000000065040434000000000505004b000000010330c1bf0000000005060433000000000505004b000001000330c1bf000000400540003900000000050504330000001005500210000004eb05500197000000000353019f000000000032041b000000600240003900000000020204330000000e031000390000000065020434000000000053041b00000000030604330000000f05100039000000000035041b000000400220003900000000020204330000001003100039000000000023041b0000008002400039000000000202043300000011031000390000000042020434000000000023041b000000120110003900000000020404330000008002200270000000000301041a000004ed03300197000000000223019f000000000021041b000000090100002900000000001004350000000801000029000000200010043f0000000001000414000004ad0210009c000004ad01008041000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000000000201041a0000000d0220017f00000001022001bf000000000021041b000000070100002900000000001004350000000601000029000000200010043f000004ad030000410000000001000414000004ad0210009c0000000001038019000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000000000201041a0000000d0220017f00000001022001bf000000000021041b000004d001000041000000150510017f000004ad01000041000000140310017f0000001104000029000000100200002900000012060000290000001307000029000000400100043d00000000083104360000000103000367000000000973034f000000000909043b00000000009804350000002008700039000000000883034f000000000808043b000000400910003900000000008904350000004
007700039000000000773034f000000000707043b00000060081000390000000000780435000000000763034f000000000707043b000000800810003900000000007804350000002006600039000000000663034f000000000606043b000004ed07600197000000000767004b000011c30000c13d000004ad04400197000000c0071000390000000000470435000000a0041000390000000000640435000000000423034f000000000404043b000004f406400197000000000646004b000011c30000c13d000000e00610003900000000004604350000002002200039000000000223034f000000000202043b00000100031000390000000000230435000004ad020000410000000003000414000004ad0430009c0000000003028019000004ad0410009c00000000010280190000004001100210000000c002300210000000000112019f0000050c011001c70000800d0200003900000002030000390000050d04000041000010190000013d0000000103700039000000000154004b00000c7c0000a13d0000000401700039000000000401041a000001000500008a000000000454016f000000000603041a000000ff06600190000000010440c1bf000000000041041b000004e906000041000000000464016f000000000603041a0000ff0006600190000001000440c1bf000000000041041b000004ea04400197000000000603041a000004eb06600197000000000464019f000000000041041b0000000501700039000000000401041a000000000454016f0000000205700039000000000505041a000000ff0550018f000000000454019f000000000041041b0000000301700039000000000101041a0000000604700039000000000014041b000000000102041a000004ad01100197000000000407041a000004f604400197000000000114019f000000000017041b000000000403041a000001000100008a000000000414016f00000001044001bf000000000043041b0000000703700039000000000402041a0000002004400270000004ad04400197000000000507041a0000002005500270000004ad05500197000000000445004b00000cba0000813d0000000d04700039000000000504041a000000000515016f000000000603041a000000ff06600190000000010550c1bf000000000054041b000004e906000041000000000565016f000000000603041a0000ff0006600190000001000550c1bf000000000054041b000004ea05500197000000000603041a000004eb06600197000000000565019f000000000054041b0000000804700039000000000404041a0000000e05700039000000000045041b0000000904700039000000000404041a0000000f05700039000000000045041b0000000a04700039000000000404041a0000001005700039000000000045041b0000000b04700039000000000404041a0000001105700039000000000045041b0000000c04700039000000000404041a000004ec044001970000001205700039000000000605041a000004ed06600197000000000446019f000000000045041b000000000202041a000004ee02200197000000000407041a000004ef04400197000000000224019f000000000027041b000000000203041a000000000112016f00000001011001bf000000000013041b000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004d2011001c70000800d0200003900000002030000390000051904000041000010180000013d0000000103700039000000000154004b00000cf00000a13d0000000401700039000000000401041a000001000500008a000000000454016f000000000603041a000000ff06600190000000010440c1bf000000000041041b000004e906000041000000000464016f000000000603041a0000ff0006600190000001000440c1bf000000000041041b000004ea04400197000000000603041a000004eb06600197000000000464019f000000000041041b0000000501700039000000000401041a000000000454016f0000000205700039000000000505041a000000ff0550018f000000000454019f000000000041041b0000000301700039000000000101041a0000000604700039000000000014041b000000000102041a000004ad01100197000000000407041a000004f604400197000000000114019f000000000017041b000000000403041a000001000100008a000000000414016f000000000043041b0000000703700039000000000402041a0000002004400270000004ad04400197000000000507041a0000002005500270000004ad05500197000000000445004b00000d2d0000813d0000000d04700039000000000504041a000000000515016f000000000603041a000000ff06600190000000010550c1bf000000000054041b000004e906
000041000000000565016f000000000603041a0000ff0006600190000001000550c1bf000000000054041b000004ea05500197000000000603041a000004eb06600197000000000565019f000000000054041b0000000804700039000000000404041a0000000e05700039000000000045041b0000000904700039000000000404041a0000000f05700039000000000045041b0000000a04700039000000000404041a0000001005700039000000000045041b0000000b04700039000000000404041a0000001105700039000000000045041b0000000c04700039000000000404041a000004ec044001970000001205700039000000000605041a000004ed06600197000000000446019f000000000045041b000000000202041a000004ee02200197000000000407041a000004ef04400197000000000224019f000000000027041b000000000203041a000000000112016f000000000013041b000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004d2011001c70000800d0200003900000002030000390000050304000041000010180000013d0000000103700039000000000154004b00000d620000a13d0000000401700039000000000401041a000001000500008a000000000454016f000000000603041a000000ff06600190000000010440c1bf000000000041041b000004e906000041000000000464016f000000000603041a0000ff0006600190000001000440c1bf000000000041041b000004ea04400197000000000603041a000004eb06600197000000000464019f000000000041041b0000000501700039000000000401041a000000000454016f0000000205700039000000000505041a000000ff0550018f000000000454019f000000000041041b0000000301700039000000000101041a0000000604700039000000000014041b000000000102041a000004ad01100197000000000407041a000004f604400197000000000114019f000000000017041b000000000403041a000004db01000041000000000414016f00000100044001bf000000000043041b0000000703700039000000000402041a0000002004400270000004ad04400197000000000507041a0000002005500270000004ad05500197000000000445004b00000da10000813d0000000d04700039000000000504041a000001000600008a000000000565016f000000000603041a000000ff06600190000000010550c1bf000000000054041b000004e906000041000000000565016f000000000603041a0000ff0006600190000001000550c1bf000000000054041b000004ea05500197000000000603041a000004eb06600197000000000565019f000000000054041b0000000804700039000000000404041a0000000e05700039000000000045041b0000000904700039000000000404041a0000000f05700039000000000045041b0000000a04700039000000000404041a0000001005700039000000000045041b0000000b04700039000000000404041a0000001105700039000000000045041b0000000c04700039000000000404041a000004ec044001970000001205700039000000000605041a000004ed06600197000000000446019f000000000045041b000000000202041a000004ee02200197000000000407041a000004ef04400197000000000224019f000000000027041b000000000203041a000000000112016f00000100011001bf000000000013041b000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004d2011001c70000800d0200003900000002030000390000051404000041000010180000013d0000009b01000039000000000101041a000704ad0010019b000900980000003d000880100000003d0000000007000019000600000000001d000a00000008001d00000dbb0000013d00000001077000390000000c0170006c000010f40000813d0000000b01000029000000000101041a000000000171004b00000e8e0000a13d000d00000007001d000004e301700041000000000101041a000004d00110019700000000001004350000000901000029000000200010043f0000000001000414000004ad0210009c000004ad01008041000000c001100210000004e1011001c7000000080200002912af12aa0000040f0000000a080000290000000102200190000011c30000613d000000000101043b000000000201041a000004ad02200197000000070220006b00000004020000390000000102002039000000400300043d000004e70430009c0000000d070000290000102e0000213d00000000051200190000008001300039000000400010043f000000000205041a0000001001200270000004ad0410019700000040013000390000000000410435000000ff04200
1900000000004000019000000010400c03900000000044304360000ff00022001900000000002000019000000010200c0390000000000240435000000400200043d000004f30620009c0000102e0000213d0000004006200039000000400060043f0000000106500039000000000606041a000000f80660021000000000066204360000000205500039000000000505041a0000000000560435000000600530003900000000002504350000000003030433000000000303004b00000db80000613d0000000003040433000000000303004b00000db80000c13d000000400300043d000004f30430009c0000102e0000213d00000000010104330000004004300039000000400040043f00000020043000390000000000240435000004ad011001970000000000130435000000800100043d000000060110006c00000e8e0000a13d00000006020000290000000501200210000000a0011000390000000000310435000000800100043d000000000121004b00000e8e0000a13d0000000601000029000600010010003d00000db80000013d0000009b01000039000000000101041a0000002001100270000804ad0010019b000a00980000003d000980100000003d0000000008000019000700000000001d00000e230000013d0000000701000029000700010010003d00000001088000390000000c0180006c000010fa0000813d0000000b01000029000000000101041a000000000181004b00000e8e0000a13d000d00000008001d000004e301800041000000000101041a000004d00110019700000000001004350000000a01000029000000200010043f0000000001000414000004ad0210009c000004ad01008041000000c001100210000004e1011001c7000000090200002912af12aa0000040f0000000102200190000011c30000613d000000000101043b000000000201041a0000002002200270000004ad02200197000000080220006b0000000d030000390000000703002039000000400200043d000004f80420009c0000000d080000290000102e0000213d0000000004130019000000a001200039000000400010043f000000000504041a0000001001500270000004ad0310019700000040012000390000000000310435000000ff035001900000000003000019000000010300c03900000000033204360000ff00055001900000000005000019000000010500c0390000000000530435000000400500043d000004e60650009c0000102e0000213d0000006006500039000000400060043f0000000106400039000000000606041a00000000066504360000000207400039000000000707041a00000000007604350000000306400039000000000606041a0000004007500039000000000067043500000060062000390000000000560435000000400500043d000004f30750009c0000102e0000213d0000004007500039000000400070043f0000000407400039000000000707041a00000000077504360000000504400039000000000404041a00000080044002100000000000470435000000800420003900000000005404350000000002020433000000000202004b00000e200000613d0000000002030433000000000202004b00000e200000c13d000000400200043d000004e60320009c0000102e0000213d000000000101043300000000030604330000006004200039000000400040043f0000004004200039000000000054043500000020042000390000000000340435000004ad011001970000000000120435000000800100043d000000070110006c00000e8e0000a13d00000007030000290000000501300210000000a0011000390000000000210435000000800100043d000000000131004b00000e1e0000213d000005160100004100000000001004350000003201000039000010310000013d000a00000006001d0000000a0150003900000009025000390000000803500039000700000003001d000000000303041a000800000002001d000000000202041a000900000001001d000000000401041a000000400100043d0000006005100039000000000045043500000040041000390000000000240435000000200210003900000000003204350000006003000039000c00000003001d0000000000310435000004e70310009c0000102e0000213d0000008003100039000000400030043f000004ad04000041000004ad0320009c000000000204801900000040022002100000000001010433000004ad0310009c00000000010480190000006001100210000000000121019f0000000002000414000004ad0320009c0000000002048019000000c002200210000000000112019f000004d2011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b00000000001004350000009a01000039000600000001001d
000000200010043f0000000001000414000004ad0210009c000004ad01008041000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000000000201041a0005010000000092000000050220017f000000000021041b00000001010003670000002402100370000000000302043b0000004402100370000000000202043b0000006401100370000000000401043b000000400100043d0000006005100039000000000045043500000040041000390000000000240435000000200210003900000000003204350000000c030000290000000000310435000004e70310009c0000102e0000213d0000008003100039000000400030043f000004ad04000041000004ad0320009c000000000204801900000040022002100000000001010433000004ad0310009c00000000010480190000006001100210000000000121019f0000000002000414000004ad0320009c0000000002048019000000c002200210000000000112019f000004d2011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000c00000001001d00000000001004350000000601000029000000200010043f0000000001000414000004ad0210009c000004ad01008041000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000000000101041a000000ff01100190000011c50000c13d0000000c0100002900000000001004350000000601000029000000200010043f000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000000000201041a000000050220017f00000001022001bf000000000021041b0000000b030000290000000b013000390000000a02000029000000000202041a0000002002200270000004ad02200197000000000303041a0000002003300270000004ad03300197000000000223004b00000f590000813d0000000b060000290000000d02600039000000000302041a000000050330017f0000000704600039000000000504041a000000ff05500190000000010330c1bf000000000032041b000004e905000041000000000353016f000000000504041a0000ff0005500190000001000330c1bf000000000032041b000004ea03300197000000000404041a000004eb04400197000000000343019f000000000032041b0000000702000029000000000202041a0000000e03600039000000000023041b0000000802000029000000000202041a0000000f03600039000000000023041b0000000902000029000000000202041a0000001003600039000000000023041b000000000201041a0000001103600039000000000023041b0000000c02600039000000000202041a000004ec022001970000001203600039000000000403041a000004ed04400197000000000224019f000000000023041b0000000a02000029000000000202041a000004ee02200197000000000306041a000004ef03300197000000000223019f000000000026041b00000001060003670000002402600370000000000202043b0000000703000029000000000023041b0000004403600370000000000303043b0000000804000029000000000034041b0000006404600370000000000404043b0000000905000029000000000045041b0000008405600370000000000505043b000000000051041b000000a401600370000000000101043b000004ed06100197000000000616004b000011c30000c13d0000000b060000290000000c06600039000000000706041a000004ed077001970000008008100270000000000787019f000000000076041b000000400600043d000000800760003900000000001704350000006001600039000000000051043500000040016000390000000000410435000000200160003900000000003104350000000000260435000004ad010000410000000002000414000004ad0320009c0000000002018019000004ad0360009c00000000060180190000004001600210000000c002200210000000000112019f00000500011001c70000800d0200003900000002030000390000050104000041000010180000013d0000000101700039000000000343004b00000fb60000a13d0000000a070000290000000403700039000000000403041a000001000500008a000000000454016f000000000601041a000000ff06600190000000010440c1bf000000000043041b000004e906000041000000000464016f000000000601041a0000ff0006600190000001000440c1bf000000000043041b000
004ea04400197000000000601041a000004eb06600197000000000464019f000000000043041b0000000503700039000000000403041a000000000454016f0000000205700039000000000505041a000000ff0550018f000000000454019f000000000043041b0000000303700039000000000303041a0000000604700039000000000034041b000000000202041a000004ad02200197000000000307041a000004f603300197000000000223019f000000000027041b0000000c040000290000001002400210000004eb02200197000000000301041a000004f003300197000000000223019f000000000021041b000000400100043d0000000000410435000004ad020000410000000003000414000004ad0430009c0000000003028019000004ad0410009c00000000010280190000004001100210000000c002300210000000000112019f000004dd011001c70000800d0200003900000002030000390000050204000041000010180000013d00000000060700190000000702700039000000000343004b000010020000a13d0000000d03600039000000000403041a000001000500008a000000000454016f000000000502041a000000ff05500190000000010440c1bf000000000043041b000004e905000041000000000454016f000000000502041a0000ff0005500190000001000440c1bf000000000043041b000004ea04400197000000000502041a000004eb05500197000000000454019f000000000043041b0000000803600039000000000303041a0000000e04600039000000000034041b0000000903600039000000000303041a0000000f04600039000000000034041b0000000a03600039000000000303041a0000001004600039000000000034041b0000000b03600039000000000303041a0000001104600039000000000034041b0000000c03600039000000000303041a000004ec033001970000001204600039000000000504041a000004ed05500197000000000335019f000000000034041b000000000101041a000004ee01100197000000000306041a000004ef03300197000000000113019f000000000016041b0000000c040000290000001001400210000004eb01100197000000000302041a000004f003300197000000000113019f000000000012041b000000400100043d0000000000410435000004ad020000410000000003000414000004ad0430009c0000000003028019000004ad0410009c00000000010280190000004001100210000000c002300210000000000112019f000004dd011001c70000800d020000390000000203000039000004f1040000410000000d0500002912af12a50000040f0000000101200190000011c30000613d0000000001000019000012b00001042e00000003015000390000000202500039000700000002001d000000000302041a000800000001001d000000000201041a000000400100043d000000400410003900000000002404350000004002000039000b00000002001d0000000002210436000000f8033002100000000000320435000004e60310009c000010340000a13d000005160100004100000000001004350000004101000039000000040010043f0000051701000041000012b1000104300000006003100039000000400030043f000004ad04000041000004ad0320009c000000000204801900000040022002100000000001010433000004ad0310009c00000000010480190000006001100210000000000121019f0000000002000414000004ad0320009c0000000002048019000000c002200210000000000112019f000004d2011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b00000000001004350000009901000039000600000001001d000000200010043f0000000001000414000004ad0210009c000004ad01008041000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000000000201041a0005010000000092000000050220017f000000000021041b00000001010003670000002402100370000000000302043b000004f402300197000000000232004b000011c30000c13d0000004401100370000000000201043b000000400100043d00000040041000390000000000240435000000200210003900000000003204350000000b030000290000000000310435000004e60310009c0000102e0000213d0000006003100039000000400030043f000004ad04000041000004ad0320009c000000000204801900000040022002100000000001010433000004ad0310009c00000000010480190000006001100210000000000121019f0000000002000414000004ad0320009c0000000002048019000000c002200210000000000112019f000004
d2011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000b00000001001d00000000001004350000000601000029000000200010043f0000000001000414000004ad0210009c000004ad01008041000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000000000101041a000000ff01100190000011c80000c13d0000000b0100002900000000001004350000000601000029000000200010043f000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000000000201041a000000050220017f00000001022001bf000000000021041b0000000901000029000000000101041a000004ad011001970000000a02000029000000000202041a000004ad02200197000000000112004b000010d60000813d0000000a050000290000000401500039000000000201041a000000050220017f0000000103500039000000000403041a000000ff04400190000000010220c1bf000000000021041b000004e904000041000000000242016f000000000403041a0000ff0004400190000001000220c1bf000000000021041b000004ea02200197000000000303041a000004eb03300197000000000232019f000000000021041b0000000501500039000000000201041a000000050220017f0000000703000029000000000303041a000000ff0330018f000000000232019f000000000021041b0000000801000029000000000101041a0000000602500039000000000012041b0000000901000029000000000101041a000004ad01100197000000000205041a000004f602200197000000000112019f000000000015041b0000000703000029000000000103041a000000050110017f0000000c04000029000000f802400270000000000121019f000000000013041b00000044010000390000000101100367000000000101043b0000000802000029000000000012041b000000400200043d000000200320003900000000001304350000000000420435000004ad010000410000000003000414000004ad0430009c0000000003018019000004ad0420009c00000000020180190000004001200210000000c002300210000000000112019f000004e1011001c70000800d020000390000000203000039000004f704000041000010180000013d000000400100043d0000000603000029000006990000013d000000400100043d0000050602000041000009460000013d000000400100043d0000000703000029000005190000013d000004e204100041000000000504041a000004d105500197000000000054041b000000010110008a000000000012041b0000000a01000029000000000101041a000c00000001001d00000000003004350000000b01000029000000200010043f000004ad010000410000000002000414000004ad0320009c0000000002018019000000c001200210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d0000000c02000029000004e402200197000000000101043b000000000301041a000004e503300197000000000223019f000000000021041b0000000a020000290000000201200039000000000301041a0000000301200039000000000201041a000000400100043d0000004004100039000000000024043500000040020000390000000002210436000000f8033002100000000000320435000004e60310009c0000102e0000213d0000006003100039000000400030043f000004ad04000041000004ad0320009c000000000204801900000040022002100000000001010433000004ad0310009c00000000010480190000006001100210000000000121019f0000000002000414000004ad0320009c0000000002048019000000c002200210000000000112019f000004d2011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b00000000001004350000009901000039000000200010043f0000000001000414000004ad0210009c000004ad01008041000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000000000201041a000c0100000000920000000c0220017f000000000021041b0000000a040000290000000801400039000000000301041a0000000901400039000000000201041a0000000a01400039000000000401041a000000400100043d0000006005100039000000000045043500000040041000390000000000240435000000600
200003900000000022104360000000000320435000004e70310009c0000102e0000213d0000008003100039000000400030043f000004ad04000041000004ad0320009c000000000204801900000040022002100000000001010433000004ad0310009c00000000010480190000006001100210000000000121019f0000000002000414000004ad0320009c0000000002048019000000c002200210000000000112019f000004d2011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b00000000001004350000009a01000039000000200010043f0000000001000414000004ad0210009c000004ad01008041000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000000000201041a0000000c0220017f000000000021041b0000000d0100002900000000001004350000000b01000029000000200010043f000004ad030000410000000001000414000004ad0210009c0000000001038019000000c001100210000004e1011001c7000080100200003912af12aa0000040f0000000102200190000011c30000613d000000000101043b000000000001041b0000000102100039000000000002041b0000000202100039000000000002041b0000000302100039000000000002041b0000000402100039000000000002041b0000000502100039000000000002041b0000000602100039000000000002041b0000000702100039000000000002041b0000000802100039000000000002041b0000000902100039000000000002041b0000000a02100039000000000002041b0000000b02100039000000000002041b0000000c02100039000000000002041b0000000d02100039000000000002041b0000000e02100039000000000002041b0000000f02100039000000000002041b0000001002100039000000000002041b0000001102100039000000000002041b0000001201100039000000000001041b0000000001000414000004ad0210009c000004ad01008041000000c001100210000004d2011001c70000800d020000390000000203000039000004e804000041000010180000013d0000000001000019000012b100010430000000400100043d0000050702000041000009460000013d000000400100043d000004f502000041000009460000013d000004f5010000410000000000120435000004ad01000041000004ad0320009c00000000020180190000004001200210000004e0011001c7000012b1000104300000000002010019000000400100043d0000051c0310009c000011f60000813d0000008003100039000000400030043f000000000302041a0000001004300270000004ad04400197000000400510003900000000004504350000ff00043001900000000004000019000000010400c03900000020051000390000000000450435000000ff033001900000000003000019000000010300c0390000000000310435000000400300043d000004f30430009c000011f60000213d0000004004300039000000400040043f0000000104200039000000000404041a000000f80440021000000000044304360000000202200039000000000202041a000000000024043500000060021000390000000000320435000000000001042d000005160100004100000000001004350000004101000039000000040010043f0000051701000041000012b1000104300000000002010019000000400100043d0000051d0310009c000012300000813d000000a003100039000000400030043f000000000302041a0000001004300270000004ad04400197000000400510003900000000004504350000ff00043001900000000004000019000000010400c03900000020051000390000000000450435000000ff033001900000000003000019000000010300c0390000000000310435000000400300043d000004e60430009c000012300000213d0000006004300039000000400040043f0000000104200039000000000404041a00000000044304360000000205200039000000000505041a00000000005404350000000304200039000000000404041a0000004005300039000000000045043500000060041000390000000000340435000000400300043d000004f30430009c000012300000213d0000004004300039000000400040043f0000000404200039000000000404041a00000000044304360000000502200039000000000202041a0000008002200210000000000024043500000080021000390000000000320435000000000001042d000005160100004100000000001004350000004101000039000000040010043f0000051701000041000012b1000104300000000043010434000000000303004b0000000003000019000000010300
c03900000000033204360000000004040433000000000404004b0000000004000019000000010400c039000000000043043500000040031000390000000003030433000004ad0330019700000040042000390000000000340435000000600110003900000000010104330000000031010434000004f40110019700000060042000390000000000140435000000800120003900000000020304330000000000210435000000000001042d0000000043010434000000000303004b0000000003000019000000010300c03900000000033204360000000004040433000000000404004b0000000004000019000000010400c039000000000043043500000040031000390000000003030433000004ad03300197000000400420003900000000003404350000006003100039000000000303043300000060042000390000000065030434000000000054043500000000040604330000008005200039000000000045043500000040033000390000000003030433000000a004200039000000000034043500000080011000390000000001010433000000c00320003900000000410104340000000000130435000000e0012000390000000002040433000004ed022001970000000000210435000000000001042d0000006502000039000000000302041a000004d103300197000000000032041b000004d0061001970000003301000039000000000201041a000004d103200197000000000363019f000000000031041b000004ad010000410000000003000414000004ad0430009c0000000003018019000000c001300210000004d2011001c7000004d0052001970000800d020000390000000303000039000004dc0400004112af12a50000040f00000001012001900000128c0000613d000000000001042d0000000001000019000012b100010430000000000001042f000004ad03000041000004ad0410009c00000000010380190000004001100210000004ad0420009c00000000020380190000006002200210000000000112019f0000000002000414000004ad0420009c0000000002038019000000c002200210000000000112019f000004d2011001c7000080100200003912af12aa0000040f0000000102200190000012a30000613d000000000101043b000000000001042d0000000001000019000012b100010430000012a8002104210000000102000039000000000001042d0000000002000019000000000001042d000012ad002104230000000102000039000000000001042d0000000002000019000000000001042d000012af00000432000012b00001042e000012b1000104300000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffff000000020000000000000000000000000000004000000100000000000000000000000000000000000000000000000000000000000000000000000000740211cd0000000000000000000000000000000000000000000000000000000099624cb500000000000000000000000000000000000000000000000000000000d113233100000000000000000000000000000000000000000000000000000000e8de12de00000000000000000000000000000000000000000000000000000000e8de12df00000000000000000000000000000000000000000000000000000000f2fde38b00000000000000000000000000000000000000000000000000000000d113233200000000000000000000000000000000000000000000000000000000e30c39780000000000000000000000000000000000000000000000000000000099624cb6000000000000000000000000000000000000000000000000000000009ec3f92700000000000000000000000000000000000000000000000000000000c4d66de80000000000000000000000000000000000000000000000000000000094ca304a0000000000000000000000000000000000000000000000000000000094ca304b0000000000000000000000000000000000000000000000000000000095570d1200000000000000000000000000000000000000000000000000000000994057ef00000000000000000000000000000000000000000000000000000000740211ce0000000000000000000000000000000000000000000000000000000079ba5097000000000000000000000000000000000000000000000000000000008da5cb5b000000000000000000000000000000000000000000000000000000003ea053ea000000000000000000000000000000000000000000000000000000005139839b000000000000000000000000000000000000000000000000000000005139839c000000000000000000000000000000000000000000000000000000005875da2b00000000000000000000000000000000000000000000000000000000715018a
6000000000000000000000000000000000000000000000000000000003ea053eb00000000000000000000000000000000000000000000000000000000426cb7660000000000000000000000000000000000000000000000000000000047b4a7a60000000000000000000000000000000000000000000000000000000029092d0d0000000000000000000000000000000000000000000000000000000029092d0e000000000000000000000000000000000000000000000000000000002f9c8f0d000000000000000000000000000000000000000000000000000000003d1f16d40000000000000000000000000000000000000000000000000000000008dc336000000000000000000000000000000000000000000000000000000000189a5a17000000000000000000000000000000000000000000000000000000001c5a9d9c000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000038d16b8cac22d99fc7c124b9cd0de2d3fa1faef420bfe791d8c362d765e2270000000000000000000000000000000000000000200000008000000000000000001806aa1896bbf26568e884a7374b41e002500962caba6a15023a8d90e8508b830200000200000000000000000000000000000024000000000000000000000000647920696e697469616c697a6564000000000000000000000000000000000000496e697469616c697a61626c653a20636f6e747261637420697320616c72656108c379a0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000084000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e002000000000000000000000000000000000000200000000000000000000000007f26b83ff96e1f2b6a682f133852f6798a09c465da95921460cefb3847402498c7eabfa90000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040000000000000000000000000200000000000000000000000000000000000040000000000000000000000000354a83ed9988f79f6038d4c7a7dadbad8af32f4ad6df893e0e5807a1b1944ff8354a83ed9988f79f6038d4c7a7dadbad8af32f4ad6df893e0e5807a1b1944ff90000000000000000000000000000000000000000ffffffff0000000000000000ffffffffffffffffffffffffffffffffffffffff00000000ffffffffffffffff000000000000000000000000000000000000000000000000ffffffffffffff9f000000000000000000000000000000000000000000000000ffffffffffffff7f1629bfc36423a1b4749d3fe1d6970b9d32d42bbee47dd5540670696ab6b9a4adffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0001ffffffffffffffffffffffffffffffffffffffffffffffffffff0000000001010000000000000000000000000000000000000000000000000000ffffffff000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffff00000000ffffffffffffffffffffffffffffffffffffffffffffffff00000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00000000ffffc810fe33ecde832d7a0b696cf1738e8ec84938965b214bd4e1195ef8d6e8dd26000000000000000000000000000000000000000000000000ffffffffffffffff000000000000000000000000000000000000000000000000ffffffffffffffbfff00000000000000000000000000000000000000000000000000000000000000564da7df00000000000000000000000000000000000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffffffff00000000205a2493904fd7ff8685b368c0a13560ddcc577aac288385c5834c57996c9ad2000000000000000000000000000000000000000000000000ffffffffffffff5f4f776e61626c6532537465703a2063616c6c6572206973206e6f7420746865206e6577206f776e65720000000000000000000000000000000000000000000000000000000000000000000000000000000000008400000080000000000000000000000000000000000000000000000000000000000000000000000001000000000200000000000000000000000000000000
0000200000008000000000000000003d2e97fa99e681fa81a4ec393f21f6b0fcccca1463d6bf43cb3a80299968091c000000000000000000000000000000010000000000000000000000000000000002000000000000000000000000000000000000a0000000000000000000000000a88f0bfec4ed62be2eb084417ce4a44e7289b24faec9f1ae553fce55c475c3d25c0844614cb8580962e0ce8f75301a64eccd757945ce5620b7d84b65e232a673d9957750e6343405c319eb99a4ec67fa11cfd66969318cbc71aa2d45fa53a349164b707307f2b1481ff9787806cdcc4d8b12e532a6a84dd0a3f4c4507c1808e700ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff6b0949d8000000000000000000000000000000000000000000000000000000001a956ace000000000000000000000000000000000000000000000000000000008000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffff1fffffffffffffffffffffffffffffffffffffffff000000000000000000000000ffffffffffffffffffffffffffffffffffffffffffffffffffff000000000000020000000000000000000000000000000000012000000000000000000000000034a1749086dfd00e126255be02329843f1c05be61db5c1fc80ceefc38ccef67683ee5ccc0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000040000008000000000000000001088e0c600000000000000000000000000000000000000000000000000000000c5cbf870000000000000000000000000000000000000000000000000000000004f776e61626c653a2063616c6c6572206973206e6f7420746865206f776e65720000000000000000000000000000000000000064000000800000000000000000cfc24166db4bb677e857cacabd1541fb2b30645021b27c5130419589b84db52b2fc842bf000000000000000000000000000000000000000000000000000000004e487b71000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000024000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffffa07dc8b937d2916b130743c447af3d771fa55e66b7393105150e2e635ac3e87260bdc213f90000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000003a0000000000000000000000000000000000000000000000000000000000000000000000000ffffffffffffff80000000000000000000000000000000000000000000000000ffffffffffffff605c3973df1c57590f450df32f693e00b74a06572949ddc108dd33c91827cf42f0", + "linkReferences": {}, + "deployedLinkReferences": {}, + "factoryDeps": {} +} diff --git a/zk_toolbox/crates/zk_inception/build.rs b/zk_toolbox/crates/zk_inception/build.rs new file mode 100644 index 00000000000..43c8d7a5aac --- /dev/null +++ b/zk_toolbox/crates/zk_inception/build.rs @@ -0,0 +1,11 @@ +use std::path::PathBuf; + +use ethers::contract::Abigen; + +fn main() -> eyre::Result<()> { + let outdir = PathBuf::from(std::env::var("OUT_DIR")?).canonicalize()?; + Abigen::new("ConsensusRegistry", "abi/ConsensusRegistry.json")? + .generate()? 
+        .write_to_file(outdir.join("consensus_registry_abi.rs"))?;
+    Ok(())
+}
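The build script above regenerates the Rust bindings from abi/ConsensusRegistry.json on every build, so the ABI and the generated module cannot drift apart. A minimal sketch of how a crate might pull the generated bindings in and call the contract; the module layout, RPC wiring, and the `owner()` call are illustrative assumptions, not code from this PR:

use std::sync::Arc;

use ethers::providers::{Http, Provider};

// Pull in the bindings that build.rs wrote into $OUT_DIR.
mod consensus_registry {
    include!(concat!(env!("OUT_DIR"), "/consensus_registry_abi.rs"));
}

async fn print_owner(rpc_url: &str, addr: ethers::types::Address) -> eyre::Result<()> {
    let provider = Arc::new(Provider::<Http>::try_from(rpc_url)?);
    let registry = consensus_registry::ConsensusRegistry::new(addr, provider);
    // `owner()` is assumed to exist because the embedded ABI carries
    // Ownable2Step selectors (0x8da5cb5b is `owner()`).
    let owner = registry.owner().call().await?;
    println!("ConsensusRegistry owner: {owner:?}");
    Ok(())
}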
diff --git a/zk_toolbox/crates/zk_inception/src/commands/args/run_server.rs b/zk_toolbox/crates/zk_inception/src/commands/args/run_server.rs
index ebe407d4822..d090c0de03f 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/args/run_server.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/args/run_server.rs
@@ -17,6 +17,6 @@ pub struct RunServerArgs {
     additional_args: Vec<String>,
     #[clap(long, help = MSG_SERVER_BUILD_HELP)]
     pub build: bool,
-    #[clap(help=MSG_SERVER_URING_HELP, long, default_missing_value = "true", num_args = 0..=1)]
+    #[clap(help=MSG_SERVER_URING_HELP, long, default_missing_value = "true")]
     pub uring: bool,
 }
diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/accept_chain_ownership.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/accept_chain_ownership.rs
new file mode 100644
index 00000000000..37d69fcf5bc
--- /dev/null
+++ b/zk_toolbox/crates/zk_inception/src/commands/chain/accept_chain_ownership.rs
@@ -0,0 +1,42 @@
+use anyhow::Context;
+use common::{forge::ForgeScriptArgs, logger, spinner::Spinner};
+use config::EcosystemConfig;
+use xshell::Shell;
+
+use crate::{
+    accept_ownership::accept_admin,
+    messages::{
+        MSG_ACCEPTING_ADMIN_SPINNER, MSG_CHAIN_NOT_INITIALIZED, MSG_CHAIN_OWNERSHIP_TRANSFERRED,
+        MSG_L1_SECRETS_MUST_BE_PRESENTED,
+    },
+};
+
+pub async fn run(args: ForgeScriptArgs, shell: &Shell) -> anyhow::Result<()> {
+    let ecosystem_config = EcosystemConfig::from_file(shell)?;
+    let chain_config = ecosystem_config
+        .load_current_chain()
+        .context(MSG_CHAIN_NOT_INITIALIZED)?;
+    let contracts = chain_config.get_contracts_config()?;
+    let secrets = chain_config.get_secrets_config()?;
+    let l1_rpc_url = secrets
+        .l1
+        .context(MSG_L1_SECRETS_MUST_BE_PRESENTED)?
+        .l1_rpc_url
+        .expose_str()
+        .to_string();
+
+    let spinner = Spinner::new(MSG_ACCEPTING_ADMIN_SPINNER);
+    accept_admin(
+        shell,
+        &ecosystem_config,
+        contracts.l1.chain_admin_addr,
+        chain_config.get_wallets_config()?.governor_private_key(),
+        contracts.l1.diamond_proxy_addr,
+        &args,
+        l1_rpc_url.clone(),
+    )
+    .await?;
+    spinner.finish();
+    logger::success(MSG_CHAIN_OWNERSHIP_TRANSFERRED);
+    Ok(())
+}
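Note how the RPC URL above is only reachable through `expose_str()`: the secrets config wraps sensitive values so they cannot end up in logs by accident. A stand-in sketch of that wrapper pattern with hypothetical names; the real type lives in zksync's secrets handling and is not part of this diff:

// Hypothetical secret wrapper; Debug/Display are deliberately not derived,
// so the URL (which may embed credentials) never leaks via formatting.
struct SecretUrl(String);

impl SecretUrl {
    // Access is an explicit, greppable call site.
    fn expose_str(&self) -> &str {
        &self.0
    }
}

fn main() {
    let secret = SecretUrl("http://user:password@localhost:8545".to_string());
    let l1_rpc_url = secret.expose_str().to_string();
    assert!(l1_rpc_url.starts_with("http://"));
}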
diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/args/create.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/args/create.rs
index 3ea15d10f8b..5fc46c1b227 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/chain/args/create.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/chain/args/create.rs
@@ -127,7 +127,7 @@ impl ChainCreateArgs {
                 .ask()
         });
 
-        let wallet_path: Option<PathBuf> = if self.wallet_creation == Some(WalletCreation::InFile) {
+        let wallet_path: Option<PathBuf> = if wallet_creation == WalletCreation::InFile {
             Some(self.wallet_path.unwrap_or_else(|| {
                 Prompt::new(MSG_WALLET_PATH_PROMPT)
                     .validate_with(|val: &String| {
diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/args/genesis.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/args/genesis.rs
index 483b78e9b26..aaf995985a3 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/chain/args/genesis.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/chain/args/genesis.rs
@@ -1,3 +1,4 @@
+use anyhow::Context;
 use clap::Parser;
 use common::{db::DatabaseConfig, Prompt};
 use config::ChainConfig;
@@ -6,11 +7,10 @@
 use slugify_rs::slugify;
 use url::Url;
 
 use crate::{
-    defaults::{generate_db_names, DBNames, DATABASE_PROVER_URL, DATABASE_SERVER_URL},
+    defaults::{generate_db_names, DBNames, DATABASE_SERVER_URL},
     messages::{
-        msg_prover_db_name_prompt, msg_prover_db_url_prompt, msg_server_db_name_prompt,
-        msg_server_db_url_prompt, MSG_PROVER_DB_NAME_HELP, MSG_PROVER_DB_URL_HELP,
-        MSG_SERVER_DB_NAME_HELP, MSG_SERVER_DB_URL_HELP, MSG_USE_DEFAULT_DATABASES_HELP,
+        msg_server_db_name_prompt, msg_server_db_url_prompt, MSG_SERVER_DB_NAME_HELP,
+        MSG_SERVER_DB_URL_HELP, MSG_USE_DEFAULT_DATABASES_HELP,
     },
 };
@@ -20,10 +20,6 @@ pub struct GenesisArgs {
     pub server_db_url: Option<Url>,
     #[clap(long, help = MSG_SERVER_DB_NAME_HELP)]
     pub server_db_name: Option<String>,
-    #[clap(long, help = MSG_PROVER_DB_URL_HELP)]
-    pub prover_db_url: Option<Url>,
-    #[clap(long, help = MSG_PROVER_DB_NAME_HELP)]
-    pub prover_db_name: Option<String>,
     #[clap(long, short, help = MSG_USE_DEFAULT_DATABASES_HELP)]
     pub use_default: bool,
     #[clap(long, short, action)]
@@ -32,15 +28,11 @@ impl GenesisArgs {
     pub fn fill_values_with_prompt(self, config: &ChainConfig) -> GenesisArgsFinal {
-        let DBNames {
-            server_name,
-            prover_name,
-        } = generate_db_names(config);
+        let DBNames { server_name, .. } = generate_db_names(config);
         let chain_name = config.name.clone();
         if self.use_default {
             GenesisArgsFinal {
                 server_db: DatabaseConfig::new(DATABASE_SERVER_URL.clone(), server_name),
-                prover_db: DatabaseConfig::new(DATABASE_PROVER_URL.clone(), prover_name),
                 dont_drop: self.dont_drop,
             }
         } else {
@@ -57,31 +49,44 @@ impl GenesisArgs {
             }),
             separator = "_"
         );
-        let prover_db_url = self.prover_db_url.unwrap_or_else(|| {
-            Prompt::new(&msg_prover_db_url_prompt(&chain_name))
-                .default(DATABASE_PROVER_URL.as_str())
-                .ask()
-        });
-        let prover_db_name = slugify!(
-            &self.prover_db_name.unwrap_or_else(|| {
-                Prompt::new(&msg_prover_db_name_prompt(&chain_name))
-                    .default(&prover_name)
-                    .ask()
-            }),
-            separator = "_"
-        );
         GenesisArgsFinal {
             server_db: DatabaseConfig::new(server_db_url, server_db_name),
-            prover_db: DatabaseConfig::new(prover_db_url, prover_db_name),
             dont_drop: self.dont_drop,
         }
     }
+
+    pub fn fill_values_with_secrets(
+        mut self,
+        chain_config: &ChainConfig,
+    ) -> anyhow::Result<GenesisArgsFinal> {
+        let secrets = chain_config.get_secrets_config()?;
+        let database = secrets
+            .database
+            .context("Database secrets must be present")?;
+
+        let (server_db_url, server_db_name) = if let Some(db_full_url) = database.server_url {
+            let db_config = DatabaseConfig::from_url(db_full_url.expose_url())
+                .context("Invalid server database URL")?;
+            (Some(db_config.url), Some(db_config.name))
+        } else {
+            (None, None)
+        };
+
+        self.server_db_url = self.server_db_url.or(server_db_url);
+        self.server_db_name = self.server_db_name.or(server_db_name);
+
+        Ok(self.fill_values_with_prompt(chain_config))
+    }
+
+    pub fn reset_db_names(&mut self) {
+        self.server_db_name = None;
+        self.server_db_url = None;
+    }
 }
 
 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct GenesisArgsFinal {
     pub server_db: DatabaseConfig,
-    pub prover_db: DatabaseConfig,
     pub dont_drop: bool,
 }
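`fill_values_with_secrets` above reuses a previously persisted server database URL instead of re-prompting. The diff does not show `DatabaseConfig::from_url` itself (it lives in the `common` crate); a hedged sketch of what splitting a full Postgres URL into a base URL plus database name could look like:

use url::Url;

// Hypothetical splitter mirroring what DatabaseConfig::from_url presumably does.
fn split_db_url(full: &Url) -> Result<(Url, String), String> {
    // Take the last path segment as the database name, e.g.
    // postgres://user:pass@localhost:5432/zksync_server_local -> "zksync_server_local".
    let name = full
        .path_segments()
        .and_then(|mut segments| segments.next_back())
        .filter(|segment| !segment.is_empty())
        .ok_or_else(|| "URL has no database name".to_string())?
        .to_string();
    let mut base = full.clone();
    base.set_path("");
    Ok((base, name))
}

fn main() {
    let full = Url::parse("postgres://user:pass@localhost:5432/zksync_server_local").unwrap();
    let (base, name) = split_db_url(&full).unwrap();
    assert!(base.as_str().starts_with("postgres://user:pass@localhost:5432"));
    assert_eq!(name, "zksync_server_local");
}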
diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/args/init/configs.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/args/init/configs.rs
new file mode 100644
index 00000000000..c26ad647524
--- /dev/null
+++ b/zk_toolbox/crates/zk_inception/src/commands/chain/args/init/configs.rs
@@ -0,0 +1,70 @@
+use clap::Parser;
+use common::Prompt;
+use config::ChainConfig;
+use serde::{Deserialize, Serialize};
+use types::L1Network;
+use url::Url;
+
+use crate::{
+    commands::chain::args::{
+        genesis::{GenesisArgs, GenesisArgsFinal},
+        init::InitArgsFinal,
+    },
+    defaults::LOCAL_RPC_URL,
+    messages::{
+        MSG_GENESIS_ARGS_HELP, MSG_L1_RPC_URL_HELP, MSG_L1_RPC_URL_INVALID_ERR,
+        MSG_L1_RPC_URL_PROMPT, MSG_NO_PORT_REALLOCATION_HELP,
+    },
+};
+
+#[derive(Debug, Clone, Serialize, Deserialize, Parser)]
+pub struct InitConfigsArgs {
+    #[clap(flatten, next_help_heading = MSG_GENESIS_ARGS_HELP)]
+    #[serde(flatten)]
+    pub genesis_args: GenesisArgs,
+    #[clap(long, help = MSG_L1_RPC_URL_HELP)]
+    pub l1_rpc_url: Option<String>,
+    #[clap(long, help = MSG_NO_PORT_REALLOCATION_HELP, default_value = "false", default_missing_value = "true", num_args = 0..=1)]
+    pub no_port_reallocation: bool,
+}
+
+#[derive(Debug, Serialize, Deserialize, Clone)]
+pub struct InitConfigsArgsFinal {
+    pub genesis_args: GenesisArgsFinal,
+    pub l1_rpc_url: String,
+    pub no_port_reallocation: bool,
+}
+
+impl InitConfigsArgs {
+    pub fn fill_values_with_prompt(self, config: &ChainConfig) -> InitConfigsArgsFinal {
+        let l1_rpc_url = self.l1_rpc_url.unwrap_or_else(|| {
+            let mut prompt = Prompt::new(MSG_L1_RPC_URL_PROMPT);
+            if config.l1_network == L1Network::Localhost {
+                prompt = prompt.default(LOCAL_RPC_URL);
+            }
+            prompt
+                .validate_with(|val: &String| -> Result<(), String> {
+                    Url::parse(val)
+                        .map(|_| ())
+                        .map_err(|_| MSG_L1_RPC_URL_INVALID_ERR.to_string())
+                })
+                .ask()
+        });
+
+        InitConfigsArgsFinal {
+            genesis_args: self.genesis_args.fill_values_with_prompt(config),
+            l1_rpc_url,
+            no_port_reallocation: self.no_port_reallocation,
+        }
+    }
+}
+
+impl InitConfigsArgsFinal {
+    pub fn from_chain_init_args(init_args: &InitArgsFinal) -> InitConfigsArgsFinal {
+        InitConfigsArgsFinal {
+            genesis_args: init_args.genesis_args.clone(),
+            l1_rpc_url: init_args.l1_rpc_url.clone(),
+            no_port_reallocation: init_args.no_port_reallocation,
+        }
+    }
+}
diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/args/init.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/args/init/mod.rs
similarity index 68%
rename from zk_toolbox/crates/zk_inception/src/commands/chain/args/init.rs
rename to zk_toolbox/crates/zk_inception/src/commands/chain/args/init/mod.rs
index 9dd6c490bd7..be4d28202b8 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/chain/args/init.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/chain/args/init/mod.rs
@@ -1,5 +1,3 @@
-use std::str::FromStr;
-
 use clap::Parser;
 use common::{forge::ForgeScriptArgs, Prompt};
 use config::ChainConfig;
@@ -7,40 +5,16 @@
 use serde::{Deserialize, Serialize};
 use types::L1Network;
 use url::Url;
 
-use super::genesis::GenesisArgsFinal;
 use crate::{
-    commands::chain::args::genesis::GenesisArgs,
+    commands::chain::args::genesis::{GenesisArgs, GenesisArgsFinal},
     defaults::LOCAL_RPC_URL,
     messages::{
         MSG_DEPLOY_PAYMASTER_PROMPT, MSG_GENESIS_ARGS_HELP, MSG_L1_RPC_URL_HELP,
-        MSG_L1_RPC_URL_INVALID_ERR, MSG_L1_RPC_URL_PROMPT, MSG_PORT_OFFSET_HELP,
+        MSG_L1_RPC_URL_INVALID_ERR, MSG_L1_RPC_URL_PROMPT, MSG_NO_PORT_REALLOCATION_HELP,
     },
 };
 
-#[derive(Clone, Debug, Serialize, Deserialize)]
-pub struct PortOffset(u16);
-
-impl PortOffset {
-    pub fn from_chain_id(chain_id: u16) -> Self {
-        Self((chain_id - 1) * 100)
-    }
-}
-
-impl FromStr for PortOffset {
-    type Err = String;
-
-    fn from_str(s: &str) -> Result<Self, Self::Err> {
-        s.parse::<u16>()
-            .map(PortOffset)
-            .map_err(|_| "Invalid port offset".to_string())
-    }
-}
-
-impl From<PortOffset> for u16 {
-    fn from(port_offset: PortOffset) -> Self {
-        port_offset.0
-    }
-}
+pub mod configs;
 
 #[derive(Debug, Clone, Serialize, Deserialize, Parser)]
 pub struct InitArgs {
@@ -55,8 +29,8 @@ pub struct InitArgs {
     pub deploy_paymaster: Option<bool>,
     #[clap(long, help = MSG_L1_RPC_URL_HELP)]
     pub l1_rpc_url: Option<String>,
-    #[clap(long, help = MSG_PORT_OFFSET_HELP)]
-    pub port_offset: Option<PortOffset>,
+    #[clap(long, help = MSG_NO_PORT_REALLOCATION_HELP, default_value = "false", default_missing_value = "true", num_args = 0..=1)]
+    pub no_port_reallocation: bool,
 }
 
 impl InitArgs {
@@ -86,10 +60,7 @@ impl InitArgs {
             genesis_args: self.genesis_args.fill_values_with_prompt(config),
             deploy_paymaster,
             l1_rpc_url,
-            port_offset: self
-                .port_offset
-                .unwrap_or(PortOffset::from_chain_id(config.id as u16))
-                .into(),
+            no_port_reallocation: self.no_port_reallocation,
         }
     }
 }
@@ -100,5 +71,5 @@ pub struct InitArgsFinal {
     pub genesis_args: GenesisArgsFinal,
     pub deploy_paymaster: bool,
     pub l1_rpc_url: String,
-    pub port_offset: u16,
+    pub no_port_reallocation: bool,
 }
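Both `InitConfigsArgs` and the reworked `InitArgs` above validate the L1 RPC URL the same way before accepting it. A self-contained rendering of that check; the error text here is a stand-in for `MSG_L1_RPC_URL_INVALID_ERR`:

use url::Url;

// Free-function stand-in for the closure passed to `Prompt::validate_with`.
fn validate_l1_rpc_url(val: &String) -> Result<(), String> {
    Url::parse(val)
        .map(|_| ())
        .map_err(|_| "Invalid L1 RPC URL".to_string())
}

fn main() {
    assert!(validate_l1_rpc_url(&"http://localhost:8545".to_string()).is_ok());
    assert!(validate_l1_rpc_url(&"not a url".to_string()).is_err());
}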
diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/build_transactions.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/build_transactions.rs
index 68cb7a9a074..5f1be15231b 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/chain/build_transactions.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/chain/build_transactions.rs
@@ -1,14 +1,15 @@
 use anyhow::Context;
-use common::{config::global_config, git, logger, spinner::Spinner};
+use common::{git, logger, spinner::Spinner};
 use config::{
     copy_configs, traits::SaveConfigWithBasePath, update_from_chain_config, EcosystemConfig,
 };
 use ethers::utils::hex::ToHex;
 use xshell::Shell;
 
-use super::common::register_chain;
 use crate::{
-    commands::chain::args::build_transactions::BuildTransactionsArgs,
+    commands::chain::{
+        args::build_transactions::BuildTransactionsArgs, register_chain::register_chain,
+    },
     messages::{
         MSG_BUILDING_CHAIN_REGISTRATION_TXNS_SPINNER, MSG_CHAIN_NOT_FOUND_ERR,
         MSG_CHAIN_TRANSACTIONS_BUILT, MSG_CHAIN_TXN_MISSING_CONTRACT_CONFIG,
@@ -27,9 +28,8 @@ const SCRIPT_CONFIG_FILE_DST: &str = "register-hyperchain.toml";
 
 pub(crate) async fn run(args: BuildTransactionsArgs, shell: &Shell) -> anyhow::Result<()> {
     let config = EcosystemConfig::from_file(shell)?;
-    let chain_name = global_config().chain_name.clone();
     let chain_config = config
-        .load_chain(chain_name)
+        .load_current_chain()
        .context(MSG_CHAIN_NOT_FOUND_ERR)?;
 
     let args = args.fill_values_with_prompt(config.default_chain.clone());
diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/common.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/common.rs
index ec70d6122d2..e0aa0b4e047 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/chain/common.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/chain/common.rs
@@ -1,66 +1,12 @@
-use common::{
-    forge::{Forge, ForgeScriptArgs},
-    spinner::Spinner,
-};
-use config::{
-    forge_interface::{
-        register_chain::{input::RegisterChainL1Config, output::RegisterChainOutput},
-        script_params::REGISTER_CHAIN_SCRIPT_PARAMS,
-    },
-    traits::{ReadConfig, SaveConfig},
-    ChainConfig, ContractsConfig, EcosystemConfig,
-};
+use common::spinner::Spinner;
+use config::{ChainConfig, EcosystemConfig};
 use types::{BaseToken, L1Network, WalletCreation};
-use xshell::Shell;
 
 use crate::{
     consts::AMOUNT_FOR_DISTRIBUTION_TO_WALLETS,
     messages::{MSG_DISTRIBUTING_ETH_SPINNER, MSG_MINT_BASE_TOKEN_SPINNER},
-    utils::forge::{check_the_balance, fill_forge_private_key},
 };
 
-#[allow(clippy::too_many_arguments)]
-pub async fn register_chain(
-    shell: &Shell,
-    forge_args: ForgeScriptArgs,
-    config: &EcosystemConfig,
-    chain_config: &ChainConfig,
-    contracts: &mut ContractsConfig,
-    l1_rpc_url: String,
-    sender: Option<String>,
-    broadcast: bool,
-) -> anyhow::Result<()> {
-    let deploy_config_path = REGISTER_CHAIN_SCRIPT_PARAMS.input(&config.link_to_code);
-
-    let deploy_config = RegisterChainL1Config::new(chain_config, contracts)?;
-    deploy_config.save(shell, deploy_config_path)?;
-
-    let mut forge = Forge::new(&config.path_to_foundry())
-        .script(&REGISTER_CHAIN_SCRIPT_PARAMS.script(), forge_args.clone())
-        .with_ffi()
-        .with_rpc_url(l1_rpc_url);
-
-    if broadcast {
-        forge = forge.with_broadcast();
-    }
-
-    if let Some(address) = sender {
-        forge = forge.with_sender(address);
-    } else {
-        forge = fill_forge_private_key(forge, config.get_wallets()?.governor_private_key())?;
-        check_the_balance(&forge).await?;
-    }
-
-    forge.run(shell)?;
-
-    let register_chain_output = RegisterChainOutput::read(
-        shell,
-        REGISTER_CHAIN_SCRIPT_PARAMS.output(&chain_config.link_to_code),
-    )?;
-    contracts.set_chain_contracts(&register_chain_output);
-    Ok(())
-}
-
 // Distribute eth to the chain wallets for localhost environment
 pub async fn distribute_eth(
     ecosystem_config: &EcosystemConfig,
@@ -111,7 +57,7 @@ pub async fn mint_base_token(
     let amount = AMOUNT_FOR_DISTRIBUTION_TO_WALLETS * base_token.nominator as u128
         / base_token.denominator as u128;
     common::ethereum::mint_token(
-        wallets.operator,
+        wallets.governor,
        base_token.address,
        addresses,
        l1_rpc_url,
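The `mint_base_token` hunk above keeps the nominator/denominator scaling of the distribution amount. A worked instance of that arithmetic with made-up numbers; `AMOUNT_FOR_DISTRIBUTION_TO_WALLETS` is a crate constant whose value is not shown in this diff:

fn main() {
    // Hypothetical distribution amount: 100 ETH in wei.
    let amount_for_distribution: u128 = 100_000_000_000_000_000_000;
    // A base token priced at 3/2 per ETH mints 1.5x the ETH amount.
    let (nominator, denominator): (u128, u128) = (3, 2);
    let amount = amount_for_distribution * nominator / denominator;
    assert_eq!(amount, 150_000_000_000_000_000_000);
}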
diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_l2_contracts.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_l2_contracts.rs
index 5bfc0a62348..26a1d0bb325 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_l2_contracts.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_l2_contracts.rs
@@ -3,7 +3,6 @@ use std::path::Path;
 use anyhow::Context;
 use common::{
     cmd::Cmd,
-    config::global_config,
     forge::{Forge, ForgeScriptArgs},
     spinner::Spinner,
 };
@@ -43,10 +42,9 @@ pub async fn run(
     shell: &Shell,
     deploy_option: Deploy2ContractsOption,
 ) -> anyhow::Result<()> {
-    let chain_name = global_config().chain_name.clone();
     let ecosystem_config = EcosystemConfig::from_file(shell)?;
     let chain_config = ecosystem_config
-        .load_chain(chain_name)
+        .load_current_chain()
        .context(MSG_CHAIN_NOT_INITIALIZED)?;
 
     let mut contracts = chain_config.get_contracts_config()?;
diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_paymaster.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_paymaster.rs
index 58c199189bd..0da56f0c962 100644
--- a/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_paymaster.rs
+++ b/zk_toolbox/crates/zk_inception/src/commands/chain/deploy_paymaster.rs
@@ -1,8 +1,5 @@
 use anyhow::Context;
-use common::{
-    config::global_config,
-    forge::{Forge, ForgeScriptArgs},
-};
+use common::forge::{Forge, ForgeScriptArgs};
 use config::{
     forge_interface::{
         paymaster::{DeployPaymasterInput, DeployPaymasterOutput},
@@ -19,10 +16,9 @@ use crate::{
 };
 
 pub async fn run(args: ForgeScriptArgs, shell: &Shell) -> anyhow::Result<()> {
-    let chain_name = global_config().chain_name.clone();
     let ecosystem_config = EcosystemConfig::from_file(shell)?;
     let chain_config = ecosystem_config
-        .load_chain(chain_name)
+        .load_current_chain()
        .context(MSG_CHAIN_NOT_INITIALIZED)?;
     let mut contracts = chain_config.get_contracts_config()?;
     deploy_paymaster(shell, &chain_config, &mut contracts, args, None, true).await?;
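The same two-line refactor repeats across the hunks above and below: an explicit `global_config().chain_name` lookup followed by `load_chain` collapses into a single `load_current_chain()` call. A hypothetical shape for such a helper; the real one lives in the `config` crate and may differ:

use common::config::global_config;
use config::{ChainConfig, EcosystemConfig};

// Assumed semantics: honor the CLI-level `--chain` override when present,
// otherwise let `load_chain` fall back to the ecosystem's default chain.
fn load_current_chain(ecosystem: &EcosystemConfig) -> Option<ChainConfig> {
    ecosystem.load_chain(global_config().chain_name.clone())
}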
diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/genesis.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/genesis.rs
deleted file mode 100644
index 187af41489d..00000000000
--- a/zk_toolbox/crates/zk_inception/src/commands/chain/genesis.rs
+++ /dev/null
@@ -1,176 +0,0 @@
-use std::path::PathBuf;
-
-use anyhow::Context;
-use common::{
-    config::global_config,
-    db::{drop_db_if_exists, init_db, migrate_db, DatabaseConfig},
-    logger,
-    server::{Server, ServerMode},
-    spinner::Spinner,
-};
-use config::{
-    override_config, set_databases, set_file_artifacts, set_rocks_db_config,
-    traits::{FileConfigWithDefaultName, SaveConfigWithBasePath},
-    ChainConfig, ContractsConfig, EcosystemConfig, FileArtifacts, GeneralConfig, GenesisConfig,
-    SecretsConfig, WalletsConfig,
-};
-use types::ProverMode;
-use xshell::Shell;
-use zksync_basic_types::commitment::L1BatchCommitmentMode;
-
-use super::args::genesis::GenesisArgsFinal;
-use crate::{
-    commands::chain::args::genesis::GenesisArgs,
-    consts::{
-        PATH_TO_ONLY_REAL_PROOFS_OVERRIDE_CONFIG, PATH_TO_VALIDIUM_OVERRIDE_CONFIG,
-        PROVER_MIGRATIONS, SERVER_MIGRATIONS,
-    },
-    messages::{
-        MSG_CHAIN_NOT_INITIALIZED, MSG_FAILED_TO_DROP_PROVER_DATABASE_ERR,
-        MSG_FAILED_TO_DROP_SERVER_DATABASE_ERR, MSG_FAILED_TO_RUN_SERVER_ERR,
-        MSG_GENESIS_COMPLETED, MSG_INITIALIZING_DATABASES_SPINNER,
-        MSG_INITIALIZING_PROVER_DATABASE, MSG_INITIALIZING_SERVER_DATABASE,
-        MSG_RECREATE_ROCKS_DB_ERRROR, MSG_SELECTED_CONFIG, MSG_STARTING_GENESIS,
-        MSG_STARTING_GENESIS_SPINNER,
-    },
-    utils::rocks_db::{recreate_rocksdb_dirs, RocksDBDirOption},
-};
-
-pub async fn run(args: GenesisArgs, shell: &Shell) -> anyhow::Result<()> {
-    let chain_name = global_config().chain_name.clone();
-    let ecosystem_config = EcosystemConfig::from_file(shell)?;
-    let chain_config = ecosystem_config
-        .load_chain(chain_name)
-        .context(MSG_CHAIN_NOT_INITIALIZED)?;
-    let args = args.fill_values_with_prompt(&chain_config);
-
-    genesis(args, shell, &chain_config).await?;
-    logger::outro(MSG_GENESIS_COMPLETED);
-
-    Ok(())
-}
-
-pub async fn genesis(
-    args: GenesisArgsFinal,
-    shell: &Shell,
-    config: &ChainConfig,
-) -> anyhow::Result<()> {
-    shell.create_dir(&config.rocks_db_path)?;
-
-    let link_to_code = config.link_to_code.clone();
-    let rocks_db = recreate_rocksdb_dirs(shell, &config.rocks_db_path, RocksDBDirOption::Main)
-        .context(MSG_RECREATE_ROCKS_DB_ERRROR)?;
-    let mut general = config.get_general_config()?;
-    let file_artifacts = FileArtifacts::new(config.artifacts.clone());
-    set_rocks_db_config(&mut general, rocks_db)?;
-    set_file_artifacts(&mut general, file_artifacts);
-    general.save_with_base_path(shell, &config.configs)?;
-
-    if config.prover_version != ProverMode::NoProofs {
-        override_config(
-            shell,
-            link_to_code.join(PATH_TO_ONLY_REAL_PROOFS_OVERRIDE_CONFIG),
-            config,
-        )?;
-    }
-
-    if config.l1_batch_commit_data_generator_mode == L1BatchCommitmentMode::Validium {
-        override_config(
-            shell,
-            link_to_code.join(PATH_TO_VALIDIUM_OVERRIDE_CONFIG),
-            config,
-        )?;
-    }
-
-    let mut secrets = config.get_secrets_config()?;
-    set_databases(&mut secrets, &args.server_db, &args.prover_db)?;
-    secrets.save_with_base_path(shell, &config.configs)?;
-
-    logger::note(
-        MSG_SELECTED_CONFIG,
-        logger::object_to_string(serde_json::json!({
-            "chain_config": config,
-            "server_db_config": args.server_db,
-            "prover_db_config": args.prover_db,
-        })),
-    );
-    logger::info(MSG_STARTING_GENESIS);
-
-    let spinner = Spinner::new(MSG_INITIALIZING_DATABASES_SPINNER);
-    initialize_databases(
-        shell,
-        &args.server_db,
-        &args.prover_db,
-        config.link_to_code.clone(),
-        args.dont_drop,
-    )
-    .await?;
-    spinner.finish();
-
-    let spinner = Spinner::new(MSG_STARTING_GENESIS_SPINNER);
-    run_server_genesis(config, shell)?;
-    spinner.finish();
-
-    Ok(())
-}
-
-async fn initialize_databases(
-    shell: &Shell,
-    server_db_config: &DatabaseConfig,
-    prover_db_config: &DatabaseConfig,
-    link_to_code: PathBuf,
-    dont_drop: bool,
-) -> anyhow::Result<()> {
-    let path_to_server_migration = link_to_code.join(SERVER_MIGRATIONS);
-
-    if global_config().verbose {
-        logger::debug(MSG_INITIALIZING_SERVER_DATABASE)
-    }
-    if !dont_drop {
-        drop_db_if_exists(server_db_config)
-            .await
-            .context(MSG_FAILED_TO_DROP_SERVER_DATABASE_ERR)?;
-        init_db(server_db_config).await?;
-    }
-    migrate_db(
-        shell,
-        path_to_server_migration,
-        &server_db_config.full_url(),
-    )
-    .await?;
-
-    if global_config().verbose {
-        logger::debug(MSG_INITIALIZING_PROVER_DATABASE)
-    }
-    if !dont_drop {
-        drop_db_if_exists(prover_db_config)
-            .await
-            .context(MSG_FAILED_TO_DROP_PROVER_DATABASE_ERR)?;
-        init_db(prover_db_config).await?;
-    }
-    let path_to_prover_migration = link_to_code.join(PROVER_MIGRATIONS);
-    migrate_db(
-        shell,
-        path_to_prover_migration,
-        &prover_db_config.full_url(),
-    )
-    .await?;
-
-    Ok(())
-}
-
-fn run_server_genesis(chain_config: &ChainConfig, shell: &Shell) -> anyhow::Result<()> {
-    let server = Server::new(None, chain_config.link_to_code.clone(), false);
-    server
-        .run(
-            shell,
-            ServerMode::Genesis,
-            GenesisConfig::get_path_with_base_path(&chain_config.configs),
-            WalletsConfig::get_path_with_base_path(&chain_config.configs),
-            GeneralConfig::get_path_with_base_path(&chain_config.configs),
-            SecretsConfig::get_path_with_base_path(&chain_config.configs),
-            ContractsConfig::get_path_with_base_path(&chain_config.configs),
-            vec![],
-        )
-        .context(MSG_FAILED_TO_RUN_SERVER_ERR)
-}
diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/genesis/database.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/genesis/database.rs
new file mode 100644
index 00000000000..edf480946be
--- /dev/null
+++ b/zk_toolbox/crates/zk_inception/src/commands/chain/genesis/database.rs
@@ -0,0 +1,118 @@
+use std::path::PathBuf;
+
+use anyhow::Context;
+use common::{
+    config::global_config,
+    db::{drop_db_if_exists, init_db, migrate_db, DatabaseConfig},
+    logger,
+};
+use config::{
+    override_config, set_file_artifacts, set_rocks_db_config, set_server_database,
+    traits::SaveConfigWithBasePath, ChainConfig, EcosystemConfig, FileArtifacts,
+};
+use types::ProverMode;
+use xshell::Shell;
+use zksync_basic_types::commitment::L1BatchCommitmentMode;
+
+use crate::{
+    commands::chain::args::genesis::{GenesisArgs, GenesisArgsFinal},
+    consts::{
+        PATH_TO_ONLY_REAL_PROOFS_OVERRIDE_CONFIG, PATH_TO_VALIDIUM_OVERRIDE_CONFIG,
+        SERVER_MIGRATIONS,
+    },
+    messages::{
+        MSG_CHAIN_NOT_INITIALIZED, MSG_FAILED_TO_DROP_SERVER_DATABASE_ERR,
+        MSG_GENESIS_DATABASES_INITIALIZED, MSG_INITIALIZING_SERVER_DATABASE,
+        MSG_RECREATE_ROCKS_DB_ERRROR,
+    },
+    utils::rocks_db::{recreate_rocksdb_dirs, RocksDBDirOption},
+};
+
+pub async fn run(args: GenesisArgs, shell: &Shell) -> anyhow::Result<()> {
+    let ecosystem_config = EcosystemConfig::from_file(shell)?;
+    let chain_config = ecosystem_config
+        .load_current_chain()
+        .context(MSG_CHAIN_NOT_INITIALIZED)?;
+
+    let mut secrets = chain_config.get_secrets_config()?;
+    let args = args.fill_values_with_secrets(&chain_config)?;
+    set_server_database(&mut secrets, &args.server_db)?;
+    secrets.save_with_base_path(shell, &chain_config.configs)?;
+
+    initialize_server_database(
+        shell,
+        &args.server_db,
+        chain_config.link_to_code.clone(),
+        args.dont_drop,
+    )
+    .await?;
+    logger::outro(MSG_GENESIS_DATABASES_INITIALIZED);
+
+    Ok(())
+}
+
+pub async fn initialize_server_database(
+    shell: &Shell,
+    server_db_config: &DatabaseConfig,
+    link_to_code: PathBuf,
+    dont_drop: bool,
+) -> anyhow::Result<()> {
+    let path_to_server_migration = link_to_code.join(SERVER_MIGRATIONS);
+
+    if global_config().verbose {
+        logger::debug(MSG_INITIALIZING_SERVER_DATABASE)
+    }
+    if !dont_drop {
+        drop_db_if_exists(server_db_config)
+            .await
+            .context(MSG_FAILED_TO_DROP_SERVER_DATABASE_ERR)?;
+        init_db(server_db_config).await?;
+    }
+    migrate_db(
+        shell,
+        path_to_server_migration,
+        &server_db_config.full_url(),
+    )
+    .await?;
+
+    Ok(())
+}
+
+pub fn update_configs(
+    args: GenesisArgsFinal,
+    shell: &Shell,
+    config: &ChainConfig,
+) -> anyhow::Result<()> {
+    shell.create_dir(&config.rocks_db_path)?;
+
+    // Update secrets configs
+    let mut secrets = config.get_secrets_config()?;
+    set_server_database(&mut secrets, &args.server_db)?;
+    secrets.save_with_base_path(shell, &config.configs)?;
+
+    // Update general config
+    let mut general = config.get_general_config()?;
+    let rocks_db = recreate_rocksdb_dirs(shell, &config.rocks_db_path, RocksDBDirOption::Main)
+        .context(MSG_RECREATE_ROCKS_DB_ERRROR)?;
+    let file_artifacts = FileArtifacts::new(config.artifacts.clone());
+    set_rocks_db_config(&mut general, rocks_db)?;
+    set_file_artifacts(&mut general, file_artifacts);
+    general.save_with_base_path(shell, &config.configs)?;
+
+    let link_to_code = config.link_to_code.clone();
+    if config.prover_version != ProverMode::NoProofs {
+        override_config(
+            shell,
+            link_to_code.join(PATH_TO_ONLY_REAL_PROOFS_OVERRIDE_CONFIG),
+            config,
+        )?;
+    }
+    if config.l1_batch_commit_data_generator_mode == L1BatchCommitmentMode::Validium {
+        override_config(
+            shell,
+            link_to_code.join(PATH_TO_VALIDIUM_OVERRIDE_CONFIG),
+            config,
+        )?;
+    }
+    Ok(())
+}
diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/genesis/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/genesis/mod.rs
new file mode 100644
index 00000000000..c1cc03174ae
--- /dev/null
+++ b/zk_toolbox/crates/zk_inception/src/commands/chain/genesis/mod.rs
@@ -0,0 +1,92 @@
+use anyhow::Context;
+use clap::{command, Parser, Subcommand};
+use common::{logger, spinner::Spinner};
+use config::{ChainConfig, EcosystemConfig};
+use xshell::Shell;
+
+use crate::{
+    commands::chain::{
+        args::genesis::{GenesisArgs, GenesisArgsFinal},
+        genesis::{self, database::initialize_server_database, server::run_server_genesis},
+    },
+    messages::{
+        MSG_CHAIN_NOT_INITIALIZED, MSG_GENESIS_COMPLETED, MSG_INITIALIZING_DATABASES_SPINNER,
+        MSG_SELECTED_CONFIG, MSG_STARTING_GENESIS, MSG_STARTING_GENESIS_SPINNER,
+    },
+};
+
+// Genesis subcommands
+pub mod database;
+pub mod server;
+
+#[derive(Subcommand, Debug, Clone)]
+pub enum GenesisSubcommands {
+    /// Initialize databases
+    #[command(alias = "database")]
+    InitDatabase(Box<GenesisArgs>),
+    /// Runs server genesis
+    Server,
+}
+
+#[derive(Parser, Debug)]
+#[command()]
+pub struct GenesisCommand {
+    #[command(subcommand)]
+    command: Option<GenesisSubcommands>,
+    #[clap(flatten)]
+    args: GenesisArgs,
+}
+
+pub(crate) async fn run(args: GenesisCommand, shell: &Shell) -> anyhow::Result<()> {
+    match args.command {
+        Some(GenesisSubcommands::InitDatabase(args)) => database::run(*args, shell).await,
+        Some(GenesisSubcommands::Server) => server::run(shell).await,
+        None => run_genesis(args.args, shell).await,
+    }
+}
+
+pub async fn run_genesis(args: GenesisArgs, shell: &Shell) -> anyhow::Result<()> {
+    let ecosystem_config = EcosystemConfig::from_file(shell)?;
+    let chain_config = ecosystem_config
+        .load_current_chain()
+        .context(MSG_CHAIN_NOT_INITIALIZED)?;
+    let args = args.fill_values_with_prompt(&chain_config);
+
+    genesis(args, shell, &chain_config).await?;
+    logger::outro(MSG_GENESIS_COMPLETED);
+
+    Ok(())
+}
+
+pub async fn genesis(
+    args: GenesisArgsFinal,
+    shell: &Shell,
+    config: &ChainConfig,
+) -> anyhow::Result<()> {
+    genesis::database::update_configs(args.clone(), shell, config)?;
+
+    logger::note(
+        MSG_SELECTED_CONFIG,
+        logger::object_to_string(serde_json::json!({
+            "chain_config": config,
+            "server_db_config": args.server_db,
+        })),
+    );
+    logger::info(MSG_STARTING_GENESIS);
+
+    let spinner = Spinner::new(MSG_INITIALIZING_DATABASES_SPINNER);
+    initialize_server_database(
+        shell,
+        &args.server_db,
+        config.link_to_code.clone(),
+        args.dont_drop,
+    )
+    .await?;
+    spinner.finish();
+
+    let spinner = Spinner::new(MSG_STARTING_GENESIS_SPINNER);
+    run_server_genesis(config, shell)?;
+    spinner.finish();
+
+    Ok(())
+}
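`GenesisCommand` above combines an optional subcommand with flattened shared flags, so a bare `genesis` keeps the old all-in-one behavior while `init-database` (alias `database`) and `server` expose the two halves separately. A self-contained miniature of that clap pattern; the names here are generic stand-ins:

use clap::{Parser, Subcommand};

#[derive(Subcommand, Debug, Clone)]
enum Sub {
    /// Runs only the database half
    #[command(alias = "db")]
    InitDatabase,
    /// Runs only the server half
    Server,
}

#[derive(Parser, Debug)]
struct Cmd {
    #[command(subcommand)]
    command: Option<Sub>,
    /// Shared flag, usable with or without a subcommand
    #[clap(long)]
    dont_drop: bool,
}

fn main() {
    // Bare invocation -> None -> the full, combined flow.
    assert!(Cmd::parse_from(["prog"]).command.is_none());
    // A subcommand (or its alias) selects one half.
    let cmd = Cmd::parse_from(["prog", "init-database"]);
    assert!(matches!(cmd.command, Some(Sub::InitDatabase)));
}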
a/zk_toolbox/crates/zk_inception/src/commands/chain/genesis/server.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/genesis/server.rs new file mode 100644 index 00000000000..50a74b7ea9e --- /dev/null +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/genesis/server.rs @@ -0,0 +1,46 @@ +use anyhow::Context; +use common::{ + logger, + server::{Server, ServerMode}, + spinner::Spinner, +}; +use config::{ + traits::FileConfigWithDefaultName, ChainConfig, ContractsConfig, EcosystemConfig, + GeneralConfig, GenesisConfig, SecretsConfig, WalletsConfig, +}; +use xshell::Shell; + +use crate::messages::{ + MSG_CHAIN_NOT_INITIALIZED, MSG_FAILED_TO_RUN_SERVER_ERR, MSG_GENESIS_COMPLETED, + MSG_STARTING_GENESIS_SPINNER, +}; + +pub async fn run(shell: &Shell) -> anyhow::Result<()> { + let ecosystem_config = EcosystemConfig::from_file(shell)?; + let chain_config = ecosystem_config + .load_current_chain() + .context(MSG_CHAIN_NOT_INITIALIZED)?; + + let spinner = Spinner::new(MSG_STARTING_GENESIS_SPINNER); + run_server_genesis(&chain_config, shell)?; + spinner.finish(); + logger::outro(MSG_GENESIS_COMPLETED); + + Ok(()) +} + +pub fn run_server_genesis(chain_config: &ChainConfig, shell: &Shell) -> anyhow::Result<()> { + let server = Server::new(None, chain_config.link_to_code.clone(), false); + server + .run( + shell, + ServerMode::Genesis, + GenesisConfig::get_path_with_base_path(&chain_config.configs), + WalletsConfig::get_path_with_base_path(&chain_config.configs), + GeneralConfig::get_path_with_base_path(&chain_config.configs), + SecretsConfig::get_path_with_base_path(&chain_config.configs), + ContractsConfig::get_path_with_base_path(&chain_config.configs), + vec![], + ) + .context(MSG_FAILED_TO_RUN_SERVER_ERR) +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/init/configs.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/init/configs.rs new file mode 100644 index 00000000000..d0897473b83 --- /dev/null +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/init/configs.rs @@ -0,0 +1,105 @@ +use anyhow::Context; +use common::logger; +use config::{ + copy_configs, set_l1_rpc_url, traits::SaveConfigWithBasePath, update_from_chain_config, + ChainConfig, ContractsConfig, EcosystemConfig, DEFAULT_CONSENSUS_PORT, +}; +use ethers::types::Address; +use xshell::Shell; + +use crate::{ + commands::{ + chain::{ + args::init::configs::{InitConfigsArgs, InitConfigsArgsFinal}, + genesis, + }, + portal::update_portal_config, + }, + defaults::PORT_RANGE_END, + messages::{ + MSG_CHAIN_CONFIGS_INITIALIZED, MSG_CHAIN_NOT_FOUND_ERR, + MSG_PORTAL_FAILED_TO_CREATE_CONFIG_ERR, + }, + utils::{ + consensus::{generate_consensus_keys, get_consensus_config, get_consensus_secrets}, + ports::EcosystemPortsScanner, + }, +}; + +pub async fn run(args: InitConfigsArgs, shell: &Shell) -> anyhow::Result<()> { + let ecosystem_config = EcosystemConfig::from_file(shell)?; + let chain_config = ecosystem_config + .load_current_chain() + .context(MSG_CHAIN_NOT_FOUND_ERR)?; + let args = args.fill_values_with_prompt(&chain_config); + + init_configs(&args, shell, &ecosystem_config, &chain_config).await?; + logger::outro(MSG_CHAIN_CONFIGS_INITIALIZED); + + Ok(()) +} + +pub async fn init_configs( + init_args: &InitConfigsArgsFinal, + shell: &Shell, + ecosystem_config: &EcosystemConfig, + chain_config: &ChainConfig, +) -> anyhow::Result { + // Port scanner should run before copying configs to avoid marking initial ports as assigned + let mut ecosystem_ports = EcosystemPortsScanner::scan(shell)?; + copy_configs(shell, 
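// Assuming clap's default kebab-case naming for `GenesisSubcommands` (the
// aliases are declared above; the primary names are an assumption), the
// genesis flow can now be driven whole or in parts:
//
//   zk_inception chain genesis                 # configs + databases + server genesis
//   zk_inception chain genesis init-database   # databases only (alias: database)
//   zk_inception chain genesis server          # server genesis only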
&ecosystem_config.link_to_code, &chain_config.configs)?; + + if !init_args.no_port_reallocation { + ecosystem_ports.allocate_ports_in_yaml( + shell, + &chain_config.path_to_general_config(), + chain_config.id, + )?; + } + + // Initialize general config + let mut general_config = chain_config.get_general_config()?; + + // TODO: This is a temporary solution. We should allocate consensus port using `EcosystemPorts::allocate_ports_in_yaml` + let offset = ((chain_config.id - 1) * 100) as u16; + let consensus_port_range = DEFAULT_CONSENSUS_PORT + offset..PORT_RANGE_END; + let consensus_port = + ecosystem_ports.allocate_port(consensus_port_range, "Consensus".to_string())?; + + let consensus_keys = generate_consensus_keys(); + let consensus_config = get_consensus_config( + chain_config, + consensus_port, + Some(consensus_keys.clone()), + None, + )?; + general_config.consensus_config = Some(consensus_config); + general_config.save_with_base_path(shell, &chain_config.configs)?; + + // Initialize genesis config + let mut genesis_config = chain_config.get_genesis_config()?; + update_from_chain_config(&mut genesis_config, chain_config); + genesis_config.save_with_base_path(shell, &chain_config.configs)?; + + // Initialize contracts config + let mut contracts_config = ecosystem_config.get_contracts_config()?; + contracts_config.l1.diamond_proxy_addr = Address::zero(); + contracts_config.l1.governance_addr = Address::zero(); + contracts_config.l1.chain_admin_addr = Address::zero(); + contracts_config.l1.base_token_addr = chain_config.base_token.address; + contracts_config.save_with_base_path(shell, &chain_config.configs)?; + + // Initialize secrets config + let mut secrets = chain_config.get_secrets_config()?; + set_l1_rpc_url(&mut secrets, init_args.l1_rpc_url.clone())?; + secrets.consensus = Some(get_consensus_secrets(&consensus_keys)); + secrets.save_with_base_path(shell, &chain_config.configs)?; + + genesis::database::update_configs(init_args.genesis_args.clone(), shell, chain_config)?; + + update_portal_config(shell, chain_config) + .await + .context(MSG_PORTAL_FAILED_TO_CREATE_CONFIG_ERR)?; + + Ok(contracts_config) +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/init.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/init/mod.rs similarity index 56% rename from zk_toolbox/crates/zk_inception/src/commands/chain/init.rs rename to zk_toolbox/crates/zk_inception/src/commands/chain/init/mod.rs index fa2388a69be..ac80a5b98f7 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/init.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/init/mod.rs @@ -1,88 +1,91 @@ -use anyhow::{bail, Context}; -use common::{config::global_config, git, logger, spinner::Spinner}; -use config::{ - copy_configs, ports_config, set_l1_rpc_url, traits::SaveConfigWithBasePath, - update_from_chain_config, update_ports, ChainConfig, EcosystemConfig, GeneralConfig, -}; +use anyhow::Context; +use clap::{command, Parser, Subcommand}; +use common::{git, logger, spinner::Spinner}; +use config::{traits::SaveConfigWithBasePath, ChainConfig, EcosystemConfig}; use types::BaseToken; use xshell::Shell; -use super::common::{distribute_eth, mint_base_token, register_chain}; use crate::{ accept_ownership::accept_admin, - commands::{ - chain::{ - args::init::{InitArgs, InitArgsFinal}, - deploy_l2_contracts, deploy_paymaster, - genesis::genesis, - set_token_multiplier_setter::set_token_multiplier_setter, - setup_legacy_bridge::setup_legacy_bridge, + commands::chain::{ + args::init::{ + 
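// Worked example for the consensus-port offset above (PORT_RANGE_END is 5000
// per defaults.rs; DEFAULT_CONSENSUS_PORT stays symbolic here):
//
//   chain id 1 -> offset (1 - 1) * 100 = 0   -> range DEFAULT_CONSENSUS_PORT..5000
//   chain id 2 -> offset (2 - 1) * 100 = 100 -> range DEFAULT_CONSENSUS_PORT + 100..5000
//   chain id 3 -> offset (3 - 1) * 100 = 200 -> range DEFAULT_CONSENSUS_PORT + 200..5000
//
// `allocate_port` presumably hands out the first port in the range that the
// scanner has not seen in use, so co-hosted chains get disjoint consensus
// ports by construction.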
configs::{InitConfigsArgs, InitConfigsArgsFinal}, + InitArgs, InitArgsFinal, }, - portal::update_portal_config, + common::{distribute_eth, mint_base_token}, + deploy_l2_contracts, deploy_paymaster, + genesis::genesis, + init::configs::init_configs, + register_chain::register_chain, + set_token_multiplier_setter::set_token_multiplier_setter, + setup_legacy_bridge::setup_legacy_bridge, }, messages::{ msg_initializing_chain, MSG_ACCEPTING_ADMIN_SPINNER, MSG_CHAIN_INITIALIZED, MSG_CHAIN_NOT_FOUND_ERR, MSG_DEPLOYING_PAYMASTER, MSG_GENESIS_DATABASE_ERR, - MSG_PORTAL_FAILED_TO_CREATE_CONFIG_ERR, MSG_PORTS_CONFIG_ERR, MSG_REGISTERING_CHAIN_SPINNER, MSG_SELECTED_CONFIG, MSG_UPDATING_TOKEN_MULTIPLIER_SETTER_SPINNER, MSG_WALLET_TOKEN_MULTIPLIER_SETTER_NOT_FOUND, }, - utils::consensus::{generate_consensus_keys, get_consensus_config, get_consensus_secrets}, }; -pub(crate) async fn run(args: InitArgs, shell: &Shell) -> anyhow::Result<()> { - let chain_name = global_config().chain_name.clone(); +// Init subcommands +pub mod configs; + +#[derive(Subcommand, Debug, Clone)] +pub enum ChainInitSubcommands { + /// Initialize chain configs + Configs(InitConfigsArgs), +} + +#[derive(Parser, Debug)] +#[command()] +pub struct ChainInitCommand { + #[command(subcommand)] + command: Option, + #[clap(flatten)] + args: InitArgs, +} + +pub(crate) async fn run(args: ChainInitCommand, shell: &Shell) -> anyhow::Result<()> { + match args.command { + Some(ChainInitSubcommands::Configs(args)) => configs::run(args, shell).await, + None => run_init(args.args, shell).await, + } +} + +async fn run_init(args: InitArgs, shell: &Shell) -> anyhow::Result<()> { let config = EcosystemConfig::from_file(shell)?; let chain_config = config - .load_chain(chain_name) + .load_current_chain() .context(MSG_CHAIN_NOT_FOUND_ERR)?; - let mut args = args.fill_values_with_prompt(&chain_config); + let args = args.fill_values_with_prompt(&chain_config); logger::note(MSG_SELECTED_CONFIG, logger::object_to_string(&chain_config)); logger::info(msg_initializing_chain("")); git::submodule_update(shell, config.link_to_code.clone())?; - init(&mut args, shell, &config, &chain_config).await?; + init(&args, shell, &config, &chain_config).await?; logger::success(MSG_CHAIN_INITIALIZED); Ok(()) } pub async fn init( - init_args: &mut InitArgsFinal, + init_args: &InitArgsFinal, shell: &Shell, ecosystem_config: &EcosystemConfig, chain_config: &ChainConfig, ) -> anyhow::Result<()> { - copy_configs(shell, &ecosystem_config.link_to_code, &chain_config.configs)?; - - let mut general_config = chain_config.get_general_config()?; - apply_port_offset(init_args.port_offset, &mut general_config)?; - let ports = ports_config(&general_config).context(MSG_PORTS_CONFIG_ERR)?; - - let consensus_keys = generate_consensus_keys(); - let consensus_config = - get_consensus_config(chain_config, ports, Some(consensus_keys.clone()), None)?; - general_config.consensus_config = Some(consensus_config); - general_config.save_with_base_path(shell, &chain_config.configs)?; - - let mut genesis_config = chain_config.get_genesis_config()?; - update_from_chain_config(&mut genesis_config, chain_config); - genesis_config.save_with_base_path(shell, &chain_config.configs)?; - - // Copy ecosystem contracts - let mut contracts_config = ecosystem_config.get_contracts_config()?; - contracts_config.l1.base_token_addr = chain_config.base_token.address; - contracts_config.save_with_base_path(shell, &chain_config.configs)?; + // Initialize configs + let init_configs_args = 
InitConfigsArgsFinal::from_chain_init_args(init_args); + let mut contracts_config = + init_configs(&init_configs_args, shell, ecosystem_config, chain_config).await?; + // Fund some wallet addresses with ETH or base token (only for Localhost) distribute_eth(ecosystem_config, chain_config, init_args.l1_rpc_url.clone()).await?; mint_base_token(ecosystem_config, chain_config, init_args.l1_rpc_url.clone()).await?; - let mut secrets = chain_config.get_secrets_config()?; - set_l1_rpc_url(&mut secrets, init_args.l1_rpc_url.clone())?; - secrets.consensus = Some(get_consensus_secrets(&consensus_keys)); - secrets.save_with_base_path(shell, &chain_config.configs)?; - + // Register chain on BridgeHub (run by L1 Governor) let spinner = Spinner::new(MSG_REGISTERING_CHAIN_SPINNER); register_chain( shell, @@ -97,6 +100,8 @@ pub async fn init( .await?; contracts_config.save_with_base_path(shell, &chain_config.configs)?; spinner.finish(); + + // Accept ownership for DiamondProxy (run by L2 Governor) let spinner = Spinner::new(MSG_ACCEPTING_ADMIN_SPINNER); accept_admin( shell, @@ -110,6 +115,7 @@ pub async fn init( .await?; spinner.finish(); + // Set token multiplier setter address (run by L2 Governor) if chain_config.base_token != BaseToken::eth() { let spinner = Spinner::new(MSG_UPDATING_TOKEN_MULTIPLIER_SETTER_SPINNER); set_token_multiplier_setter( @@ -130,6 +136,7 @@ pub async fn init( spinner.finish(); } + // Deploy L2 contracts: L2SharedBridge, L2DefaultUpgrader, ... (run by L1 Governor) deploy_l2_contracts::deploy_l2_contracts( shell, chain_config, @@ -140,6 +147,7 @@ pub async fn init( .await?; contracts_config.save_with_base_path(shell, &chain_config.configs)?; + // Setup legacy bridge - shouldn't be used for new chains (run by L1 Governor) if let Some(true) = chain_config.legacy_bridge { setup_legacy_bridge( shell, @@ -151,6 +159,7 @@ pub async fn init( .await?; } + // Deploy Paymaster contract (run by L2 Governor) if init_args.deploy_paymaster { let spinner = Spinner::new(MSG_DEPLOYING_PAYMASTER); deploy_paymaster::deploy_paymaster( @@ -170,21 +179,5 @@ pub async fn init( .await .context(MSG_GENESIS_DATABASE_ERR)?; - update_portal_config(shell, chain_config) - .await - .context(MSG_PORTAL_FAILED_TO_CREATE_CONFIG_ERR)?; - - Ok(()) -} - -fn apply_port_offset(port_offset: u16, general_config: &mut GeneralConfig) -> anyhow::Result<()> { - let Some(mut ports_config) = ports_config(general_config) else { - bail!("Missing ports config"); - }; - - ports_config.apply_offset(port_offset); - - update_ports(general_config, &ports_config)?; - Ok(()) } diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/mod.rs index 4ddc4bf5856..378309a07cb 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/mod.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/mod.rs @@ -1,15 +1,16 @@ use ::common::forge::ForgeScriptArgs; use args::build_transactions::BuildTransactionsArgs; pub(crate) use args::create::ChainCreateArgsFinal; -use clap::Subcommand; +use clap::{command, Subcommand}; pub(crate) use create::create_chain_inner; use xshell::Shell; use crate::commands::chain::{ - args::{create::ChainCreateArgs, genesis::GenesisArgs, init::InitArgs}, - deploy_l2_contracts::Deploy2ContractsOption, + args::create::ChainCreateArgs, deploy_l2_contracts::Deploy2ContractsOption, + genesis::GenesisCommand, init::ChainInitCommand, }; +mod accept_chain_ownership; pub(crate) mod args; mod build_transactions; mod common; @@ -17,7 +18,8 @@ mod create; pub mod 
deploy_l2_contracts; pub mod deploy_paymaster; pub mod genesis; -pub(crate) mod init; +pub mod init; +pub mod register_chain; mod set_token_multiplier_setter; mod setup_legacy_bridge; @@ -28,20 +30,32 @@ pub enum ChainCommands { /// Create unsigned transactions for chain deployment BuildTransactions(BuildTransactionsArgs), /// Initialize chain, deploying necessary contracts and performing on-chain operations - Init(InitArgs), + Init(Box), /// Run server genesis - Genesis(GenesisArgs), - /// Initialize bridges on l2 - #[command(alias = "bridge")] - InitializeBridges(ForgeScriptArgs), - /// Deploy all l2 contracts + Genesis(GenesisCommand), + /// Register a new chain on L1 (executed by L1 governor). + /// This command deploys and configures Governance, ChainAdmin, and DiamondProxy contracts, + /// registers chain with BridgeHub and sets pending admin for DiamondProxy. + /// Note: After completion, L2 governor can accept ownership by running `accept-chain-ownership` + #[command(alias = "register")] + RegisterChain(ForgeScriptArgs), + /// Deploy all L2 contracts (executed by L1 governor). #[command(alias = "l2")] DeployL2Contracts(ForgeScriptArgs), + /// Accept ownership of L2 chain (executed by L2 governor). + /// This command should be run after `register-chain` to accept ownership of newly created + /// DiamondProxy contract. + #[command(alias = "accept-ownership")] + AcceptChainOwnership(ForgeScriptArgs), + /// Initialize bridges on L2 + #[command(alias = "bridge")] + InitializeBridges(ForgeScriptArgs), /// Deploy L2 consensus registry #[command(alias = "consensus")] DeployConsensusRegistry(ForgeScriptArgs), /// Deploy Default Upgrader - Upgrader(ForgeScriptArgs), + #[command(alias = "upgrader")] + DeployUpgrader(ForgeScriptArgs), /// Deploy paymaster smart contract #[command(alias = "paymaster")] DeployPaymaster(ForgeScriptArgs), @@ -52,16 +66,18 @@ pub enum ChainCommands { pub(crate) async fn run(shell: &Shell, args: ChainCommands) -> anyhow::Result<()> { match args { ChainCommands::Create(args) => create::run(args, shell), - ChainCommands::Init(args) => init::run(args, shell).await, + ChainCommands::Init(args) => init::run(*args, shell).await, ChainCommands::BuildTransactions(args) => build_transactions::run(args, shell).await, ChainCommands::Genesis(args) => genesis::run(args, shell).await, + ChainCommands::RegisterChain(args) => register_chain::run(args, shell).await, ChainCommands::DeployL2Contracts(args) => { deploy_l2_contracts::run(args, shell, Deploy2ContractsOption::All).await } + ChainCommands::AcceptChainOwnership(args) => accept_chain_ownership::run(args, shell).await, ChainCommands::DeployConsensusRegistry(args) => { deploy_l2_contracts::run(args, shell, Deploy2ContractsOption::ConsensusRegistry).await } - ChainCommands::Upgrader(args) => { + ChainCommands::DeployUpgrader(args) => { deploy_l2_contracts::run(args, shell, Deploy2ContractsOption::Upgrader).await } ChainCommands::InitializeBridges(args) => { diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/register_chain.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/register_chain.rs new file mode 100644 index 00000000000..9f2ff41f897 --- /dev/null +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/register_chain.rs @@ -0,0 +1,96 @@ +use anyhow::Context; +use common::{ + forge::{Forge, ForgeScriptArgs}, + logger, + spinner::Spinner, +}; +use config::{ + forge_interface::{ + register_chain::{input::RegisterChainL1Config, output::RegisterChainOutput}, + script_params::REGISTER_CHAIN_SCRIPT_PARAMS, + }, 
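// The restructured command list above splits the old monolithic init into an
// explicit two-governor handshake. Assuming clap's kebab-case names for the
// variants (the aliases are as declared above):
//
//   zk_inception chain register-chain           # L1 governor (alias: register)
//   zk_inception chain accept-chain-ownership   # L2 governor (alias: accept-ownership)
//   zk_inception chain deploy-l2-contracts      # L1 governor (alias: l2)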
+ traits::{ReadConfig, SaveConfig, SaveConfigWithBasePath}, + ChainConfig, ContractsConfig, EcosystemConfig, +}; +use xshell::Shell; + +use crate::{ + messages::{ + MSG_CHAIN_NOT_INITIALIZED, MSG_CHAIN_REGISTERED, MSG_L1_SECRETS_MUST_BE_PRESENTED, + MSG_REGISTERING_CHAIN_SPINNER, + }, + utils::forge::{check_the_balance, fill_forge_private_key}, +}; + +pub async fn run(args: ForgeScriptArgs, shell: &Shell) -> anyhow::Result<()> { + let ecosystem_config = EcosystemConfig::from_file(shell)?; + let chain_config = ecosystem_config + .load_current_chain() + .context(MSG_CHAIN_NOT_INITIALIZED)?; + let mut contracts = chain_config.get_contracts_config()?; + let secrets = chain_config.get_secrets_config()?; + let l1_rpc_url = secrets + .l1 + .context(MSG_L1_SECRETS_MUST_BE_PRESENTED)? + .l1_rpc_url + .expose_str() + .to_string(); + let spinner = Spinner::new(MSG_REGISTERING_CHAIN_SPINNER); + register_chain( + shell, + args, + &ecosystem_config, + &chain_config, + &mut contracts, + l1_rpc_url, + None, + true, + ) + .await?; + contracts.save_with_base_path(shell, chain_config.configs)?; + spinner.finish(); + logger::success(MSG_CHAIN_REGISTERED); + Ok(()) +} + +#[allow(clippy::too_many_arguments)] +pub async fn register_chain( + shell: &Shell, + forge_args: ForgeScriptArgs, + config: &EcosystemConfig, + chain_config: &ChainConfig, + contracts: &mut ContractsConfig, + l1_rpc_url: String, + sender: Option, + broadcast: bool, +) -> anyhow::Result<()> { + let deploy_config_path = REGISTER_CHAIN_SCRIPT_PARAMS.input(&config.link_to_code); + + let deploy_config = RegisterChainL1Config::new(chain_config, contracts)?; + deploy_config.save(shell, deploy_config_path)?; + + let mut forge = Forge::new(&config.path_to_foundry()) + .script(®ISTER_CHAIN_SCRIPT_PARAMS.script(), forge_args.clone()) + .with_ffi() + .with_rpc_url(l1_rpc_url); + + if broadcast { + forge = forge.with_broadcast(); + } + + if let Some(address) = sender { + forge = forge.with_sender(address); + } else { + forge = fill_forge_private_key(forge, config.get_wallets()?.governor_private_key())?; + check_the_balance(&forge).await?; + } + + forge.run(shell)?; + + let register_chain_output = RegisterChainOutput::read( + shell, + REGISTER_CHAIN_SCRIPT_PARAMS.output(&chain_config.link_to_code), + )?; + contracts.set_chain_contracts(®ister_chain_output); + Ok(()) +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/chain/set_token_multiplier_setter.rs b/zk_toolbox/crates/zk_inception/src/commands/chain/set_token_multiplier_setter.rs index 15f7de4c277..475725cd14e 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/chain/set_token_multiplier_setter.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/chain/set_token_multiplier_setter.rs @@ -1,6 +1,5 @@ use anyhow::Context; use common::{ - config::global_config, forge::{Forge, ForgeScript, ForgeScriptArgs}, logger, spinner::Spinner, @@ -30,10 +29,9 @@ lazy_static! 
{ } pub async fn run(args: ForgeScriptArgs, shell: &Shell) -> anyhow::Result<()> { - let chain_name = global_config().chain_name.clone(); let ecosystem_config = EcosystemConfig::from_file(shell)?; let chain_config = ecosystem_config - .load_chain(chain_name) + .load_current_chain() .context(MSG_CHAIN_NOT_INITIALIZED)?; let contracts_config = chain_config.get_contracts_config()?; let l1_url = chain_config diff --git a/zk_toolbox/crates/zk_inception/src/commands/consensus.rs b/zk_toolbox/crates/zk_inception/src/commands/consensus.rs new file mode 100644 index 00000000000..7cf96ebe5ad --- /dev/null +++ b/zk_toolbox/crates/zk_inception/src/commands/consensus.rs @@ -0,0 +1,364 @@ +use std::{borrow::Borrow, collections::HashMap, sync::Arc}; + +/// Consensus registry contract operations. +/// Includes code duplicated from `zksync_node_consensus::registry::abi`. +use anyhow::Context as _; +use common::logger; +use config::EcosystemConfig; +use ethers::{ + abi::Detokenize, + contract::{FunctionCall, Multicall}, + middleware::{Middleware, NonceManagerMiddleware, SignerMiddleware}, + providers::{Http, JsonRpcClient, PendingTransaction, Provider, RawCall as _}, + signers::{LocalWallet, Signer as _}, + types::{Address, BlockId, H256}, +}; +use xshell::Shell; +use zksync_consensus_crypto::ByteFmt; +use zksync_consensus_roles::{attester, validator}; + +use crate::{messages, utils::consensus::parse_attester_committee}; + +#[allow(warnings)] +mod abi { + include!(concat!(env!("OUT_DIR"), "/consensus_registry_abi.rs")); +} + +fn decode_attester_key(k: &abi::Secp256K1PublicKey) -> anyhow::Result { + let mut x = vec![]; + x.extend(k.tag); + x.extend(k.x); + ByteFmt::decode(&x) +} + +fn decode_weighted_attester( + a: &abi::CommitteeAttester, +) -> anyhow::Result { + Ok(attester::WeightedAttester { + weight: a.weight.into(), + key: decode_attester_key(&a.pub_key).context("key")?, + }) +} + +fn encode_attester_key(k: &attester::PublicKey) -> abi::Secp256K1PublicKey { + let b: [u8; 33] = ByteFmt::encode(k).try_into().unwrap(); + abi::Secp256K1PublicKey { + tag: b[0..1].try_into().unwrap(), + x: b[1..33].try_into().unwrap(), + } +} + +fn encode_validator_key(k: &validator::PublicKey) -> abi::Bls12381PublicKey { + let b: [u8; 96] = ByteFmt::encode(k).try_into().unwrap(); + abi::Bls12381PublicKey { + a: b[0..32].try_into().unwrap(), + b: b[32..64].try_into().unwrap(), + c: b[64..96].try_into().unwrap(), + } +} + +fn encode_validator_pop(pop: &validator::ProofOfPossession) -> abi::Bls12381Signature { + let b: [u8; 48] = ByteFmt::encode(pop).try_into().unwrap(); + abi::Bls12381Signature { + a: b[0..32].try_into().unwrap(), + b: b[32..48].try_into().unwrap(), + } +} + +#[derive(clap::Subcommand, Debug)] +pub enum Command { + /// Sets the attester committee in the consensus registry contract to + /// `consensus.genesis_spec.attesters` in general.yaml. + SetAttesterCommittee, + /// Fetches the attester committee from the consensus registry contract. + GetAttesterCommittee, +} + +/// Collection of sent transactions. +#[derive(Default)] +pub struct TxSet(Vec<(H256, &'static str)>); + +impl TxSet { + /// Sends a transactions and stores the transaction hash. + pub async fn send, D: Detokenize>( + &mut self, + name: &'static str, + call: FunctionCall, + ) -> anyhow::Result<()> { + let h = call.send().await.context(name)?.tx_hash(); + self.0.push((h, name)); + Ok(()) + } + + /// Waits for all stored transactions to complete. + pub async fn wait(self, provider: &Provider
<P>
) -> anyhow::Result<()> { + for (h, name) in self.0 { + async { + let status = PendingTransaction::new(h, provider) + .await + .context("await")? + .context(messages::MSG_RECEIPT_MISSING)? + .status + .context(messages::MSG_STATUS_MISSING)?; + anyhow::ensure!(status == 1.into(), messages::MSG_TRANSACTION_FAILED); + Ok(()) + } + .await + .context(name)?; + } + Ok(()) + } +} + +fn print_attesters(committee: &attester::Committee) { + logger::success( + committee + .iter() + .map(|a| format!("{a:?}")) + .collect::>() + .join("\n"), + ); +} + +struct Setup { + chain: config::ChainConfig, + contracts: config::ContractsConfig, + general: config::GeneralConfig, + genesis: config::GenesisConfig, +} + +impl Setup { + fn provider(&self) -> anyhow::Result> { + let l2_url = &self + .general + .api_config + .as_ref() + .context(messages::MSG_API_CONFIG_MISSING)? + .web3_json_rpc + .http_url; + Provider::try_from(l2_url).with_context(|| format!("Provider::try_from({l2_url})")) + } + + fn multicall(&self, m: Arc) -> anyhow::Result> { + Ok(Multicall::new_with_chain_id( + m, + Some( + self.chain + .get_contracts_config() + .context("get_contracts_config()")? + .l2 + .multicall3 + .context(messages::MSG_MULTICALL3_CONTRACT_NOT_CONFIGURED)?, + ), + Some(self.genesis.l2_chain_id.as_u64()), + )?) + } + + fn governor(&self) -> anyhow::Result> { + let governor = self + .chain + .get_wallets_config() + .context("get_wallets_config()")? + .governor + .private_key + .context(messages::MSG_GOVERNOR_PRIVATE_KEY_NOT_SET)?; + let governor = LocalWallet::from_bytes(governor.as_bytes()) + .context("LocalWallet::from_bytes()")? + .with_chain_id(self.genesis.l2_chain_id.as_u64()); + let provider = self.provider().context("provider()")?; + let signer = SignerMiddleware::new(provider, governor.clone()); + // Allows us to send next transaction without waiting for the previous to complete. + let signer = NonceManagerMiddleware::new(signer, governor.address()); + Ok(Arc::new(signer)) + } + + fn new(shell: &Shell) -> anyhow::Result { + let ecosystem_config = EcosystemConfig::from_file(shell)?; + let chain = ecosystem_config + .load_current_chain() + .context(messages::MSG_CHAIN_NOT_INITIALIZED)?; + let contracts = chain + .get_contracts_config() + .context("get_contracts_config()")?; + let genesis = chain.get_genesis_config().context("get_genesis_config()")?; + let general = chain.get_general_config().context("get_general_config()")?; + Ok(Self { + chain, + contracts, + general, + genesis, + }) + } + + fn consensus_registry( + &self, + m: Arc, + ) -> anyhow::Result> { + let addr = self + .contracts + .l2 + .consensus_registry + .context(messages::MSG_CONSENSUS_REGISTRY_ADDRESS_NOT_CONFIGURED)?; + Ok(abi::ConsensusRegistry::new(addr, m)) + } + + async fn last_block(&self, m: &(impl 'static + Middleware)) -> anyhow::Result { + Ok(m.get_block_number() + .await + .context("get_block_number()")? 
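// The reconciliation loop in `set_attester_committee` below follows a
// desired-state pattern: collect the wanted (key -> weight) map, walk the
// on-chain nodes, and emit only the transactions needed to converge, then
// commit once. A condensed sketch of that idea, with `Key`/`Action` as
// illustrative stand-ins rather than the contract API:
//
//   fn plan(mut want: HashMap<Key, u64>, got: Vec<(Key, u64)>) -> Vec<Action> {
//       let mut txs = vec![];
//       for (key, weight) in got {
//           match want.remove(&key) {
//               Some(w) if w != weight => txs.push(Action::ChangeWeight(key, w)),
//               Some(_) => {}                         // already correct, no tx
//               None => txs.push(Action::Remove(key)),
//           }
//       }
//       txs.extend(want.into_iter().map(|(k, w)| Action::Add(k, w)));
//       txs.push(Action::Commit);
//       txs
//   }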
+ .into()) + } + + async fn get_attester_committee(&self) -> anyhow::Result { + let provider = Arc::new(self.provider()?); + let consensus_registry = self + .consensus_registry(provider) + .context("consensus_registry()")?; + let attesters = consensus_registry + .get_attester_committee() + .call() + .await + .context("get_attester_committee()")?; + let attesters: Vec<_> = attesters + .iter() + .map(decode_weighted_attester) + .collect::>() + .context("decode_weighted_attester()")?; + attester::Committee::new(attesters.into_iter()).context("attester::Committee::new()") + } + + async fn set_attester_committee(&self) -> anyhow::Result { + // Fetch the desired state. + let want = (|| { + Some( + &self + .general + .consensus_config + .as_ref()? + .genesis_spec + .as_ref()? + .attesters, + ) + })() + .context(messages::MSG_CONSENSUS_GENESIS_SPEC_ATTESTERS_MISSING_IN_GENERAL_YAML)?; + let want = parse_attester_committee(want).context("parse_attester_committee()")?; + + let provider = self.provider().context("provider()")?; + let block_id = self.last_block(&provider).await.context("last_block()")?; + let governor = self.governor().context("governor()")?; + let consensus_registry = self + .consensus_registry(governor.clone()) + .context("consensus_registry()")?; + let mut multicall = self.multicall(governor.clone()).context("multicall()")?; + + // Fetch contract state. + let n: usize = consensus_registry + .num_nodes() + .call_raw() + .block(block_id) + .await + .context("num_nodes()")? + .try_into() + .ok() + .context("num_nodes() overflow")?; + + multicall.block = Some(block_id); + let node_owners: Vec
<Address>
= multicall + .add_calls( + false, + (0..n).map(|i| consensus_registry.node_owners(i.into())), + ) + .call_array() + .await + .context("node_owners()")?; + multicall.clear_calls(); + let nodes: Vec = multicall + .add_calls( + false, + node_owners + .iter() + .map(|addr| consensus_registry.nodes(*addr)), + ) + .call_array() + .await + .context("nodes()")?; + multicall.clear_calls(); + + // Update the state. + let mut txs = TxSet::default(); + let mut to_insert: HashMap<_, _> = want.iter().map(|a| (a.key.clone(), a.weight)).collect(); + for (i, node) in nodes.into_iter().enumerate() { + if node.attester_latest.removed { + continue; + } + let got = attester::WeightedAttester { + key: decode_attester_key(&node.attester_latest.pub_key) + .context("decode_attester_key()")?, + weight: node.attester_latest.weight.into(), + }; + if let Some(weight) = to_insert.remove(&got.key) { + if weight != got.weight { + txs.send( + "changed_attester_weight", + consensus_registry.change_attester_weight( + node_owners[i], + weight.try_into().context("weight overflow")?, + ), + ) + .await?; + } + if !node.attester_latest.active { + txs.send("activate", consensus_registry.activate(node_owners[i])) + .await?; + } + } else { + txs.send("remove", consensus_registry.remove(node_owners[i])) + .await?; + } + } + for (key, weight) in to_insert { + let vk = validator::SecretKey::generate(); + txs.send( + "add", + consensus_registry.add( + Address::random(), + /*validator_weight=*/ 1, + encode_validator_key(&vk.public()), + encode_validator_pop(&vk.sign_pop()), + weight.try_into().context("overflow")?, + encode_attester_key(&key), + ), + ) + .await?; + } + txs.send( + "commit_attester_committee", + consensus_registry.commit_attester_committee(), + ) + .await?; + txs.wait(&provider).await.context("wait()")?; + Ok(want) + } +} + +impl Command { + pub(crate) async fn run(self, shell: &Shell) -> anyhow::Result<()> { + let setup = Setup::new(shell).context("Setup::new()")?; + match self { + Self::SetAttesterCommittee => { + let want = setup.set_attester_committee().await?; + let got = setup.get_attester_committee().await?; + anyhow::ensure!( + got == want, + messages::msg_setting_attester_committee_failed(&got, &want) + ); + print_attesters(&got); + } + Self::GetAttesterCommittee => { + let got = setup.get_attester_committee().await?; + print_attesters(&got); + } + } + Ok(()) + } +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/run.rs b/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/run.rs index 32049aa0a90..9913ec817e9 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/run.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/contract_verifier/run.rs @@ -1,5 +1,5 @@ use anyhow::Context; -use common::{cmd::Cmd, config::global_config, logger}; +use common::{cmd::Cmd, logger}; use config::EcosystemConfig; use xshell::{cmd, Shell}; @@ -10,7 +10,7 @@ use crate::messages::{ pub(crate) async fn run(shell: &Shell) -> anyhow::Result<()> { let ecosystem = EcosystemConfig::from_file(shell)?; let chain = ecosystem - .load_chain(global_config().chain_name.clone()) + .load_current_chain() .context(MSG_CHAIN_NOT_FOUND_ERR)?; let config_path = chain.path_to_general_config(); diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/init.rs b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/init.rs index 2411e4b9558..830b7b25e47 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/init.rs +++ 
b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/args/init.rs @@ -10,9 +10,10 @@ use crate::{ commands::chain::args::genesis::GenesisArgs, defaults::LOCAL_RPC_URL, messages::{ - MSG_DEPLOY_ECOSYSTEM_PROMPT, MSG_DEPLOY_ERC20_PROMPT, MSG_DEPLOY_PAYMASTER_PROMPT, - MSG_DEV_ARG_HELP, MSG_GENESIS_ARGS_HELP, MSG_L1_RPC_URL_HELP, MSG_L1_RPC_URL_INVALID_ERR, - MSG_L1_RPC_URL_PROMPT, MSG_OBSERVABILITY_HELP, MSG_OBSERVABILITY_PROMPT, + MSG_DEPLOY_ECOSYSTEM_PROMPT, MSG_DEPLOY_ERC20_PROMPT, MSG_DEV_ARG_HELP, + MSG_GENESIS_ARGS_HELP, MSG_L1_RPC_URL_HELP, MSG_L1_RPC_URL_INVALID_ERR, + MSG_L1_RPC_URL_PROMPT, MSG_NO_PORT_REALLOCATION_HELP, MSG_OBSERVABILITY_HELP, + MSG_OBSERVABILITY_PROMPT, }, }; @@ -73,9 +74,6 @@ pub struct EcosystemArgsFinal { #[derive(Debug, Clone, Serialize, Deserialize, Parser)] pub struct EcosystemInitArgs { - /// Deploy Paymaster contract - #[clap(long, default_missing_value = "true", num_args = 0..=1)] - pub deploy_paymaster: Option, /// Deploy ERC20 contracts #[clap(long, default_missing_value = "true", num_args = 0..=1)] pub deploy_erc20: Option, @@ -85,31 +83,33 @@ pub struct EcosystemInitArgs { #[clap(flatten)] #[serde(flatten)] pub forge_args: ForgeScriptArgs, + /// Deploy Paymaster contract + #[clap(long, default_missing_value = "true", num_args = 0..=1)] + pub deploy_paymaster: Option, #[clap(flatten, next_help_heading = MSG_GENESIS_ARGS_HELP)] #[serde(flatten)] pub genesis_args: GenesisArgs, + /// Initialize ecosystem only and skip chain initialization (chain can be initialized later with `chain init` subcommand) + #[clap(long, default_value_t = false)] + pub ecosystem_only: bool, #[clap(long, help = MSG_DEV_ARG_HELP)] pub dev: bool, #[clap(long, short = 'o', help = MSG_OBSERVABILITY_HELP, default_missing_value = "true", num_args = 0..=1)] pub observability: Option, + #[clap(long, help = MSG_NO_PORT_REALLOCATION_HELP, default_value = "false", default_missing_value = "true", num_args = 0..=1)] + pub no_port_reallocation: bool, } impl EcosystemInitArgs { pub fn fill_values_with_prompt(self, l1_network: L1Network) -> EcosystemInitArgsFinal { - let (deploy_paymaster, deploy_erc20) = if self.dev { - (true, true) + let deploy_erc20 = if self.dev { + true } else { - let deploy_paymaster = self.deploy_paymaster.unwrap_or_else(|| { - PromptConfirm::new(MSG_DEPLOY_PAYMASTER_PROMPT) - .default(true) - .ask() - }); - let deploy_erc20 = self.deploy_erc20.unwrap_or_else(|| { + self.deploy_erc20.unwrap_or_else(|| { PromptConfirm::new(MSG_DEPLOY_ERC20_PROMPT) .default(true) .ask() - }); - (deploy_paymaster, deploy_erc20) + }) }; let ecosystem = self.ecosystem.fill_values_with_prompt(l1_network, self.dev); let observability = if self.dev { @@ -123,22 +123,24 @@ impl EcosystemInitArgs { }; EcosystemInitArgsFinal { - deploy_paymaster, deploy_erc20, ecosystem, forge_args: self.forge_args.clone(), dev: self.dev, observability, + ecosystem_only: self.ecosystem_only, + no_port_reallocation: self.no_port_reallocation, } } } #[derive(Debug, Serialize, Deserialize)] pub struct EcosystemInitArgsFinal { - pub deploy_paymaster: bool, pub deploy_erc20: bool, pub ecosystem: EcosystemArgsFinal, pub forge_args: ForgeScriptArgs, pub dev: bool, pub observability: bool, + pub ecosystem_only: bool, + pub no_port_reallocation: bool, } diff --git a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs index 2d31aad1033..6b64b740aed 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs +++ 
b/zk_toolbox/crates/zk_inception/src/commands/ecosystem/init.rs @@ -31,15 +31,14 @@ use super::{ use crate::{ accept_ownership::{accept_admin, accept_owner}, commands::{ - chain::{self, args::init::PortOffset}, + chain::{self}, ecosystem::create_configs::{ create_erc20_deployment_config, create_initial_deployments_config, }, }, messages::{ - msg_ecosystem_initialized, msg_ecosystem_no_found_preexisting_contract, - msg_initializing_chain, MSG_CHAIN_NOT_INITIALIZED, - MSG_DEPLOYING_ECOSYSTEM_CONTRACTS_SPINNER, MSG_DEPLOYING_ERC20, + msg_chain_load_err, msg_ecosystem_initialized, msg_ecosystem_no_found_preexisting_contract, + msg_initializing_chain, MSG_DEPLOYING_ECOSYSTEM_CONTRACTS_SPINNER, MSG_DEPLOYING_ERC20, MSG_DEPLOYING_ERC20_SPINNER, MSG_ECOSYSTEM_CONTRACTS_PATH_INVALID_ERR, MSG_ECOSYSTEM_CONTRACTS_PATH_PROMPT, MSG_INITIALIZING_ECOSYSTEM, MSG_INTALLING_DEPS_SPINNER, @@ -57,11 +56,9 @@ pub async fn run(args: EcosystemInitArgs, shell: &Shell) -> anyhow::Result<()> { Err(_) => create_initial_deployments_config(shell, &ecosystem_config.config)?, }; - let mut genesis_args = args.genesis_args.clone(); - if args.dev { - genesis_args.use_default = true; - } - let mut final_ecosystem_args = args.fill_values_with_prompt(ecosystem_config.l1_network); + let mut final_ecosystem_args = args + .clone() + .fill_values_with_prompt(ecosystem_config.l1_network); logger::info(MSG_INITIALIZING_ECOSYSTEM); @@ -69,7 +66,7 @@ pub async fn run(args: EcosystemInitArgs, shell: &Shell) -> anyhow::Result<()> { setup_observability::run(shell)?; } - let contracts_config = init( + let contracts_config = init_ecosystem( &mut final_ecosystem_args, shell, &ecosystem_config, @@ -94,42 +91,17 @@ pub async fn run(args: EcosystemInitArgs, shell: &Shell) -> anyhow::Result<()> { .await?; } - // If the name of chain passed then we deploy exactly this chain otherwise deploy all chains - let list_of_chains = if let Some(name) = global_config().chain_name.clone() { - vec![name] - } else { - ecosystem_config.list_of_chains() - }; - - for chain_name in &list_of_chains { - logger::info(msg_initializing_chain(chain_name)); - let chain_config = ecosystem_config - .load_chain(Some(chain_name.clone())) - .context(MSG_CHAIN_NOT_INITIALIZED)?; - - let mut chain_init_args = chain::args::init::InitArgsFinal { - forge_args: final_ecosystem_args.forge_args.clone(), - genesis_args: genesis_args.clone().fill_values_with_prompt(&chain_config), - deploy_paymaster: final_ecosystem_args.deploy_paymaster, - l1_rpc_url: final_ecosystem_args.ecosystem.l1_rpc_url.clone(), - port_offset: PortOffset::from_chain_id(chain_config.id as u16).into(), - }; - - chain::init::init( - &mut chain_init_args, - shell, - &ecosystem_config, - &chain_config, - ) - .await?; + // Initialize chain(s) + let mut chains: Vec = vec![]; + if !final_ecosystem_args.ecosystem_only { + chains = init_chains(&args, &final_ecosystem_args, shell, &ecosystem_config).await?; } - - logger::outro(msg_ecosystem_initialized(&list_of_chains.join(","))); + logger::outro(msg_ecosystem_initialized(&chains.join(","))); Ok(()) } -async fn init( +async fn init_ecosystem( init_args: &mut EcosystemInitArgsFinal, shell: &Shell, ecosystem_config: &EcosystemConfig, @@ -358,3 +330,53 @@ async fn deploy_ecosystem_inner( Ok(contracts_config) } + +async fn init_chains( + init_args: &EcosystemInitArgs, + final_init_args: &EcosystemInitArgsFinal, + shell: &Shell, + ecosystem_config: &EcosystemConfig, +) -> anyhow::Result> { + // If the name of chain passed then we deploy exactly this chain otherwise deploy 
all chains + let list_of_chains = if let Some(name) = global_config().chain_name.clone() { + vec![name] + } else { + ecosystem_config.list_of_chains() + }; + // Set default values for dev mode + let mut deploy_paymaster = init_args.deploy_paymaster; + let mut genesis_args = init_args.genesis_args.clone(); + if final_init_args.dev { + deploy_paymaster = Some(true); + genesis_args.use_default = true; + } + // Can't initialize multiple chains with the same DB + if list_of_chains.len() > 1 { + genesis_args.reset_db_names(); + } + // Initialize chains + for chain_name in &list_of_chains { + logger::info(msg_initializing_chain(chain_name)); + let chain_config = ecosystem_config + .load_chain(Some(chain_name.clone())) + .context(msg_chain_load_err(chain_name))?; + + let chain_init_args = chain::args::init::InitArgs { + forge_args: final_init_args.forge_args.clone(), + genesis_args: genesis_args.clone(), + deploy_paymaster, + l1_rpc_url: Some(final_init_args.ecosystem.l1_rpc_url.clone()), + no_port_reallocation: final_init_args.no_port_reallocation, + }; + let final_chain_init_args = chain_init_args.fill_values_with_prompt(&chain_config); + + chain::init::init( + &final_chain_init_args, + shell, + ecosystem_config, + &chain_config, + ) + .await?; + } + Ok(list_of_chains) +} diff --git a/zk_toolbox/crates/zk_inception/src/commands/explorer/backend.rs b/zk_toolbox/crates/zk_inception/src/commands/explorer/backend.rs index 6fdd3faa980..29cc2ecfbff 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/explorer/backend.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/explorer/backend.rs @@ -1,7 +1,7 @@ use std::path::Path; use anyhow::Context; -use common::{config::global_config, docker}; +use common::docker; use config::{explorer_compose::ExplorerBackendComposeConfig, EcosystemConfig}; use xshell::Shell; @@ -13,7 +13,7 @@ use crate::messages::{ pub(crate) fn run(shell: &Shell) -> anyhow::Result<()> { let ecosystem_config = EcosystemConfig::from_file(shell)?; let chain_config = ecosystem_config - .load_chain(global_config().chain_name.clone()) + .load_current_chain() .context(MSG_CHAIN_NOT_FOUND_ERR)?; let chain_name = chain_config.name.clone(); // Read chain-level explorer backend docker compose file diff --git a/zk_toolbox/crates/zk_inception/src/commands/explorer/init.rs b/zk_toolbox/crates/zk_inception/src/commands/explorer/init.rs index 43700d91a0d..5c8e10ba2d8 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/explorer/init.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/explorer/init.rs @@ -11,7 +11,6 @@ use url::Url; use xshell::Shell; use crate::{ - commands::chain::args::init::PortOffset, consts::L2_BASE_TOKEN_ADDRESS, defaults::{generate_explorer_db_name, DATABASE_EXPLORER_URL}, messages::{ @@ -19,6 +18,7 @@ use crate::{ msg_explorer_initializing_database_for, MSG_EXPLORER_FAILED_TO_DROP_DATABASE_ERR, MSG_EXPLORER_INITIALIZED, }, + utils::ports::{EcosystemPorts, EcosystemPortsScanner}, }; pub(crate) async fn run(shell: &Shell) -> anyhow::Result<()> { @@ -28,6 +28,7 @@ pub(crate) async fn run(shell: &Shell) -> anyhow::Result<()> { Some(ref chain_name) => vec![chain_name.clone()], None => ecosystem_config.list_of_chains(), }; + let mut ports = EcosystemPortsScanner::scan(shell)?; // Initialize chains one by one let mut explorer_config = ExplorerConfig::read_or_create_default(shell)?; for chain_name in chains_enabled.iter() { @@ -36,7 +37,7 @@ pub(crate) async fn run(shell: &Shell) -> anyhow::Result<()> { .load_chain(Some(chain_name.clone())) 
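// The `--ecosystem-only` flag introduced above makes the two-phase flow
// explicit (flag spelling follows the clap derive; illustrative):
//
//   zk_inception ecosystem init --ecosystem-only   # ecosystem contracts only
//   zk_inception chain init                        # later, once per chain
//
// Note also `reset_db_names()` above: when several chains are initialized in
// one pass, each needs distinct database names, otherwise the second chain's
// genesis would reuse and clobber the first chain's databases.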
.context(msg_chain_load_err(chain_name))?; // Build backend config - parameters required to create explorer backend services - let backend_config = build_backend_config(&chain_config); + let backend_config = build_backend_config(&mut ports, &chain_config)?; // Initialize explorer database initialize_explorer_database(&backend_config.database_url).await?; // Create explorer backend docker compose file @@ -58,16 +59,23 @@ pub(crate) async fn run(shell: &Shell) -> anyhow::Result<()> { Ok(()) } -fn build_backend_config(chain_config: &ChainConfig) -> ExplorerBackendConfig { +fn build_backend_config( + ports: &mut EcosystemPorts, + chain_config: &ChainConfig, +) -> anyhow::Result { // Prompt explorer database name logger::info(msg_explorer_initializing_database_for(&chain_config.name)); let db_config = fill_database_values_with_prompt(chain_config); // Allocate ports for backend services - let backend_ports = allocate_explorer_services_ports(chain_config); + let mut backend_ports = ExplorerBackendPorts::default(); + ports.allocate_ports_with_offset_from_defaults(&mut backend_ports, chain_config.id)?; // Build explorer backend config - ExplorerBackendConfig::new(db_config.full_url(), &backend_ports) + Ok(ExplorerBackendConfig::new( + db_config.full_url(), + &backend_ports, + )) } async fn initialize_explorer_database(db_url: &Url) -> anyhow::Result<()> { @@ -92,12 +100,6 @@ fn fill_database_values_with_prompt(config: &ChainConfig) -> db::DatabaseConfig db::DatabaseConfig::new(explorer_db_url, explorer_db_name) } -fn allocate_explorer_services_ports(chain_config: &ChainConfig) -> ExplorerBackendPorts { - // Try to allocate intuitive ports with an offset from the defaults - let offset: u16 = PortOffset::from_chain_id(chain_config.id as u16).into(); - ExplorerBackendPorts::default().with_offset(offset) -} - fn build_explorer_chain_config( chain_config: &ChainConfig, backend_config: &ExplorerBackendConfig, diff --git a/zk_toolbox/crates/zk_inception/src/commands/external_node/init.rs b/zk_toolbox/crates/zk_inception/src/commands/external_node/init.rs index 28c3e80aaab..18415176496 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/external_node/init.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/external_node/init.rs @@ -1,6 +1,5 @@ use anyhow::Context; use common::{ - config::global_config, db::{drop_db_if_exists, init_db, migrate_db, DatabaseConfig}, spinner::Spinner, }; @@ -20,9 +19,8 @@ use crate::{ pub async fn run(shell: &Shell) -> anyhow::Result<()> { let ecosystem_config = EcosystemConfig::from_file(shell)?; - let chain = global_config().chain_name.clone(); let chain_config = ecosystem_config - .load_chain(chain) + .load_current_chain() .context(MSG_CHAIN_NOT_INITIALIZED)?; init(shell, &chain_config).await diff --git a/zk_toolbox/crates/zk_inception/src/commands/external_node/prepare_configs.rs b/zk_toolbox/crates/zk_inception/src/commands/external_node/prepare_configs.rs index 89e08418c6e..5ab859d17f0 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/external_node/prepare_configs.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/external_node/prepare_configs.rs @@ -1,10 +1,12 @@ use std::{collections::BTreeMap, path::Path, str::FromStr}; use anyhow::Context; -use common::{config::global_config, logger}; +use common::logger; use config::{ - external_node::ENConfig, ports_config, set_rocks_db_config, traits::SaveConfigWithBasePath, - update_ports, ChainConfig, EcosystemConfig, SecretsConfig, + external_node::ENConfig, + set_rocks_db_config, + 
traits::{FileConfigWithDefaultName, SaveConfigWithBasePath}, + ChainConfig, EcosystemConfig, GeneralConfig, SecretsConfig, DEFAULT_CONSENSUS_PORT, }; use xshell::Shell; use zksync_basic_types::url::SensitiveUrl; @@ -17,23 +19,24 @@ use zksync_consensus_roles as roles; use crate::{ commands::external_node::args::prepare_configs::{PrepareConfigArgs, PrepareConfigFinal}, + defaults::PORT_RANGE_END, messages::{ msg_preparing_en_config_is_done, MSG_CHAIN_NOT_INITIALIZED, MSG_CONSENSUS_CONFIG_MISSING_ERR, MSG_CONSENSUS_SECRETS_MISSING_ERR, - MSG_CONSENSUS_SECRETS_NODE_KEY_MISSING_ERR, MSG_PORTS_CONFIG_ERR, MSG_PREPARING_EN_CONFIGS, + MSG_CONSENSUS_SECRETS_NODE_KEY_MISSING_ERR, MSG_PREPARING_EN_CONFIGS, }, utils::{ consensus::{get_consensus_config, node_public_key}, + ports::EcosystemPortsScanner, rocks_db::{recreate_rocksdb_dirs, RocksDBDirOption}, }, }; pub fn run(shell: &Shell, args: PrepareConfigArgs) -> anyhow::Result<()> { logger::info(MSG_PREPARING_EN_CONFIGS); - let chain_name = global_config().chain_name.clone(); let ecosystem_config = EcosystemConfig::from_file(shell)?; let mut chain_config = ecosystem_config - .load_chain(chain_name) + .load_current_chain() .context(MSG_CHAIN_NOT_INITIALIZED)?; let args = args.fill_values_with_prompt(&chain_config); @@ -55,6 +58,7 @@ fn prepare_configs( en_configs_path: &Path, args: PrepareConfigFinal, ) -> anyhow::Result<()> { + let mut ports = EcosystemPortsScanner::scan(shell)?; let genesis = config.get_genesis_config()?; let general = config.get_general_config()?; let en_config = ENConfig { @@ -72,18 +76,23 @@ fn prepare_configs( )?, main_node_rate_limit_rps: None, gateway_url: None, + bridge_addresses_refresh_interval_sec: None, }; let mut general_en = general.clone(); - let next_empty_ports_config = ports_config(&general) - .context(MSG_PORTS_CONFIG_ERR)? - .next_empty_ports_config(); - update_ports(&mut general_en, &next_empty_ports_config)?; - // Set consensus config let main_node_consensus_config = general .consensus_config .context(MSG_CONSENSUS_CONFIG_MISSING_ERR)?; + // TODO: This is a temporary solution. 
We should allocate consensus port using `EcosystemPorts::allocate_ports_in_yaml` + ports.add_port_info( + main_node_consensus_config.server_addr.port(), + "Main node consensus".to_string(), + ); + let offset = ((config.id - 1) * 100) as u16; + let consensus_port_range = DEFAULT_CONSENSUS_PORT + offset..PORT_RANGE_END; + let consensus_port = ports.allocate_port(consensus_port_range, "Consensus".to_string())?; + let mut gossip_static_outbound = BTreeMap::new(); let main_node_public_key = node_public_key( &config @@ -95,12 +104,8 @@ fn prepare_configs( gossip_static_outbound.insert(main_node_public_key, main_node_consensus_config.public_addr); - let en_consensus_config = get_consensus_config( - config, - next_empty_ports_config, - None, - Some(gossip_static_outbound), - )?; + let en_consensus_config = + get_consensus_config(config, consensus_port, None, Some(gossip_static_outbound))?; general_en.consensus_config = Some(en_consensus_config.clone()); en_consensus_config.save_with_base_path(shell, en_configs_path)?; @@ -121,6 +126,7 @@ fn prepare_configs( l1: Some(L1Secrets { l1_rpc_url: SensitiveUrl::from_str(&args.l1_rpc_url).context("l1_rpc_url")?, }), + data_availability: None, }; secrets.save_with_base_path(shell, en_configs_path)?; let dirs = recreate_rocksdb_dirs(shell, &config.rocks_db_path, RocksDBDirOption::ExternalNode)?; @@ -128,5 +134,11 @@ fn prepare_configs( general_en.save_with_base_path(shell, en_configs_path)?; en_config.save_with_base_path(shell, en_configs_path)?; + ports.allocate_ports_in_yaml( + shell, + &GeneralConfig::get_path_with_base_path(en_configs_path), + 0, // This is zero because general_en ports already have a chain offset + )?; + Ok(()) } diff --git a/zk_toolbox/crates/zk_inception/src/commands/external_node/run.rs b/zk_toolbox/crates/zk_inception/src/commands/external_node/run.rs index 46328699e6d..46c98119f89 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/external_node/run.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/external_node/run.rs @@ -1,5 +1,5 @@ use anyhow::Context; -use common::{config::global_config, logger}; +use common::logger; use config::{ChainConfig, EcosystemConfig}; use xshell::Shell; @@ -12,9 +12,8 @@ use crate::{ pub async fn run(shell: &Shell, args: RunExternalNodeArgs) -> anyhow::Result<()> { let ecosystem_config = EcosystemConfig::from_file(shell)?; - let chain = global_config().chain_name.clone(); let chain_config = ecosystem_config - .load_chain(chain) + .load_current_chain() .context(MSG_CHAIN_NOT_INITIALIZED)?; logger::info(MSG_STARTING_EN); diff --git a/zk_toolbox/crates/zk_inception/src/commands/mod.rs b/zk_toolbox/crates/zk_inception/src/commands/mod.rs index 523faea0478..78a46797602 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/mod.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/mod.rs @@ -1,5 +1,6 @@ pub mod args; pub mod chain; +pub mod consensus; pub mod containers; pub mod contract_verifier; pub mod ecosystem; diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/args/run.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/args/run.rs index 751cc48074f..59a82152f1f 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/prover/args/run.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/prover/args/run.rs @@ -1,3 +1,5 @@ +use std::path::Path; + use anyhow::anyhow; use clap::{Parser, ValueEnum}; use common::{Prompt, PromptSelect}; @@ -92,6 +94,7 @@ impl ProverComponent { in_docker: bool, args: ProverRunArgs, chain: &ChainConfig, + path_to_ecosystem: &Path, ) -> anyhow::Result> { 
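// The joins added below exist because `run` switches the working directory
// into the prover workspace before launching components, so the chain's
// relative config paths must first be re-anchored to the ecosystem root
// captured via `shell.current_dir()`. A self-contained sketch of the same
// idea; `anchor_to_root` and the example paths are illustrative, not from
// the codebase:
fn anchor_to_root(root: &std::path::Path, rel: &str) -> anyhow::Result<String> {
    root.join(rel)
        .into_os_string()
        .into_string()
        .map_err(|_| anyhow::anyhow!("config path is not valid UTF-8"))
}
// anchor_to_root("/home/user/ecosystem".as_ref(), "chains/era/configs/general.yaml")
// returns "/home/user/ecosystem/chains/era/configs/general.yaml".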
let mut additional_args = vec![]; if in_docker { @@ -109,6 +112,18 @@ impl ProverComponent { .into_string() .map_err(|_| anyhow!("Failed to convert path to string"))?; + let general_config = path_to_ecosystem + .join(general_config) + .into_os_string() + .into_string() + .unwrap(); + + let secrets_config = path_to_ecosystem + .join(secrets_config) + .into_os_string() + .into_string() + .unwrap(); + additional_args.push(format!("--config-path={}", general_config)); additional_args.push(format!("--secrets-path={}", secrets_config)); } diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/compressor_keys.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/compressor_keys.rs index fd83fccfebf..703ecc18c4c 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/prover/compressor_keys.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/prover/compressor_keys.rs @@ -1,7 +1,5 @@ use anyhow::Context; -use common::{ - check_prerequisites, cmd::Cmd, config::global_config, spinner::Spinner, WGET_PREREQUISITE, -}; +use common::{check_prerequisites, cmd::Cmd, spinner::Spinner, WGET_PREREQUISITE}; use config::{get_link_to_prover, EcosystemConfig, GeneralConfig}; use xshell::{cmd, Shell}; @@ -14,7 +12,7 @@ use crate::messages::{ pub(crate) async fn run(shell: &Shell, args: CompressorKeysArgs) -> anyhow::Result<()> { let ecosystem_config = EcosystemConfig::from_file(shell)?; let chain_config = ecosystem_config - .load_chain(global_config().chain_name.clone()) + .load_current_chain() .context(MSG_CHAIN_NOT_FOUND_ERR)?; let mut general_config = chain_config.get_general_config()?; diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/init.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/init.rs index 20e68274587..ad92180aea9 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/prover/init.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/prover/init.rs @@ -39,7 +39,7 @@ pub(crate) async fn run(args: ProverInitArgs, shell: &Shell) -> anyhow::Result<( let default_compressor_key_path = get_default_compressor_keys_path(&ecosystem_config)?; let chain_config = ecosystem_config - .load_chain(global_config().chain_name.clone()) + .load_current_chain() .context(MSG_CHAIN_NOT_FOUND_ERR)?; let args = args.fill_values_with_prompt(shell, &default_compressor_key_path, &chain_config)?; diff --git a/zk_toolbox/crates/zk_inception/src/commands/prover/run.rs b/zk_toolbox/crates/zk_inception/src/commands/prover/run.rs index 5f4bf2f4a67..ed2f5b41a86 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/prover/run.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/prover/run.rs @@ -1,7 +1,7 @@ -use std::path::PathBuf; +use std::path::{Path, PathBuf}; use anyhow::{anyhow, Context}; -use common::{check_prerequisites, cmd::Cmd, config::global_config, logger, GPU_PREREQUISITES}; +use common::{check_prerequisites, cmd::Cmd, logger, GPU_PREREQUISITES}; use config::{get_link_to_prover, ChainConfig, EcosystemConfig}; use xshell::{cmd, Shell}; @@ -19,9 +19,11 @@ pub(crate) async fn run(args: ProverRunArgs, shell: &Shell) -> anyhow::Result<() let args = args.fill_values_with_prompt()?; let ecosystem_config = EcosystemConfig::from_file(shell)?; let chain = ecosystem_config - .load_chain(global_config().chain_name.clone()) + .load_current_chain() .expect(MSG_CHAIN_NOT_FOUND_ERR); + let path_to_ecosystem = shell.current_dir(); + let link_to_prover = get_link_to_prover(&ecosystem_config); shell.change_dir(link_to_prover.clone()); @@ -29,7 +31,8 @@ pub(crate) async fn run(args: ProverRunArgs, shell: 
&Shell) -> anyhow::Result<() let in_docker = args.docker.unwrap_or(false); let application_args = component.get_application_args(in_docker)?; - let additional_args = component.get_additional_args(in_docker, args, &chain)?; + let additional_args = + component.get_additional_args(in_docker, args, &chain, &path_to_ecosystem)?; let (message, error) = match component { ProverComponent::WitnessGenerator => ( @@ -79,6 +82,7 @@ pub(crate) async fn run(args: ProverRunArgs, shell: &Shell) -> anyhow::Result<() error, &path_to_configs, &path_to_prover, + &path_to_ecosystem, )? } else { update_setup_data_path(&chain, "data/keys".to_string())?; @@ -105,9 +109,12 @@ fn run_dockerized_component( error: &'static str, path_to_configs: &PathBuf, path_to_prover: &PathBuf, + path_to_ecosystem: &Path, ) -> anyhow::Result<()> { logger::info(message); + let path_to_configs = path_to_ecosystem.join(path_to_configs); + let mut cmd = Cmd::new(cmd!( shell, "docker run --net=host -v {path_to_prover}/data/keys:/prover/data/keys -v {path_to_prover}/artifacts:/artifacts -v {path_to_configs}:/configs {application_args...} {image_name} {args...}" diff --git a/zk_toolbox/crates/zk_inception/src/commands/server.rs b/zk_toolbox/crates/zk_inception/src/commands/server.rs index f96bc3aeebc..be7a676a825 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/server.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/server.rs @@ -1,6 +1,5 @@ use anyhow::Context; use common::{ - config::global_config, logger, server::{Server, ServerMode}, }; @@ -18,9 +17,8 @@ use crate::{ pub fn run(shell: &Shell, args: RunServerArgs) -> anyhow::Result<()> { let ecosystem_config = EcosystemConfig::from_file(shell)?; - let chain = global_config().chain_name.clone(); let chain_config = ecosystem_config - .load_chain(chain) + .load_current_chain() .context(MSG_CHAIN_NOT_INITIALIZED)?; logger::info(MSG_STARTING_SERVER); diff --git a/zk_toolbox/crates/zk_inception/src/commands/update.rs b/zk_toolbox/crates/zk_inception/src/commands/update.rs index c140c3a4e9c..534d490e6ca 100644 --- a/zk_toolbox/crates/zk_inception/src/commands/update.rs +++ b/zk_toolbox/crates/zk_inception/src/commands/update.rs @@ -2,6 +2,7 @@ use std::path::Path; use anyhow::{Context, Ok}; use common::{ + db::migrate_db, git, logger, spinner::Spinner, yaml::{merge_yaml, ConfigDiff}, @@ -13,15 +14,18 @@ use config::{ use xshell::Shell; use super::args::UpdateArgs; -use crate::messages::{ - msg_diff_contracts_config, msg_diff_genesis_config, msg_diff_secrets, msg_updating_chain, - MSG_CHAIN_NOT_FOUND_ERR, MSG_DIFF_EN_CONFIG, MSG_DIFF_EN_GENERAL_CONFIG, - MSG_DIFF_GENERAL_CONFIG, MSG_PULLING_ZKSYNC_CODE_SPINNER, - MSG_UPDATING_ERA_OBSERVABILITY_SPINNER, MSG_UPDATING_SUBMODULES_SPINNER, MSG_UPDATING_ZKSYNC, - MSG_ZKSYNC_UPDATED, +use crate::{ + consts::{PROVER_MIGRATIONS, SERVER_MIGRATIONS}, + messages::{ + msg_diff_contracts_config, msg_diff_genesis_config, msg_diff_secrets, msg_updating_chain, + MSG_CHAIN_NOT_FOUND_ERR, MSG_DIFF_EN_CONFIG, MSG_DIFF_EN_GENERAL_CONFIG, + MSG_DIFF_GENERAL_CONFIG, MSG_PULLING_ZKSYNC_CODE_SPINNER, + MSG_UPDATING_ERA_OBSERVABILITY_SPINNER, MSG_UPDATING_SUBMODULES_SPINNER, + MSG_UPDATING_ZKSYNC, MSG_ZKSYNC_UPDATED, + }, }; -pub fn run(shell: &Shell, args: UpdateArgs) -> anyhow::Result<()> { +pub async fn run(shell: &Shell, args: UpdateArgs) -> anyhow::Result<()> { logger::info(MSG_UPDATING_ZKSYNC); let ecosystem = EcosystemConfig::from_file(shell)?; @@ -48,7 +52,8 @@ pub fn run(shell: &Shell, args: UpdateArgs) -> anyhow::Result<()> { &genesis_config_path, 
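// On the dockerized path above: `docker run -v HOST:CONTAINER` does not accept
// a relative HOST path as a bind mount (it would be parsed as a named volume),
// which is why `path_to_configs` is rebased onto `path_to_ecosystem` before it
// reaches `-v {path_to_configs}:/configs`.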
&contracts_config_path,
         &secrets_path,
-    )?;
+    )
+    .await?;
     }
 
     let path_to_era_observability = shell.current_dir().join(ERA_OBSERBAVILITY_DIR);
@@ -114,7 +119,7 @@ fn update_config(
     Ok(())
 }
 
-fn update_chain(
+async fn update_chain(
     shell: &Shell,
     chain: &ChainConfig,
     general: &Path,
@@ -177,5 +182,17 @@ fn update_chain(
         )?;
     }
 
+    let secrets = chain.get_secrets_config()?;
+
+    if let Some(db) = secrets.database {
+        if let Some(url) = db.server_url {
+            let path_to_migration = chain.link_to_code.join(SERVER_MIGRATIONS);
+            migrate_db(shell, path_to_migration, url.expose_url()).await?;
+        }
+        if let Some(url) = db.prover_url {
+            let path_to_migration = chain.link_to_code.join(PROVER_MIGRATIONS);
+            migrate_db(shell, path_to_migration, url.expose_url()).await?;
+        }
+    }
     Ok(())
 }
diff --git a/zk_toolbox/crates/zk_inception/src/defaults.rs b/zk_toolbox/crates/zk_inception/src/defaults.rs
index 6c3821eed85..2b43009f559 100644
--- a/zk_toolbox/crates/zk_inception/src/defaults.rs
+++ b/zk_toolbox/crates/zk_inception/src/defaults.rs
@@ -11,6 +11,12 @@ lazy_static! {
         Url::parse("postgres://postgres:notsecurepassword@localhost:5432").unwrap();
 }
 
+pub const DEFAULT_OBSERVABILITY_PORT: u16 = 3000;
+
+// Default port range
+pub const PORT_RANGE_START: u16 = 3000;
+pub const PORT_RANGE_END: u16 = 5000;
+
 pub const ROCKS_DB_STATE_KEEPER: &str = "state_keeper";
 pub const ROCKS_DB_TREE: &str = "tree";
 pub const ROCKS_DB_PROTECTIVE_READS: &str = "protective_reads";
diff --git a/zk_toolbox/crates/zk_inception/src/main.rs b/zk_toolbox/crates/zk_inception/src/main.rs
index f1ca348df38..a305ca053b7 100644
--- a/zk_toolbox/crates/zk_inception/src/main.rs
+++ b/zk_toolbox/crates/zk_inception/src/main.rs
@@ -14,7 +14,7 @@ use config::EcosystemConfig;
 use xshell::Shell;
 
 use crate::commands::{
-    args::RunServerArgs, chain::ChainCommands, ecosystem::EcosystemCommands,
+    args::RunServerArgs, chain::ChainCommands, consensus, ecosystem::EcosystemCommands,
     explorer::ExplorerCommands, external_node::ExternalNodeCommands, prover::ProverCommands,
 };
 
@@ -42,10 +42,10 @@ struct Inception {
 pub enum InceptionSubcommands {
     /// Ecosystem related commands
     #[command(subcommand, alias = "e")]
-    Ecosystem(EcosystemCommands),
+    Ecosystem(Box<EcosystemCommands>),
     /// Chain related commands
    #[command(subcommand, alias = "c")]
-    Chain(ChainCommands),
+    Chain(Box<ChainCommands>),
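+    // The two variants above are boxed to keep `InceptionSubcommands` itself
+    // small; this rationale is inferred from the change, not stated in the diff.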
     /// Prover related commands
     #[command(subcommand, alias = "p")]
     Prover(ProverCommands),
@@ -66,6 +68,8 @@
     #[command(subcommand)]
     Explorer(ExplorerCommands),
+    #[command(subcommand)]
+    Consensus(consensus::Command),
     /// Update ZKsync
     #[command(alias = "u")]
     Update(UpdateArgs),
     #[command(hide = true)]
@@ -119,8 +121,8 @@ async fn main() -> anyhow::Result<()> {
 
 async fn run_subcommand(inception_args: Inception, shell: &Shell) -> anyhow::Result<()> {
     match inception_args.command {
-        InceptionSubcommands::Ecosystem(args) => commands::ecosystem::run(shell, args).await?,
-        InceptionSubcommands::Chain(args) => commands::chain::run(shell, args).await?,
+        InceptionSubcommands::Ecosystem(args) => commands::ecosystem::run(shell, *args).await?,
+        InceptionSubcommands::Chain(args) => commands::chain::run(shell, *args).await?,
         InceptionSubcommands::Prover(args) => commands::prover::run(shell, args).await?,
         InceptionSubcommands::Server(args) => commands::server::run(shell, args)?,
         InceptionSubcommands::Containers(args) => commands::containers::run(shell, args)?,
@@ -131,8 +133,9 @@ async fn run_subcommand(inception_args: Inception, shell: &Shell) -> anyhow::Result<()> {
             commands::contract_verifier::run(shell, args).await?
         }
         InceptionSubcommands::Explorer(args) => commands::explorer::run(shell, args).await?,
+        InceptionSubcommands::Consensus(cmd) => cmd.run(shell).await?,
         InceptionSubcommands::Portal => commands::portal::run(shell).await?,
-        InceptionSubcommands::Update(args) => commands::update::run(shell, args)?,
+        InceptionSubcommands::Update(args) => commands::update::run(shell, args).await?,
         InceptionSubcommands::Markdown => {
             clap_markdown::print_help_markdown::<Inception>();
         }
diff --git a/zk_toolbox/crates/zk_inception/src/messages.rs b/zk_toolbox/crates/zk_inception/src/messages.rs
index 3bbac066dbb..ebdcf7378a4 100644
--- a/zk_toolbox/crates/zk_inception/src/messages.rs
+++ b/zk_toolbox/crates/zk_inception/src/messages.rs
@@ -4,6 +4,7 @@ use ethers::{
     types::{H160, U256},
     utils::format_ether,
 };
+use zksync_consensus_roles::attester;
 
 pub(super) const MSG_SETUP_KEYS_DOWNLOAD_SELECTION_PROMPT: &str =
     "Do you want to download the setup keys or generate them?";
@@ -51,7 +52,7 @@ pub(super) fn msg_path_to_zksync_does_not_exist_err(path: &str) -> String {
 
 /// Ecosystem and chain init related messages
 pub(super) const MSG_L1_RPC_URL_HELP: &str = "L1 RPC URL";
-pub(super) const MSG_PORT_OFFSET_HELP: &str = "Add a costant offset to the ports exposed by the components. Useful when running multiple chains on the same machine";
+pub(super) const MSG_NO_PORT_REALLOCATION_HELP: &str = "Do not reallocate ports";
 pub(super) const MSG_GENESIS_ARGS_HELP: &str = "Genesis options";
 pub(super) const MSG_DEV_ARG_HELP: &str =
     "Deploy ecosystem using all defaults. Suitable for local development";
@@ -71,6 +72,10 @@ pub(super) const MSG_CHAIN_NOT_FOUND_ERR: &str = "Chain not found";
 pub(super) const MSG_INITIALIZING_ECOSYSTEM: &str = "Initializing ecosystem";
 pub(super) const MSG_DEPLOYING_ERC20: &str = "Deploying ERC20 contracts";
 pub(super) const MSG_CHAIN_INITIALIZED: &str = "Chain initialized successfully";
+pub(super) const MSG_CHAIN_CONFIGS_INITIALIZED: &str = "Chain configs were initialized";
+pub(super) const MSG_CHAIN_OWNERSHIP_TRANSFERRED: &str =
+    "Chain ownership was transferred successfully";
+pub(super) const MSG_CHAIN_REGISTERED: &str = "Chain registration was successful";
 pub(super) const MSG_DISTRIBUTING_ETH_SPINNER: &str = "Distributing eth...";
 pub(super) const MSG_MINT_BASE_TOKEN_SPINNER: &str =
     "Minting base token to the governance addresses...";
@@ -99,7 +104,11 @@ pub(super) fn msg_initializing_chain(chain_name: &str) -> String {
 }
 
 pub(super) fn msg_ecosystem_initialized(chains: &str) -> String {
-    format!("Ecosystem initialized successfully with chains {chains}")
+    if chains.is_empty() {
+        "Ecosystem initialized successfully. You can initialize a chain with `chain init`".to_string()
+    } else {
+        format!("Ecosystem initialized successfully with chains {chains}")
+    }
 }
 
 /// Ecosystem default related messages
@@ -186,6 +195,7 @@ pub(super) const MSG_INITIALIZING_SERVER_DATABASE: &str = "Initializing server d
 pub(super) const MSG_FAILED_TO_DROP_SERVER_DATABASE_ERR: &str = "Failed to drop server database";
 pub(super) const MSG_INITIALIZING_PROVER_DATABASE: &str = "Initializing prover database";
 pub(super) const MSG_FAILED_TO_DROP_PROVER_DATABASE_ERR: &str = "Failed to drop prover database";
+pub(super) const MSG_GENESIS_DATABASES_INITIALIZED: &str = "Databases initialized successfully";
 
 /// Chain update related messages
 pub(super) const MSG_WALLETS_CONFIG_MUST_BE_PRESENT: &str = "Wallets configuration must be present";
@@ -329,8 +339,6 @@ pub(super) const MSG_CONSENSUS_CONFIG_MISSING_ERR: &str = "Consensus config is m
 pub(super) const MSG_CONSENSUS_SECRETS_MISSING_ERR: &str = "Consensus secrets config is missing";
 pub(super) const MSG_CONSENSUS_SECRETS_NODE_KEY_MISSING_ERR: &str = "Consensus node key is missing";
 
-pub(super) const MSG_PORTS_CONFIG_ERR: &str = "Failed to get ports config";
-
 pub(super) const MSG_STARTING_EN: &str = "Starting external node";
 
 /// Prover related messages
@@ -481,3 +489,22 @@ pub(super) fn msg_diff_secrets(
 pub(super) fn msg_updating_chain(chain: &str) -> String {
     format!("Updating chain: {}", chain)
 }
+
+/// consensus command messages
+pub(super) const MSG_RECEIPT_MISSING: &str = "receipt missing";
+pub(super) const MSG_STATUS_MISSING: &str = "status missing";
+pub(super) const MSG_TRANSACTION_FAILED: &str = "transaction failed";
+pub(super) const MSG_API_CONFIG_MISSING: &str = "api config missing";
+pub(super) const MSG_MULTICALL3_CONTRACT_NOT_CONFIGURED: &str =
+    "multicall3 contract not configured";
+pub(super) const MSG_GOVERNOR_PRIVATE_KEY_NOT_SET: &str = "governor private key not set";
+pub(super) const MSG_CONSENSUS_REGISTRY_ADDRESS_NOT_CONFIGURED: &str =
+    "consensus registry address not configured";
+pub(super) const MSG_CONSENSUS_GENESIS_SPEC_ATTESTERS_MISSING_IN_GENERAL_YAML: &str =
+    "consensus.genesis_spec.attesters missing in general.yaml";
+pub(super) fn msg_setting_attester_committee_failed(
+    got: &attester::Committee,
+    want: &attester::Committee,
+) -> String {
+    format!("setting attester committee failed: got {got:?}, want {want:?}")
+}
diff --git a/zk_toolbox/crates/zk_inception/src/utils/consensus.rs b/zk_toolbox/crates/zk_inception/src/utils/consensus.rs
index 06848334a6e..2979b4df0c1 100644
--- a/zk_toolbox/crates/zk_inception/src/utils/consensus.rs
+++ b/zk_toolbox/crates/zk_inception/src/utils/consensus.rs
@@ -3,7 +3,8 @@ use std::{
     net::SocketAddr,
 };
 
-use config::{ChainConfig, PortsConfig};
+use anyhow::Context as _;
+use config::ChainConfig;
 use secrecy::{ExposeSecret, Secret};
 use zksync_config::configs::consensus::{
     AttesterPublicKey, AttesterSecretKey, ConsensusConfig, ConsensusSecrets, GenesisSpec, Host,
@@ -11,36 +12,53 @@ use zksync_config::configs::consensus::{
     WeightedAttester, WeightedValidator,
 };
 use zksync_consensus_crypto::{Text, TextFmt};
-use zksync_consensus_roles as roles;
+use zksync_consensus_roles::{attester, node, validator};
 
 use crate::consts::{
     CONSENSUS_PUBLIC_ADDRESS_HOST, CONSENSUS_SERVER_ADDRESS_HOST, GOSSIP_DYNAMIC_INBOUND_LIMIT,
     MAX_BATCH_SIZE, MAX_PAYLOAD_SIZE,
 };
 
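+/// Converts config-level `WeightedAttester` entries into a consensus-role
+/// `attester::Committee`, tagging any key-decoding error with the entry index.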
+pub(crate) fn parse_attester_committee(
+    attesters: &[WeightedAttester],
+) -> anyhow::Result<attester::Committee> {
+    let attesters: Vec<_> = attesters
+        .iter()
+        .enumerate()
+        .map(|(i, v)| {
+            Ok(attester::WeightedAttester {
+                key: Text::new(&v.key.0).decode().context("key").context(i)?,
+                weight: v.weight,
+            })
+        })
+        .collect::<anyhow::Result<Vec<_>>>()
+        .context("attesters")?;
+    attester::Committee::new(attesters).context("Committee::new()")
+}
+
 #[derive(Debug, Clone)]
 pub struct ConsensusSecretKeys {
-    validator_key: roles::validator::SecretKey,
-    attester_key: roles::attester::SecretKey,
-    node_key: roles::node::SecretKey,
+    validator_key: validator::SecretKey,
+    attester_key: attester::SecretKey,
+    node_key: node::SecretKey,
 }
 
 pub struct ConsensusPublicKeys {
-    validator_key: roles::validator::PublicKey,
-    attester_key: roles::attester::PublicKey,
+    validator_key: validator::PublicKey,
+    attester_key: attester::PublicKey,
 }
 
 pub fn get_consensus_config(
     chain_config: &ChainConfig,
-    ports: PortsConfig,
+    consensus_port: u16,
     consensus_keys: Option<ConsensusSecretKeys>,
     gossip_static_outbound: Option<BTreeMap<NodePublicKey, Host>>,
 ) -> anyhow::Result<ConsensusConfig> {
     let genesis_spec =
         consensus_keys.map(|consensus_keys| get_genesis_specs(chain_config, &consensus_keys));
 
-    let public_addr = SocketAddr::new(CONSENSUS_PUBLIC_ADDRESS_HOST, ports.consensus_port);
-    let server_addr = SocketAddr::new(CONSENSUS_SERVER_ADDRESS_HOST, ports.consensus_port);
+    let public_addr = SocketAddr::new(CONSENSUS_PUBLIC_ADDRESS_HOST, consensus_port);
+    let server_addr = SocketAddr::new(CONSENSUS_SERVER_ADDRESS_HOST, consensus_port);
 
     Ok(ConsensusConfig {
         server_addr,
@@ -52,14 +70,15 @@ pub fn get_consensus_config(
         gossip_static_inbound: BTreeSet::new(),
         gossip_static_outbound: gossip_static_outbound.unwrap_or_default(),
         rpc: None,
+        debug_page_addr: None,
     })
 }
 
 pub fn generate_consensus_keys() -> ConsensusSecretKeys {
     ConsensusSecretKeys {
-        validator_key: roles::validator::SecretKey::generate(),
-        attester_key: roles::attester::SecretKey::generate(),
-        node_key: roles::node::SecretKey::generate(),
+        validator_key: validator::SecretKey::generate(),
+        attester_key: attester::SecretKey::generate(),
+        node_key: node::SecretKey::generate(),
     }
 }
 
@@ -95,6 +114,7 @@ pub fn get_genesis_specs(
         attesters: vec![attester],
         leader,
         registry_address: None,
+        seed_peers: [].into(),
     }
 }
 
@@ -113,7 +133,7 @@ pub fn get_consensus_secrets(consensus_keys: &ConsensusSecretKeys) -> ConsensusSecrets {
 pub fn node_public_key(secrets: &ConsensusSecrets) -> anyhow::Result<Option<NodePublicKey>> {
     Ok(node_key(secrets)?.map(|node_secret_key| NodePublicKey(node_secret_key.public().encode())))
 }
-fn node_key(secrets: &ConsensusSecrets) -> anyhow::Result<Option<roles::node::SecretKey>> {
+fn node_key(secrets: &ConsensusSecrets) -> anyhow::Result<Option<node::SecretKey>> {
     read_secret_text(secrets.node_key.as_ref().map(|x| &x.0))
 }
diff --git a/zk_toolbox/crates/zk_inception/src/utils/mod.rs b/zk_toolbox/crates/zk_inception/src/utils/mod.rs
index 229d3908dc3..cf7a7ef4818 100644
--- a/zk_toolbox/crates/zk_inception/src/utils/mod.rs
+++ b/zk_toolbox/crates/zk_inception/src/utils/mod.rs
@@ -1,3 +1,4 @@
 pub mod consensus;
 pub mod forge;
+pub mod ports;
 pub mod rocks_db;
diff --git a/zk_toolbox/crates/zk_inception/src/utils/ports.rs b/zk_toolbox/crates/zk_inception/src/utils/ports.rs
new file mode 100644
index 00000000000..5102b4fd9c6
--- /dev/null
+++ b/zk_toolbox/crates/zk_inception/src/utils/ports.rs
@@ -0,0 +1,571 @@
+use std::{collections::HashMap, fmt, ops::Range, path::Path};
+
+use anyhow::{bail, Context, Result};
+use config::{
+    explorer_compose::ExplorerBackendPorts, EcosystemConfig, DEFAULT_EXPLORER_API_PORT,
+    DEFAULT_EXPLORER_DATA_FETCHER_PORT, DEFAULT_EXPLORER_WORKER_PORT,
+};
+use serde_yaml::Value;
+use url::Url;
+use xshell::Shell;
+
+use crate::defaults::{DEFAULT_OBSERVABILITY_PORT, PORT_RANGE_END, PORT_RANGE_START};
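+
+// Registry of every port referenced by the ecosystem's configs, mapping each
+// port to the config entries that use it.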
+pub struct EcosystemPorts {
+    pub ports: HashMap<u16, Vec<String>>,
+}
+
+impl EcosystemPorts {
+    pub fn is_port_assigned(&self, port: u16) -> bool {
+        self.ports.contains_key(&port)
+    }
+
+    pub fn add_port_info(&mut self, port: u16, info: String) {
+        self.ports.entry(port).or_default().push(info);
+    }
+
+    pub fn allocate_port(&mut self, range: Range<u16>, info: String) -> anyhow::Result<u16> {
+        for port in range {
+            if !self.is_port_assigned(port) {
+                self.add_port_info(port, info.to_string());
+                return Ok(port);
+            }
+        }
+        anyhow::bail!(format!(
+            "No available ports in the given range. Failed to allocate port for: {}",
+            info
+        ));
+    }
+
+    pub fn allocate_ports_with_offset_from_defaults<T: ConfigWithChainPorts>(
+        &mut self,
+        config: &mut T,
+        chain_number: u32,
+    ) -> Result<()> {
+        let offset = ((chain_number - 1) as u16) * 100;
+        let port_range = (PORT_RANGE_START + offset)..PORT_RANGE_END;
+
+        let mut new_ports = HashMap::new();
+        for (desc, port) in config.get_default_ports()? {
+            let mut new_port = port + offset;
+            if self.is_port_assigned(new_port) {
+                new_port = self.allocate_port(port_range.clone(), desc.clone())?;
+            } else {
+                self.add_port_info(new_port, desc.to_string());
+            }
+            new_ports.insert(desc, new_port);
+        }
+        config.set_ports(new_ports)?;
+        Ok(())
+    }
+
+    pub fn allocate_ports_in_yaml(
+        &mut self,
+        shell: &Shell,
+        file_path: &Path,
+        chain_number: u32,
+    ) -> Result<()> {
+        let file_contents = shell.read_file(file_path)?;
+        let mut value: Value = serde_yaml::from_str(&file_contents)?;
+        let offset = (chain_number - 1) * 100;
+        self.traverse_allocate_ports_in_yaml(&mut value, offset)?;
+        let new_contents = serde_yaml::to_string(&value)?;
+        if new_contents != file_contents {
+            shell.write_file(file_path, new_contents)?;
+        }
+        Ok(())
+    }
+
+    fn traverse_allocate_ports_in_yaml(
+        &mut self,
+        value: &mut Value,
+        offset: u32,
+    ) -> anyhow::Result<()> {
+        match value {
+            Value::Mapping(map) => {
+                let mut updated_ports = HashMap::new();
+                for (key, val) in &mut *map {
+                    if key.as_str().map(|s| s.ends_with("port")).unwrap_or(false) {
+                        if let Some(port) = val.as_u64().and_then(|p| u16::try_from(p).ok()) {
+                            let new_port = self.allocate_port(
+                                (port + offset as u16)..PORT_RANGE_END,
+                                "".to_string(),
+                            )?;
+                            *val = Value::Number(serde_yaml::Number::from(new_port));
+                            updated_ports.insert(port, new_port);
+                        }
+                    }
+                }
+                // Update ports in URLs
+                for (key, val) in &mut *map {
+                    if key.as_str().map(|s| s.ends_with("url")).unwrap_or(false) {
+                        let mut url = Url::parse(val.as_str().unwrap())?;
+                        if let Some(port) = url.port() {
+                            if let Some(new_port) = updated_ports.get(&port) {
+                                if let Err(()) = url.set_port(Some(*new_port)) {
+                                    bail!("Failed to update port in URL {}", url);
+                                } else {
+                                    *val = Value::String(url.to_string());
+                                }
+                            }
+                        }
+                    }
+                }
+                // Continue traversing
+                for (_, val) in map {
+                    self.traverse_allocate_ports_in_yaml(val, offset)?;
+                }
+            }
+            Value::Sequence(seq) => {
+                for val in seq {
+                    self.traverse_allocate_ports_in_yaml(val, offset)?;
+                }
+            }
+            _ => {}
+        }
+
+        Ok(())
+    }
+}
+
+impl fmt::Display for EcosystemPorts {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        let mut port_vec: Vec<_> = self.ports.iter().collect();
+        port_vec.sort_by_key(|&(key, _)| key);
+        for (port, port_infos) in port_vec {
+            for port_info in port_infos {
+                writeln!(f, "{} > {}", port_info, port)?;
+            }
+        }
+        Ok(())
+    }
+}
+
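+// Allocation rule used above: chain N first tries default_port + (N - 1) * 100;
+// if that port is already taken, the next free port in the remaining range is used.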
+impl Default for EcosystemPorts {
+    fn default() -> Self {
+        let mut ports = HashMap::new();
+        ports.insert(
+            DEFAULT_OBSERVABILITY_PORT,
+            vec!["Observability".to_string()],
+        );
+        // Keep the pre-populated map: the observability port is reserved by default.
+        Self { ports }
+    }
+}
+
+pub struct EcosystemPortsScanner;
+
+impl EcosystemPortsScanner {
+    /// Scans the ecosystem directory for YAML files and extracts port information.
+    /// Specifically, it looks for keys ending with "port" or "ports" and collects their values.
+    pub fn scan(shell: &Shell) -> Result<EcosystemPorts> {
+        let ecosystem_config = EcosystemConfig::from_file(shell)?;
+
+        // Create a list of directories to scan:
+        // - Ecosystem configs directory
+        // - Chain configs directories
+        // - External node directories
+        // - Ecosystem directory (docker-compose files)
+        let mut dirs = vec![ecosystem_config.config.clone()];
+        for chain in ecosystem_config.list_of_chains() {
+            if let Some(chain_config) = ecosystem_config.load_chain(Some(chain)) {
+                dirs.push(chain_config.configs.clone());
+                if let Some(external_node_config_path) = &chain_config.external_node_config_path {
+                    dirs.push(external_node_config_path.clone());
+                }
+            }
+        }
+        dirs.push(shell.current_dir()); // Ecosystem directory
+
+        let mut ecosystem_ports = EcosystemPorts::default();
+        for dir in dirs {
+            if dir.is_dir() {
+                Self::scan_yaml_files(shell, &dir, &mut ecosystem_ports)
+                    .context(format!("Failed to scan directory {:?}", dir))?;
+            }
+        }
+
+        Ok(ecosystem_ports)
+    }
+
+    /// Scans the given directory for YAML files in the immediate directory only (non-recursive).
+    /// Processes each YAML file found and updates the EcosystemPorts accordingly.
+    fn scan_yaml_files(
+        shell: &Shell,
+        dir: &Path,
+        ecosystem_ports: &mut EcosystemPorts,
+    ) -> Result<()> {
+        for path in shell.read_dir(dir)? {
+            if !path.is_file() {
+                continue;
+            }
+            if let Some(extension) = path.extension() {
+                if extension == "yaml" || extension == "yml" {
+                    Self::process_yaml_file(shell, &path, ecosystem_ports)
+                        .context(format!("Error processing YAML file {:?}", path))?;
+                }
+            }
+        }
+        Ok(())
+    }
+
+    fn process_yaml_file(
+        shell: &Shell,
+        file_path: &Path,
+        ecosystem_ports: &mut EcosystemPorts,
+    ) -> Result<()> {
+        let contents = shell.read_file(file_path)?;
+        let value: Value = serde_yaml::from_str(&contents)?;
+        Self::traverse_yaml(&value, "", file_path, ecosystem_ports);
+        Ok(())
+    }
+
+    fn traverse_yaml(
+        value: &Value,
+        path: &str,
+        file_path: &Path,
+        ecosystem_ports: &mut EcosystemPorts,
+    ) {
+        match value {
+            Value::Mapping(map) => {
+                for (key, val) in map {
+                    let new_path = if path.is_empty() {
+                        key.as_str().unwrap_or_default().to_string()
+                    } else {
+                        format!("{}:{}", path, key.as_str().unwrap_or_default())
+                    };
+
+                    if key.as_str() == Some("ports") {
+                        Self::process_docker_compose_ports(
+                            val,
+                            &new_path,
+                            file_path,
+                            ecosystem_ports,
+                        );
+                    } else if key.as_str().map(|s| s.ends_with("port")).unwrap_or(false) {
+                        Self::process_general_config_port(
+                            val,
+                            &new_path,
+                            file_path,
+                            ecosystem_ports,
+                        );
+                    }
+
+                    Self::traverse_yaml(val, &new_path, file_path, ecosystem_ports);
+                }
+            }
+            Value::Sequence(seq) => {
+                for (index, val) in seq.iter().enumerate() {
+                    let new_path = format!("{}:{}", path, index);
+                    Self::traverse_yaml(val, &new_path, file_path, ecosystem_ports);
+                }
+            }
+            _ => {}
+        }
+    }
+
+    fn process_general_config_port(
+        value: &Value,
+        path: &str,
+        file_path: &Path,
+        ecosystem_ports: &mut EcosystemPorts,
+    ) {
+        if let Some(port) = value.as_u64().and_then(|p| u16::try_from(p).ok()) {
+            let description = format!("[{}] {}", file_path.display(), path);
+            ecosystem_ports.add_port_info(port, description);
+        }
+    }
+
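+    /// Handles docker-compose style `ports:` entries such as
+    /// `"127.0.0.1:8546:8545"` or `"5433:5432"`, recording the host-side port.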
+    fn process_docker_compose_ports(
+        value: &Value,
+        path: &str,
+        file_path: &Path,
+        ecosystem_ports: &mut EcosystemPorts,
+    ) {
+        if let Value::Sequence(ports) = value {
+            for port_entry in ports {
+                if let Some(port_str) = port_entry.as_str() {
+                    if let Some(port) = Self::parse_host_port(port_str) {
+                        Self::add_parsed_port(port, path, file_path, ecosystem_ports);
+                    }
+                }
+            }
+        }
+    }
+
+    fn parse_host_port(port_str: &str) -> Option<u16> {
+        let parts: Vec<&str> = port_str.split(':').collect();
+        if parts.len() > 1 {
+            if let Some(host_port_str) = parts.get(parts.len() - 2) {
+                if let Ok(port) = host_port_str.parse::<u16>() {
+                    return Some(port);
+                }
+            }
+        }
+        None
+    }
+
+    fn add_parsed_port(
+        port: u16,
+        path: &str,
+        file_path: &Path,
+        ecosystem_ports: &mut EcosystemPorts,
+    ) {
+        let description = format!("[{}] {}", file_path.display(), path);
+        ecosystem_ports.add_port_info(port, description);
+    }
+}
+
+pub trait ConfigWithChainPorts {
+    fn get_default_ports(&self) -> anyhow::Result<HashMap<String, u16>>;
+    fn set_ports(&mut self, ports: HashMap<String, u16>) -> Result<()>;
+}
+
+impl ConfigWithChainPorts for ExplorerBackendPorts {
+    fn get_default_ports(&self) -> anyhow::Result<HashMap<String, u16>> {
+        Ok(HashMap::from([
+            ("api_http_port".to_string(), DEFAULT_EXPLORER_API_PORT),
+            (
+                "data_fetcher_http_port".to_string(),
+                DEFAULT_EXPLORER_DATA_FETCHER_PORT,
+            ),
+            ("worker_http_port".to_string(), DEFAULT_EXPLORER_WORKER_PORT),
+        ]))
+    }
+
+    fn set_ports(&mut self, ports: HashMap<String, u16>) -> anyhow::Result<()> {
+        if ports.len() != self.get_default_ports()?.len() {
+            bail!("Incorrect number of ports provided");
+        }
+        for (desc, port) in ports {
+            match desc.as_str() {
+                "api_http_port" => self.api_http_port = port,
+                "data_fetcher_http_port" => self.data_fetcher_http_port = port,
+                "worker_http_port" => self.worker_http_port = port,
+                _ => bail!("Unknown port descriptor: {}", desc),
+            }
+        }
+        Ok(())
+    }
+}
+
+#[cfg(test)]
+mod tests {
+    use std::path::PathBuf;
+
+    use crate::utils::ports::{EcosystemPorts, EcosystemPortsScanner};
+
+    #[test]
+    fn test_traverse_yaml() {
+        let yaml_content = r#"
+        api:
+            web3_json_rpc:
+                http_port: 3050
+                ws_port: 3051
+                api_namespaces:
+                  - eth
+                gas_price_scale_factor: 1.5
+            prometheus:
+                listener_port: 3412
+                push_interval_ms: 100
+        contract_verifier:
+            port: 3070
+        prometheus:
+            listener_port: 3412
+        reth:
+            image: "ghcr.io/paradigmxyz/reth:v1.0.6"
+            ports:
+                - 127.0.0.1:8546:8545
+        postgres:
+            image: "postgres:14"
+            ports:
+                - "5433:5432"
+
+        "#;
+
+        let value = serde_yaml::from_str(yaml_content).unwrap();
+        let mut ecosystem_ports = EcosystemPorts::default();
+        let file_path = PathBuf::from("test_config.yaml");
+
+        EcosystemPortsScanner::traverse_yaml(&value, "", &file_path, &mut ecosystem_ports);
+
+        // Assigned ports:
+        assert!(ecosystem_ports.is_port_assigned(3050));
+        assert!(ecosystem_ports.is_port_assigned(3051));
+        assert!(ecosystem_ports.is_port_assigned(3070));
+        assert!(ecosystem_ports.is_port_assigned(3412));
+        assert!(ecosystem_ports.is_port_assigned(8546));
+        assert!(ecosystem_ports.is_port_assigned(5433));
+
+        // Free ports:
+        assert!(!ecosystem_ports.is_port_assigned(3150));
+        assert!(!ecosystem_ports.is_port_assigned(3151));
+        assert!(!ecosystem_ports.is_port_assigned(8545));
+        assert!(!ecosystem_ports.is_port_assigned(5432));
+        assert!(!ecosystem_ports.is_port_assigned(100));
+
+        // Check description:
+        let port_3050_info = ecosystem_ports.ports.get(&3050).unwrap();
+        assert_eq!(port_3050_info.len(), 1);
+        assert_eq!(
+            port_3050_info[0],
+            "[test_config.yaml] api:web3_json_rpc:http_port"
+        );
+
+
let port_3412_info = ecosystem_ports.ports.get(&3412).unwrap(); + assert_eq!(port_3412_info.len(), 2); + assert_eq!( + port_3412_info[0], + "[test_config.yaml] api:prometheus:listener_port" + ); + assert_eq!( + port_3412_info[1], + "[test_config.yaml] prometheus:listener_port" + ); + } + + #[test] + fn test_process_port_value() { + let yaml_content = r#" + web3_json_rpc: + http_port: 3050 + "#; + + let value: serde_yaml::Value = serde_yaml::from_str(yaml_content).unwrap(); + let mut ecosystem_ports = EcosystemPorts::default(); + let file_path = PathBuf::from("test_config.yaml"); + + EcosystemPortsScanner::process_general_config_port( + &value["web3_json_rpc"]["http_port"], + "web3_json_rpc:http_port", + &file_path, + &mut ecosystem_ports, + ); + + assert!(ecosystem_ports.is_port_assigned(3050)); + let port_info = ecosystem_ports.ports.get(&3050).unwrap(); + assert_eq!(port_info[0], "[test_config.yaml] web3_json_rpc:http_port"); + } + + #[test] + fn test_parse_process_docker_compose_ports() { + assert_eq!( + EcosystemPortsScanner::parse_host_port("127.0.0.1:8546:8545"), + Some(8546) + ); + assert_eq!( + EcosystemPortsScanner::parse_host_port("5433:5432"), + Some(5433) + ); + assert_eq!(EcosystemPortsScanner::parse_host_port("localhost:80"), None); + assert_eq!(EcosystemPortsScanner::parse_host_port("8080"), None); + } + + #[test] + fn test_add_parsed_port() { + let mut ecosystem_ports = EcosystemPorts::default(); + let file_path = PathBuf::from("test_config.yaml"); + + EcosystemPortsScanner::add_parsed_port( + 8546, + "reth:ports", + &file_path, + &mut ecosystem_ports, + ); + + assert!(ecosystem_ports.is_port_assigned(8546)); + let port_info = ecosystem_ports.ports.get(&8546).unwrap(); + assert_eq!(port_info[0], "[test_config.yaml] reth:ports"); + } + + #[test] + fn test_traverse_allocate_ports_in_yaml_with_chain_number_zero() { + let yaml_content = r#" + api: + web3_json_rpc: + http_port: 3050 + http_url: http://127.0.0.1:3050 + ws_port: 3051 + ws_url: ws://127.0.0.1:3051 + gas_price_scale_factor: 1.5 + prometheus: + listener_port: 3412 + pushgateway_url: http://127.0.0.1:9091 + push_interval_ms: 100 + "#; + + let mut value = serde_yaml::from_str(yaml_content).unwrap(); + let mut ecosystem_ports = EcosystemPorts::default(); + let chain_number = 0; + let offset = chain_number * 100; + ecosystem_ports + .traverse_allocate_ports_in_yaml(&mut value, offset) + .unwrap(); + + let api = value["api"].as_mapping().unwrap(); + let web3_json_rpc = api["web3_json_rpc"].as_mapping().unwrap(); + let prometheus = api["prometheus"].as_mapping().unwrap(); + + assert_eq!(web3_json_rpc["http_port"].as_u64().unwrap(), 3050); + assert_eq!(web3_json_rpc["ws_port"].as_u64().unwrap(), 3051); + assert_eq!(prometheus["listener_port"].as_u64().unwrap(), 3412); + assert_eq!( + web3_json_rpc["http_url"].as_str().unwrap(), + "http://127.0.0.1:3050/" + ); + assert_eq!( + web3_json_rpc["ws_url"].as_str().unwrap(), + "ws://127.0.0.1:3051/" + ); + assert_eq!( + prometheus["pushgateway_url"].as_str().unwrap(), + "http://127.0.0.1:9091" + ); + } + + #[test] + fn test_traverse_allocate_ports_in_yaml_with_chain_number_one() { + let yaml_content = r#" + api: + web3_json_rpc: + http_port: 3050 + http_url: http://127.0.0.1:3050 + ws_port: 3051 + ws_url: ws://127.0.0.1:3051 + gas_price_scale_factor: 1.5 + prometheus: + listener_port: 3412 + pushgateway_url: http://127.0.0.1:9091 + push_interval_ms: 100 + "#; + + let mut value = serde_yaml::from_str(yaml_content).unwrap(); + let mut ecosystem_ports = EcosystemPorts::default(); + 
let chain_number = 1; + let offset = chain_number * 100; + ecosystem_ports + .traverse_allocate_ports_in_yaml(&mut value, offset) + .unwrap(); + + let api = value["api"].as_mapping().unwrap(); + let web3_json_rpc = api["web3_json_rpc"].as_mapping().unwrap(); + let prometheus = api["prometheus"].as_mapping().unwrap(); + + assert_eq!(web3_json_rpc["http_port"].as_u64().unwrap(), 3150); + assert_eq!(web3_json_rpc["ws_port"].as_u64().unwrap(), 3151); + assert_eq!(prometheus["listener_port"].as_u64().unwrap(), 3512); + assert_eq!( + web3_json_rpc["http_url"].as_str().unwrap(), + "http://127.0.0.1:3150/" + ); + assert_eq!( + web3_json_rpc["ws_url"].as_str().unwrap(), + "ws://127.0.0.1:3151/" + ); + assert_eq!( + prometheus["pushgateway_url"].as_str().unwrap(), + "http://127.0.0.1:9091" + ); + } +} diff --git a/zk_toolbox/crates/zk_supervisor/Cargo.toml b/zk_toolbox/crates/zk_supervisor/Cargo.toml index 4c450a73639..158abe4e2ec 100644 --- a/zk_toolbox/crates/zk_supervisor/Cargo.toml +++ b/zk_toolbox/crates/zk_supervisor/Cargo.toml @@ -29,3 +29,4 @@ futures.workspace = true types.workspace = true serde_yaml.workspace = true zksync_basic_types.workspace = true +sqruff-lib = { git = "https://github.com/quarylabs/sqruff", version = "0.18.2" } diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/config_writer.rs b/zk_toolbox/crates/zk_supervisor/src/commands/config_writer.rs index 3adecb36d06..04e019936e1 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/config_writer.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/config_writer.rs @@ -1,6 +1,6 @@ use anyhow::Context; use clap::Parser; -use common::{config::global_config, logger, Prompt}; +use common::{logger, Prompt}; use config::{override_config, EcosystemConfig}; use xshell::Shell; @@ -26,7 +26,7 @@ pub fn run(shell: &Shell, args: ConfigWriterArgs) -> anyhow::Result<()> { let path = args.get_config_path().into(); let ecosystem = EcosystemConfig::from_file(shell)?; let chain = ecosystem - .load_chain(global_config().chain_name.clone()) + .load_current_chain() .context(MSG_CHAIN_NOT_FOUND_ERR)?; logger::step(msg_overriding_config(chain.name.clone())); override_config(shell, path, &chain)?; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/fmt.rs b/zk_toolbox/crates/zk_supervisor/src/commands/fmt.rs index fc55ed2c1f6..a6db4643c30 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/fmt.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/fmt.rs @@ -5,6 +5,7 @@ use common::{cmd::Cmd, logger, spinner::Spinner}; use config::EcosystemConfig; use xshell::{cmd, Shell}; +use super::sql_fmt::format_sql; use crate::{ commands::lint_utils::{get_unignored_files, Target}, messages::{ @@ -15,7 +16,7 @@ use crate::{ async fn prettier(shell: Shell, target: Target, check: bool) -> anyhow::Result<()> { let spinner = Spinner::new(&msg_running_fmt_for_extension_spinner(target)); - let files = get_unignored_files(&shell, &target)?; + let files = get_unignored_files(&shell, &target, None)?; if files.is_empty() { logger::info(format!("No files for {target} found")); @@ -59,6 +60,7 @@ async fn run_all_rust_formatters( check: bool, link_to_code: PathBuf, ) -> anyhow::Result<()> { + format_sql(shell.clone(), check).await?; rustfmt(shell.clone(), check, link_to_code).await?; Ok(()) } @@ -92,21 +94,16 @@ pub async fn run(shell: Shell, args: FmtArgs) -> anyhow::Result<()> { for ext in extensions { tasks.push(tokio::spawn(prettier(shell.clone(), ext, args.check))); } - tasks.push(tokio::spawn(rustfmt( + 
tasks.push(tokio::spawn(run_all_rust_formatters(
                 shell.clone(),
                 args.check,
                 ecosystem.link_to_code,
             )));
             tasks.push(tokio::spawn(prettier_contracts(shell.clone(), args.check)));
 
-            futures::future::join_all(tasks)
-                .await
-                .iter()
-                .for_each(|res| {
-                    if let Err(err) = res {
-                        logger::error(err)
-                    }
-                });
+            for result in futures::future::join_all(tasks).await {
+                result??;
+            }
         }
         Some(Formatter::Prettier { mut targets }) => {
             if targets.is_empty() {
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/lint.rs b/zk_toolbox/crates/zk_supervisor/src/commands/lint.rs
index 1861d164ce4..45a7a46ebbe 100644
--- a/zk_toolbox/crates/zk_supervisor/src/commands/lint.rs
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/lint.rs
@@ -92,7 +92,7 @@ fn lint(
 ) -> anyhow::Result<()> {
     let spinner = Spinner::new(&msg_running_linter_for_extension_spinner(target));
     let _dir_guard = shell.push_dir(&ecosystem.link_to_code);
-    let files = get_unignored_files(shell, target)?;
+    let files = get_unignored_files(shell, target, None)?;
     let cmd = cmd!(shell, "yarn");
     let config_path = ecosystem.link_to_code.join(CONFIG_PATH);
     let config_path = config_path.join(format!("{}.js", target));
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/lint_utils.rs b/zk_toolbox/crates/zk_supervisor/src/commands/lint_utils.rs
index a7236dc04fb..9095e445384 100644
--- a/zk_toolbox/crates/zk_supervisor/src/commands/lint_utils.rs
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/lint_utils.rs
@@ -17,14 +17,28 @@ pub enum Target {
 }
 
 #[derive(Deserialize, Serialize, Debug)]
-struct IgnoredData {
-    files: Vec<String>,
-    dirs: Vec<String>,
+pub struct IgnoredData {
+    pub files: Vec<String>,
+    pub dirs: Vec<String>,
 }
 
-pub fn get_unignored_files(shell: &Shell, target: &Target) -> anyhow::Result<Vec<String>> {
+impl IgnoredData {
+    pub fn merge(&mut self, other: &IgnoredData) {
+        self.files.extend(other.files.clone());
+        self.dirs.extend(other.dirs.clone());
+    }
+}
+
+pub fn get_unignored_files(
+    shell: &Shell,
+    target: &Target,
+    ignored_data: Option<IgnoredData>,
+) -> anyhow::Result<Vec<String>> {
     let mut files = Vec::new();
-    let ignored_files: IgnoredData = serde_yaml::from_str(&shell.read_file(IGNORE_FILE)?)?;
+    let mut ignored_files: IgnoredData = serde_yaml::from_str(&shell.read_file(IGNORE_FILE)?)?;
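+    // Caller-supplied ignores (e.g. the SQL formatter skips the zk_toolbox tree)
+    // are merged into the baseline ignore file before filtering.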
+    if let Some(ignored_data) = ignored_data {
+        ignored_files.merge(&ignored_data);
+    }
     let output = cmd!(shell, "git ls-files --recurse-submodules").read()?;
     for line in output.lines() {
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs b/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs
index d3cb99f1e34..38ec586e745 100644
--- a/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/mod.rs
@@ -8,4 +8,5 @@ pub(crate) mod lint_utils;
 pub mod prover;
 pub mod send_transactions;
 pub mod snapshot;
+pub(crate) mod sql_fmt;
 pub mod test;
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/prover/info.rs b/zk_toolbox/crates/zk_supervisor/src/commands/prover/info.rs
index 6a7d7ddeda8..441edb2c4b2 100644
--- a/zk_toolbox/crates/zk_supervisor/src/commands/prover/info.rs
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/prover/info.rs
@@ -4,7 +4,7 @@ use std::{
 };
 
 use anyhow::Context as _;
-use common::{config::global_config, logger};
+use common::logger;
 use config::{ChainConfig, EcosystemConfig};
 use xshell::{cmd, Shell};
 
@@ -13,7 +13,7 @@ use crate::messages::MSG_CHAIN_NOT_FOUND_ERR;
 pub async fn run(shell: &Shell) -> anyhow::Result<()> {
     let ecosystem_config = EcosystemConfig::from_file(shell)?;
     let chain_config = ecosystem_config
-        .load_chain(global_config().chain_name.clone())
+        .load_current_chain()
         .expect(MSG_CHAIN_NOT_FOUND_ERR);
 
     let link_to_code = ecosystem_config.link_to_code;
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/prover/insert_batch.rs b/zk_toolbox/crates/zk_supervisor/src/commands/prover/insert_batch.rs
index b1c02c9a9fe..8c2cdd4d88d 100644
--- a/zk_toolbox/crates/zk_supervisor/src/commands/prover/insert_batch.rs
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/prover/insert_batch.rs
@@ -1,6 +1,4 @@
-use common::{
-    check_prerequisites, cmd::Cmd, config::global_config, logger, PROVER_CLI_PREREQUISITE,
-};
+use common::{check_prerequisites, cmd::Cmd, logger, PROVER_CLI_PREREQUISITE};
 use config::{get_link_to_prover, EcosystemConfig};
 use xshell::{cmd, Shell};
 
@@ -17,7 +15,7 @@ pub async fn run(shell: &Shell, args: InsertBatchArgs) -> anyhow::Result<()> {
     let ecosystem_config = EcosystemConfig::from_file(shell)?;
 
     let chain_config = ecosystem_config
-        .load_chain(global_config().chain_name.clone())
+        .load_current_chain()
         .expect(MSG_CHAIN_NOT_FOUND_ERR);
 
     let version = info::get_protocol_version(shell, &get_link_to_prover(&ecosystem_config)).await?;
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/prover/insert_version.rs b/zk_toolbox/crates/zk_supervisor/src/commands/prover/insert_version.rs
index 16bbdf13df4..3dd9b7e0a1b 100644
--- a/zk_toolbox/crates/zk_supervisor/src/commands/prover/insert_version.rs
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/prover/insert_version.rs
@@ -1,6 +1,4 @@
-use common::{
-    check_prerequisites, cmd::Cmd, config::global_config, logger, PROVER_CLI_PREREQUISITE,
-};
+use common::{check_prerequisites, cmd::Cmd, logger, PROVER_CLI_PREREQUISITE};
 use config::{get_link_to_prover, EcosystemConfig};
 use xshell::{cmd, Shell};
 
@@ -17,7 +15,7 @@ pub async fn run(shell: &Shell, args: InsertVersionArgs) -> anyhow::Result<()> {
     let ecosystem_config = EcosystemConfig::from_file(shell)?;
 
     let chain_config = ecosystem_config
-        .load_chain(global_config().chain_name.clone())
+        .load_current_chain()
         .expect(MSG_CHAIN_NOT_FOUND_ERR);
 
     let version = info::get_protocol_version(shell, &get_link_to_prover(&ecosystem_config)).await?;
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/snapshot.rs b/zk_toolbox/crates/zk_supervisor/src/commands/snapshot.rs
index 4ec44579aaf..608c5623334 100644
--- a/zk_toolbox/crates/zk_supervisor/src/commands/snapshot.rs
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/snapshot.rs
@@ -1,6 +1,6 @@
 use anyhow::Context;
 use clap::Subcommand;
-use common::{cmd::Cmd, config::global_config, logger};
+use common::{cmd::Cmd, logger};
 use config::EcosystemConfig;
 use xshell::{cmd, Shell};
 
@@ -24,7 +24,7 @@ pub(crate) async fn run(shell: &Shell, args: SnapshotCommands) -> anyhow::Result<()> {
 async fn create(shell: &Shell) -> anyhow::Result<()> {
     let ecosystem = EcosystemConfig::from_file(shell)?;
     let chain = ecosystem
-        .load_chain(global_config().chain_name.clone())
+        .load_current_chain()
         .context(MSG_CHAIN_NOT_FOUND_ERR)?;
 
     let config_path = chain.path_to_general_config();
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/sql_fmt.rs b/zk_toolbox/crates/zk_supervisor/src/commands/sql_fmt.rs
new file mode 100644
index 00000000000..ede2500e6ab
--- /dev/null
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/sql_fmt.rs
@@ -0,0 +1,149 @@
+use std::mem::take;
+
+use anyhow::{bail, Result};
+use common::spinner::Spinner;
+use sqruff_lib::{api::simple::get_simple_config, core::linter::core::Linter};
+use xshell::Shell;
+
+use super::lint_utils::{get_unignored_files, IgnoredData, Target};
+use crate::messages::{msg_file_is_not_formatted, MSG_RUNNING_SQL_FMT_SPINNER};
+
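+// Formats SQL embedded in `sqlx::query!` / `sqlx::query_as!` string literals:
+// each query is extracted, run through sqruff, and re-embedded as a raw string
+// at its original indentation.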
+fn format_query(query: &str) -> anyhow::Result<String> {
+    let exclude_rules = vec!["LT12".to_string()]; // avoid adding newline before `$` character
+    let cfg = get_simple_config(Some("postgres".into()), None, Some(exclude_rules), None).unwrap();
+    let mut linter = Linter::new(cfg, None, None);
+    let mut result = linter.lint_string_wrapped(query, None, true);
+    let formatted_query = take(&mut result.paths[0].files[0]).fix_string();
+    // Remove first empty line
+    let formatted_query = formatted_query
+        .lines()
+        .skip(1)
+        .collect::<Vec<_>>()
+        .join("\n");
+
+    Ok(formatted_query)
+}
+
+fn extract_query_from_rust_string(query: &str, is_raw: bool) -> String {
+    let mut query = query.trim().to_string();
+    if query.ends_with(',') {
+        query.pop();
+    }
+    // Removing quotes
+    if !is_raw {
+        query = query[1..query.len() - 1].to_string();
+    } else {
+        query = query[3..query.len() - 2].to_string();
+    }
+
+    // Remove all escape characters
+    if !is_raw {
+        query = query.replace(r#"\""#, "\"");
+    }
+
+    query
+}
+
+fn embed_text_inside_rust_string(query: &str) -> String {
+    format!("r#\"\n{}\n\"#", query)
+}
+
+fn add_indent(query: &str, indent: usize) -> String {
+    query
+        .lines()
+        .map(|line| format!("{:indent$}{}", "", line))
+        .collect::<Vec<_>>()
+        .join("\n")
+}
+
+fn format_rust_string_query(query: &str, is_raw: bool) -> anyhow::Result<String> {
+    let base_indent = query.find(|c: char| !c.is_whitespace()).unwrap_or(0);
+    let raw_query = extract_query_from_rust_string(query, is_raw);
+    let formatted_query = format_query(&raw_query)?;
+    let reconstructed_rust_string = embed_text_inside_rust_string(&formatted_query);
+    Ok(add_indent(&reconstructed_rust_string, base_indent))
+}
+
+fn fmt_file(shell: &Shell, file_path: &str, check: bool) -> Result<()> {
+    let content = shell.read_file(file_path)?;
+    let mut modified_file = String::new();
+
+    let mut lines_to_query: Option<usize> = None;
+    let mut is_inside_query = false;
+    let mut is_raw_string = false;
+    let mut built_query = String::new();
+
+    for line in content.lines() {
+        if line.ends_with("sqlx::query!(") {
+            lines_to_query = Some(1);
+            is_raw_string = false;
+            built_query.clear();
+        } else if line.ends_with("sqlx::query_as!(") {
+            lines_to_query = Some(2);
+            is_raw_string = false;
+            built_query.clear();
+        }
+
+        if let Some(ref mut count) = lines_to_query {
+            if *count == 0 {
+                is_inside_query = true;
+                lines_to_query = None;
+                if line.contains("r#\"") {
+                    is_raw_string = true;
+                }
+            } else {
+                *count -= 1;
+            }
+        }
+
+        if is_inside_query {
+            let query_not_empty = !built_query.is_empty() || line.trim().len() > 1;
+            let raw_string_query_ended = line.ends_with("\"#,") || line.ends_with("\"#");
+            let regular_string_query_ended =
+                (line.ends_with("\",") || line.ends_with('"')) && query_not_empty;
+            built_query.push_str(line);
+            built_query.push('\n');
+
+            let line_end_is_not_escape = !line.ends_with("\\\"") && !line.ends_with("\\\",");
+            if (is_raw_string && raw_string_query_ended)
+                || (!is_raw_string && regular_string_query_ended && line_end_is_not_escape)
+            {
+                is_inside_query = false;
+                let ended_with_comma = built_query.trim_end().ends_with(',');
+                modified_file
+                    .push_str(format_rust_string_query(&built_query, is_raw_string)?.trim_end());
+                if ended_with_comma {
+                    modified_file.push(',');
+                }
+                modified_file.push('\n');
+            }
+        } else {
+            modified_file.push_str(line);
+            modified_file.push('\n');
+        }
+    }
+
+    if content !=
modified_file { + if check { + bail!(msg_file_is_not_formatted(file_path)); + } else { + shell.write_file(file_path, &modified_file)?; + } + } + + Ok(()) +} + +pub async fn format_sql(shell: Shell, check: bool) -> anyhow::Result<()> { + let spinner = Spinner::new(MSG_RUNNING_SQL_FMT_SPINNER); + let ignored_data = Some(IgnoredData { + files: vec![], + dirs: vec!["zk_toolbox".to_string()], + }); + let rust_files = get_unignored_files(&shell, &Target::Rs, ignored_data)?; + for file in rust_files { + fmt_file(&shell, &file, check)?; + } + spinner.finish(); + Ok(()) +} diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/fees.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/args/fees.rs new file mode 100644 index 00000000000..1337566e536 --- /dev/null +++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/args/fees.rs @@ -0,0 +1,12 @@ +use clap::Parser; +use serde::{Deserialize, Serialize}; + +use crate::messages::{MSG_NO_DEPS_HELP, MSG_NO_KILL_HELP}; + +#[derive(Debug, Serialize, Deserialize, Parser)] +pub struct FeesArgs { + #[clap(short, long, help = MSG_NO_DEPS_HELP)] + pub no_deps: bool, + #[clap(short, long, help = MSG_NO_KILL_HELP)] + pub no_kill: bool, +} diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/mod.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/args/mod.rs index d74d5e64a7d..b951608e768 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/args/mod.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/args/mod.rs @@ -1,3 +1,4 @@ +pub mod fees; pub mod integration; pub mod recovery; pub mod revert; diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/db.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/db.rs index a08b0404605..d173bb95168 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/db.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/db.rs @@ -26,7 +26,7 @@ pub async fn reset_test_databases( for dal in dals { let mut url = dal.url.clone(); url.set_path(""); - wait_for_db(&url, 3).await?; + wait_for_db(&url, 20).await?; database::reset::reset_database(shell, link_to_code, dal.clone()).await?; } diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/fees.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/fees.rs new file mode 100644 index 00000000000..e0b881a14db --- /dev/null +++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/fees.rs @@ -0,0 +1,65 @@ +use std::path::PathBuf; + +use anyhow::Context; +use common::{cmd::Cmd, config::global_config, logger}; +use config::EcosystemConfig; +use xshell::{cmd, Shell}; + +use super::{ + args::fees::FeesArgs, + utils::{build_contracts, install_and_build_dependencies, TS_INTEGRATION_PATH}, +}; +use crate::{ + commands::test::utils::{TestWallets, TEST_WALLETS_PATH}, + messages::{ + MSG_CHAIN_NOT_FOUND_ERR, MSG_DESERIALIZE_TEST_WALLETS_ERR, + MSG_INTEGRATION_TESTS_RUN_SUCCESS, + }, +}; + +pub async fn run(shell: &Shell, args: FeesArgs) -> anyhow::Result<()> { + let ecosystem_config = EcosystemConfig::from_file(shell)?; + shell.change_dir(ecosystem_config.link_to_code.join(TS_INTEGRATION_PATH)); + let chain_config = ecosystem_config + .load_current_chain() + .expect(MSG_CHAIN_NOT_FOUND_ERR); + + if !args.no_deps { + logger::info("Installing dependencies"); + build_contracts(shell, &ecosystem_config)?; + install_and_build_dependencies(shell, &ecosystem_config)?; + } + + logger::info(format!( + "Running fees tests on chain: {}", + ecosystem_config.current_chain() + )); + + let wallets_path: PathBuf = 
ecosystem_config.link_to_code.join(TEST_WALLETS_PATH); + let wallets: TestWallets = serde_json::from_str(shell.read_file(&wallets_path)?.as_ref()) + .context(MSG_DESERIALIZE_TEST_WALLETS_ERR)?; + + wallets + .init_test_wallet(&ecosystem_config, &chain_config) + .await?; + + let mut command = cmd!(shell, "yarn jest fees.test.ts --testTimeout 240000") + .env("SPAWN_NODE", "1") + .env("RUN_FEE_TEST", "1") + .env("NO_KILL", args.no_kill.to_string()) + .env("CHAIN_NAME", ecosystem_config.current_chain()) + .env("MASTER_WALLET_PK", wallets.get_test_pk(&chain_config)?); + + if global_config().verbose { + command = command.env( + "ZKSYNC_DEBUG_LOGS", + format!("{:?}", global_config().verbose), + ) + } + + Cmd::new(command).with_force_run().run()?; + + logger::outro(MSG_INTEGRATION_TESTS_RUN_SUCCESS); + + Ok(()) +} diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/integration.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/integration.rs index fb3e1436acc..5107abf6a59 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/integration.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/integration.rs @@ -7,20 +7,21 @@ use xshell::{cmd, Shell}; use super::{ args::integration::IntegrationArgs, - utils::{build_contracts, install_and_build_dependencies, TestWallets, TEST_WALLETS_PATH}, + utils::{ + build_contracts, install_and_build_dependencies, TestWallets, TEST_WALLETS_PATH, + TS_INTEGRATION_PATH, + }, }; use crate::messages::{ msg_integration_tests_run, MSG_CHAIN_NOT_FOUND_ERR, MSG_DESERIALIZE_TEST_WALLETS_ERR, MSG_INTEGRATION_TESTS_RUN_SUCCESS, }; -const TS_INTEGRATION_PATH: &str = "core/tests/ts-integration"; - pub async fn run(shell: &Shell, args: IntegrationArgs) -> anyhow::Result<()> { let ecosystem_config = EcosystemConfig::from_file(shell)?; let chain_config = ecosystem_config - .load_chain(global_config().chain_name.clone()) + .load_current_chain() .context(MSG_CHAIN_NOT_FOUND_ERR)?; shell.change_dir(ecosystem_config.link_to_code.join(TS_INTEGRATION_PATH)); diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/loadtest.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/loadtest.rs index 5a2a87871b5..ee307438ec9 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/loadtest.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/loadtest.rs @@ -9,7 +9,7 @@ pub fn run(shell: &Shell) -> anyhow::Result<()> { let ecosystem_config = EcosystemConfig::from_file(shell)?; let chain_config = ecosystem_config - .load_chain(global_config().chain_name.clone()) + .load_current_chain() .context(MSG_CHAIN_NOT_FOUND_ERR)?; let general_api = chain_config diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/mod.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/mod.rs index 7d2af71ae9c..ae6b4518e6d 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/mod.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/mod.rs @@ -1,6 +1,6 @@ use args::{ - integration::IntegrationArgs, recovery::RecoveryArgs, revert::RevertArgs, rust::RustArgs, - upgrade::UpgradeArgs, + fees::FeesArgs, integration::IntegrationArgs, recovery::RecoveryArgs, revert::RevertArgs, + rust::RustArgs, upgrade::UpgradeArgs, }; use clap::Subcommand; use xshell::Shell; @@ -14,6 +14,7 @@ use crate::messages::{ mod args; mod build; mod db; +mod fees; mod integration; mod l1_contracts; mod loadtest; @@ -29,6 +30,8 @@ mod wallet; pub enum TestCommands { #[clap(about = MSG_INTEGRATION_TESTS_ABOUT, alias = "i")] Integration(IntegrationArgs), + #[clap(about = "Run fees 
test", alias = "i")] + Fees(FeesArgs), #[clap(about = MSG_REVERT_TEST_ABOUT, alias = "r")] Revert(RevertArgs), #[clap(about = MSG_RECOVERY_TEST_ABOUT, alias = "rec")] @@ -52,6 +55,7 @@ pub enum TestCommands { pub async fn run(shell: &Shell, args: TestCommands) -> anyhow::Result<()> { match args { TestCommands::Integration(args) => integration::run(shell, args).await, + TestCommands::Fees(args) => fees::run(shell, args).await, TestCommands::Revert(args) => revert::run(shell, args).await, TestCommands::Recovery(args) => recovery::run(shell, args).await, TestCommands::Upgrade(args) => upgrade::run(shell, args), diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/recovery.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/recovery.rs index c69a9826d56..6a3e337d41e 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/recovery.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/recovery.rs @@ -1,7 +1,7 @@ use std::path::PathBuf; use anyhow::Context; -use common::{cmd::Cmd, config::global_config, logger, server::Server, spinner::Spinner}; +use common::{cmd::Cmd, logger, server::Server, spinner::Spinner}; use config::EcosystemConfig; use xshell::{cmd, Shell}; @@ -40,7 +40,7 @@ async fn run_test( ) -> anyhow::Result<()> { Spinner::new("Running test...").freeze(); let chain_config = ecosystem_config - .load_chain(global_config().chain_name.clone()) + .load_current_chain() .context(MSG_CHAIN_NOT_FOUND_ERR)?; let cmd = if args.snapshot { diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/revert.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/revert.rs index 97794efeb3e..8b00e9d7f4d 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/revert.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/revert.rs @@ -1,7 +1,7 @@ use std::path::PathBuf; use anyhow::Context; -use common::{cmd::Cmd, config::global_config, logger, spinner::Spinner}; +use common::{cmd::Cmd, logger, spinner::Spinner}; use config::EcosystemConfig; use xshell::{cmd, Shell}; @@ -40,7 +40,7 @@ async fn run_test( Spinner::new(&msg_revert_tests_run(args.external_node)).freeze(); let chain_config = ecosystem_config - .load_chain(global_config().chain_name.clone()) + .load_current_chain() .context(MSG_CHAIN_NOT_FOUND_ERR)?; let wallets_path: PathBuf = ecosystem_config.link_to_code.join(TEST_WALLETS_PATH); diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/rust.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/rust.rs index fdee03fe63e..7011e0f0f87 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/rust.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/rust.rs @@ -75,6 +75,12 @@ pub async fn run(shell: &Shell, args: RustArgs) -> anyhow::Result<()> { .env("TEST_PROVER_DATABASE_URL", test_prover_url); cmd.run()?; + // Run unit tests for zk_toolbox + let _dir_guard = shell.push_dir(link_to_code.join("zk_toolbox")); + Cmd::new(cmd!(shell, "cargo nextest run --release")) + .with_force_run() + .run()?; + logger::outro(MSG_UNIT_TESTS_RUN_SUCCESS); Ok(()) } diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/utils.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/utils.rs index 3a5cfd179cc..8656ff44d31 100644 --- a/zk_toolbox/crates/zk_supervisor/src/commands/test/utils.rs +++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/utils.rs @@ -17,7 +17,7 @@ use crate::messages::{ pub const TEST_WALLETS_PATH: &str = "etc/test_config/constant/eth.json"; const AMOUNT_FOR_DISTRIBUTION_TO_WALLETS: u128 = 1000000000000000000000; -const 
TS_INTEGRATION_PATH: &str = "core/tests/ts-integration";
+pub const TS_INTEGRATION_PATH: &str = "core/tests/ts-integration";
 const CONTRACTS_TEST_DATA_PATH: &str = "etc/contracts-test-data";
 
 #[derive(Deserialize)]
diff --git a/zk_toolbox/crates/zk_supervisor/src/commands/test/wallet.rs b/zk_toolbox/crates/zk_supervisor/src/commands/test/wallet.rs
index ff5179ab5fe..62f32b50d55 100644
--- a/zk_toolbox/crates/zk_supervisor/src/commands/test/wallet.rs
+++ b/zk_toolbox/crates/zk_supervisor/src/commands/test/wallet.rs
@@ -1,7 +1,7 @@
 use std::path::PathBuf;
 
 use anyhow::Context;
-use common::{config::global_config, logger};
+use common::logger;
 use config::EcosystemConfig;
 use xshell::Shell;
 
@@ -16,7 +16,7 @@ pub fn run(shell: &Shell) -> anyhow::Result<()> {
     let ecosystem_config = EcosystemConfig::from_file(shell)?;
 
     let chain_config = ecosystem_config
-        .load_chain(global_config().chain_name.clone())
+        .load_current_chain()
         .context("Chain not found")?;
 
     let wallets_path: PathBuf = ecosystem_config.link_to_code.join(TEST_WALLETS_PATH);
diff --git a/zk_toolbox/crates/zk_supervisor/src/dals.rs b/zk_toolbox/crates/zk_supervisor/src/dals.rs
index f9c07585f6d..962a848fe00 100644
--- a/zk_toolbox/crates/zk_supervisor/src/dals.rs
+++ b/zk_toolbox/crates/zk_supervisor/src/dals.rs
@@ -1,5 +1,4 @@
 use anyhow::{anyhow, Context};
-use common::config::global_config;
 use config::{EcosystemConfig, SecretsConfig};
 use url::Url;
 use xshell::Shell;
@@ -91,7 +90,7 @@ pub fn get_core_dal(shell: &Shell, url: Option<Url>) -> anyhow::Result<Dal> {
 fn get_secrets(shell: &Shell) -> anyhow::Result<SecretsConfig> {
     let ecosystem_config = EcosystemConfig::from_file(shell)?;
     let chain_config = ecosystem_config
-        .load_chain(global_config().chain_name.clone())
+        .load_current_chain()
         .ok_or(anyhow!(MSG_CHAIN_NOT_FOUND_ERR))?;
     let secrets = chain_config.get_secrets_config()?;
diff --git a/zk_toolbox/crates/zk_supervisor/src/messages.rs b/zk_toolbox/crates/zk_supervisor/src/messages.rs
index 5f68630f756..6f6deb22edb 100644
--- a/zk_toolbox/crates/zk_supervisor/src/messages.rs
+++ b/zk_toolbox/crates/zk_supervisor/src/messages.rs
@@ -197,6 +197,12 @@ pub(super) const MSG_LINT_CONFIG_PATH_ERR: &str = "Lint config path error";
 pub(super) const MSG_RUNNING_CONTRACTS_LINTER_SPINNER: &str = "Running contracts linter..";
 pub(super) const MSG_RUNNING_CONTRACTS_FMT_SPINNER: &str = "Running prettier for contracts..";
 
+pub(super) const MSG_RUNNING_SQL_FMT_SPINNER: &str = "Running SQL formatter..";
+
+pub(super) fn msg_file_is_not_formatted(file: &str) -> String {
+    format!("File {} is not formatted", file)
+}
+
 // Test wallets related messages
 pub(super) const MSG_TEST_WALLETS_INFO: &str = "Print test wallets information";
 pub(super) const MSG_DESERIALIZE_TEST_WALLETS_ERR: &str = "Impossible to deserialize test wallets";
diff --git a/zk_toolbox/rust-toolchain b/zk_toolbox/rust-toolchain
index aaceec04e04..dbd41264aa9 100644
--- a/zk_toolbox/rust-toolchain
+++ b/zk_toolbox/rust-toolchain
@@ -1 +1 @@
-1.80.0
+1.81.0
diff --git a/zk_toolbox/zkup/README.md b/zk_toolbox/zkup/README.md
index 002ca46b5f6..d6e3e634688 100644
--- a/zk_toolbox/zkup/README.md
+++ b/zk_toolbox/zkup/README.md
@@ -39,8 +39,11 @@ The `zkup` script provides various options for installing `zk_toolbox`:
 
 - `-v, --version <version>`
   Git tag to use when installing from a repository. Ignored if `--branch` or `--commit` is provided.
 
-- `--skip-zk-supervisor`
-  Skip the installation of the `zk_supervisor` binary.
+- `--inception`
+  Installs `zk_inception` from the repository. By default, `zkup` installs `zk_inception` and `zk_supervisor`.
+
+- `--supervisor`
+  Installs `zk_supervisor` from the repository.
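+
+For example, to install only `zk_supervisor` (using the default repository, since no `--repo` or `--path` is given):
+
+```bash
+zkup --supervisor
+```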
 
 ### Local Installation
 
@@ -66,8 +69,8 @@ different repository, branch, commit, or version using the respective options. I
 zkup --repo matter-labs/zksync-era --version 0.1.1
 ```
 
-**Install from a local path, skipping `zk_supervisor`:**
+**Install from a local path, only installing `zk_inception`:**
 
 ```bash
-zkup --path /path/to/local/zk_toolbox --skip-zk-supervisor
+zkup --path /path/to/local/zk_toolbox --inception
 ```
diff --git a/zk_toolbox/zkup/zkup b/zk_toolbox/zkup/zkup
index 16637c35e6a..e6ca1748738 100755
--- a/zk_toolbox/zkup/zkup
+++ b/zk_toolbox/zkup/zkup
@@ -5,10 +5,11 @@ BASE_DIR=${XDG_CONFIG_HOME:-$HOME}
 ZKT_DIR=${ZKT_DIR:-"$BASE_DIR/.zkt"}
 ZKT_BIN_DIR="$ZKT_DIR/bin"
 
-ZKUP_SKIP_ZK_SUPERVISOR=0
+ZKUP_INSTALL_SUPERVISOR=0
+ZKUP_INSTALL_INCEPTION=0
 ZKUP_ALIAS=0
 
-BINS=(zk_inception zk_supervisor)
+BINS=()
 
 main() {
   parse_args "$@"
@@ -18,6 +19,8 @@ main() {
   check_prerequisites
   mkdir -p "$ZKT_BIN_DIR"
 
+  set_bins
+
   if [ -n "$ZKUP_PATH" ]; then
     install_local
   else
@@ -84,7 +87,8 @@ parse_args() {
       shift
      ZKUP_VERSION=$1
      ;;
-    --skip-zk-supervisor) ZKUP_SKIP_ZK_SUPERVISOR=1 ;;
+    --inception) ZKUP_INSTALL_INCEPTION=1 ;;
+    --supervisor) ZKUP_INSTALL_SUPERVISOR=1 ;;
    -a | --alias) ZKUP_ALIAS=1 ;;
    -h | --help)
      usage
@@ -113,15 +117,31 @@ Options:
  -c, --commit            Git commit hash to use when installing from a repository. Ignored if --branch or --version is provided.
  -v, --version           Git tag to use when installing from a repository. Ignored if --branch or --commit is provided.
  -a, --alias             Create aliases zki and zks for zk_inception and zk_supervisor binaries.
-  --skip-zk-supervisor    Skip installation of the zk_supervisor binary.
+  --inception             Installs the zk_inception binary. Default is to install both zk_inception and zk_supervisor binaries.
+  --supervisor            Installs the zk_supervisor binary. Default is to install both zk_inception and zk_supervisor binaries.
  -h, --help              Show this help message and exit.

Examples:
  $(basename "$0") --repo matter-labs/zksync-era --version 0.1.1
-  $(basename "$0") --path /path/to/local/zk_toolbox --skip-zk-supervisor
+  $(basename "$0") --path /path/to/local/zk_toolbox --inception
EOF
}

+set_bins() {
+  if [ $ZKUP_INSTALL_INCEPTION -eq 1 ]; then
+    BINS+=(zk_inception)
+  fi
+
+  if [ $ZKUP_INSTALL_SUPERVISOR -eq 1 ]; then
+    BINS+=(zk_supervisor)
+  fi
+
+  # Install both binaries if no option is provided
+  if [ ${#BINS[@]} -eq 0 ]; then
+    BINS=(zk_inception zk_supervisor)
+  fi
+}
+
 install_local() {
  if [ !
-d "$ZKUP_PATH/zk_toolbox" ]; then err "Path $ZKUP_PATH does not contain zk_toolbox" @@ -135,10 +155,6 @@ install_local() { say "Installing zk_toolbox from $ZKUP_PATH" ensure cd "$ZKUP_PATH"/zk_toolbox - if [ $ZKUP_SKIP_ZK_SUPERVISOR -eq 1 ]; then - BINS=(zk_inception) - fi - for bin in "${BINS[@]}"; do say "Installing $bin" ensure cargo install --root $ZKT_DIR --path ./crates/$bin --force @@ -154,10 +170,6 @@ install_from_repo() { say "Installing zk_toolbox from $ZKUP_REPO" - if [ $ZKUP_SKIP_ZK_SUPERVISOR -eq 1 ]; then - BINS=(zk_inception) - fi - if [ -n "$ZKUP_VERSION" ]; then if [ -n "$ZKUP_COMMIT" ] || [ -n "$ZKUP_BRANCH" ]; then warn "Ignoring --commit and --branch arguments when installing by version" @@ -176,10 +188,12 @@ install_from_repo() { } create_alias() { - say "Creating alias 'zki' for zk_inception" - ensure ln -sf "$ZKT_BIN_DIR/zk_inception" "$ZKT_BIN_DIR/zki" + if [[ "${BINS[@]}" =~ "zk_inception" ]]; then + say "Creating alias 'zki' for zk_inception" + ensure ln -sf "$ZKT_BIN_DIR/zk_inception" "$ZKT_BIN_DIR/zki" + fi - if [ $ZKUP_SKIP_ZK_SUPERVISOR -eq 0 ]; then + if [[ "${BINS[@]}" =~ "zk_supervisor" ]]; then say "Creating alias 'zks' for zk_supervisor" ensure ln -sf "$ZKT_BIN_DIR/zk_supervisor" "$ZKT_BIN_DIR/zks" fi