From 2cab03c933a6b9a6f8e428d3847ebeb2d7f918e4 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Mon, 25 Sep 2023 09:30:52 +0100 Subject: [PATCH 01/59] ref(workflows): consolidate workflows based on their purpose This also renames the workflows to make their naming more consistent and adding a naming convention Fixes: #6166 Fixes: #6167 --- ...s-delivery.yml => cd-deploy-nodes-gcp.yml} | 113 +- .github/workflows/cd-deploy-tests-gcp.yml | 966 ++++++++++++++++++ ...ces.yml => chore-delete-gcp-resources.yml} | 0 ...p-tests.yml => chore-deploy-gcp-tests.yml} | 6 +- ...ement.yml => chore-project-management.yml} | 0 ...ly.patch.yml => ci-build-crates.patch.yml} | 2 +- ...s-individually.yml => ci-build-crates.yml} | 4 +- ...verage.patch.yml => ci-coverage.patch.yml} | 2 +- .../{coverage.yml => ci-coverage.yml} | 4 +- ...yml => ci-integration-tests-gcp.patch.yml} | 52 +- ...ocker.yml => ci-integration-tests-gcp.yml} | 251 +---- .../{lint.patch.yml => ci-lint.patch.yml} | 0 .github/workflows/{lint.yml => ci-lint.yml} | 2 +- .../workflows/ci-unit-tests-docker.patch.yml | 82 ++ .github/workflows/ci-unit-tests-docker.yml | 384 +++++++ ...ntegration-os.yml => ci-unit-tests-os.yml} | 235 ++++- ...patch.yml => ci-unites-tests-os.patch.yml} | 2 +- .../workflows/continous-delivery.patch.yml | 35 - ...tinous-integration-docker.patch-always.yml | 26 - ...tch.yml => docs-deploy-firebase.patch.yml} | 0 .../{docs.yml => docs-deploy-firebase.yml} | 0 ...ion.yml => docs-dockerhub-description.yml} | 0 ...disks.yml => manual-find-cached-disks.yml} | 0 ...al-deploy.yml => manual-zcashd-deploy.yml} | 0 .github/workflows/release-binaries.yml | 6 +- ...r-image.yml => sub-build-docker-image.yml} | 0 ...h.yml => sub-build-lightwalletd.patch.yml} | 2 +- ...walletd.yml => sub-build-lightwalletd.yml} | 4 +- ...-params.yml => sub-build-zcash-params.yml} | 8 +- book/src/dev/continuous-integration.md | 2 +- docker/Dockerfile | 2 +- 31 files changed, 1717 insertions(+), 473 deletions(-) rename 
.github/workflows/{continous-delivery.yml => cd-deploy-nodes-gcp.yml} (73%) create mode 100644 .github/workflows/cd-deploy-tests-gcp.yml rename .github/workflows/{delete-gcp-resources.yml => chore-delete-gcp-resources.yml} (100%) rename .github/workflows/{deploy-gcp-tests.yml => chore-deploy-gcp-tests.yml} (99%) rename .github/workflows/{project-management.yml => chore-project-management.yml} (100%) rename .github/workflows/{build-crates-individually.patch.yml => ci-build-crates.patch.yml} (97%) rename .github/workflows/{build-crates-individually.yml => ci-build-crates.yml} (97%) rename .github/workflows/{coverage.patch.yml => ci-coverage.patch.yml} (93%) rename .github/workflows/{coverage.yml => ci-coverage.yml} (97%) rename .github/workflows/{continous-integration-docker.patch.yml => ci-integration-tests-gcp.patch.yml} (67%) rename .github/workflows/{continous-integration-docker.yml => ci-integration-tests-gcp.yml} (71%) rename .github/workflows/{lint.patch.yml => ci-lint.patch.yml} (100%) rename .github/workflows/{lint.yml => ci-lint.yml} (99%) create mode 100644 .github/workflows/ci-unit-tests-docker.patch.yml create mode 100644 .github/workflows/ci-unit-tests-docker.yml rename .github/workflows/{continous-integration-os.yml => ci-unit-tests-os.yml} (57%) rename .github/workflows/{continous-integration-os.patch.yml => ci-unites-tests-os.patch.yml} (97%) delete mode 100644 .github/workflows/continous-delivery.patch.yml delete mode 100644 .github/workflows/continous-integration-docker.patch-always.yml rename .github/workflows/{docs.patch.yml => docs-deploy-firebase.patch.yml} (100%) rename .github/workflows/{docs.yml => docs-deploy-firebase.yml} (100%) rename .github/workflows/{dockerhub-description.yml => docs-dockerhub-description.yml} (100%) rename .github/workflows/{find-cached-disks.yml => manual-find-cached-disks.yml} (100%) rename .github/workflows/{zcashd-manual-deploy.yml => manual-zcashd-deploy.yml} (100%) rename 
.github/workflows/{build-docker-image.yml => sub-build-docker-image.yml} (100%) rename .github/workflows/{zcash-lightwalletd.patch.yml => sub-build-lightwalletd.patch.yml} (90%) rename .github/workflows/{zcash-lightwalletd.yml => sub-build-lightwalletd.yml} (97%) rename .github/workflows/{zcash-params.yml => sub-build-zcash-params.yml} (86%) diff --git a/.github/workflows/continous-delivery.yml b/.github/workflows/cd-deploy-nodes-gcp.yml similarity index 73% rename from .github/workflows/continous-delivery.yml rename to .github/workflows/cd-deploy-nodes-gcp.yml index 1d1c6efba11..1598f969be4 100644 --- a/.github/workflows/continous-delivery.yml +++ b/.github/workflows/cd-deploy-nodes-gcp.yml @@ -1,4 +1,4 @@ -name: CD +name: Deploy Nodes to GCP # Ensures that only one workflow task will run at a time. Previous deployments, if # already in process, won't get cancelled. Instead, we let the first to complete @@ -48,32 +48,12 @@ on: # - 'docker/**' # - '.dockerignore' # - '.github/workflows/continous-delivery.yml' - # - '.github/workflows/build-docker-image.yml' - - # Only runs the Docker image tests, doesn't deploy any instances - pull_request: - paths: - # code and tests - - '**/*.rs' - # hard-coded checkpoints and proptest regressions - - '**/*.txt' - # dependencies - - '**/Cargo.toml' - - '**/Cargo.lock' - # configuration files - - '.cargo/config.toml' - - '**/clippy.toml' - # workflow definitions - - 'docker/**' - - '.dockerignore' - - '.github/workflows/continous-delivery.yml' - - '.github/workflows/find-cached-disks.yml' + # - '.github/workflows/sub-build-docker-image.yml' release: types: - published - jobs: # If a release was made we want to extract the first part of the semver from the # tag_name @@ -108,7 +88,7 @@ jobs: # The image will be commonly named `zebrad:` build: name: Build CD Docker - uses: ./.github/workflows/build-docker-image.yml + uses: ./.github/workflows/sub-build-docker-image.yml with: dockerfile_path: ./docker/Dockerfile dockerfile_target: 
runtime @@ -116,87 +96,6 @@ jobs: no_cache: ${{ inputs.no_cache || false }} rust_log: info - # Test that Zebra works using the default config with the latest Zebra version. - test-configuration-file: - name: Test Zebra CD Docker config file - timeout-minutes: 15 - runs-on: ubuntu-latest - needs: build - steps: - - uses: r7kamura/rust-problem-matchers@v1.4.0 - - - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v4 - with: - short-length: 7 - - # Make sure Zebra can sync at least one full checkpoint on mainnet - - name: Run tests using the default config - run: | - set -ex - docker pull ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} - docker run --detach --name default-conf-tests -t ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} - # show the logs, even if the job times out - docker logs --tail all --follow default-conf-tests | \ - tee --output-error=exit /dev/stderr | \ - grep --max-count=1 --extended-regexp --color=always \ - 'net.*=.*Main.*estimated progress to chain tip.*BeforeOverwinter' - docker stop default-conf-tests - # get the exit status from docker - EXIT_STATUS=$( \ - docker wait default-conf-tests || \ - docker inspect --format "{{.State.ExitCode}}" default-conf-tests || \ - echo "missing container, or missing exit status for container" \ - ) - docker logs default-conf-tests - echo "docker exit status: $EXIT_STATUS" - if [[ "$EXIT_STATUS" = "137" ]]; then - echo "ignoring expected signal status" - exit 0 - fi - exit "$EXIT_STATUS" - - # Test reconfiguring the docker image for testnet. 
- test-configuration-file-testnet: - name: Test testnet Zebra CD Docker config file - timeout-minutes: 15 - runs-on: ubuntu-latest - needs: build - steps: - - uses: r7kamura/rust-problem-matchers@v1.4.0 - - - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v4 - with: - short-length: 7 - - # Make sure Zebra can sync the genesis block on testnet - - name: Run tests using a testnet config - run: | - set -ex - docker pull ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} - docker run --env "NETWORK=Testnet" --detach --name testnet-conf-tests -t ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} - # show the logs, even if the job times out - docker logs --tail all --follow testnet-conf-tests | \ - tee --output-error=exit /dev/stderr | \ - grep --max-count=1 --extended-regexp --color=always \ - -e 'net.*=.*Test.*estimated progress to chain tip.*Genesis' \ - -e 'net.*=.*Test.*estimated progress to chain tip.*BeforeOverwinter' - docker stop testnet-conf-tests - # get the exit status from docker - EXIT_STATUS=$( \ - docker wait testnet-conf-tests || \ - docker inspect --format "{{.State.ExitCode}}" testnet-conf-tests || \ - echo "missing container, or missing exit status for container" \ - ) - docker logs testnet-conf-tests - echo "docker exit status: $EXIT_STATUS" - if [[ "$EXIT_STATUS" = "137" ]]; then - echo "ignoring expected signal status" - exit 0 - fi - exit "$EXIT_STATUS" - # Deploy Managed Instance Groups (MiGs) for Mainnet and Testnet, # with one node in the configured GCP region. # @@ -210,14 +109,14 @@ jobs: # otherwise a new major version is deployed in a new MiG. 
# # Runs: - # - on every push/merge to the `main` branch + # - on every push to the `main` branch # - on every release, when it's published deploy-nodes: strategy: matrix: network: [Mainnet, Testnet] name: Deploy ${{ matrix.network }} nodes - needs: [ build, test-configuration-file, versioning ] + needs: [ build, versioning ] runs-on: ubuntu-latest timeout-minutes: 60 permissions: @@ -319,7 +218,7 @@ jobs: # Note: this instances are not automatically replaced or deleted deploy-instance: name: Deploy single ${{ inputs.network }} instance - needs: [ build, test-configuration-file ] + needs: [ build ] runs-on: ubuntu-latest timeout-minutes: 30 permissions: diff --git a/.github/workflows/cd-deploy-tests-gcp.yml b/.github/workflows/cd-deploy-tests-gcp.yml new file mode 100644 index 00000000000..791685a66f1 --- /dev/null +++ b/.github/workflows/cd-deploy-tests-gcp.yml @@ -0,0 +1,966 @@ +name: Deploy Tests to GCP + +on: + workflow_call: + inputs: + # Status and logging + test_id: + required: true + type: string + description: 'Unique identifier for the test' + test_description: + required: true + type: string + description: 'Explains what the test does' + height_grep_text: + required: false + type: string + description: 'Regular expression to find the tip height in test logs, and add it to newly created cached state image metadata' + + # Test selection and parameters + test_variables: + required: true + type: string + description: 'Environmental variables used to select and configure the test' + network: + required: false + type: string + default: Mainnet + description: 'Zcash network to test against' + is_long_test: + required: false + type: boolean + default: false + description: 'Does this test need multiple run jobs? 
(Does it run longer than 6 hours?)' + + # Cached state + # + # TODO: find a better name + root_state_path: + required: false + type: string + default: '/zebrad-cache' + description: 'Cached state base directory path' + # TODO: find a better name + zebra_state_dir: + required: false + type: string + default: '' + description: 'Zebra cached state directory and input image prefix to search in GCP' + # TODO: find a better name + lwd_state_dir: + required: false + type: string + default: '' + description: 'Lightwalletd cached state directory and input image prefix to search in GCP' + disk_prefix: + required: false + type: string + default: 'zebrad-cache' + description: 'Image name prefix, and `zebra_state_dir` name for newly created cached states' + disk_suffix: + required: false + type: string + description: 'Image name suffix' + needs_zebra_state: + required: true + type: boolean + description: 'Does the test use Zebra cached state?' + needs_lwd_state: + required: false + type: boolean + description: 'Does the test use Lightwalletd and Zebra cached state?' + # main branch states can be outdated and slower, but they can also be more reliable + prefer_main_cached_state: + required: false + type: boolean + default: false + description: 'Does the test prefer to use a main branch cached state?' + saves_to_disk: + required: true + type: boolean + description: 'Can this test create new or updated cached state disks?' + force_save_to_disk: + required: false + type: boolean + default: false + description: 'Force this test to create a new or updated cached state disk' + app_name: + required: false + type: string + default: 'zebra' + description: 'Application name, used to work out when a job is an update job' + +env: + # How many previous log lines we show at the start of each new log job. + # Increase this number if some log lines are skipped between jobs + # + # We want to show all the logs since the last job finished, + # but we don't know how long it will be between jobs. 
+ # 200 lines is about 6-15 minutes of sync logs, or one panic log. + EXTRA_LOG_LINES: 200 + # How many blocks to wait before creating an updated cached state image. + # 1 day is approximately 1152 blocks. + CACHED_STATE_UPDATE_LIMIT: 576 + +jobs: + # set up and launch the test, if it doesn't use any cached state + # each test runs one of the *-with/without-cached-state job series, and skips the other + launch-without-cached-state: + name: Launch ${{ inputs.test_id }} test + if: ${{ !inputs.needs_zebra_state }} + runs-on: zfnd-runners + permissions: + contents: 'read' + id-token: 'write' + steps: + - uses: actions/checkout@v4.0.0 + with: + persist-credentials: false + fetch-depth: '2' + - uses: r7kamura/rust-problem-matchers@v1.4.0 + + - name: Inject slug/short variables + uses: rlespinasse/github-slug-action@v4 + with: + short-length: 7 + + # Makes the Zcash network name lowercase. + # + # Labels in GCP are required to be in lowercase, but the blockchain network + # uses sentence case, so we need to downcase ${{ inputs.network }}. + # + # Passes ${{ inputs.network }} to subsequent steps using $NETWORK env variable. 
+ - name: Downcase network name for labels + run: | + NETWORK_CAPS="${{ inputs.network }}" + echo "NETWORK=${NETWORK_CAPS,,}" >> "$GITHUB_ENV" + + # Install our SSH secret + - name: Install private SSH key + uses: shimataro/ssh-key-action@v2.5.1 + with: + key: ${{ secrets.GCP_SSH_PRIVATE_KEY }} + name: google_compute_engine + known_hosts: unnecessary + + - name: Generate public SSH key + run: | + sudo apt-get update && sudo apt-get -qq install -y --no-install-recommends openssh-client + ssh-keygen -y -f ~/.ssh/google_compute_engine > ~/.ssh/google_compute_engine.pub + + # Setup gcloud CLI + - name: Authenticate to Google Cloud + id: auth + uses: google-github-actions/auth@v1.1.1 + with: + retries: '3' + workload_identity_provider: '${{ vars.GCP_WIF }}' + service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}' + + - name: Set up Cloud SDK + uses: google-github-actions/setup-gcloud@v1.1.1 + + # Create a Compute Engine virtual machine + - name: Create ${{ inputs.test_id }} GCP compute instance + id: create-instance + run: | + gcloud compute instances create-with-container "${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}" \ + --boot-disk-size 300GB \ + --boot-disk-type pd-ssd \ + --image-project=cos-cloud \ + --image-family=cos-stable \ + --create-disk=name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",device-name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",size=300GB,type=pd-ssd \ + --container-image=gcr.io/google-containers/busybox \ + --machine-type ${{ vars.GCP_LARGE_MACHINE }} \ + --network-interface=subnet=${{ vars.GCP_SUBNETWORK }} \ + --scopes cloud-platform \ + --metadata=google-monitoring-enabled=TRUE,google-logging-enabled=TRUE \ + --metadata-from-file=startup-script=.github/workflows/scripts/gcp-vm-startup-script.sh \ + --labels=app=${{ inputs.app_name }},environment=test,network=${NETWORK},github_ref=${{ env.GITHUB_REF_SLUG_URL }},test=${{ inputs.test_id }} \ + --tags ${{ inputs.app_name }} \ + --zone ${{ 
vars.GCP_ZONE }} + sleep 60 + + # Create a docker volume with the new disk we just created. + # + # SSH into the just created VM, and create a docker volume with the newly created disk. + - name: Create ${{ inputs.test_id }} Docker volume + run: | + gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ + --zone ${{ vars.GCP_ZONE }} \ + --ssh-flag="-o ServerAliveInterval=5" \ + --ssh-flag="-o ConnectionAttempts=20" \ + --ssh-flag="-o ConnectTimeout=5" \ + --command \ + "\ + sudo mkfs.ext4 -v /dev/sdb \ + && \ + sudo docker volume create --driver local --opt type=ext4 --opt device=/dev/sdb \ + ${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }} \ + " + + # Launch the test without any cached state + - name: Launch ${{ inputs.test_id }} test + run: | + gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ + --zone ${{ vars.GCP_ZONE }} \ + --ssh-flag="-o ServerAliveInterval=5" \ + --ssh-flag="-o ConnectionAttempts=20" \ + --ssh-flag="-o ConnectTimeout=5" \ + --command \ + "\ + sudo docker run \ + --name ${{ inputs.test_id }} \ + --tty \ + --detach \ + ${{ inputs.test_variables }} \ + --mount type=volume,src=${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }},dst=${{ inputs.root_state_path }}/${{ inputs.zebra_state_dir }} \ + ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} \ + " + + # set up and launch the test, if it uses cached state + # each test runs one of the *-with/without-cached-state job series, and skips the other + launch-with-cached-state: + name: Launch ${{ inputs.test_id }} test + if: ${{ inputs.needs_zebra_state }} + runs-on: zfnd-runners + outputs: + cached_disk_name: ${{ steps.get-disk-name.outputs.cached_disk_name }} + permissions: + contents: 'read' + id-token: 'write' + steps: + - uses: actions/checkout@v4.0.0 + with: + persist-credentials: false + fetch-depth: '2' + - uses: r7kamura/rust-problem-matchers@v1.4.0 + + - name: Inject 
slug/short variables + uses: rlespinasse/github-slug-action@v4 + with: + short-length: 7 + + - name: Downcase network name for disks and labels + run: | + NETWORK_CAPS="${{ inputs.network }}" + echo "NETWORK=${NETWORK_CAPS,,}" >> "$GITHUB_ENV" + + # Install our SSH secret + - name: Install private SSH key + uses: shimataro/ssh-key-action@v2.5.1 + with: + key: ${{ secrets.GCP_SSH_PRIVATE_KEY }} + name: google_compute_engine + known_hosts: unnecessary + + - name: Generate public SSH key + run: | + sudo apt-get update && sudo apt-get -qq install -y --no-install-recommends openssh-client + ssh-keygen -y -f ~/.ssh/google_compute_engine > ~/.ssh/google_compute_engine.pub + + # Setup gcloud CLI + - name: Authenticate to Google Cloud + id: auth + uses: google-github-actions/auth@v1.1.1 + with: + retries: '3' + workload_identity_provider: '${{ vars.GCP_WIF }}' + service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}' + + - name: Set up Cloud SDK + uses: google-github-actions/setup-gcloud@v1.1.1 + + # Find a cached state disk for this job, matching all of: + # - disk cached state (lwd_state_dir/zebra_state_dir or disk_prefix) - zebrad-cache or lwd-cache + # - state version (from the source code) - v{N} + # - network (network) - mainnet or testnet + # - disk target height kind (disk_suffix) - checkpoint or tip + # + # If the test needs a lightwalletd state (needs_lwd_state) set the variable DISK_PREFIX accordingly + # - To ${{ inputs.lwd_state_dir }}" if needed + # - To ${{ inputs.zebra_state_dir || inputs.disk_prefix }} if not + # + # If there are multiple disks: + # - prefer images generated from the same commit, then + # - if prefer_main_cached_state is true, prefer images from the `main` branch, then + # - use any images from any other branch or commit. 
+ # Within each of these categories: + # - prefer newer images to older images + # + # Passes the disk name to subsequent steps using $CACHED_DISK_NAME env variable + # Passes the state version to subsequent steps using $STATE_VERSION env variable + # + # TODO: move this script into a file, and call it from manual-find-cached-disks.yml as well. + - name: Find ${{ inputs.test_id }} cached state disk + id: get-disk-name + run: | + LOCAL_STATE_VERSION=$(grep -oE "DATABASE_FORMAT_VERSION: .* [0-9]+" "$GITHUB_WORKSPACE/zebra-state/src/constants.rs" | grep -oE "[0-9]+" | tail -n1) + echo "STATE_VERSION: $LOCAL_STATE_VERSION" + + if [[ "${{ inputs.needs_lwd_state }}" == "true" ]]; then + DISK_PREFIX=${{ inputs.lwd_state_dir }} + else + DISK_PREFIX=${{ inputs.zebra_state_dir || inputs.disk_prefix }} + fi + + # Try to find an image generated from a previous step or run of this commit. + # Fields are listed in the "Create image from state disk" step. + # + # We don't want to match the full branch name here, because: + # - we want to ignore the different GITHUB_REFs across manually triggered jobs, + # pushed branches, and PRs, + # - previous commits might have been buggy, + # or they might have worked and hide bugs in this commit + # (we can't avoid this issue entirely, but we don't want to make it more likely), and + # - the branch name might have been shortened for the image. + # + # The probability of two matching short commit hashes within the same month is very low. 
+ COMMIT_DISK_PREFIX="${DISK_PREFIX}-.+-${{ env.GITHUB_SHA_SHORT }}-v${LOCAL_STATE_VERSION}-${NETWORK}-${{ inputs.disk_suffix }}" + COMMIT_CACHED_DISK_NAME=$(gcloud compute images list --filter="status=READY AND name~${COMMIT_DISK_PREFIX}" --format="value(NAME)" --sort-by=~creationTimestamp --limit=1) + echo "${GITHUB_REF_SLUG_URL}-${{ env.GITHUB_SHA_SHORT }} Disk: $COMMIT_CACHED_DISK_NAME" + if [[ -n "$COMMIT_CACHED_DISK_NAME" ]]; then + echo "Description: $(gcloud compute images describe $COMMIT_CACHED_DISK_NAME --format='value(DESCRIPTION)')" + fi + + # Try to find an image generated from the main branch + MAIN_CACHED_DISK_NAME=$(gcloud compute images list --filter="status=READY AND name~${DISK_PREFIX}-main-[0-9a-f]+-v${LOCAL_STATE_VERSION}-${NETWORK}-${{ inputs.disk_suffix }}" --format="value(NAME)" --sort-by=~creationTimestamp --limit=1) + echo "main Disk: $MAIN_CACHED_DISK_NAME" + if [[ -n "$MAIN_CACHED_DISK_NAME" ]]; then + echo "Description: $(gcloud compute images describe $MAIN_CACHED_DISK_NAME --format='value(DESCRIPTION)')" + fi + + # Try to find an image generated from any other branch + ANY_CACHED_DISK_NAME=$(gcloud compute images list --filter="status=READY AND name~${DISK_PREFIX}-.+-[0-9a-f]+-v${LOCAL_STATE_VERSION}-${NETWORK}-${{ inputs.disk_suffix }}" --format="value(NAME)" --sort-by=~creationTimestamp --limit=1) + echo "any branch Disk: $ANY_CACHED_DISK_NAME" + if [[ -n "$ANY_CACHED_DISK_NAME" ]]; then + echo "Description: $(gcloud compute images describe $ANY_CACHED_DISK_NAME --format='value(DESCRIPTION)')" + fi + + # Select a cached disk based on the job settings + CACHED_DISK_NAME="$COMMIT_CACHED_DISK_NAME" + if [[ -z "$CACHED_DISK_NAME" ]] && [[ "${{ inputs.prefer_main_cached_state }}" == "true" ]]; then + echo "Preferring main branch cached state to other branches..." 
+ CACHED_DISK_NAME="$MAIN_CACHED_DISK_NAME" + fi + if [[ -z "$CACHED_DISK_NAME" ]]; then + CACHED_DISK_NAME="$ANY_CACHED_DISK_NAME" + fi + + if [[ -z "$CACHED_DISK_NAME" ]]; then + echo "No cached state disk available" + echo "Expected ${COMMIT_DISK_PREFIX}" + echo "Also searched for cached disks from other branches" + echo "Cached state test jobs must depend on the cached state rebuild job" + exit 1 + fi + + echo "Selected Disk: $CACHED_DISK_NAME" + echo "cached_disk_name=$CACHED_DISK_NAME" >> "$GITHUB_OUTPUT" + + echo "STATE_VERSION=$LOCAL_STATE_VERSION" >> "$GITHUB_ENV" + echo "CACHED_DISK_NAME=$CACHED_DISK_NAME" >> "$GITHUB_ENV" + + # Create a Compute Engine virtual machine and attach a cached state disk using the + # $CACHED_DISK_NAME variable as the source image to populate the disk cached state + - name: Create ${{ inputs.test_id }} GCP compute instance + id: create-instance + run: | + gcloud compute instances create-with-container "${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}" \ + --boot-disk-size 300GB \ + --boot-disk-type pd-ssd \ + --image-project=cos-cloud \ + --image-family=cos-stable \ + --create-disk=image=${{ env.CACHED_DISK_NAME }},name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",device-name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",size=300GB,type=pd-ssd \ + --container-image=gcr.io/google-containers/busybox \ + --machine-type ${{ vars.GCP_LARGE_MACHINE }} \ + --network-interface=subnet=${{ vars.GCP_SUBNETWORK }} \ + --scopes cloud-platform \ + --metadata=google-monitoring-enabled=TRUE,google-logging-enabled=TRUE \ + --metadata-from-file=startup-script=.github/workflows/scripts/gcp-vm-startup-script.sh \ + --labels=app=${{ inputs.app_name }},environment=test,network=${NETWORK},github_ref=${{ env.GITHUB_REF_SLUG_URL }},test=${{ inputs.test_id }} \ + --tags ${{ inputs.app_name }} \ + --zone ${{ vars.GCP_ZONE }} + sleep 60 + + # Create a docker volume with the selected cached state. 
+ # + # SSH into the just created VM and create a docker volume with the recently attached disk. + # (The cached state and disk are usually the same size, + # but the cached state can be smaller if we just increased the disk size.) + - name: Create ${{ inputs.test_id }} Docker volume + run: | + gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ + --zone ${{ vars.GCP_ZONE }} \ + --ssh-flag="-o ServerAliveInterval=5" \ + --ssh-flag="-o ConnectionAttempts=20" \ + --ssh-flag="-o ConnectTimeout=5" \ + --command \ + "\ + sudo docker volume create --driver local --opt type=ext4 --opt device=/dev/sdb \ + ${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }} \ + " + + # Launch the test with the previously created Zebra-only cached state. + # Each test runs one of the "Launch test" steps, and skips the other. + # + # SSH into the just created VM, and create a Docker container to run the incoming test + # from ${{ inputs.test_id }}, then mount the sudo docker volume created in the previous job. + # + # The disk mounted in the VM is located at /dev/sdb, we mount the root `/` of this disk to the docker + # container in one path: + # - /var/cache/zebrad-cache -> ${{ inputs.root_state_path }}/${{ inputs.zebra_state_dir }} -> $ZEBRA_CACHED_STATE_DIR + # + # This path must match the variable used by the tests in Rust, which are also set in + # `ci-unit-tests-docker.yml` to be able to run this tests. + # + # Although we're mounting the disk root, Zebra will only respect the values from + # $ZEBRA_CACHED_STATE_DIR. The inputs like ${{ inputs.zebra_state_dir }} are only used + # to match that variable paths. + - name: Launch ${{ inputs.test_id }} test + # This step only runs for tests that just read or write a Zebra state. + # + # lightwalletd-full-sync reads Zebra and writes lwd, so it is handled specially. 
+ # TODO: we should find a better logic for this use cases + if: ${{ (inputs.needs_zebra_state && !inputs.needs_lwd_state) && inputs.test_id != 'lwd-full-sync' }} + run: | + gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ + --zone ${{ vars.GCP_ZONE }} \ + --ssh-flag="-o ServerAliveInterval=5" \ + --ssh-flag="-o ConnectionAttempts=20" \ + --ssh-flag="-o ConnectTimeout=5" \ + --command \ + "\ + sudo docker run \ + --name ${{ inputs.test_id }} \ + --tty \ + --detach \ + ${{ inputs.test_variables }} \ + --mount type=volume,src=${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }},dst=${{ inputs.root_state_path }}/${{ inputs.zebra_state_dir }} \ + ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} \ + " + + # Launch the test with the previously created Lightwalletd and Zebra cached state. + # Each test runs one of the "Launch test" steps, and skips the other. + # + # SSH into the just created VM, and create a Docker container to run the incoming test + # from ${{ inputs.test_id }}, then mount the sudo docker volume created in the previous job. + # + # In this step we're using the same disk for simplicity, as mounting multiple disks to the + # VM and to the container might require more steps in this workflow, and additional + # considerations. + # + # The disk mounted in the VM is located at /dev/sdb, we mount the root `/` of this disk to the docker + # container in two different paths: + # - /var/cache/zebrad-cache -> ${{ inputs.root_state_path }}/${{ inputs.zebra_state_dir }} -> $ZEBRA_CACHED_STATE_DIR + # - /var/cache/lwd-cache -> ${{ inputs.root_state_path }}/${{ inputs.lwd_state_dir }} -> $LIGHTWALLETD_DATA_DIR + # + # This doesn't cause any path conflicts, because Zebra and lightwalletd create different + # subdirectories for their data. (But Zebra, lightwalletd, and the test harness must not + # delete the whole cache directory.) 
+ # + # These paths must match the variables used by the tests in Rust, which are also set in + # `ci-unit-tests-docker.yml` to be able to run these tests. + # + # Although we're mounting the disk root to both directories, Zebra and Lightwalletd + # will only respect the values from $ZEBRA_CACHED_STATE_DIR and $LIGHTWALLETD_DATA_DIR, + # the inputs like ${{ inputs.lwd_state_dir }} are only used to match those variable paths. + - name: Launch ${{ inputs.test_id }} test + # This step only runs for tests that read or write Lightwalletd and Zebra states. + # + # lightwalletd-full-sync reads Zebra and writes lwd, so it is handled specially. + # TODO: we should find better logic for these use cases + if: ${{ (inputs.needs_zebra_state && inputs.needs_lwd_state) || inputs.test_id == 'lwd-full-sync' }} + run: | + gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ + --zone ${{ vars.GCP_ZONE }} \ + --ssh-flag="-o ServerAliveInterval=5" \ + --ssh-flag="-o ConnectionAttempts=20" \ + --ssh-flag="-o ConnectTimeout=5" \ + --command \ + "\ + sudo docker run \ + --name ${{ inputs.test_id }} \ + --tty \ + --detach \ + ${{ inputs.test_variables }} \ + --mount type=volume,src=${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }},dst=${{ inputs.root_state_path }}/${{ inputs.zebra_state_dir }} \ + --mount type=volume,src=${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }},dst=${{ inputs.root_state_path }}/${{ inputs.lwd_state_dir }} \ + ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} \ + " + + # Show all the test logs, then follow the logs of the test we just launched, until it finishes. + # Then check the result of the test. + # + # If `inputs.is_long_test` is `true`, the timeout is 5 days, otherwise it's 3 hours. + test-result: + name: Run ${{ inputs.test_id }} test + # We run exactly one of without-cached-state or with-cached-state, and we always skip the other one. 
+ needs: [ launch-with-cached-state, launch-without-cached-state ] + # If the previous job fails, we also want to run and fail this job, + # so that the branch protection rule fails in Mergify and GitHub. + if: ${{ !cancelled() }} + timeout-minutes: ${{ inputs.is_long_test && 7200 || 180 }} + runs-on: zfnd-runners + permissions: + contents: 'read' + id-token: 'write' + steps: + - uses: actions/checkout@v4.0.0 + with: + persist-credentials: false + fetch-depth: '2' + + - name: Inject slug/short variables + uses: rlespinasse/github-slug-action@v4 + with: + short-length: 7 + + # Install our SSH secret + - name: Install private SSH key + uses: shimataro/ssh-key-action@v2.5.1 + with: + key: ${{ secrets.GCP_SSH_PRIVATE_KEY }} + name: google_compute_engine + known_hosts: unnecessary + + - name: Generate public SSH key + run: | + sudo apt-get update && sudo apt-get -qq install -y --no-install-recommends openssh-client + ssh-keygen -y -f ~/.ssh/google_compute_engine > ~/.ssh/google_compute_engine.pub + + # Setup gcloud CLI + - name: Authenticate to Google Cloud + id: auth + uses: google-github-actions/auth@v1.1.1 + with: + retries: '3' + workload_identity_provider: '${{ vars.GCP_WIF }}' + service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}' + + - name: Set up Cloud SDK + uses: google-github-actions/setup-gcloud@v1.1.1 + + # Show all the logs since the container launched, + # following until we see zebrad startup messages. + # + # This check limits the number of log lines, so tests running on the wrong network don't + # run until the job timeout. If Zebra does a complete recompile, there are a few hundred log + # lines before the startup logs. So that's what we use here. + # + # The log pipeline ignores the exit status of `docker logs`. + # It also ignores the expected 'broken pipe' error from `tee`, + # which happens when `grep` finds a matching output and moves on to the next job. + # + # Errors in the tests are caught by the final test status job. 
+ - name: Check startup logs for ${{ inputs.test_id }} + run: | + gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ + --zone ${{ vars.GCP_ZONE }} \ + --ssh-flag="-o ServerAliveInterval=5" \ + --ssh-flag="-o ConnectionAttempts=20" \ + --ssh-flag="-o ConnectTimeout=5" \ + --command \ + "\ + sudo docker logs \ + --tail all \ + --follow \ + ${{ inputs.test_id }} | \ + head -700 | \ + tee --output-error=exit /dev/stderr | \ + grep --max-count=1 --extended-regexp --color=always \ + -e 'Zcash network: ${{ inputs.network }}' \ + " + + # Check that the container executed at least 1 Rust test harness test, and that all tests passed. + # Then wait for the container to finish, and exit with the test's exit status. + # Also shows all the test logs. + # + # If the container has already finished, `docker wait` should return its status. + # But sometimes this doesn't work, so we use `docker inspect` as a fallback. + # + # `docker wait` prints the container exit status as a string, but we need to exit the `ssh` command + # with that status. + # (`docker wait` can also wait for multiple containers, but we only ever wait for a single container.) 
+      - name: Result of ${{ inputs.test_id }} test
+        run: |
+          gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \
+          --zone ${{ vars.GCP_ZONE }} \
+          --ssh-flag="-o ServerAliveInterval=5" \
+          --ssh-flag="-o ConnectionAttempts=20" \
+          --ssh-flag="-o ConnectTimeout=5" \
+          --command=' \
+            set -e;
+            set -o pipefail;
+            trap "" PIPE;
+
+            sudo docker logs \
+            --tail all \
+            --follow \
+            ${{ inputs.test_id }} | \
+            tee --output-error=exit /dev/stderr | \
+            grep --max-count=1 --extended-regexp --color=always \
+            "test result: .*ok.* [1-9][0-9]* passed.*finished in"; \
+
+            EXIT_STATUS=$( \
+            sudo docker wait ${{ inputs.test_id }} || \
+            sudo docker inspect --format "{{.State.ExitCode}}" ${{ inputs.test_id }} || \
+            echo "missing container, or missing exit status for container" \
+            ); \
+
+            echo "sudo docker exit status: $EXIT_STATUS"; \
+            exit "$EXIT_STATUS" \
+            '
+
+  # create a state image from the instance's state disk, if requested by the caller
+  create-state-image:
+    name: Create ${{ inputs.test_id }} cached state image
+    runs-on: ubuntu-latest
+    needs: [ test-result, launch-with-cached-state ]
+    # We run exactly one of without-cached-state or with-cached-state, and we always skip the other one.
+    # Normally, if a job is skipped, all the jobs that depend on it are also skipped.
+    # So we need to override the default success() check to make this job run.
+    if: ${{ !cancelled() && !failure() && (inputs.saves_to_disk || inputs.force_save_to_disk) }}
+    permissions:
+      contents: 'read'
+      id-token: 'write'
+    steps:
+      - uses: actions/checkout@v4.0.0
+        with:
+          persist-credentials: false
+          fetch-depth: '2'
+      - uses: r7kamura/rust-problem-matchers@v1.4.0
+
+      - name: Inject slug/short variables
+        uses: rlespinasse/github-slug-action@v4
+        with:
+          short-length: 7
+
+      # Performs formatting on disk name components.
+ # + # Disk images in GCP are required to be in lowercase, but the blockchain network + # uses sentence case, so we need to downcase ${{ inputs.network }}. + # + # Disk image names in GCP are limited to 63 characters, so we need to limit + # branch names to 12 characters. + # + # Passes ${{ inputs.network }} to subsequent steps using $NETWORK env variable. + # Passes ${{ env.GITHUB_REF_SLUG_URL }} to subsequent steps using $SHORT_GITHUB_REF env variable. + - name: Format network name and branch name for disks + run: | + NETWORK_CAPS="${{ inputs.network }}" + echo "NETWORK=${NETWORK_CAPS,,}" >> "$GITHUB_ENV" + LONG_GITHUB_REF="${{ env.GITHUB_REF_SLUG_URL }}" + echo "SHORT_GITHUB_REF=${LONG_GITHUB_REF:0:12}" >> "$GITHUB_ENV" + + # Install our SSH secret + - name: Install private SSH key + uses: shimataro/ssh-key-action@v2.5.1 + with: + key: ${{ secrets.GCP_SSH_PRIVATE_KEY }} + name: google_compute_engine + known_hosts: unnecessary + + - name: Generate public SSH key + run: | + sudo apt-get update && sudo apt-get -qq install -y --no-install-recommends openssh-client + ssh-keygen -y -f ~/.ssh/google_compute_engine > ~/.ssh/google_compute_engine.pub + + # Setup gcloud CLI + - name: Authenticate to Google Cloud + id: auth + uses: google-github-actions/auth@v1.1.1 + with: + workload_identity_provider: '${{ vars.GCP_WIF }}' + service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}' + + - name: Set up Cloud SDK + uses: google-github-actions/setup-gcloud@v1.1.1 + + # Get the state version from the local constants.rs file to be used in the image creation, + # as the state version is part of the disk image name. 
+ # + # Passes the state version to subsequent steps using $STATE_VERSION env variable + - name: Get state version from constants.rs + run: | + LOCAL_STATE_VERSION=$(grep -oE "DATABASE_FORMAT_VERSION: .* [0-9]+" $GITHUB_WORKSPACE/zebra-state/src/constants.rs | grep -oE "[0-9]+" | tail -n1) + echo "STATE_VERSION: $LOCAL_STATE_VERSION" + + echo "STATE_VERSION=$LOCAL_STATE_VERSION" >> "$GITHUB_ENV" + + # Sets the $UPDATE_SUFFIX env var to "-u" if updating a previous cached state, + # and the empty string otherwise. + # + # Also sets a unique date and time suffix $TIME_SUFFIX. + - name: Set update and time suffixes + run: | + UPDATE_SUFFIX="" + + if [[ "${{ inputs.needs_zebra_state }}" == "true" ]] && [[ "${{ inputs.app_name }}" == "zebrad" ]]; then + UPDATE_SUFFIX="-u" + fi + + # TODO: find a better logic for the lwd-full-sync case + if [[ "${{ inputs.needs_lwd_state }}" == "true" ]] && [[ "${{ inputs.app_name }}" == "lightwalletd" ]] && [[ "${{ inputs.test_id }}" != 'lwd-full-sync' ]]; then + UPDATE_SUFFIX="-u" + fi + + # We're going to delete old images after a few days, so we only need the time here + TIME_SUFFIX=$(date '+%H%M%S' --utc) + + echo "UPDATE_SUFFIX=$UPDATE_SUFFIX" >> "$GITHUB_ENV" + echo "TIME_SUFFIX=$TIME_SUFFIX" >> "$GITHUB_ENV" + + # Get the full initial and running database versions from the test logs. + # These versions are used as part of the disk description and labels. + # + # If these versions are missing from the logs, the job fails. + # + # Typically, the database versions are around line 20 in the logs.. + # But we check the first 1000 log lines, just in case the test harness recompiles all the + # dependencies before running the test. (This can happen if the cache is invalid.) + # + # Passes the versions to subsequent steps using the $INITIAL_DISK_DB_VERSION, + # $RUNNING_DB_VERSION, and $DB_VERSION_SUMMARY env variables. 
+ - name: Get database versions from logs + run: | + INITIAL_DISK_DB_VERSION="" + RUNNING_DB_VERSION="" + DB_VERSION_SUMMARY="" + + DOCKER_LOGS=$( \ + gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ + --zone ${{ vars.GCP_ZONE }} \ + --ssh-flag="-o ServerAliveInterval=5" \ + --ssh-flag="-o ConnectionAttempts=20" \ + --ssh-flag="-o ConnectTimeout=5" \ + --command=" \ + sudo docker logs ${{ inputs.test_id }} | head -1000 \ + ") + + # either a semantic version or "creating new database" + INITIAL_DISK_DB_VERSION=$( \ + echo "$DOCKER_LOGS" | \ + grep --extended-regexp --only-matching 'initial disk state version: [0-9a-z\.]+' | \ + grep --extended-regexp --only-matching '[0-9a-z\.]+' | \ + tail -1 || \ + [[ $? == 1 ]] \ + ) + + if [[ -z "$INITIAL_DISK_DB_VERSION" ]]; then + echo "Checked logs:" + echo "" + echo "$DOCKER_LOGS" + echo "" + echo "Missing initial disk database version in logs: $INITIAL_DISK_DB_VERSION" + # Fail the tests, because Zebra didn't log the initial disk database version, + # or the regex in this step is wrong. + false + fi + + if [[ "$INITIAL_DISK_DB_VERSION" = "creating.new.database" ]]; then + INITIAL_DISK_DB_VERSION="new" + else + INITIAL_DISK_DB_VERSION="v${INITIAL_DISK_DB_VERSION//./-}" + fi + + echo "Found initial disk database version in logs: $INITIAL_DISK_DB_VERSION" + echo "INITIAL_DISK_DB_VERSION=$INITIAL_DISK_DB_VERSION" >> "$GITHUB_ENV" + + RUNNING_DB_VERSION=$( \ + echo "$DOCKER_LOGS" | \ + grep --extended-regexp --only-matching 'running state version: [0-9\.]+' | \ + grep --extended-regexp --only-matching '[0-9\.]+' | \ + tail -1 || \ + [[ $? == 1 ]] \ + ) + + if [[ -z "$RUNNING_DB_VERSION" ]]; then + echo "Checked logs:" + echo "" + echo "$DOCKER_LOGS" + echo "" + echo "Missing running database version in logs: $RUNNING_DB_VERSION" + # Fail the tests, because Zebra didn't log the running database version, + # or the regex in this step is wrong. 
+ false + fi + + RUNNING_DB_VERSION="v${RUNNING_DB_VERSION//./-}" + echo "Found running database version in logs: $RUNNING_DB_VERSION" + echo "RUNNING_DB_VERSION=$RUNNING_DB_VERSION" >> "$GITHUB_ENV" + + if [[ "$INITIAL_DISK_DB_VERSION" = "$RUNNING_DB_VERSION" ]]; then + DB_VERSION_SUMMARY="$RUNNING_DB_VERSION" + elif [[ "$INITIAL_DISK_DB_VERSION" = "new" ]]; then + DB_VERSION_SUMMARY="$RUNNING_DB_VERSION in new database" + else + DB_VERSION_SUMMARY="$INITIAL_DISK_DB_VERSION changing to $RUNNING_DB_VERSION" + fi + + echo "Summarised database versions from logs: $DB_VERSION_SUMMARY" + echo "DB_VERSION_SUMMARY=$DB_VERSION_SUMMARY" >> "$GITHUB_ENV" + + # Get the sync height from the test logs, which is later used as part of the + # disk description and labels. + # + # The regex used to grep the sync height is provided by ${{ inputs.height_grep_text }}, + # this allows to dynamically change the height as needed by different situations or + # based on the logs output from different tests. + # + # If the sync height is missing from the logs, the job fails. + # + # Passes the sync height to subsequent steps using the $SYNC_HEIGHT env variable. + - name: Get sync height from logs + run: | + SYNC_HEIGHT="" + + DOCKER_LOGS=$( \ + gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ + --zone ${{ vars.GCP_ZONE }} \ + --ssh-flag="-o ServerAliveInterval=5" \ + --ssh-flag="-o ConnectionAttempts=20" \ + --ssh-flag="-o ConnectTimeout=5" \ + --command=" \ + sudo docker logs ${{ inputs.test_id }} --tail 200 \ + ") + + SYNC_HEIGHT=$( \ + echo "$DOCKER_LOGS" | \ + grep --extended-regexp --only-matching '${{ inputs.height_grep_text }}[0-9]+' | \ + grep --extended-regexp --only-matching '[0-9]+' | \ + tail -1 || \ + [[ $? 
== 1 ]] \ + ) + + if [[ -z "$SYNC_HEIGHT" ]]; then + echo "Checked logs:" + echo "" + echo "$DOCKER_LOGS" + echo "" + echo "Missing sync height in logs: $SYNC_HEIGHT" + # Fail the tests, because Zebra and lightwalletd didn't log their sync heights, + # or the CI workflow sync height regex is wrong. + false + fi + + echo "Found sync height in logs: $SYNC_HEIGHT" + echo "SYNC_HEIGHT=$SYNC_HEIGHT" >> "$GITHUB_ENV" + + # Get the original cached state height from google cloud. + # + # If the height is missing from the image labels, uses zero instead. + # + # TODO: fail the job if needs_zebra_state but the height is missing + # we can make this change after all the old images have been deleted, this should happen around 15 September 2022 + # we'll also need to do a manual checkpoint rebuild before opening the PR for this change + # + # Passes the original height to subsequent steps using $ORIGINAL_HEIGHT env variable. + - name: Get original cached state height from google cloud + run: | + ORIGINAL_HEIGHT="0" + ORIGINAL_DISK_NAME="${{ format('{0}', needs.launch-with-cached-state.outputs.cached_disk_name) }}" + + if [[ -n "$ORIGINAL_DISK_NAME" ]]; then + ORIGINAL_HEIGHT=$(gcloud compute images list --filter="status=READY AND name=$ORIGINAL_DISK_NAME" --format="value(labels.height)") + ORIGINAL_HEIGHT=${ORIGINAL_HEIGHT:-0} + echo "$ORIGINAL_DISK_NAME height: $ORIGINAL_HEIGHT" + else + ORIGINAL_DISK_NAME="new-disk" + echo "newly created disk, original height set to 0" + fi + + echo "ORIGINAL_HEIGHT=$ORIGINAL_HEIGHT" >> "$GITHUB_ENV" + echo "ORIGINAL_DISK_NAME=$ORIGINAL_DISK_NAME" >> "$GITHUB_ENV" + + # Create an image from the state disk, which will be used for any tests that start + # after it is created. These tests can be in the same workflow, or in a different PR. + # + # Using the newest image makes future jobs faster, because it is closer to the chain tip. 
+      #
+      # Skips creating updated images if the original image is less than $CACHED_STATE_UPDATE_LIMIT behind the current tip.
+      # Full sync images are always created.
+      #
+      # The image can contain:
+      # - Zebra cached state, or
+      # - Zebra + lightwalletd cached state.
+      # Which cached state is being saved to the disk is defined by ${{ inputs.disk_prefix }}.
+      #
+      # Google Cloud doesn't have an atomic image replacement operation.
+      # We don't want to delete and re-create the image, because that causes a ~5 minute
+      # window where there might be no recent image. So we add an extra image with a unique name,
+      # which gets selected because it has a later creation time.
+      # This also simplifies the process of deleting old images,
+      # because we don't have to worry about accidentally deleting all the images.
+      #
+      # The timestamp makes images from the same commit unique,
+      # as long as they don't finish in the same second.
+      # (This is unlikely, because each image created by a workflow has a different name.)
+      #
+      # The image name must also be 63 characters or less.
+      #
+      # Force the image creation (--force) as the disk is still attached even though it is not being
+      # used by the container.
+ - name: Create image from state disk + run: | + MINIMUM_UPDATE_HEIGHT=$((ORIGINAL_HEIGHT+CACHED_STATE_UPDATE_LIMIT)) + if [[ -z "$UPDATE_SUFFIX" ]] || [[ "$SYNC_HEIGHT" -gt "$MINIMUM_UPDATE_HEIGHT" ]] || [[ "${{ inputs.force_save_to_disk }}" == "true" ]]; then + gcloud compute images create \ + "${{ inputs.disk_prefix }}-${SHORT_GITHUB_REF}-${{ env.GITHUB_SHA_SHORT }}-v${{ env.STATE_VERSION }}-${NETWORK}-${{ inputs.disk_suffix }}${UPDATE_SUFFIX}-${TIME_SUFFIX}" \ + --force \ + --source-disk=${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }} \ + --source-disk-zone=${{ vars.GCP_ZONE }} \ + --storage-location=us \ + --description="Created from commit ${{ env.GITHUB_SHA_SHORT }} with height ${{ env.SYNC_HEIGHT }} and database format ${{ env.DB_VERSION_SUMMARY }}" \ + --labels="height=${{ env.SYNC_HEIGHT }},purpose=${{ inputs.disk_prefix }},commit=${{ env.GITHUB_SHA_SHORT }},state-version=${{ env.STATE_VERSION }},state-running-version=${RUNNING_DB_VERSION},initial-state-disk-version=${INITIAL_DISK_DB_VERSION},network=${NETWORK},target-height-kind=${{ inputs.disk_suffix }},update-flag=${UPDATE_SUFFIX},force-save=${{ inputs.force_save_to_disk }},updated-from-height=${ORIGINAL_HEIGHT},updated-from-disk=${ORIGINAL_DISK_NAME},test-id=${{ inputs.test_id }},app-name=${{ inputs.app_name }}" + else + echo "Skipped cached state update because the new sync height $SYNC_HEIGHT was less than $CACHED_STATE_UPDATE_LIMIT blocks above the original height $ORIGINAL_HEIGHT of $ORIGINAL_DISK_NAME" + fi + + # delete the Google Cloud instance for this test + delete-instance: + name: Delete ${{ inputs.test_id }} instance + runs-on: ubuntu-latest + needs: [ create-state-image ] + # If a disk generation step timeouts (+6 hours) the previous job (creating the image) will be skipped. + # Even if the instance continues running, no image will be created, so it's better to delete it. 
+ if: always() + continue-on-error: true + permissions: + contents: 'read' + id-token: 'write' + steps: + - uses: actions/checkout@v4.0.0 + with: + persist-credentials: false + fetch-depth: '2' + - uses: r7kamura/rust-problem-matchers@v1.4.0 + + - name: Inject slug/short variables + uses: rlespinasse/github-slug-action@v4 + with: + short-length: 7 + + # Setup gcloud CLI + - name: Authenticate to Google Cloud + id: auth + uses: google-github-actions/auth@v1.1.1 + with: + workload_identity_provider: '${{ vars.GCP_WIF }}' + service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}' + + - name: Set up Cloud SDK + uses: google-github-actions/setup-gcloud@v1.1.1 + + # Deletes the instances that has been recently deployed in the actual commit after all + # previous jobs have run, no matter the outcome of the job. + - name: Delete test instance + continue-on-error: true + run: | + INSTANCE=$(gcloud compute instances list --filter=${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} --format='value(NAME)') + if [ -z "${INSTANCE}" ]; then + echo "No instance to delete" + else + gcloud compute instances delete "${INSTANCE}" --zone "${{ vars.GCP_ZONE }}" --delete-disks all --quiet + fi diff --git a/.github/workflows/delete-gcp-resources.yml b/.github/workflows/chore-delete-gcp-resources.yml similarity index 100% rename from .github/workflows/delete-gcp-resources.yml rename to .github/workflows/chore-delete-gcp-resources.yml diff --git a/.github/workflows/deploy-gcp-tests.yml b/.github/workflows/chore-deploy-gcp-tests.yml similarity index 99% rename from .github/workflows/deploy-gcp-tests.yml rename to .github/workflows/chore-deploy-gcp-tests.yml index b51e6d08bb0..78acc9ae27a 100644 --- a/.github/workflows/deploy-gcp-tests.yml +++ b/.github/workflows/chore-deploy-gcp-tests.yml @@ -292,7 +292,7 @@ jobs: # Passes the disk name to subsequent steps using $CACHED_DISK_NAME env variable # Passes the state version to subsequent steps using $STATE_VERSION env 
variable # - # TODO: move this script into a file, and call it from find-cached-disks.yml as well. + # TODO: move this script into a file, and call it from manual-find-cached-disks.yml as well. - name: Find ${{ inputs.test_id }} cached state disk id: get-disk-name run: | @@ -413,7 +413,7 @@ jobs: # - /var/cache/zebrad-cache -> ${{ inputs.root_state_path }}/${{ inputs.zebra_state_dir }} -> $ZEBRA_CACHED_STATE_DIR # # This path must match the variable used by the tests in Rust, which are also set in - # `continous-integration-docker.yml` to be able to run this tests. + # `ci-unit-tests-docker.yml` to be able to run this tests. # # Although we're mounting the disk root, Zebra will only respect the values from # $ZEBRA_CACHED_STATE_DIR. The inputs like ${{ inputs.zebra_state_dir }} are only used @@ -461,7 +461,7 @@ jobs: # delete the whole cache directory.) # # This paths must match the variables used by the tests in Rust, which are also set in - # `continous-integration-docker.yml` to be able to run this tests. + # `ci-unit-tests-docker.yml` to be able to run this tests. 
# # Although we're mounting the disk root to both directories, Zebra and Lightwalletd # will only respect the values from $ZEBRA_CACHED_STATE_DIR and $LIGHTWALLETD_DATA_DIR, diff --git a/.github/workflows/project-management.yml b/.github/workflows/chore-project-management.yml similarity index 100% rename from .github/workflows/project-management.yml rename to .github/workflows/chore-project-management.yml diff --git a/.github/workflows/build-crates-individually.patch.yml b/.github/workflows/ci-build-crates.patch.yml similarity index 97% rename from .github/workflows/build-crates-individually.patch.yml rename to .github/workflows/ci-build-crates.patch.yml index 6b5b028ed61..f00c2aca9f6 100644 --- a/.github/workflows/build-crates-individually.patch.yml +++ b/.github/workflows/ci-build-crates.patch.yml @@ -14,7 +14,7 @@ on: - '.cargo/config.toml' - '**/clippy.toml' # workflow definitions - - '.github/workflows/build-crates-individually.yml' + - '.github/workflows/ci-build-crates.yml' jobs: matrix: diff --git a/.github/workflows/build-crates-individually.yml b/.github/workflows/ci-build-crates.yml similarity index 97% rename from .github/workflows/build-crates-individually.yml rename to .github/workflows/ci-build-crates.yml index 505d9796921..d2c689ed2c6 100644 --- a/.github/workflows/build-crates-individually.yml +++ b/.github/workflows/ci-build-crates.yml @@ -22,7 +22,7 @@ on: - '.cargo/config.toml' - '**/clippy.toml' # workflow definitions - - '.github/workflows/build-crates-individually.yml' + - '.github/workflows/ci-build-crates.yml' pull_request: paths: # production code and test code @@ -34,7 +34,7 @@ on: - '.cargo/config.toml' - '**/clippy.toml' # workflow definitions - - '.github/workflows/build-crates-individually.yml' + - '.github/workflows/ci-build-crates.yml' env: CARGO_INCREMENTAL: ${{ vars.CARGO_INCREMENTAL }} diff --git a/.github/workflows/coverage.patch.yml b/.github/workflows/ci-coverage.patch.yml similarity index 93% rename from 
.github/workflows/coverage.patch.yml rename to .github/workflows/ci-coverage.patch.yml index 241f92e73e5..e2defe8ee7b 100644 --- a/.github/workflows/coverage.patch.yml +++ b/.github/workflows/ci-coverage.patch.yml @@ -12,7 +12,7 @@ on: - '.cargo/config.toml' - '**/clippy.toml' - 'codecov.yml' - - '.github/workflows/coverage.yml' + - '.github/workflows/ci-coverage.yml' jobs: coverage: diff --git a/.github/workflows/coverage.yml b/.github/workflows/ci-coverage.yml similarity index 97% rename from .github/workflows/coverage.yml rename to .github/workflows/ci-coverage.yml index df85f861af1..eecc3e8aff2 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/ci-coverage.yml @@ -27,7 +27,7 @@ on: - '**/clippy.toml' # workflow definitions - 'codecov.yml' - - '.github/workflows/coverage.yml' + - '.github/workflows/ci-coverage.yml' pull_request: paths: - '**/*.rs' @@ -38,7 +38,7 @@ on: - '.cargo/config.toml' - '**/clippy.toml' - 'codecov.yml' - - '.github/workflows/coverage.yml' + - '.github/workflows/ci-coverage.yml' env: CARGO_INCREMENTAL: ${{ vars.CARGO_INCREMENTAL }} diff --git a/.github/workflows/continous-integration-docker.patch.yml b/.github/workflows/ci-integration-tests-gcp.patch.yml similarity index 67% rename from .github/workflows/continous-integration-docker.patch.yml rename to .github/workflows/ci-integration-tests-gcp.patch.yml index 489d75b01b3..bd58dfc7e49 100644 --- a/.github/workflows/continous-integration-docker.patch.yml +++ b/.github/workflows/ci-integration-tests-gcp.patch.yml @@ -1,4 +1,4 @@ -name: CI Docker +name: Integration Tests on GCP # These jobs *don't* depend on cached Google Cloud state disks, # so they can be skipped when the modified files make the actual workflow run. 
@@ -20,10 +20,10 @@ on: # workflow definitions - 'docker/**' - '.dockerignore' - - '.github/workflows/continous-integration-docker.yml' - - '.github/workflows/deploy-gcp-tests.yml' - - '.github/workflows/find-cached-disks.yml' - - '.github/workflows/build-docker-image.yml' + - '.github/workflows/ci-unit-tests-docker.yml' + - '.github/workflows/cd-integration-tests-gcp.yml' + - '.github/workflows/manual-find-cached-disks.yml' + - '.github/workflows/sub-build-docker-image.yml' jobs: # We don't patch the testnet job, because testnet isn't required to merge (it's too unstable) @@ -39,48 +39,6 @@ jobs: steps: - run: 'echo "No build required"' - test-all: - name: Test all - runs-on: ubuntu-latest - steps: - - run: 'echo "No build required"' - - test-all-getblocktemplate-rpcs: - name: Test all with getblocktemplate-rpcs feature - runs-on: ubuntu-latest - steps: - - run: 'echo "No build required"' - - test-fake-activation-heights: - name: Test with fake activation heights - runs-on: ubuntu-latest - steps: - - run: 'echo "No build required"' - - test-empty-sync: - name: Test checkpoint sync from empty state - runs-on: ubuntu-latest - steps: - - run: 'echo "No build required"' - - test-lightwalletd-integration: - name: Test integration with lightwalletd - runs-on: ubuntu-latest - steps: - - run: 'echo "No build required"' - - test-configuration-file: - name: Test Zebra default Docker config file - runs-on: ubuntu-latest - steps: - - run: 'echo "No build required"' - - test-zebra-conf-path: - name: Test Zebra custom Docker config file - runs-on: ubuntu-latest - steps: - - run: 'echo "No build required"' - test-stateful-sync: name: Zebra checkpoint update / Run sync-past-checkpoint test runs-on: ubuntu-latest diff --git a/.github/workflows/continous-integration-docker.yml b/.github/workflows/ci-integration-tests-gcp.yml similarity index 71% rename from .github/workflows/continous-integration-docker.yml rename to .github/workflows/ci-integration-tests-gcp.yml index 
742c99b201f..4c2f38713e6 100644 --- a/.github/workflows/continous-integration-docker.yml +++ b/.github/workflows/ci-integration-tests-gcp.yml @@ -1,4 +1,4 @@ -name: CI Docker +name: Integration Tests on GCP # Ensures that only one workflow task will run at a time. Previous builds, if # already in process, will get cancelled. Only the latest commit will be allowed @@ -62,10 +62,10 @@ on: - '**/clippy.toml' # workflow definitions - 'docker/**' - - '.github/workflows/continous-integration-docker.yml' - - '.github/workflows/deploy-gcp-tests.yml' - - '.github/workflows/build-docker-image.yml' - - '.github/workflows/find-cached-disks.yml' + - '.github/workflows/ci-integration-tests-gcp.yml' + - '.github/workflows/cd-integration-tests-gcp.yml' + - '.github/workflows/sub-build-docker-image.yml' + - '.github/workflows/manual-find-cached-disks.yml' push: branches: @@ -86,10 +86,10 @@ on: # workflow definitions - 'docker/**' - '.dockerignore' - - '.github/workflows/continous-integration-docker.yml' - - '.github/workflows/deploy-gcp-tests.yml' - - '.github/workflows/find-cached-disks.yml' - - '.github/workflows/build-docker-image.yml' + - '.github/workflows/ci-integration-tests-gcp.yml' + - '.github/workflows/cd-integration-tests-gcp.yml' + - '.github/workflows/manual-find-cached-disks.yml' + - '.github/workflows/sub-build-docker-image.yml' jobs: # to also run a job on Mergify head branches, @@ -101,20 +101,20 @@ jobs: # The default network is mainnet unless a manually triggered workflow or repository variable # is configured differently. 
# - # The outputs for this job have the same names as the workflow outputs in find-cached-disks.yml + # The outputs for this job have the same names as the workflow outputs in manual-find-cached-disks.yml get-available-disks: name: Check if cached state disks exist for ${{ inputs.network || vars.ZCASH_NETWORK }} - uses: ./.github/workflows/find-cached-disks.yml + uses: ./.github/workflows/manual-find-cached-disks.yml with: network: ${{ inputs.network || vars.ZCASH_NETWORK }} # Check if the cached state disks used by the tests are available for testnet. # - # The outputs for this job have the same names as the workflow outputs in find-cached-disks.yml + # The outputs for this job have the same names as the workflow outputs in manual-find-cached-disks.yml # Some outputs are ignored, because we don't run those jobs on testnet. get-available-disks-testnet: name: Check if cached state disks exist for testnet - uses: ./.github/workflows/find-cached-disks.yml + uses: ./.github/workflows/manual-find-cached-disks.yml with: network: 'Testnet' @@ -125,7 +125,7 @@ jobs: # testnet when running the image. build: name: Build CI Docker - uses: ./.github/workflows/build-docker-image.yml + uses: ./.github/workflows/sub-build-docker-image.yml with: dockerfile_path: ./docker/Dockerfile dockerfile_target: tests @@ -135,199 +135,6 @@ jobs: rust_lib_backtrace: full rust_log: info - # zebrad tests without cached state - - # TODO: make the non-cached-state tests use: - # network: ${{ inputs.network || vars.ZCASH_NETWORK }} - - # Run all the zebra tests, including tests that are ignored by default. - # Skips tests that need a cached state disk or a lightwalletd binary. - # - # - We run all the tests behind the `getblocktemplate-rpcs` feature as a separated step. - # - We activate the gRPC feature to avoid recompiling `zebrad`, but we don't actually run any gRPC tests. 
- # - # TODO: turn this test and the getblocktemplate test into a matrix, so the jobs use exactly the same diagnostics settings - test-all: - name: Test all - runs-on: ubuntu-latest - needs: build - if: ${{ github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} - steps: - - uses: r7kamura/rust-problem-matchers@v1.4.0 - - - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v4 - with: - short-length: 7 - - # Run unit, basic acceptance tests, and ignored tests, only showing command output if the test fails. - # - # If some tests hang, add "-- --nocapture" for just that test, or for all the tests. - - name: Run zebrad tests - run: | - docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} - docker run -e NETWORK --name zebrad-tests --tty ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} cargo test --locked --release --features "lightwalletd-grpc-tests" --workspace -- --include-ignored - env: - NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} - - # zebrad tests without cached state with `getblocktemplate-rpcs` feature - # - # Same as above but we run all the tests behind the `getblocktemplate-rpcs` feature. 
- test-all-getblocktemplate-rpcs: - name: Test all with getblocktemplate-rpcs feature - runs-on: ubuntu-latest - needs: build - if: ${{ github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} - steps: - - uses: r7kamura/rust-problem-matchers@v1.4.0 - - - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v4 - with: - short-length: 7 - - - name: Run zebrad tests - run: | - docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} - docker run -e NETWORK --name zebrad-tests --tty -e ${{ inputs.network || vars.ZCASH_NETWORK }} ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} cargo test --locked --release --features "lightwalletd-grpc-tests getblocktemplate-rpcs" --workspace -- --include-ignored - env: - NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} - - # Run state tests with fake activation heights. - # - # This test changes zebra-chain's activation heights, - # which can recompile all the Zebra crates, - # so we want its build products to be cached separately. - # - # Also, we don't want to accidentally use the fake heights in other tests. - # - # (The gRPC feature is a zebrad feature, so it isn't needed here.) 
- test-fake-activation-heights: - name: Test with fake activation heights - runs-on: ubuntu-latest - needs: build - if: ${{ github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} - steps: - - uses: r7kamura/rust-problem-matchers@v1.4.0 - - - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v4 - with: - short-length: 7 - - - name: Run tests with fake activation heights - run: | - docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} - docker run -e NETWORK -e TEST_FAKE_ACTIVATION_HEIGHTS --name zebrad-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} cargo test --locked --release --package zebra-state --lib -- --nocapture --include-ignored with_fake_activation_heights - env: - TEST_FAKE_ACTIVATION_HEIGHTS: '1' - NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} - - # Test that Zebra syncs and checkpoints a few thousand blocks from an empty state. - # - # (We activate the gRPC feature to avoid recompiling `zebrad`, but we don't actually run any gRPC tests.) 
- test-empty-sync: - name: Test checkpoint sync from empty state - runs-on: ubuntu-latest - needs: build - if: ${{ github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} - steps: - - uses: r7kamura/rust-problem-matchers@v1.4.0 - - - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v4 - with: - short-length: 7 - - - name: Run zebrad large sync tests - run: | - docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} - docker run -e NETWORK --name zebrad-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} cargo test --locked --release --features lightwalletd-grpc-tests --package zebrad --test acceptance -- --nocapture --include-ignored sync_large_checkpoints_ - env: - NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} - - # Test launching lightwalletd with an empty lightwalletd and Zebra state. - # - # (We activate the gRPC feature to avoid recompiling `zebrad`, but we don't actually run any gRPC tests.) 
- test-lightwalletd-integration: - name: Test integration with lightwalletd - runs-on: ubuntu-latest - needs: build - if: ${{ github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} - steps: - - uses: r7kamura/rust-problem-matchers@v1.4.0 - - - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v4 - with: - short-length: 7 - - - name: Run tests with empty lightwalletd launch - run: | - docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} - docker run -e NETWORK -e ZEBRA_TEST_LIGHTWALLETD --name lightwalletd-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} cargo test --locked --release --features lightwalletd-grpc-tests --package zebrad --test acceptance -- --nocapture --include-ignored lightwalletd_integration - env: - ZEBRA_TEST_LIGHTWALLETD: '1' - NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} - - # Test that Zebra works using the default config with the latest Zebra version - test-configuration-file: - name: Test Zebra default Docker config file - timeout-minutes: 15 - runs-on: ubuntu-latest - needs: build - if: ${{ github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} - steps: - - uses: r7kamura/rust-problem-matchers@v1.4.0 - - - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v4 - with: - short-length: 7 - - - name: Run tests using the default config - run: | - set -ex - docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} - docker run -e NETWORK --detach --name default-conf-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} zebrad start - EXIT_STATUS=$(docker logs --tail all --follow default-conf-tests 2>&1 | grep -q --extended-regexp --max-count=1 -e 'estimated progress to chain 
tip.*BeforeOverwinter'; echo $?; ) - docker stop default-conf-tests - docker logs default-conf-tests - exit "$EXIT_STATUS" - env: - NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} - - # Test that Zebra works using the $ZEBRA_CONF_PATH config - test-zebra-conf-path: - name: Test Zebra custom Docker config file - timeout-minutes: 15 - runs-on: ubuntu-latest - needs: build - if: ${{ github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} - steps: - - uses: r7kamura/rust-problem-matchers@v1.4.0 - - - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v4 - with: - short-length: 7 - - - name: Run tests using the $ZEBRA_CONF_PATH - run: | - set -ex - docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} - docker run -e NETWORK --detach -e ZEBRA_CONF_PATH --name variable-conf-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} -c $ZEBRA_CONF_PATH start - EXIT_STATUS=$(docker logs --tail all --follow variable-conf-tests 2>&1 | grep -q --extended-regexp --max-count=1 -e 'v1.0.0-rc.2.toml'; echo $?; ) - docker stop variable-conf-tests - docker logs variable-conf-tests - exit "$EXIT_STATUS" - env: - ZEBRA_CONF_PATH: 'zebrad/tests/common/configs/v1.0.0-rc.2.toml' - NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} - - # END TODO: make the non-cached-state tests use: - # network: ${{ inputs.network || vars.ZCASH_NETWORK }} - # zebrad cached checkpoint state tests # Regenerate mandatory checkpoint Zebra cached state disks. 
@@ -340,7 +147,7 @@ jobs: regenerate-stateful-disks: name: Zebra checkpoint needs: [ build, get-available-disks ] - uses: ./.github/workflows/deploy-gcp-tests.yml + uses: ./.github/workflows/cd-integration-tests-gcp.yml if: ${{ !fromJSON(needs.get-available-disks.outputs.zebra_checkpoint_disk) || github.event.inputs.regenerate-disks == 'true' }} with: app_name: zebrad @@ -369,7 +176,7 @@ jobs: test-stateful-sync: name: Zebra checkpoint update needs: [ regenerate-stateful-disks, get-available-disks ] - uses: ./.github/workflows/deploy-gcp-tests.yml + uses: ./.github/workflows/cd-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_checkpoint_disk) || needs.regenerate-stateful-disks.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: zebrad @@ -397,7 +204,7 @@ jobs: test-full-sync: name: Zebra tip needs: [ build, get-available-disks ] - uses: ./.github/workflows/deploy-gcp-tests.yml + uses: ./.github/workflows/cd-integration-tests-gcp.yml if: ${{ github.event_name == 'schedule' || !fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || (github.event.inputs.run-full-sync == 'true' && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet') }} with: app_name: zebrad @@ -440,7 +247,7 @@ jobs: test-update-sync: name: Zebra tip update needs: [ test-full-sync, get-available-disks ] - uses: ./.github/workflows/deploy-gcp-tests.yml + uses: ./.github/workflows/cd-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: zebrad @@ -473,7 +280,7 @@ jobs: generate-checkpoints-mainnet: name: Generate checkpoints mainnet 
needs: [ test-full-sync, get-available-disks ] - uses: ./.github/workflows/deploy-gcp-tests.yml + uses: ./.github/workflows/cd-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: zebrad @@ -508,7 +315,7 @@ jobs: test-full-sync-testnet: name: Zebra tip on testnet needs: [ build, get-available-disks-testnet ] - uses: ./.github/workflows/deploy-gcp-tests.yml + uses: ./.github/workflows/cd-integration-tests-gcp.yml if: ${{ (github.event_name == 'schedule' && vars.SCHEDULE_TESTNET_FULL_SYNC == 'true') || !fromJSON(needs.get-available-disks-testnet.outputs.zebra_tip_disk) || (github.event.inputs.run-full-sync == 'true' && (inputs.network || vars.ZCASH_NETWORK) == 'Testnet') }} with: app_name: zebrad @@ -554,7 +361,7 @@ jobs: generate-checkpoints-testnet: name: Generate checkpoints testnet needs: [ test-full-sync-testnet, get-available-disks-testnet ] - uses: ./.github/workflows/deploy-gcp-tests.yml + uses: ./.github/workflows/cd-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks-testnet.outputs.zebra_tip_disk) || needs.test-full-sync-testnet.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: zebrad @@ -587,7 +394,7 @@ jobs: lightwalletd-full-sync: name: lightwalletd tip needs: [ test-full-sync, get-available-disks ] - uses: ./.github/workflows/deploy-gcp-tests.yml + uses: ./.github/workflows/cd-integration-tests-gcp.yml # Currently the lightwalletd tests only work on Mainnet if: ${{ !cancelled() && !failure() && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet' && 
(fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && (github.event_name == 'schedule' || !fromJSON(needs.get-available-disks.outputs.lwd_tip_disk) || github.event.inputs.run-lwd-sync == 'true' ) }} with: @@ -627,7 +434,7 @@ jobs: lightwalletd-update-sync: name: lightwalletd tip update needs: [ lightwalletd-full-sync, get-available-disks ] - uses: ./.github/workflows/deploy-gcp-tests.yml + uses: ./.github/workflows/cd-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet' && (fromJSON(needs.get-available-disks.outputs.lwd_tip_disk) || needs.lightwalletd-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: lightwalletd @@ -659,7 +466,7 @@ jobs: lightwalletd-rpc-test: name: Zebra tip JSON-RPC needs: [ test-full-sync, get-available-disks ] - uses: ./.github/workflows/deploy-gcp-tests.yml + uses: ./.github/workflows/cd-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet' && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: lightwalletd @@ -684,7 +491,7 @@ jobs: lightwalletd-transactions-test: name: lightwalletd tip send needs: [ lightwalletd-full-sync, get-available-disks ] - uses: ./.github/workflows/deploy-gcp-tests.yml + uses: ./.github/workflows/cd-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet' && (fromJSON(needs.get-available-disks.outputs.lwd_tip_disk) || needs.lightwalletd-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && 
github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: lightwalletd @@ -711,7 +518,7 @@ jobs: lightwalletd-grpc-test: name: lightwalletd GRPC tests needs: [ lightwalletd-full-sync, get-available-disks ] - uses: ./.github/workflows/deploy-gcp-tests.yml + uses: ./.github/workflows/cd-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet' && (fromJSON(needs.get-available-disks.outputs.lwd_tip_disk) || needs.lightwalletd-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: lightwalletd @@ -742,7 +549,7 @@ jobs: get-block-template-test: name: get block template needs: [ test-full-sync, get-available-disks ] - uses: ./.github/workflows/deploy-gcp-tests.yml + uses: ./.github/workflows/cd-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: zebrad @@ -768,7 +575,7 @@ jobs: submit-block-test: name: submit block needs: [ test-full-sync, get-available-disks ] - uses: ./.github/workflows/deploy-gcp-tests.yml + uses: ./.github/workflows/cd-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: zebrad @@ -789,7 +596,7 @@ jobs: # # This list is for reliable tests that are run on the `main` branch. # Testnet jobs are not in this list, because we expect testnet to fail occasionally. 
- needs: [ regenerate-stateful-disks, test-full-sync, lightwalletd-full-sync, test-all, test-all-getblocktemplate-rpcs, test-fake-activation-heights, test-empty-sync, test-lightwalletd-integration, test-configuration-file, test-zebra-conf-path, test-stateful-sync, test-update-sync, generate-checkpoints-mainnet, lightwalletd-update-sync, lightwalletd-rpc-test, lightwalletd-transactions-test, lightwalletd-grpc-test, get-block-template-test, submit-block-test ] + needs: [ regenerate-stateful-disks, test-full-sync, lightwalletd-full-sync, test-stateful-sync, test-update-sync, generate-checkpoints-mainnet, lightwalletd-update-sync, lightwalletd-rpc-test, lightwalletd-transactions-test, lightwalletd-grpc-test, get-block-template-test, submit-block-test ] # Only open tickets for failed scheduled jobs, manual workflow runs, or `main` branch merges. # (PR statuses are already reported in the PR jobs list, and checked by Mergify.) # TODO: if a job times out, we want to create a ticket. Does failure() do that? Or do we need cancelled()? 
diff --git a/.github/workflows/lint.patch.yml b/.github/workflows/ci-lint.patch.yml similarity index 100% rename from .github/workflows/lint.patch.yml rename to .github/workflows/ci-lint.patch.yml diff --git a/.github/workflows/lint.yml b/.github/workflows/ci-lint.yml similarity index 99% rename from .github/workflows/lint.yml rename to .github/workflows/ci-lint.yml index bef9e066cba..3a1f1a94f72 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/ci-lint.yml @@ -45,7 +45,7 @@ jobs: **/Cargo.lock clippy.toml .cargo/config.toml - .github/workflows/lint.yml + .github/workflows/ci-lint.yml - name: Workflow files id: changed-files-workflows diff --git a/.github/workflows/ci-unit-tests-docker.patch.yml b/.github/workflows/ci-unit-tests-docker.patch.yml new file mode 100644 index 00000000000..764b47ced71 --- /dev/null +++ b/.github/workflows/ci-unit-tests-docker.patch.yml @@ -0,0 +1,82 @@ +name: CI Docker + +# These jobs *don't* depend on cached Google Cloud state disks, +# so they can be skipped when the modified files make the actual workflow run. 
+on: + pull_request: + paths-ignore: + # code and tests + - '**/*.rs' + # hard-coded checkpoints and proptest regressions + - '**/*.txt' + # test data snapshots + - '**/*.snap' + # dependencies + - '**/Cargo.toml' + - '**/Cargo.lock' + # configuration files + - '.cargo/config.toml' + - '**/clippy.toml' + # workflow definitions + - 'docker/**' + - '.dockerignore' + - '.github/workflows/ci-unit-tests-docker.yml' + - '.github/workflows/cd-integration-tests-gcp.yml' + - '.github/workflows/manual-find-cached-disks.yml' + - '.github/workflows/sub-build-docker-image.yml' + +jobs: + # We don't patch the testnet job, because testnet isn't required to merge (it's too unstable) + get-available-disks: + name: Check if cached state disks exist for Mainnet / Check if cached state disks exist + runs-on: ubuntu-latest + steps: + - run: 'echo "No build required"' + + build: + name: Build CI Docker / Build images + runs-on: ubuntu-latest + steps: + - run: 'echo "No build required"' + + test-all: + name: Test all + runs-on: ubuntu-latest + steps: + - run: 'echo "No build required"' + + test-all-getblocktemplate-rpcs: + name: Test all with getblocktemplate-rpcs feature + runs-on: ubuntu-latest + steps: + - run: 'echo "No build required"' + + test-fake-activation-heights: + name: Test with fake activation heights + runs-on: ubuntu-latest + steps: + - run: 'echo "No build required"' + + test-empty-sync: + name: Test checkpoint sync from empty state + runs-on: ubuntu-latest + steps: + - run: 'echo "No build required"' + + test-lightwalletd-integration: + name: Test integration with lightwalletd + runs-on: ubuntu-latest + steps: + - run: 'echo "No build required"' + + test-configuration-file: + name: Test Zebra default Docker config file + runs-on: ubuntu-latest + steps: + - run: 'echo "No build required"' + + test-zebra-conf-path: + name: Test Zebra custom Docker config file + runs-on: ubuntu-latest + steps: + - run: 'echo "No build required"' diff --git 
a/.github/workflows/ci-unit-tests-docker.yml b/.github/workflows/ci-unit-tests-docker.yml new file mode 100644 index 00000000000..3f7ee654e1d --- /dev/null +++ b/.github/workflows/ci-unit-tests-docker.yml @@ -0,0 +1,384 @@ +name: Docker Unit Tests + +# Ensures that only one workflow task will run at a time. Previous builds, if +# already in process, will get cancelled. Only the latest commit will be allowed +# to run, cancelling any workflows in between +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +on: + workflow_dispatch: + inputs: + network: + default: 'Mainnet' + description: 'Network to deploy: Mainnet or Testnet' + required: true + no_cache: + description: 'Disable the Docker cache for this build' + required: false + type: boolean + default: false + + pull_request: + paths: + # code and tests + - '**/*.rs' + # hard-coded checkpoints and proptest regressions + - '**/*.txt' + # test data snapshots + - '**/*.snap' + # dependencies + - '**/Cargo.toml' + - '**/Cargo.lock' + # configuration files + - '.cargo/config.toml' + - '**/clippy.toml' + # workflow definitions + - 'docker/**' + - '.github/workflows/ci-unit-tests-docker.yml' + - '.github/workflows/cd-integration-tests-gcp.yml' + - '.github/workflows/sub-build-docker-image.yml' + - '.github/workflows/manual-find-cached-disks.yml' + + push: + branches: + - main + paths: + # code and tests + - '**/*.rs' + # hard-coded checkpoints and proptest regressions + - '**/*.txt' + # test data snapshots + - '**/*.snap' + # dependencies + - '**/Cargo.toml' + - '**/Cargo.lock' + # configuration files + - '.cargo/config.toml' + - '**/clippy.toml' + # workflow definitions + - 'docker/**' + - '.dockerignore' + - '.github/workflows/ci-unit-tests-docker.yml' + - '.github/workflows/cd-integration-tests-gcp.yml' + - '.github/workflows/manual-find-cached-disks.yml' + - '.github/workflows/sub-build-docker-image.yml' + +jobs: + # to also run a job on Mergify head 
branches, + # add `|| (github.event_name == 'push' && startsWith(github.head_ref, 'mergify/merge-queue/'))`: + # https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#running-your-workflow-based-on-the-head-or-base-branch-of-a-pull-request-1 + + # Check if the cached state disks used by the tests are available for the default network. + # + # The default network is mainnet unless a manually triggered workflow or repository variable + # is configured differently. + # + # The outputs for this job have the same names as the workflow outputs in manual-find-cached-disks.yml + get-available-disks: + name: Check if cached state disks exist for ${{ inputs.network || vars.ZCASH_NETWORK }} + uses: ./.github/workflows/manual-find-cached-disks.yml + with: + network: ${{ inputs.network || vars.ZCASH_NETWORK }} + + # Check if the cached state disks used by the tests are available for testnet. + # + # The outputs for this job have the same names as the workflow outputs in manual-find-cached-disks.yml + # Some outputs are ignored, because we don't run those jobs on testnet. + get-available-disks-testnet: + name: Check if cached state disks exist for testnet + uses: ./.github/workflows/manual-find-cached-disks.yml + with: + network: 'Testnet' + + # Build the docker image used by the tests. + # + # The default network in the Zebra config in the image is mainnet, unless a manually triggered + # workflow or repository variable is configured differently. Testnet jobs change that config to + # testnet when running the image. 
+ build: + name: Build CI Docker + uses: ./.github/workflows/sub-build-docker-image.yml + with: + dockerfile_path: ./docker/Dockerfile + dockerfile_target: tests + image_name: ${{ vars.CI_IMAGE_NAME }} + no_cache: ${{ inputs.no_cache || false }} + rust_backtrace: full + rust_lib_backtrace: full + rust_log: info + + # zebrad tests without cached state + + # TODO: make the non-cached-state tests use: + # network: ${{ inputs.network || vars.ZCASH_NETWORK }} + + # Run all the zebra tests, including tests that are ignored by default. + # Skips tests that need a cached state disk or a lightwalletd binary. + # + # - We run all the tests behind the `getblocktemplate-rpcs` feature as a separate step. + # - We activate the gRPC feature to avoid recompiling `zebrad`, but we don't actually run any gRPC tests. + # + # TODO: turn this test and the getblocktemplate test into a matrix, so the jobs use exactly the same diagnostics settings + test-all: + name: Test all + runs-on: ubuntu-latest + needs: build + steps: + - uses: r7kamura/rust-problem-matchers@v1.4.0 + + - name: Inject slug/short variables + uses: rlespinasse/github-slug-action@v4 + with: + short-length: 7 + + # Run unit, basic acceptance tests, and ignored tests, only showing command output if the test fails. + # + # If some tests hang, add "-- --nocapture" for just that test, or for all the tests. + - name: Run zebrad tests + run: | + docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} + docker run -e NETWORK --name zebrad-tests --tty ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} cargo test --locked --release --features "lightwalletd-grpc-tests" --workspace -- --include-ignored + env: + NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} + + # zebrad tests without cached state with `getblocktemplate-rpcs` feature + # + # Same as above but we run all the tests behind the `getblocktemplate-rpcs` feature.
+ test-all-getblocktemplate-rpcs: + name: Test all with getblocktemplate-rpcs feature + runs-on: ubuntu-latest + needs: build + steps: + - uses: r7kamura/rust-problem-matchers@v1.4.0 + + - name: Inject slug/short variables + uses: rlespinasse/github-slug-action@v4 + with: + short-length: 7 + + - name: Run zebrad tests + run: | + docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} + docker run -e NETWORK --name zebrad-tests --tty ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} cargo test --locked --release --features "lightwalletd-grpc-tests getblocktemplate-rpcs" --workspace -- --include-ignored + env: + NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} + + # Run state tests with fake activation heights. + # + # This test changes zebra-chain's activation heights, + # which can recompile all the Zebra crates, + # so we want its build products to be cached separately. + # + # Also, we don't want to accidentally use the fake heights in other tests. + # + # (The gRPC feature is a zebrad feature, so it isn't needed here.)
+ test-fake-activation-heights: + name: Test with fake activation heights + runs-on: ubuntu-latest + needs: build + steps: + - uses: r7kamura/rust-problem-matchers@v1.4.0 + + - name: Inject slug/short variables + uses: rlespinasse/github-slug-action@v4 + with: + short-length: 7 + + - name: Run tests with fake activation heights + run: | + docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} + docker run -e NETWORK -e TEST_FAKE_ACTIVATION_HEIGHTS --name zebrad-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} cargo test --locked --release --package zebra-state --lib -- --nocapture --include-ignored with_fake_activation_heights + env: + TEST_FAKE_ACTIVATION_HEIGHTS: '1' + NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} + + # Test that Zebra syncs and checkpoints a few thousand blocks from an empty state. + # + # (We activate the gRPC feature to avoid recompiling `zebrad`, but we don't actually run any gRPC tests.) + test-empty-sync: + name: Test checkpoint sync from empty state + runs-on: ubuntu-latest + needs: build + steps: + - uses: r7kamura/rust-problem-matchers@v1.4.0 + + - name: Inject slug/short variables + uses: rlespinasse/github-slug-action@v4 + with: + short-length: 7 + + - name: Run zebrad large sync tests + run: | + docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} + docker run -e NETWORK --name zebrad-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} cargo test --locked --release --features lightwalletd-grpc-tests --package zebrad --test acceptance -- --nocapture --include-ignored sync_large_checkpoints_ + env: + NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} + + # Test launching lightwalletd with an empty lightwalletd and Zebra state. + # + # (We activate the gRPC feature to avoid recompiling `zebrad`, but we don't actually run any gRPC tests.) 
+ test-lightwalletd-integration: + name: Test integration with lightwalletd + runs-on: ubuntu-latest + needs: build + steps: + - uses: r7kamura/rust-problem-matchers@v1.4.0 + + - name: Inject slug/short variables + uses: rlespinasse/github-slug-action@v4 + with: + short-length: 7 + + - name: Run tests with empty lightwalletd launch + run: | + docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} + docker run -e NETWORK -e ZEBRA_TEST_LIGHTWALLETD --name lightwalletd-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} cargo test --locked --release --features lightwalletd-grpc-tests --package zebrad --test acceptance -- --nocapture --include-ignored lightwalletd_integration + env: + ZEBRA_TEST_LIGHTWALLETD: '1' + NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} + + # Test that Zebra works using the default config with the latest Zebra version + test-configuration-file: + name: Test Zebra default Docker config file + timeout-minutes: 15 + runs-on: ubuntu-latest + needs: build + steps: + - uses: r7kamura/rust-problem-matchers@v1.4.0 + + - name: Inject slug/short variables + uses: rlespinasse/github-slug-action@v4 + with: + short-length: 7 + + - name: Run tests using the default config + run: | + set -ex + docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} + docker run -e NETWORK --detach --name default-conf-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} zebrad start + EXIT_STATUS=$(docker logs --tail all --follow default-conf-tests 2>&1 | grep -q --extended-regexp --max-count=1 -e 'estimated progress to chain tip.*BeforeOverwinter'; echo $?; ) + docker stop default-conf-tests + docker logs default-conf-tests + exit "$EXIT_STATUS" + env: + NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} + + # Test that Zebra works using the $ZEBRA_CONF_PATH config + test-zebra-conf-path: + name: Test Zebra custom Docker config 
file + timeout-minutes: 15 + runs-on: ubuntu-latest + needs: build + steps: + - uses: r7kamura/rust-problem-matchers@v1.4.0 + + - name: Inject slug/short variables + uses: rlespinasse/github-slug-action@v4 + with: + short-length: 7 + + - name: Run tests using the $ZEBRA_CONF_PATH + run: | + set -ex + docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} + docker run -e NETWORK --detach -e ZEBRA_CONF_PATH --name variable-conf-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} -c $ZEBRA_CONF_PATH start + EXIT_STATUS=$(docker logs --tail all --follow variable-conf-tests 2>&1 | grep -q --extended-regexp --max-count=1 -e 'v1.0.0-rc.2.toml'; echo $?; ) + docker stop variable-conf-tests + docker logs variable-conf-tests + exit "$EXIT_STATUS" + env: + ZEBRA_CONF_PATH: 'zebrad/tests/common/configs/v1.0.0-rc.2.toml' + NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} + + # Make sure Zebra can sync at least one full checkpoint on mainnet + - name: Run tests using the default config + run: | + set -ex + docker pull ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} + docker run --detach --name default-conf-tests -t ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} + # show the logs, even if the job times out + docker logs --tail all --follow default-conf-tests | \ + tee --output-error=exit /dev/stderr | \ + grep --max-count=1 --extended-regexp --color=always \ + 'net.*=.*Main.*estimated progress to chain tip.*BeforeOverwinter' + docker stop default-conf-tests + # get the exit status from docker + EXIT_STATUS=$( \ + docker wait default-conf-tests || \ + docker inspect --format "{{.State.ExitCode}}" default-conf-tests || \ + echo "missing container, or missing exit status for container" \ + ) + docker logs default-conf-tests + echo "docker exit status: $EXIT_STATUS" + if [[ "$EXIT_STATUS" = "137" ]]; then + echo "ignoring expected signal status" + exit 0 + fi + exit 
"$EXIT_STATUS" + + # Test reconfiguring the docker image for testnet. + test-configuration-file-testnet: + name: Test testnet Zebra CD Docker config file + timeout-minutes: 15 + runs-on: ubuntu-latest + needs: build + steps: + - uses: r7kamura/rust-problem-matchers@v1.4.0 + + - name: Inject slug/short variables + uses: rlespinasse/github-slug-action@v4 + with: + short-length: 7 + + # Make sure Zebra can sync the genesis block on testnet + - name: Run tests using a testnet config + run: | + set -ex + docker pull ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} + docker run --env "NETWORK=Testnet" --detach --name testnet-conf-tests -t ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} + # show the logs, even if the job times out + docker logs --tail all --follow testnet-conf-tests | \ + tee --output-error=exit /dev/stderr | \ + grep --max-count=1 --extended-regexp --color=always \ + -e 'net.*=.*Test.*estimated progress to chain tip.*Genesis' \ + -e 'net.*=.*Test.*estimated progress to chain tip.*BeforeOverwinter' + docker stop testnet-conf-tests + # get the exit status from docker + EXIT_STATUS=$( \ + docker wait testnet-conf-tests || \ + docker inspect --format "{{.State.ExitCode}}" testnet-conf-tests || \ + echo "missing container, or missing exit status for container" \ + ) + docker logs testnet-conf-tests + echo "docker exit status: $EXIT_STATUS" + if [[ "$EXIT_STATUS" = "137" ]]; then + echo "ignoring expected signal status" + exit 0 + fi + exit "$EXIT_STATUS" + + failure-issue: + name: Open or update issues for main branch failures + # When a new test is added to this workflow, add it to this list. + # + # This list is for reliable tests that are run on the `main` branch. + # Testnet jobs are not in this list, because we expect testnet to fail occasionally. 
+ needs: [ test-all, test-all-getblocktemplate-rpcs, test-fake-activation-heights, test-empty-sync, test-lightwalletd-integration, test-configuration-file, test-zebra-conf-path, test-configuration-file-testnet ] + # Only open tickets for failed scheduled jobs, manual workflow runs, or `main` branch merges. + # (PR statuses are already reported in the PR jobs list, and checked by Mergify.) + # TODO: if a job times out, we want to create a ticket. Does failure() do that? Or do we need cancelled()? + if: failure() && github.event.pull_request == null + runs-on: ubuntu-latest + steps: + - uses: jayqi/failed-build-issue-action@v1 + with: + title-template: "{{refname}} branch CI failed: {{eventName}} in {{workflow}}" + # New failures open an issue with this label. + # TODO: do we want a different label for each workflow, or each kind of workflow? + label-name: S-ci-fail-auto-issue + # If there is already an open issue with this label, any failures become comments on that issue. + always-create-new-issue: false + github-token: ${{ secrets.GITHUB_TOKEN }} + diff --git a/.github/workflows/continous-integration-os.yml b/.github/workflows/ci-unit-tests-os.yml similarity index 57% rename from .github/workflows/continous-integration-os.yml rename to .github/workflows/ci-unit-tests-os.yml index 3266f24e561..fdc81a8cb1b 100644 --- a/.github/workflows/continous-integration-os.yml +++ b/.github/workflows/ci-unit-tests-os.yml @@ -1,4 +1,4 @@ -name: CI OSes +name: Multi-OS Unit Tests # Ensures that only one workflow task will run at a time. Previous builds, if # already in process, will get cancelled. 
Only the latest commit will be allowed @@ -12,14 +12,12 @@ on: # we build Rust and Zcash parameter caches on main, # so they can be shared by all branches: # https://docs.github.com/en/actions/using-workflows/caching-dependencies-to-speed-up-workflows#restrictions-for-accessing-a-cache - push: - branches: - - main + + pull_request: paths: - # production code and test code + # code and tests - '**/*.rs' - # hard-coded checkpoints - # TODO: skip proptest regressions? + # hard-coded checkpoints and proptest regressions - '**/*.txt' # test data snapshots - '**/*.snap' @@ -31,12 +29,17 @@ on: - '.cargo/config.toml' - '**/clippy.toml' # workflow definitions - - '.github/workflows/continous-integration-os.yml' - pull_request: + - '.github/workflows/ci-unit-tests-os.yml' + - '.github/workflows/sub-build-docker-image.yml' + + push: + branches: + - main paths: - # code and tests + # production code and test code - '**/*.rs' - # hard-coded checkpoints and proptest regressions + # hard-coded checkpoints + # TODO: skip proptest regressions? 
- '**/*.txt' # test data snapshots - '**/*.snap' @@ -48,7 +51,8 @@ on: - '.cargo/config.toml' - '**/clippy.toml' # workflow definitions - - '.github/workflows/continous-integration-os.yml' + - '.github/workflows/ci-unit-tests-os.yml' + - '.github/workflows/sub-build-docker-image.yml' env: CARGO_INCREMENTAL: ${{ vars.CARGO_INCREMENTAL }} @@ -58,6 +62,9 @@ env: COLORBT_SHOW_HIDDEN: ${{ vars.COLORBT_SHOW_HIDDEN }} jobs: + ######################################## + ### Build and test Zebra on all OSes ### + ######################################## test: name: Test ${{ matrix.rust }} on ${{ matrix.os }}${{ matrix.features }} # The large timeout is to accommodate: @@ -233,7 +240,6 @@ jobs: - name: Install last version of Protoc uses: arduino/setup-protoc@v2.1.0 with: - # TODO: increase to latest version after https://github.com/arduino/setup-protoc/issues/33 is fixed version: '23.x' repo-token: ${{ secrets.GITHUB_TOKEN }} @@ -319,3 +325,206 @@ jobs: else echo "No unused dependencies found." fi + + ######################################## + ### Build and test Zebra with Docker ### + ######################################## + # Build the docker image used by the tests. + # + # The default network in the Zebra config in the image is mainnet, unless a manually triggered + # workflow or repository variable is configured differently. Testnet jobs change that config to + # testnet when running the image. + build: + name: Build CI Docker + uses: ./.github/workflows/sub-build-docker-image.yml + with: + dockerfile_path: ./docker/Dockerfile + dockerfile_target: tests + image_name: ${{ vars.CI_IMAGE_NAME }} + no_cache: ${{ inputs.no_cache || false }} + rust_backtrace: full + rust_lib_backtrace: full + rust_log: info + + # zebrad tests without cached state + + # TODO: make the non-cached-state tests use: + # network: ${{ inputs.network || vars.ZCASH_NETWORK }} + + # Run all the zebra tests, including tests that are ignored by default. 
+ # Skips tests that need a cached state disk or a lightwalletd binary. + # + # - We run all the tests behind the `getblocktemplate-rpcs` feature as a separate step. + # - We activate the gRPC feature to avoid recompiling `zebrad`, but we don't actually run any gRPC tests. + # + # TODO: turn this test and the getblocktemplate test into a matrix, so the jobs use exactly the same diagnostics settings + test-all: + name: Test all + runs-on: ubuntu-latest + needs: build + steps: + - uses: r7kamura/rust-problem-matchers@v1.4.0 + + - name: Inject slug/short variables + uses: rlespinasse/github-slug-action@v4 + with: + short-length: 7 + + # Run unit, basic acceptance tests, and ignored tests, only showing command output if the test fails. + # + # If some tests hang, add "-- --nocapture" for just that test, or for all the tests. + - name: Run zebrad tests + run: | + docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} + docker run -e NETWORK --name zebrad-tests --tty ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} cargo test --locked --release --features "lightwalletd-grpc-tests" --workspace -- --include-ignored + env: + NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} + + # zebrad tests without cached state with `getblocktemplate-rpcs` feature + # + # Same as above but we run all the tests behind the `getblocktemplate-rpcs` feature.
+ test-all-getblocktemplate-rpcs: + name: Test all with getblocktemplate-rpcs feature + runs-on: ubuntu-latest + needs: build + steps: + - uses: r7kamura/rust-problem-matchers@v1.4.0 + + - name: Inject slug/short variables + uses: rlespinasse/github-slug-action@v4 + with: + short-length: 7 + + - name: Run zebrad tests + run: | + docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} + docker run -e NETWORK --name zebrad-tests --tty -e ${{ inputs.network || vars.ZCASH_NETWORK }} ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} cargo test --locked --release --features "lightwalletd-grpc-tests getblocktemplate-rpcs" --workspace -- --include-ignored + env: + NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} + + # Run state tests with fake activation heights. + # + # This test changes zebra-chain's activation heights, + # which can recompile all the Zebra crates, + # so we want its build products to be cached separately. + # + # Also, we don't want to accidentally use the fake heights in other tests. + # + # (The gRPC feature is a zebrad feature, so it isn't needed here.) 
+ test-fake-activation-heights: + name: Test with fake activation heights + runs-on: ubuntu-latest + needs: build + steps: + - uses: r7kamura/rust-problem-matchers@v1.4.0 + + - name: Inject slug/short variables + uses: rlespinasse/github-slug-action@v4 + with: + short-length: 7 + + - name: Run tests with fake activation heights + run: | + docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} + docker run -e NETWORK -e TEST_FAKE_ACTIVATION_HEIGHTS --name zebrad-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} cargo test --locked --release --package zebra-state --lib -- --nocapture --include-ignored with_fake_activation_heights + env: + TEST_FAKE_ACTIVATION_HEIGHTS: '1' + NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} + + # Test that Zebra syncs and checkpoints a few thousand blocks from an empty state. + # + # (We activate the gRPC feature to avoid recompiling `zebrad`, but we don't actually run any gRPC tests.) + test-empty-sync: + name: Test checkpoint sync from empty state + runs-on: ubuntu-latest + needs: build + steps: + - uses: r7kamura/rust-problem-matchers@v1.4.0 + + - name: Inject slug/short variables + uses: rlespinasse/github-slug-action@v4 + with: + short-length: 7 + + - name: Run zebrad large sync tests + run: | + docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} + docker run -e NETWORK --name zebrad-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} cargo test --locked --release --features lightwalletd-grpc-tests --package zebrad --test acceptance -- --nocapture --include-ignored sync_large_checkpoints_ + env: + NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} + + # Test launching lightwalletd with an empty lightwalletd and Zebra state. + # + # (We activate the gRPC feature to avoid recompiling `zebrad`, but we don't actually run any gRPC tests.) 
+ test-lightwalletd-integration: + name: Test integration with lightwalletd + runs-on: ubuntu-latest + needs: build + steps: + - uses: r7kamura/rust-problem-matchers@v1.4.0 + + - name: Inject slug/short variables + uses: rlespinasse/github-slug-action@v4 + with: + short-length: 7 + + - name: Run tests with empty lightwalletd launch + run: | + docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} + docker run -e NETWORK -e ZEBRA_TEST_LIGHTWALLETD --name lightwalletd-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} cargo test --locked --release --features lightwalletd-grpc-tests --package zebrad --test acceptance -- --nocapture --include-ignored lightwalletd_integration + env: + ZEBRA_TEST_LIGHTWALLETD: '1' + NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} + + # Test that Zebra works using the default config with the latest Zebra version + test-configuration-file: + name: Test Zebra default Docker config file + timeout-minutes: 15 + runs-on: ubuntu-latest + needs: build + steps: + - uses: r7kamura/rust-problem-matchers@v1.4.0 + + - name: Inject slug/short variables + uses: rlespinasse/github-slug-action@v4 + with: + short-length: 7 + + - name: Run tests using the default config + run: | + set -ex + docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} + docker run -e NETWORK --detach --name default-conf-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} zebrad start + EXIT_STATUS=$(docker logs --tail all --follow default-conf-tests 2>&1 | grep -q --extended-regexp --max-count=1 -e 'estimated progress to chain tip.*BeforeOverwinter'; echo $?; ) + docker stop default-conf-tests + docker logs default-conf-tests + exit "$EXIT_STATUS" + env: + NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} + + # Test that Zebra works using the $ZEBRA_CONF_PATH config + test-zebra-conf-path: + name: Test Zebra custom Docker config 
file + timeout-minutes: 15 + runs-on: ubuntu-latest + needs: build + steps: + - uses: r7kamura/rust-problem-matchers@v1.4.0 + + - name: Inject slug/short variables + uses: rlespinasse/github-slug-action@v4 + with: + short-length: 7 + + - name: Run tests using the $ZEBRA_CONF_PATH + run: | + set -ex + docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} + docker run -e NETWORK --detach -e ZEBRA_CONF_PATH --name variable-conf-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} -c $ZEBRA_CONF_PATH start + EXIT_STATUS=$(docker logs --tail all --follow variable-conf-tests 2>&1 | grep -q --extended-regexp --max-count=1 -e 'v1.0.0-rc.2.toml'; echo $?; ) + docker stop variable-conf-tests + docker logs variable-conf-tests + exit "$EXIT_STATUS" + env: + ZEBRA_CONF_PATH: 'zebrad/tests/common/configs/v1.0.0-rc.2.toml' + NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} diff --git a/.github/workflows/continous-integration-os.patch.yml b/.github/workflows/ci-unites-tests-os.patch.yml similarity index 97% rename from .github/workflows/continous-integration-os.patch.yml rename to .github/workflows/ci-unites-tests-os.patch.yml index fe81951024a..9a85bdedb74 100644 --- a/.github/workflows/continous-integration-os.patch.yml +++ b/.github/workflows/ci-unites-tests-os.patch.yml @@ -11,7 +11,7 @@ on: - '**/deny.toml' - '.cargo/config.toml' - '**/clippy.toml' - - '.github/workflows/continous-integration-os.yml' + - '.github/workflows/ci-unit-tests-os.yml' jobs: test: diff --git a/.github/workflows/continous-delivery.patch.yml b/.github/workflows/continous-delivery.patch.yml deleted file mode 100644 index f51ef601468..00000000000 --- a/.github/workflows/continous-delivery.patch.yml +++ /dev/null @@ -1,35 +0,0 @@ -name: CD - -on: - # Only patch the Docker image test jobs - pull_request: - paths-ignore: - # code and tests - - '**/*.rs' - # hard-coded checkpoints and proptest regressions - - '**/*.txt' - # 
dependencies - - '**/Cargo.toml' - - '**/Cargo.lock' - # configuration files - - '.cargo/config.toml' - - '**/clippy.toml' - # workflow definitions - - 'docker/**' - - '.dockerignore' - - '.github/workflows/continous-delivery.yml' - - '.github/workflows/find-cached-disks.yml' - - -jobs: - build: - name: Build CD Docker / Build images - runs-on: ubuntu-latest - steps: - - run: 'echo "No build required"' - - test-configuration-file: - name: Test Zebra CD Docker config file - runs-on: ubuntu-latest - steps: - - run: 'echo "No build required"' diff --git a/.github/workflows/continous-integration-docker.patch-always.yml b/.github/workflows/continous-integration-docker.patch-always.yml deleted file mode 100644 index ccda278dd70..00000000000 --- a/.github/workflows/continous-integration-docker.patch-always.yml +++ /dev/null @@ -1,26 +0,0 @@ -# These jobs can be skipped based on cached Google Cloud state disks, -# and some of them just run on the `main` branch, -# so the patch jobs always need to run on every PR. 
-name: CI Docker - -on: - pull_request: - -jobs: - regenerate-stateful-disks: - name: Zebra checkpoint / Run sync-to-checkpoint test - runs-on: ubuntu-latest - steps: - - run: 'echo "No build required"' - - test-full-sync: - name: Zebra tip / Run full-sync-to-tip test - runs-on: ubuntu-latest - steps: - - run: 'echo "No build required"' - - lightwalletd-full-sync: - name: lightwalletd tip / Run lwd-full-sync test - runs-on: ubuntu-latest - steps: - - run: 'echo "No build required"' diff --git a/.github/workflows/docs.patch.yml b/.github/workflows/docs-deploy-firebase.patch.yml similarity index 100% rename from .github/workflows/docs.patch.yml rename to .github/workflows/docs-deploy-firebase.patch.yml diff --git a/.github/workflows/docs.yml b/.github/workflows/docs-deploy-firebase.yml similarity index 100% rename from .github/workflows/docs.yml rename to .github/workflows/docs-deploy-firebase.yml diff --git a/.github/workflows/dockerhub-description.yml b/.github/workflows/docs-dockerhub-description.yml similarity index 100% rename from .github/workflows/dockerhub-description.yml rename to .github/workflows/docs-dockerhub-description.yml diff --git a/.github/workflows/find-cached-disks.yml b/.github/workflows/manual-find-cached-disks.yml similarity index 100% rename from .github/workflows/find-cached-disks.yml rename to .github/workflows/manual-find-cached-disks.yml diff --git a/.github/workflows/zcashd-manual-deploy.yml b/.github/workflows/manual-zcashd-deploy.yml similarity index 100% rename from .github/workflows/zcashd-manual-deploy.yml rename to .github/workflows/manual-zcashd-deploy.yml diff --git a/.github/workflows/release-binaries.yml b/.github/workflows/release-binaries.yml index a96c15c2867..a88750f95a1 100644 --- a/.github/workflows/release-binaries.yml +++ b/.github/workflows/release-binaries.yml @@ -1,5 +1,5 @@ # This workflow is meant to trigger a build of Docker binaries when a release -# is published, it uses the existing `build-docker-image.yml` 
workflow +# is published, it uses the existing `sub-build-docker-image.yml` workflow # # We use a separate action as we might want to trigger this under # different circumstances than a Continuous Deployment, for example. @@ -21,7 +21,7 @@ jobs: # The image will be named `zebra:` build: name: Build Release Docker - uses: ./.github/workflows/build-docker-image.yml + uses: ./.github/workflows/sub-build-docker-image.yml with: dockerfile_path: ./docker/Dockerfile dockerfile_target: runtime @@ -33,7 +33,7 @@ jobs: # The image will be named `zebra:.experimental` build-mining-testnet: name: Build Release Testnet Mining Docker - uses: ./.github/workflows/build-docker-image.yml + uses: ./.github/workflows/sub-build-docker-image.yml with: dockerfile_path: ./docker/Dockerfile dockerfile_target: runtime diff --git a/.github/workflows/build-docker-image.yml b/.github/workflows/sub-build-docker-image.yml similarity index 100% rename from .github/workflows/build-docker-image.yml rename to .github/workflows/sub-build-docker-image.yml diff --git a/.github/workflows/zcash-lightwalletd.patch.yml b/.github/workflows/sub-build-lightwalletd.patch.yml similarity index 90% rename from .github/workflows/zcash-lightwalletd.patch.yml rename to .github/workflows/sub-build-lightwalletd.patch.yml index 1c4413c2975..cab893448d1 100644 --- a/.github/workflows/zcash-lightwalletd.patch.yml +++ b/.github/workflows/sub-build-lightwalletd.patch.yml @@ -10,7 +10,7 @@ on: - 'zebrad/src/config.rs' - 'zebrad/src/commands/start.rs' - 'docker/zcash-lightwalletd/Dockerfile' - - '.github/workflows/zcash-lightwalletd.yml' + - '.github/workflows/sub-build-lightwalletd.yml' jobs: build: diff --git a/.github/workflows/zcash-lightwalletd.yml b/.github/workflows/sub-build-lightwalletd.yml similarity index 97% rename from .github/workflows/zcash-lightwalletd.yml rename to .github/workflows/sub-build-lightwalletd.yml index c7ea1c24bb4..49f5cdf4036 100644 --- a/.github/workflows/zcash-lightwalletd.yml +++ 
b/.github/workflows/sub-build-lightwalletd.yml @@ -29,7 +29,7 @@ on: - 'zebrad/src/commands/start.rs' # these workflow definitions actually change the docker image - 'docker/zcash-lightwalletd/Dockerfile' - - '.github/workflows/zcash-lightwalletd.yml' + - '.github/workflows/sub-build-lightwalletd.yml' # Update the lightwalletd image when each related PR changes pull_request: @@ -42,7 +42,7 @@ on: - 'zebrad/src/commands/start.rs' # these workflow definitions actually change the docker image - 'docker/zcash-lightwalletd/Dockerfile' - - '.github/workflows/zcash-lightwalletd.yml' + - '.github/workflows/sub-build-lightwalletd.yml' env: IMAGE_NAME: lightwalletd diff --git a/.github/workflows/zcash-params.yml b/.github/workflows/sub-build-zcash-params.yml similarity index 86% rename from .github/workflows/zcash-params.yml rename to .github/workflows/sub-build-zcash-params.yml index 28bcea9a424..ee08b6fd3c2 100644 --- a/.github/workflows/zcash-params.yml +++ b/.github/workflows/sub-build-zcash-params.yml @@ -1,4 +1,4 @@ -name: zcash-params +name: Build zcash-params # Ensures that only one workflow task will run at a time. Previous deployments, if # already in process, won't get cancelled. 
Instead, we let the first to complete @@ -28,13 +28,13 @@ on: # workflow definitions - 'docker/zcash-params/Dockerfile' - '.dockerignore' - - '.github/workflows/zcash-params.yml' - - '.github/workflows/build-docker-image.yml' + - '.github/workflows/sub-build-zcash-params.yml' + - '.github/workflows/sub-build-docker-image.yml' jobs: build: name: Build Zcash Params Docker - uses: ./.github/workflows/build-docker-image.yml + uses: ./.github/workflows/sub-build-docker-image.yml with: dockerfile_path: ./docker/zcash-params/Dockerfile dockerfile_target: release diff --git a/book/src/dev/continuous-integration.md b/book/src/dev/continuous-integration.md index 3a7726fb3e0..895085f6395 100644 --- a/book/src/dev/continuous-integration.md +++ b/book/src/dev/continuous-integration.md @@ -90,7 +90,7 @@ This means that the entire workflow must be re-run when a single test fails. 1. Look for the earliest job that failed, and find the earliest failure. For example, this failure doesn't tell us what actually went wrong: -> Error: The template is not valid. ZcashFoundation/zebra/.github/workflows/build-docker-image.yml@8bbc5b21c97fafc83b70fbe7f3b5e9d0ffa19593 (Line: 52, Col: 19): Error reading JToken from JsonReader. Path '', line 0, position 0. +> Error: The template is not valid. ZcashFoundation/zebra/.github/workflows/sub-build-docker-image.yml@8bbc5b21c97fafc83b70fbe7f3b5e9d0ffa19593 (Line: 52, Col: 19): Error reading JToken from JsonReader. Path '', line 0, position 0. 
https://github.com/ZcashFoundation/zebra/runs/8181760421?check_suite_focus=true#step:41:4 diff --git a/docker/Dockerfile b/docker/Dockerfile index e16cac66930..067bec4a99f 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -13,7 +13,7 @@ # # Build zebrad with these features # Keep these in sync with: -# https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/build-docker-image.yml#L37 +# https://github.com/ZcashFoundation/zebra/blob/main/.github/workflows/sub-build-docker-image.yml#L37 ARG FEATURES="default-release-binaries" ARG TEST_FEATURES="lightwalletd-grpc-tests zebra-checkpoints" From 59e8f11a7d83ff27a0c8af0c7a07c898d8239e74 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Mon, 25 Sep 2023 09:51:25 +0100 Subject: [PATCH 02/59] fix(workflows): use correct name for patch --- .github/workflows/ci-unit-tests-docker.patch.yml | 2 +- .github/workflows/ci-unites-tests-os.patch.yml | 2 +- .github/workflows/sub-build-lightwalletd.patch.yml | 2 +- .github/workflows/sub-build-lightwalletd.yml | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci-unit-tests-docker.patch.yml b/.github/workflows/ci-unit-tests-docker.patch.yml index 764b47ced71..5dbb927f523 100644 --- a/.github/workflows/ci-unit-tests-docker.patch.yml +++ b/.github/workflows/ci-unit-tests-docker.patch.yml @@ -1,4 +1,4 @@ -name: CI Docker +name: Docker Unit Tests # These jobs *don't* depend on cached Google Cloud state disks, # so they can be skipped when the modified files make the actual workflow run. 
diff --git a/.github/workflows/ci-unites-tests-os.patch.yml b/.github/workflows/ci-unites-tests-os.patch.yml index 9a85bdedb74..2d126cba054 100644 --- a/.github/workflows/ci-unites-tests-os.patch.yml +++ b/.github/workflows/ci-unites-tests-os.patch.yml @@ -1,4 +1,4 @@ -name: CI OSes +name: Multi-OS Unit Tests on: pull_request: diff --git a/.github/workflows/sub-build-lightwalletd.patch.yml b/.github/workflows/sub-build-lightwalletd.patch.yml index cab893448d1..c126aec0ffb 100644 --- a/.github/workflows/sub-build-lightwalletd.patch.yml +++ b/.github/workflows/sub-build-lightwalletd.patch.yml @@ -1,4 +1,4 @@ -name: zcash-lightwalletd +name: Build lightwalletd # When the real job doesn't run because the files aren't changed, # run a fake CI job to satisfy the branch protection rules. diff --git a/.github/workflows/sub-build-lightwalletd.yml b/.github/workflows/sub-build-lightwalletd.yml index 49f5cdf4036..5ccd917f591 100644 --- a/.github/workflows/sub-build-lightwalletd.yml +++ b/.github/workflows/sub-build-lightwalletd.yml @@ -1,6 +1,6 @@ # TODO: we should stop using this build approach with lightwalletd and move to using our # reusable workflow to building all the docker images of our repo -name: zcash-lightwalletd +name: Build lightwalletd # Ensures that only one workflow task will run at a time. Previous builds, if # already in process, will get cancelled. 
Only the latest commit will be allowed From 9f251b7e8512d88aef337987e685f38b41b7f229 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Mon, 25 Sep 2023 11:37:24 +0100 Subject: [PATCH 03/59] fix(workflow): docker unit tests --- .github/workflows/ci-unit-tests-docker.yml | 40 +++------------------- 1 file changed, 4 insertions(+), 36 deletions(-) diff --git a/.github/workflows/ci-unit-tests-docker.yml b/.github/workflows/ci-unit-tests-docker.yml index 3f7ee654e1d..9480628effc 100644 --- a/.github/workflows/ci-unit-tests-docker.yml +++ b/.github/workflows/ci-unit-tests-docker.yml @@ -66,32 +66,6 @@ on: - '.github/workflows/sub-build-docker-image.yml' jobs: - # to also run a job on Mergify head branches, - # add `|| (github.event_name == 'push' && startsWith(github.head_ref, 'mergify/merge-queue/'))`: - # https://docs.github.com/en/actions/using-workflows/events-that-trigger-workflows#running-your-workflow-based-on-the-head-or-base-branch-of-a-pull-request-1 - - # Check if the cached state disks used by the tests are available for the default network. - # - # The default network is mainnet unless a manually triggered workflow or repository variable - # is configured differently. - # - # The outputs for this job have the same names as the workflow outputs in manual-find-cached-disks.yml - get-available-disks: - name: Check if cached state disks exist for ${{ inputs.network || vars.ZCASH_NETWORK }} - uses: ./.github/workflows/manual-find-cached-disks.yml - with: - network: ${{ inputs.network || vars.ZCASH_NETWORK }} - - # Check if the cached state disks used by the tests are available for testnet. - # - # The outputs for this job have the same names as the workflow outputs in manual-find-cached-disks.yml - # Some outputs are ignored, because we don't run those jobs on testnet. 
- get-available-disks-testnet: - name: Check if cached state disks exist for testnet - uses: ./.github/workflows/manual-find-cached-disks.yml - with: - network: 'Testnet' - # Build the docker image used by the tests. # # The default network in the Zebra config in the image is mainnet, unless a manually triggered @@ -109,13 +83,7 @@ jobs: rust_lib_backtrace: full rust_log: info - # zebrad tests without cached state - - # TODO: make the non-cached-state tests use: - # network: ${{ inputs.network || vars.ZCASH_NETWORK }} - # Run all the zebra tests, including tests that are ignored by default. - # Skips tests that need a cached state disk or a lightwalletd binary. # # - We run all the tests behind the `getblocktemplate-rpcs` feature as a separated step. # - We activate the gRPC feature to avoid recompiling `zebrad`, but we don't actually run any gRPC tests. @@ -296,8 +264,8 @@ jobs: - name: Run tests using the default config run: | set -ex - docker pull ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} - docker run --detach --name default-conf-tests -t ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} + docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} + docker run --detach --name default-conf-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} # show the logs, even if the job times out docker logs --tail all --follow default-conf-tests | \ tee --output-error=exit /dev/stderr | \ @@ -336,8 +304,8 @@ jobs: - name: Run tests using a testnet config run: | set -ex - docker pull ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} - docker run --env "NETWORK=Testnet" --detach --name testnet-conf-tests -t ${{ vars.GAR_BASE }}/zebrad@${{ needs.build.outputs.image_digest }} + docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} + docker run --env "NETWORK=Testnet" --detach --name testnet-conf-tests -t ${{ 
vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} # show the logs, even if the job times out docker logs --tail all --follow testnet-conf-tests | \ tee --output-error=exit /dev/stderr | \ From 6bc35ed08500dac25a1f48da2a224208bb112f4b Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Mon, 25 Sep 2023 20:24:23 +0100 Subject: [PATCH 04/59] fix(release): validation error Error: ``` Validation Failed: {"resource":"Release","code":"invalid","field":"target_commitish"} ``` Fixes: https://github.com/release-drafter/release-drafter/issues/1125 --- .github/workflows/release-drafter.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/release-drafter.yml b/.github/workflows/release-drafter.yml index d3987e0a048..39e1ba7bde2 100644 --- a/.github/workflows/release-drafter.yml +++ b/.github/workflows/release-drafter.yml @@ -37,6 +37,7 @@ jobs: - uses: release-drafter/release-drafter@v5 with: config-name: release-drafter.yml + commitish: main #disable-autolabeler: true env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} From 6306a7d338572064de614123e593807d68f33029 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Mon, 25 Sep 2023 20:24:52 +0100 Subject: [PATCH 05/59] fix(workflows): reference correct name --- ...ml => cd-deploy-integration-tests-gcp.yml} | 0 .../ci-integration-tests-gcp.patch.yml | 2 +- .../workflows/ci-integration-tests-gcp.yml | 32 +++++++++---------- .../workflows/ci-unit-tests-docker.patch.yml | 2 +- .github/workflows/ci-unit-tests-docker.yml | 4 +-- 5 files changed, 20 insertions(+), 20 deletions(-) rename .github/workflows/{cd-deploy-tests-gcp.yml => cd-deploy-integration-tests-gcp.yml} (100%) diff --git a/.github/workflows/cd-deploy-tests-gcp.yml b/.github/workflows/cd-deploy-integration-tests-gcp.yml similarity index 100% rename from .github/workflows/cd-deploy-tests-gcp.yml rename to .github/workflows/cd-deploy-integration-tests-gcp.yml diff --git a/.github/workflows/ci-integration-tests-gcp.patch.yml 
b/.github/workflows/ci-integration-tests-gcp.patch.yml index bd58dfc7e49..0c76bad9382 100644 --- a/.github/workflows/ci-integration-tests-gcp.patch.yml +++ b/.github/workflows/ci-integration-tests-gcp.patch.yml @@ -21,7 +21,7 @@ on: - 'docker/**' - '.dockerignore' - '.github/workflows/ci-unit-tests-docker.yml' - - '.github/workflows/cd-integration-tests-gcp.yml' + - '.github/workflows/cd-deploy-integration-tests-gcp.yml' - '.github/workflows/manual-find-cached-disks.yml' - '.github/workflows/sub-build-docker-image.yml' diff --git a/.github/workflows/ci-integration-tests-gcp.yml b/.github/workflows/ci-integration-tests-gcp.yml index 4c2f38713e6..bcd9d4324da 100644 --- a/.github/workflows/ci-integration-tests-gcp.yml +++ b/.github/workflows/ci-integration-tests-gcp.yml @@ -63,7 +63,7 @@ on: # workflow definitions - 'docker/**' - '.github/workflows/ci-integration-tests-gcp.yml' - - '.github/workflows/cd-integration-tests-gcp.yml' + - '.github/workflows/cd-deploy-integration-tests-gcp.yml' - '.github/workflows/sub-build-docker-image.yml' - '.github/workflows/manual-find-cached-disks.yml' @@ -87,7 +87,7 @@ on: - 'docker/**' - '.dockerignore' - '.github/workflows/ci-integration-tests-gcp.yml' - - '.github/workflows/cd-integration-tests-gcp.yml' + - '.github/workflows/cd-deploy-integration-tests-gcp.yml' - '.github/workflows/manual-find-cached-disks.yml' - '.github/workflows/sub-build-docker-image.yml' @@ -147,7 +147,7 @@ jobs: regenerate-stateful-disks: name: Zebra checkpoint needs: [ build, get-available-disks ] - uses: ./.github/workflows/cd-integration-tests-gcp.yml + uses: ./.github/workflows/cd-deploy-integration-tests-gcp.yml if: ${{ !fromJSON(needs.get-available-disks.outputs.zebra_checkpoint_disk) || github.event.inputs.regenerate-disks == 'true' }} with: app_name: zebrad @@ -176,7 +176,7 @@ jobs: test-stateful-sync: name: Zebra checkpoint update needs: [ regenerate-stateful-disks, get-available-disks ] - uses: ./.github/workflows/cd-integration-tests-gcp.yml + 
uses: ./.github/workflows/cd-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_checkpoint_disk) || needs.regenerate-stateful-disks.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: zebrad @@ -204,7 +204,7 @@ jobs: test-full-sync: name: Zebra tip needs: [ build, get-available-disks ] - uses: ./.github/workflows/cd-integration-tests-gcp.yml + uses: ./.github/workflows/cd-deploy-integration-tests-gcp.yml if: ${{ github.event_name == 'schedule' || !fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || (github.event.inputs.run-full-sync == 'true' && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet') }} with: app_name: zebrad @@ -247,7 +247,7 @@ jobs: test-update-sync: name: Zebra tip update needs: [ test-full-sync, get-available-disks ] - uses: ./.github/workflows/cd-integration-tests-gcp.yml + uses: ./.github/workflows/cd-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: zebrad @@ -280,7 +280,7 @@ jobs: generate-checkpoints-mainnet: name: Generate checkpoints mainnet needs: [ test-full-sync, get-available-disks ] - uses: ./.github/workflows/cd-integration-tests-gcp.yml + uses: ./.github/workflows/cd-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: zebrad @@ -315,7 +315,7 @@ jobs: 
test-full-sync-testnet: name: Zebra tip on testnet needs: [ build, get-available-disks-testnet ] - uses: ./.github/workflows/cd-integration-tests-gcp.yml + uses: ./.github/workflows/cd-deploy-integration-tests-gcp.yml if: ${{ (github.event_name == 'schedule' && vars.SCHEDULE_TESTNET_FULL_SYNC == 'true') || !fromJSON(needs.get-available-disks-testnet.outputs.zebra_tip_disk) || (github.event.inputs.run-full-sync == 'true' && (inputs.network || vars.ZCASH_NETWORK) == 'Testnet') }} with: app_name: zebrad @@ -361,7 +361,7 @@ jobs: generate-checkpoints-testnet: name: Generate checkpoints testnet needs: [ test-full-sync-testnet, get-available-disks-testnet ] - uses: ./.github/workflows/cd-integration-tests-gcp.yml + uses: ./.github/workflows/cd-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks-testnet.outputs.zebra_tip_disk) || needs.test-full-sync-testnet.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: zebrad @@ -394,7 +394,7 @@ jobs: lightwalletd-full-sync: name: lightwalletd tip needs: [ test-full-sync, get-available-disks ] - uses: ./.github/workflows/cd-integration-tests-gcp.yml + uses: ./.github/workflows/cd-deploy-integration-tests-gcp.yml # Currently the lightwalletd tests only work on Mainnet if: ${{ !cancelled() && !failure() && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet' && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && (github.event_name == 'schedule' || !fromJSON(needs.get-available-disks.outputs.lwd_tip_disk) || github.event.inputs.run-lwd-sync == 'true' ) }} with: @@ -434,7 +434,7 @@ jobs: lightwalletd-update-sync: name: lightwalletd tip update needs: [ lightwalletd-full-sync, get-available-disks ] - uses: ./.github/workflows/cd-integration-tests-gcp.yml + uses: 
./.github/workflows/cd-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet' && (fromJSON(needs.get-available-disks.outputs.lwd_tip_disk) || needs.lightwalletd-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: lightwalletd @@ -466,7 +466,7 @@ jobs: lightwalletd-rpc-test: name: Zebra tip JSON-RPC needs: [ test-full-sync, get-available-disks ] - uses: ./.github/workflows/cd-integration-tests-gcp.yml + uses: ./.github/workflows/cd-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet' && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: lightwalletd @@ -491,7 +491,7 @@ jobs: lightwalletd-transactions-test: name: lightwalletd tip send needs: [ lightwalletd-full-sync, get-available-disks ] - uses: ./.github/workflows/cd-integration-tests-gcp.yml + uses: ./.github/workflows/cd-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet' && (fromJSON(needs.get-available-disks.outputs.lwd_tip_disk) || needs.lightwalletd-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: lightwalletd @@ -518,7 +518,7 @@ jobs: lightwalletd-grpc-test: name: lightwalletd GRPC tests needs: [ lightwalletd-full-sync, get-available-disks ] - uses: ./.github/workflows/cd-integration-tests-gcp.yml + uses: ./.github/workflows/cd-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && 
(inputs.network || vars.ZCASH_NETWORK) == 'Mainnet' && (fromJSON(needs.get-available-disks.outputs.lwd_tip_disk) || needs.lightwalletd-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: lightwalletd @@ -549,7 +549,7 @@ jobs: get-block-template-test: name: get block template needs: [ test-full-sync, get-available-disks ] - uses: ./.github/workflows/cd-integration-tests-gcp.yml + uses: ./.github/workflows/cd-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: zebrad @@ -575,7 +575,7 @@ jobs: submit-block-test: name: submit block needs: [ test-full-sync, get-available-disks ] - uses: ./.github/workflows/cd-integration-tests-gcp.yml + uses: ./.github/workflows/cd-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: zebrad diff --git a/.github/workflows/ci-unit-tests-docker.patch.yml b/.github/workflows/ci-unit-tests-docker.patch.yml index 5dbb927f523..66922ab68ed 100644 --- a/.github/workflows/ci-unit-tests-docker.patch.yml +++ b/.github/workflows/ci-unit-tests-docker.patch.yml @@ -21,7 +21,7 @@ on: - 'docker/**' - '.dockerignore' - '.github/workflows/ci-unit-tests-docker.yml' - - '.github/workflows/cd-integration-tests-gcp.yml' + - '.github/workflows/cd-deploy-integration-tests-gcp.yml' - '.github/workflows/manual-find-cached-disks.yml' - 
'.github/workflows/sub-build-docker-image.yml' diff --git a/.github/workflows/ci-unit-tests-docker.yml b/.github/workflows/ci-unit-tests-docker.yml index 9480628effc..2ab7d637438 100644 --- a/.github/workflows/ci-unit-tests-docker.yml +++ b/.github/workflows/ci-unit-tests-docker.yml @@ -37,7 +37,7 @@ on: # workflow definitions - 'docker/**' - '.github/workflows/ci-unit-tests-docker.yml' - - '.github/workflows/cd-integration-tests-gcp.yml' + - '.github/workflows/cd-deploy-integration-tests-gcp.yml' - '.github/workflows/sub-build-docker-image.yml' - '.github/workflows/manual-find-cached-disks.yml' @@ -61,7 +61,7 @@ on: - 'docker/**' - '.dockerignore' - '.github/workflows/ci-unit-tests-docker.yml' - - '.github/workflows/cd-integration-tests-gcp.yml' + - '.github/workflows/cd-deploy-integration-tests-gcp.yml' - '.github/workflows/manual-find-cached-disks.yml' - '.github/workflows/sub-build-docker-image.yml' From c78dbd2752cf8aa84f366c1c532070c1f106f436 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Mon, 25 Sep 2023 21:50:25 +0100 Subject: [PATCH 06/59] fix: remove extra workflow --- .github/workflows/chore-deploy-gcp-tests.yml | 966 ------------------- 1 file changed, 966 deletions(-) delete mode 100644 .github/workflows/chore-deploy-gcp-tests.yml diff --git a/.github/workflows/chore-deploy-gcp-tests.yml b/.github/workflows/chore-deploy-gcp-tests.yml deleted file mode 100644 index 78acc9ae27a..00000000000 --- a/.github/workflows/chore-deploy-gcp-tests.yml +++ /dev/null @@ -1,966 +0,0 @@ -name: Deploy GCP tests - -on: - workflow_call: - inputs: - # Status and logging - test_id: - required: true - type: string - description: 'Unique identifier for the test' - test_description: - required: true - type: string - description: 'Explains what the test does' - height_grep_text: - required: false - type: string - description: 'Regular expression to find the tip height in test logs, and add it to newly created cached state image metadata' - - # Test selection and 
parameters - test_variables: - required: true - type: string - description: 'Environmental variables used to select and configure the test' - network: - required: false - type: string - default: Mainnet - description: 'Zcash network to test against' - is_long_test: - required: false - type: boolean - default: false - description: 'Does this test need multiple run jobs? (Does it run longer than 6 hours?)' - - # Cached state - # - # TODO: find a better name - root_state_path: - required: false - type: string - default: '/zebrad-cache' - description: 'Cached state base directory path' - # TODO: find a better name - zebra_state_dir: - required: false - type: string - default: '' - description: 'Zebra cached state directory and input image prefix to search in GCP' - # TODO: find a better name - lwd_state_dir: - required: false - type: string - default: '' - description: 'Lightwalletd cached state directory and input image prefix to search in GCP' - disk_prefix: - required: false - type: string - default: 'zebrad-cache' - description: 'Image name prefix, and `zebra_state_dir` name for newly created cached states' - disk_suffix: - required: false - type: string - description: 'Image name suffix' - needs_zebra_state: - required: true - type: boolean - description: 'Does the test use Zebra cached state?' - needs_lwd_state: - required: false - type: boolean - description: 'Does the test use Lightwalletd and Zebra cached state?' - # main branch states can be outdated and slower, but they can also be more reliable - prefer_main_cached_state: - required: false - type: boolean - default: false - description: 'Does the test prefer to use a main branch cached state?' - saves_to_disk: - required: true - type: boolean - description: 'Can this test create new or updated cached state disks?' 
- force_save_to_disk: - required: false - type: boolean - default: false - description: 'Force this test to create a new or updated cached state disk' - app_name: - required: false - type: string - default: 'zebra' - description: 'Application name, used to work out when a job is an update job' - -env: - # How many previous log lines we show at the start of each new log job. - # Increase this number if some log lines are skipped between jobs - # - # We want to show all the logs since the last job finished, - # but we don't know how long it will be between jobs. - # 200 lines is about 6-15 minutes of sync logs, or one panic log. - EXTRA_LOG_LINES: 200 - # How many blocks to wait before creating an updated cached state image. - # 1 day is approximately 1152 blocks. - CACHED_STATE_UPDATE_LIMIT: 576 - -jobs: - # set up and launch the test, if it doesn't use any cached state - # each test runs one of the *-with/without-cached-state job series, and skips the other - launch-without-cached-state: - name: Launch ${{ inputs.test_id }} test - if: ${{ !inputs.needs_zebra_state }} - runs-on: zfnd-runners - permissions: - contents: 'read' - id-token: 'write' - steps: - - uses: actions/checkout@v4.0.0 - with: - persist-credentials: false - fetch-depth: '2' - - uses: r7kamura/rust-problem-matchers@v1.4.0 - - - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v4 - with: - short-length: 7 - - # Makes the Zcash network name lowercase. - # - # Labels in GCP are required to be in lowercase, but the blockchain network - # uses sentence case, so we need to downcase ${{ inputs.network }}. - # - # Passes ${{ inputs.network }} to subsequent steps using $NETWORK env variable. 
- - name: Downcase network name for labels - run: | - NETWORK_CAPS="${{ inputs.network }}" - echo "NETWORK=${NETWORK_CAPS,,}" >> "$GITHUB_ENV" - - # Install our SSH secret - - name: Install private SSH key - uses: shimataro/ssh-key-action@v2.5.1 - with: - key: ${{ secrets.GCP_SSH_PRIVATE_KEY }} - name: google_compute_engine - known_hosts: unnecessary - - - name: Generate public SSH key - run: | - sudo apt-get update && sudo apt-get -qq install -y --no-install-recommends openssh-client - ssh-keygen -y -f ~/.ssh/google_compute_engine > ~/.ssh/google_compute_engine.pub - - # Setup gcloud CLI - - name: Authenticate to Google Cloud - id: auth - uses: google-github-actions/auth@v1.1.1 - with: - retries: '3' - workload_identity_provider: '${{ vars.GCP_WIF }}' - service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}' - - - name: Set up Cloud SDK - uses: google-github-actions/setup-gcloud@v1.1.1 - - # Create a Compute Engine virtual machine - - name: Create ${{ inputs.test_id }} GCP compute instance - id: create-instance - run: | - gcloud compute instances create-with-container "${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}" \ - --boot-disk-size 300GB \ - --boot-disk-type pd-ssd \ - --image-project=cos-cloud \ - --image-family=cos-stable \ - --create-disk=name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",device-name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",size=300GB,type=pd-ssd \ - --container-image=gcr.io/google-containers/busybox \ - --machine-type ${{ vars.GCP_LARGE_MACHINE }} \ - --network-interface=subnet=${{ vars.GCP_SUBNETWORK }} \ - --scopes cloud-platform \ - --metadata=google-monitoring-enabled=TRUE,google-logging-enabled=TRUE \ - --metadata-from-file=startup-script=.github/workflows/scripts/gcp-vm-startup-script.sh \ - --labels=app=${{ inputs.app_name }},environment=test,network=${NETWORK},github_ref=${{ env.GITHUB_REF_SLUG_URL }},test=${{ inputs.test_id }} \ - --tags ${{ inputs.app_name }} \ - --zone ${{ 
vars.GCP_ZONE }} - sleep 60 - - # Create a docker volume with the new disk we just created. - # - # SSH into the just created VM, and create a docker volume with the newly created disk. - - name: Create ${{ inputs.test_id }} Docker volume - run: | - gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ - --zone ${{ vars.GCP_ZONE }} \ - --ssh-flag="-o ServerAliveInterval=5" \ - --ssh-flag="-o ConnectionAttempts=20" \ - --ssh-flag="-o ConnectTimeout=5" \ - --command \ - "\ - sudo mkfs.ext4 -v /dev/sdb \ - && \ - sudo docker volume create --driver local --opt type=ext4 --opt device=/dev/sdb \ - ${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }} \ - " - - # Launch the test without any cached state - - name: Launch ${{ inputs.test_id }} test - run: | - gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ - --zone ${{ vars.GCP_ZONE }} \ - --ssh-flag="-o ServerAliveInterval=5" \ - --ssh-flag="-o ConnectionAttempts=20" \ - --ssh-flag="-o ConnectTimeout=5" \ - --command \ - "\ - sudo docker run \ - --name ${{ inputs.test_id }} \ - --tty \ - --detach \ - ${{ inputs.test_variables }} \ - --mount type=volume,src=${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }},dst=${{ inputs.root_state_path }}/${{ inputs.zebra_state_dir }} \ - ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} \ - " - - # set up and launch the test, if it uses cached state - # each test runs one of the *-with/without-cached-state job series, and skips the other - launch-with-cached-state: - name: Launch ${{ inputs.test_id }} test - if: ${{ inputs.needs_zebra_state }} - runs-on: zfnd-runners - outputs: - cached_disk_name: ${{ steps.get-disk-name.outputs.cached_disk_name }} - permissions: - contents: 'read' - id-token: 'write' - steps: - - uses: actions/checkout@v4.0.0 - with: - persist-credentials: false - fetch-depth: '2' - - uses: r7kamura/rust-problem-matchers@v1.4.0 - - - name: Inject 
slug/short variables - uses: rlespinasse/github-slug-action@v4 - with: - short-length: 7 - - - name: Downcase network name for disks and labels - run: | - NETWORK_CAPS="${{ inputs.network }}" - echo "NETWORK=${NETWORK_CAPS,,}" >> "$GITHUB_ENV" - - # Install our SSH secret - - name: Install private SSH key - uses: shimataro/ssh-key-action@v2.5.1 - with: - key: ${{ secrets.GCP_SSH_PRIVATE_KEY }} - name: google_compute_engine - known_hosts: unnecessary - - - name: Generate public SSH key - run: | - sudo apt-get update && sudo apt-get -qq install -y --no-install-recommends openssh-client - ssh-keygen -y -f ~/.ssh/google_compute_engine > ~/.ssh/google_compute_engine.pub - - # Setup gcloud CLI - - name: Authenticate to Google Cloud - id: auth - uses: google-github-actions/auth@v1.1.1 - with: - retries: '3' - workload_identity_provider: '${{ vars.GCP_WIF }}' - service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}' - - - name: Set up Cloud SDK - uses: google-github-actions/setup-gcloud@v1.1.1 - - # Find a cached state disk for this job, matching all of: - # - disk cached state (lwd_state_dir/zebra_state_dir or disk_prefix) - zebrad-cache or lwd-cache - # - state version (from the source code) - v{N} - # - network (network) - mainnet or testnet - # - disk target height kind (disk_suffix) - checkpoint or tip - # - # If the test needs a lightwalletd state (needs_lwd_state) set the variable DISK_PREFIX accordingly - # - To ${{ inputs.lwd_state_dir }}" if needed - # - To ${{ inputs.zebra_state_dir || inputs.disk_prefix }} if not - # - # If there are multiple disks: - # - prefer images generated from the same commit, then - # - if prefer_main_cached_state is true, prefer images from the `main` branch, then - # - use any images from any other branch or commit. 
- # Within each of these categories: - # - prefer newer images to older images - # - # Passes the disk name to subsequent steps using $CACHED_DISK_NAME env variable - # Passes the state version to subsequent steps using $STATE_VERSION env variable - # - # TODO: move this script into a file, and call it from manual-find-cached-disks.yml as well. - - name: Find ${{ inputs.test_id }} cached state disk - id: get-disk-name - run: | - LOCAL_STATE_VERSION=$(grep -oE "DATABASE_FORMAT_VERSION: .* [0-9]+" "$GITHUB_WORKSPACE/zebra-state/src/constants.rs" | grep -oE "[0-9]+" | tail -n1) - echo "STATE_VERSION: $LOCAL_STATE_VERSION" - - if [[ "${{ inputs.needs_lwd_state }}" == "true" ]]; then - DISK_PREFIX=${{ inputs.lwd_state_dir }} - else - DISK_PREFIX=${{ inputs.zebra_state_dir || inputs.disk_prefix }} - fi - - # Try to find an image generated from a previous step or run of this commit. - # Fields are listed in the "Create image from state disk" step. - # - # We don't want to match the full branch name here, because: - # - we want to ignore the different GITHUB_REFs across manually triggered jobs, - # pushed branches, and PRs, - # - previous commits might have been buggy, - # or they might have worked and hide bugs in this commit - # (we can't avoid this issue entirely, but we don't want to make it more likely), and - # - the branch name might have been shortened for the image. - # - # The probability of two matching short commit hashes within the same month is very low. 
- COMMIT_DISK_PREFIX="${DISK_PREFIX}-.+-${{ env.GITHUB_SHA_SHORT }}-v${LOCAL_STATE_VERSION}-${NETWORK}-${{ inputs.disk_suffix }}" - COMMIT_CACHED_DISK_NAME=$(gcloud compute images list --filter="status=READY AND name~${COMMIT_DISK_PREFIX}" --format="value(NAME)" --sort-by=~creationTimestamp --limit=1) - echo "${GITHUB_REF_SLUG_URL}-${{ env.GITHUB_SHA_SHORT }} Disk: $COMMIT_CACHED_DISK_NAME" - if [[ -n "$COMMIT_CACHED_DISK_NAME" ]]; then - echo "Description: $(gcloud compute images describe $COMMIT_CACHED_DISK_NAME --format='value(DESCRIPTION)')" - fi - - # Try to find an image generated from the main branch - MAIN_CACHED_DISK_NAME=$(gcloud compute images list --filter="status=READY AND name~${DISK_PREFIX}-main-[0-9a-f]+-v${LOCAL_STATE_VERSION}-${NETWORK}-${{ inputs.disk_suffix }}" --format="value(NAME)" --sort-by=~creationTimestamp --limit=1) - echo "main Disk: $MAIN_CACHED_DISK_NAME" - if [[ -n "$MAIN_CACHED_DISK_NAME" ]]; then - echo "Description: $(gcloud compute images describe $MAIN_CACHED_DISK_NAME --format='value(DESCRIPTION)')" - fi - - # Try to find an image generated from any other branch - ANY_CACHED_DISK_NAME=$(gcloud compute images list --filter="status=READY AND name~${DISK_PREFIX}-.+-[0-9a-f]+-v${LOCAL_STATE_VERSION}-${NETWORK}-${{ inputs.disk_suffix }}" --format="value(NAME)" --sort-by=~creationTimestamp --limit=1) - echo "any branch Disk: $ANY_CACHED_DISK_NAME" - if [[ -n "$ANY_CACHED_DISK_NAME" ]]; then - echo "Description: $(gcloud compute images describe $ANY_CACHED_DISK_NAME --format='value(DESCRIPTION)')" - fi - - # Select a cached disk based on the job settings - CACHED_DISK_NAME="$COMMIT_CACHED_DISK_NAME" - if [[ -z "$CACHED_DISK_NAME" ]] && [[ "${{ inputs.prefer_main_cached_state }}" == "true" ]]; then - echo "Preferring main branch cached state to other branches..." 
- CACHED_DISK_NAME="$MAIN_CACHED_DISK_NAME" - fi - if [[ -z "$CACHED_DISK_NAME" ]]; then - CACHED_DISK_NAME="$ANY_CACHED_DISK_NAME" - fi - - if [[ -z "$CACHED_DISK_NAME" ]]; then - echo "No cached state disk available" - echo "Expected ${COMMIT_DISK_PREFIX}" - echo "Also searched for cached disks from other branches" - echo "Cached state test jobs must depend on the cached state rebuild job" - exit 1 - fi - - echo "Selected Disk: $CACHED_DISK_NAME" - echo "cached_disk_name=$CACHED_DISK_NAME" >> "$GITHUB_OUTPUT" - - echo "STATE_VERSION=$LOCAL_STATE_VERSION" >> "$GITHUB_ENV" - echo "CACHED_DISK_NAME=$CACHED_DISK_NAME" >> "$GITHUB_ENV" - - # Create a Compute Engine virtual machine and attach a cached state disk using the - # $CACHED_DISK_NAME variable as the source image to populate the disk cached state - - name: Create ${{ inputs.test_id }} GCP compute instance - id: create-instance - run: | - gcloud compute instances create-with-container "${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}" \ - --boot-disk-size 300GB \ - --boot-disk-type pd-ssd \ - --image-project=cos-cloud \ - --image-family=cos-stable \ - --create-disk=image=${{ env.CACHED_DISK_NAME }},name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",device-name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",size=300GB,type=pd-ssd \ - --container-image=gcr.io/google-containers/busybox \ - --machine-type ${{ vars.GCP_LARGE_MACHINE }} \ - --network-interface=subnet=${{ vars.GCP_SUBNETWORK }} \ - --scopes cloud-platform \ - --metadata=google-monitoring-enabled=TRUE,google-logging-enabled=TRUE \ - --metadata-from-file=startup-script=.github/workflows/scripts/gcp-vm-startup-script.sh \ - --labels=app=${{ inputs.app_name }},environment=test,network=${NETWORK},github_ref=${{ env.GITHUB_REF_SLUG_URL }},test=${{ inputs.test_id }} \ - --tags ${{ inputs.app_name }} \ - --zone ${{ vars.GCP_ZONE }} - sleep 60 - - # Create a docker volume with the selected cached state. 
- # - # SSH into the just created VM and create a docker volume with the recently attached disk. - # (The cached state and disk are usually the same size, - # but the cached state can be smaller if we just increased the disk size.) - - name: Create ${{ inputs.test_id }} Docker volume - run: | - gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ - --zone ${{ vars.GCP_ZONE }} \ - --ssh-flag="-o ServerAliveInterval=5" \ - --ssh-flag="-o ConnectionAttempts=20" \ - --ssh-flag="-o ConnectTimeout=5" \ - --command \ - "\ - sudo docker volume create --driver local --opt type=ext4 --opt device=/dev/sdb \ - ${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }} \ - " - - # Launch the test with the previously created Zebra-only cached state. - # Each test runs one of the "Launch test" steps, and skips the other. - # - # SSH into the just created VM, and create a Docker container to run the incoming test - # from ${{ inputs.test_id }}, then mount the sudo docker volume created in the previous job. - # - # The disk mounted in the VM is located at /dev/sdb, we mount the root `/` of this disk to the docker - # container in one path: - # - /var/cache/zebrad-cache -> ${{ inputs.root_state_path }}/${{ inputs.zebra_state_dir }} -> $ZEBRA_CACHED_STATE_DIR - # - # This path must match the variable used by the tests in Rust, which are also set in - # `ci-unit-tests-docker.yml` to be able to run this tests. - # - # Although we're mounting the disk root, Zebra will only respect the values from - # $ZEBRA_CACHED_STATE_DIR. The inputs like ${{ inputs.zebra_state_dir }} are only used - # to match that variable paths. - - name: Launch ${{ inputs.test_id }} test - # This step only runs for tests that just read or write a Zebra state. - # - # lightwalletd-full-sync reads Zebra and writes lwd, so it is handled specially. 
- # TODO: we should find a better logic for this use cases - if: ${{ (inputs.needs_zebra_state && !inputs.needs_lwd_state) && inputs.test_id != 'lwd-full-sync' }} - run: | - gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ - --zone ${{ vars.GCP_ZONE }} \ - --ssh-flag="-o ServerAliveInterval=5" \ - --ssh-flag="-o ConnectionAttempts=20" \ - --ssh-flag="-o ConnectTimeout=5" \ - --command \ - "\ - sudo docker run \ - --name ${{ inputs.test_id }} \ - --tty \ - --detach \ - ${{ inputs.test_variables }} \ - --mount type=volume,src=${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }},dst=${{ inputs.root_state_path }}/${{ inputs.zebra_state_dir }} \ - ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} \ - " - - # Launch the test with the previously created Lightwalletd and Zebra cached state. - # Each test runs one of the "Launch test" steps, and skips the other. - # - # SSH into the just created VM, and create a Docker container to run the incoming test - # from ${{ inputs.test_id }}, then mount the sudo docker volume created in the previous job. - # - # In this step we're using the same disk for simplicity, as mounting multiple disks to the - # VM and to the container might require more steps in this workflow, and additional - # considerations. - # - # The disk mounted in the VM is located at /dev/sdb, we mount the root `/` of this disk to the docker - # container in two different paths: - # - /var/cache/zebrad-cache -> ${{ inputs.root_state_path }}/${{ inputs.zebra_state_dir }} -> $ZEBRA_CACHED_STATE_DIR - # - /var/cache/lwd-cache -> ${{ inputs.root_state_path }}/${{ inputs.lwd_state_dir }} -> $LIGHTWALLETD_DATA_DIR - # - # This doesn't cause any path conflicts, because Zebra and lightwalletd create different - # subdirectories for their data. (But Zebra, lightwalletd, and the test harness must not - # delete the whole cache directory.) 
- # - # This paths must match the variables used by the tests in Rust, which are also set in - # `ci-unit-tests-docker.yml` to be able to run this tests. - # - # Although we're mounting the disk root to both directories, Zebra and Lightwalletd - # will only respect the values from $ZEBRA_CACHED_STATE_DIR and $LIGHTWALLETD_DATA_DIR, - # the inputs like ${{ inputs.lwd_state_dir }} are only used to match those variables paths. - - name: Launch ${{ inputs.test_id }} test - # This step only runs for tests that read or write Lightwalletd and Zebra states. - # - # lightwalletd-full-sync reads Zebra and writes lwd, so it is handled specially. - # TODO: we should find a better logic for this use cases - if: ${{ (inputs.needs_zebra_state && inputs.needs_lwd_state) || inputs.test_id == 'lwd-full-sync' }} - run: | - gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ - --zone ${{ vars.GCP_ZONE }} \ - --ssh-flag="-o ServerAliveInterval=5" \ - --ssh-flag="-o ConnectionAttempts=20" \ - --ssh-flag="-o ConnectTimeout=5" \ - --command \ - "\ - sudo docker run \ - --name ${{ inputs.test_id }} \ - --tty \ - --detach \ - ${{ inputs.test_variables }} \ - --mount type=volume,src=${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }},dst=${{ inputs.root_state_path }}/${{ inputs.zebra_state_dir }} \ - --mount type=volume,src=${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }},dst=${{ inputs.root_state_path }}/${{ inputs.lwd_state_dir }} \ - ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} \ - " - - # Show all the test logs, then follow the logs of the test we just launched, until it finishes. - # Then check the result of the test. - # - # If `inputs.is_long_test` is `true`, the timeout is 5 days, otherwise it's 3 hours. - test-result: - name: Run ${{ inputs.test_id }} test - # We run exactly one of without-cached-state or with-cached-state, and we always skip the other one. 
- needs: [ launch-with-cached-state, launch-without-cached-state ] - # If the previous job fails, we also want to run and fail this job, - # so that the branch protection rule fails in Mergify and GitHub. - if: ${{ !cancelled() }} - timeout-minutes: ${{ inputs.is_long_test && 7200 || 180 }} - runs-on: zfnd-runners - permissions: - contents: 'read' - id-token: 'write' - steps: - - uses: actions/checkout@v4.0.0 - with: - persist-credentials: false - fetch-depth: '2' - - - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v4 - with: - short-length: 7 - - # Install our SSH secret - - name: Install private SSH key - uses: shimataro/ssh-key-action@v2.5.1 - with: - key: ${{ secrets.GCP_SSH_PRIVATE_KEY }} - name: google_compute_engine - known_hosts: unnecessary - - - name: Generate public SSH key - run: | - sudo apt-get update && sudo apt-get -qq install -y --no-install-recommends openssh-client - ssh-keygen -y -f ~/.ssh/google_compute_engine > ~/.ssh/google_compute_engine.pub - - # Setup gcloud CLI - - name: Authenticate to Google Cloud - id: auth - uses: google-github-actions/auth@v1.1.1 - with: - retries: '3' - workload_identity_provider: '${{ vars.GCP_WIF }}' - service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}' - - - name: Set up Cloud SDK - uses: google-github-actions/setup-gcloud@v1.1.1 - - # Show all the logs since the container launched, - # following until we see zebrad startup messages. - # - # This check limits the number of log lines, so tests running on the wrong network don't - # run until the job timeout. If Zebra does a complete recompile, there are a few hundred log - # lines before the startup logs. So that's what we use here. - # - # The log pipeline ignores the exit status of `docker logs`. - # It also ignores the expected 'broken pipe' error from `tee`, - # which happens when `grep` finds a matching output and moves on to the next job. - # - # Errors in the tests are caught by the final test status job. 
- - name: Check startup logs for ${{ inputs.test_id }} - run: | - gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ - --zone ${{ vars.GCP_ZONE }} \ - --ssh-flag="-o ServerAliveInterval=5" \ - --ssh-flag="-o ConnectionAttempts=20" \ - --ssh-flag="-o ConnectTimeout=5" \ - --command \ - "\ - sudo docker logs \ - --tail all \ - --follow \ - ${{ inputs.test_id }} | \ - head -700 | \ - tee --output-error=exit /dev/stderr | \ - grep --max-count=1 --extended-regexp --color=always \ - -e 'Zcash network: ${{ inputs.network }}' \ - " - - # Check that the container executed at least 1 Rust test harness test, and that all tests passed. - # Then wait for the container to finish, and exit with the test's exit status. - # Also shows all the test logs. - # - # If the container has already finished, `docker wait` should return its status. - # But sometimes this doesn't work, so we use `docker inspect` as a fallback. - # - # `docker wait` prints the container exit status as a string, but we need to exit the `ssh` command - # with that status. - # (`docker wait` can also wait for multiple containers, but we only ever wait for a single container.) 
- - name: Result of ${{ inputs.test_id }} test - run: | - gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ - --zone ${{ vars.GCP_ZONE }} \ - --ssh-flag="-o ServerAliveInterval=5" \ - --ssh-flag="-o ConnectionAttempts=20" \ - --ssh-flag="-o ConnectTimeout=5" \ - --command=' \ - set -e; - set -o pipefail; - trap '' PIPE; - - sudo docker logs \ - --tail all \ - --follow \ - ${{ inputs.test_id }} | \ - tee --output-error=exit /dev/stderr | \ - grep --max-count=1 --extended-regexp --color=always \ - "test result: .*ok.* [1-9][0-9]* passed.*finished in"; \ - - EXIT_STATUS=$( \ - sudo docker wait ${{ inputs.test_id }} || \ - sudo docker inspect --format "{{.State.ExitCode}}" ${{ inputs.test_id }} || \ - echo "missing container, or missing exit status for container" \ - ); \ - - echo "sudo docker exit status: $EXIT_STATUS"; \ - exit "$EXIT_STATUS" \ - ' - - # create a state image from the instance's state disk, if requested by the caller - create-state-image: - name: Create ${{ inputs.test_id }} cached state image - runs-on: ubuntu-latest - needs: [ test-result, launch-with-cached-state ] - # We run exactly one of without-cached-state or with-cached-state, and we always skip the other one. - # Normally, if a job is skipped, all the jobs that depend on it are also skipped. - # So we need to override the default success() check to make this job run. - if: ${{ !cancelled() && !failure() && (inputs.saves_to_disk || inputs.force_save_to_disk) }} - permissions: - contents: 'read' - id-token: 'write' - steps: - - uses: actions/checkout@v4.0.0 - with: - persist-credentials: false - fetch-depth: '2' - - uses: r7kamura/rust-problem-matchers@v1.4.0 - - - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v4 - with: - short-length: 7 - - # Performs formatting on disk name components. 
- # - # Disk images in GCP are required to be in lowercase, but the blockchain network - # uses sentence case, so we need to downcase ${{ inputs.network }}. - # - # Disk image names in GCP are limited to 63 characters, so we need to limit - # branch names to 12 characters. - # - # Passes ${{ inputs.network }} to subsequent steps using $NETWORK env variable. - # Passes ${{ env.GITHUB_REF_SLUG_URL }} to subsequent steps using $SHORT_GITHUB_REF env variable. - - name: Format network name and branch name for disks - run: | - NETWORK_CAPS="${{ inputs.network }}" - echo "NETWORK=${NETWORK_CAPS,,}" >> "$GITHUB_ENV" - LONG_GITHUB_REF="${{ env.GITHUB_REF_SLUG_URL }}" - echo "SHORT_GITHUB_REF=${LONG_GITHUB_REF:0:12}" >> "$GITHUB_ENV" - - # Install our SSH secret - - name: Install private SSH key - uses: shimataro/ssh-key-action@v2.5.1 - with: - key: ${{ secrets.GCP_SSH_PRIVATE_KEY }} - name: google_compute_engine - known_hosts: unnecessary - - - name: Generate public SSH key - run: | - sudo apt-get update && sudo apt-get -qq install -y --no-install-recommends openssh-client - ssh-keygen -y -f ~/.ssh/google_compute_engine > ~/.ssh/google_compute_engine.pub - - # Setup gcloud CLI - - name: Authenticate to Google Cloud - id: auth - uses: google-github-actions/auth@v1.1.1 - with: - workload_identity_provider: '${{ vars.GCP_WIF }}' - service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}' - - - name: Set up Cloud SDK - uses: google-github-actions/setup-gcloud@v1.1.1 - - # Get the state version from the local constants.rs file to be used in the image creation, - # as the state version is part of the disk image name. 
- # - # Passes the state version to subsequent steps using $STATE_VERSION env variable - - name: Get state version from constants.rs - run: | - LOCAL_STATE_VERSION=$(grep -oE "DATABASE_FORMAT_VERSION: .* [0-9]+" $GITHUB_WORKSPACE/zebra-state/src/constants.rs | grep -oE "[0-9]+" | tail -n1) - echo "STATE_VERSION: $LOCAL_STATE_VERSION" - - echo "STATE_VERSION=$LOCAL_STATE_VERSION" >> "$GITHUB_ENV" - - # Sets the $UPDATE_SUFFIX env var to "-u" if updating a previous cached state, - # and the empty string otherwise. - # - # Also sets a unique date and time suffix $TIME_SUFFIX. - - name: Set update and time suffixes - run: | - UPDATE_SUFFIX="" - - if [[ "${{ inputs.needs_zebra_state }}" == "true" ]] && [[ "${{ inputs.app_name }}" == "zebrad" ]]; then - UPDATE_SUFFIX="-u" - fi - - # TODO: find a better logic for the lwd-full-sync case - if [[ "${{ inputs.needs_lwd_state }}" == "true" ]] && [[ "${{ inputs.app_name }}" == "lightwalletd" ]] && [[ "${{ inputs.test_id }}" != 'lwd-full-sync' ]]; then - UPDATE_SUFFIX="-u" - fi - - # We're going to delete old images after a few days, so we only need the time here - TIME_SUFFIX=$(date '+%H%M%S' --utc) - - echo "UPDATE_SUFFIX=$UPDATE_SUFFIX" >> "$GITHUB_ENV" - echo "TIME_SUFFIX=$TIME_SUFFIX" >> "$GITHUB_ENV" - - # Get the full initial and running database versions from the test logs. - # These versions are used as part of the disk description and labels. - # - # If these versions are missing from the logs, the job fails. - # - # Typically, the database versions are around line 20 in the logs.. - # But we check the first 1000 log lines, just in case the test harness recompiles all the - # dependencies before running the test. (This can happen if the cache is invalid.) - # - # Passes the versions to subsequent steps using the $INITIAL_DISK_DB_VERSION, - # $RUNNING_DB_VERSION, and $DB_VERSION_SUMMARY env variables. 
- - name: Get database versions from logs - run: | - INITIAL_DISK_DB_VERSION="" - RUNNING_DB_VERSION="" - DB_VERSION_SUMMARY="" - - DOCKER_LOGS=$( \ - gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ - --zone ${{ vars.GCP_ZONE }} \ - --ssh-flag="-o ServerAliveInterval=5" \ - --ssh-flag="-o ConnectionAttempts=20" \ - --ssh-flag="-o ConnectTimeout=5" \ - --command=" \ - sudo docker logs ${{ inputs.test_id }} | head -1000 \ - ") - - # either a semantic version or "creating new database" - INITIAL_DISK_DB_VERSION=$( \ - echo "$DOCKER_LOGS" | \ - grep --extended-regexp --only-matching 'initial disk state version: [0-9a-z\.]+' | \ - grep --extended-regexp --only-matching '[0-9a-z\.]+' | \ - tail -1 || \ - [[ $? == 1 ]] \ - ) - - if [[ -z "$INITIAL_DISK_DB_VERSION" ]]; then - echo "Checked logs:" - echo "" - echo "$DOCKER_LOGS" - echo "" - echo "Missing initial disk database version in logs: $INITIAL_DISK_DB_VERSION" - # Fail the tests, because Zebra didn't log the initial disk database version, - # or the regex in this step is wrong. - false - fi - - if [[ "$INITIAL_DISK_DB_VERSION" = "creating.new.database" ]]; then - INITIAL_DISK_DB_VERSION="new" - else - INITIAL_DISK_DB_VERSION="v${INITIAL_DISK_DB_VERSION//./-}" - fi - - echo "Found initial disk database version in logs: $INITIAL_DISK_DB_VERSION" - echo "INITIAL_DISK_DB_VERSION=$INITIAL_DISK_DB_VERSION" >> "$GITHUB_ENV" - - RUNNING_DB_VERSION=$( \ - echo "$DOCKER_LOGS" | \ - grep --extended-regexp --only-matching 'running state version: [0-9\.]+' | \ - grep --extended-regexp --only-matching '[0-9\.]+' | \ - tail -1 || \ - [[ $? == 1 ]] \ - ) - - if [[ -z "$RUNNING_DB_VERSION" ]]; then - echo "Checked logs:" - echo "" - echo "$DOCKER_LOGS" - echo "" - echo "Missing running database version in logs: $RUNNING_DB_VERSION" - # Fail the tests, because Zebra didn't log the running database version, - # or the regex in this step is wrong. 
- false - fi - - RUNNING_DB_VERSION="v${RUNNING_DB_VERSION//./-}" - echo "Found running database version in logs: $RUNNING_DB_VERSION" - echo "RUNNING_DB_VERSION=$RUNNING_DB_VERSION" >> "$GITHUB_ENV" - - if [[ "$INITIAL_DISK_DB_VERSION" = "$RUNNING_DB_VERSION" ]]; then - DB_VERSION_SUMMARY="$RUNNING_DB_VERSION" - elif [[ "$INITIAL_DISK_DB_VERSION" = "new" ]]; then - DB_VERSION_SUMMARY="$RUNNING_DB_VERSION in new database" - else - DB_VERSION_SUMMARY="$INITIAL_DISK_DB_VERSION changing to $RUNNING_DB_VERSION" - fi - - echo "Summarised database versions from logs: $DB_VERSION_SUMMARY" - echo "DB_VERSION_SUMMARY=$DB_VERSION_SUMMARY" >> "$GITHUB_ENV" - - # Get the sync height from the test logs, which is later used as part of the - # disk description and labels. - # - # The regex used to grep the sync height is provided by ${{ inputs.height_grep_text }}, - # this allows to dynamically change the height as needed by different situations or - # based on the logs output from different tests. - # - # If the sync height is missing from the logs, the job fails. - # - # Passes the sync height to subsequent steps using the $SYNC_HEIGHT env variable. - - name: Get sync height from logs - run: | - SYNC_HEIGHT="" - - DOCKER_LOGS=$( \ - gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ - --zone ${{ vars.GCP_ZONE }} \ - --ssh-flag="-o ServerAliveInterval=5" \ - --ssh-flag="-o ConnectionAttempts=20" \ - --ssh-flag="-o ConnectTimeout=5" \ - --command=" \ - sudo docker logs ${{ inputs.test_id }} --tail 200 \ - ") - - SYNC_HEIGHT=$( \ - echo "$DOCKER_LOGS" | \ - grep --extended-regexp --only-matching '${{ inputs.height_grep_text }}[0-9]+' | \ - grep --extended-regexp --only-matching '[0-9]+' | \ - tail -1 || \ - [[ $? 
== 1 ]] \ - ) - - if [[ -z "$SYNC_HEIGHT" ]]; then - echo "Checked logs:" - echo "" - echo "$DOCKER_LOGS" - echo "" - echo "Missing sync height in logs: $SYNC_HEIGHT" - # Fail the tests, because Zebra and lightwalletd didn't log their sync heights, - # or the CI workflow sync height regex is wrong. - false - fi - - echo "Found sync height in logs: $SYNC_HEIGHT" - echo "SYNC_HEIGHT=$SYNC_HEIGHT" >> "$GITHUB_ENV" - - # Get the original cached state height from google cloud. - # - # If the height is missing from the image labels, uses zero instead. - # - # TODO: fail the job if needs_zebra_state but the height is missing - # we can make this change after all the old images have been deleted, this should happen around 15 September 2022 - # we'll also need to do a manual checkpoint rebuild before opening the PR for this change - # - # Passes the original height to subsequent steps using $ORIGINAL_HEIGHT env variable. - - name: Get original cached state height from google cloud - run: | - ORIGINAL_HEIGHT="0" - ORIGINAL_DISK_NAME="${{ format('{0}', needs.launch-with-cached-state.outputs.cached_disk_name) }}" - - if [[ -n "$ORIGINAL_DISK_NAME" ]]; then - ORIGINAL_HEIGHT=$(gcloud compute images list --filter="status=READY AND name=$ORIGINAL_DISK_NAME" --format="value(labels.height)") - ORIGINAL_HEIGHT=${ORIGINAL_HEIGHT:-0} - echo "$ORIGINAL_DISK_NAME height: $ORIGINAL_HEIGHT" - else - ORIGINAL_DISK_NAME="new-disk" - echo "newly created disk, original height set to 0" - fi - - echo "ORIGINAL_HEIGHT=$ORIGINAL_HEIGHT" >> "$GITHUB_ENV" - echo "ORIGINAL_DISK_NAME=$ORIGINAL_DISK_NAME" >> "$GITHUB_ENV" - - # Create an image from the state disk, which will be used for any tests that start - # after it is created. These tests can be in the same workflow, or in a different PR. - # - # Using the newest image makes future jobs faster, because it is closer to the chain tip. 
- # - # Skips creating updated images if the original image is less than $CACHED_STATE_UPDATE_LIMIT behind the current tip. - # Full sync images are always created. - # - # The image can contain: - # - Zebra cached state, or - # - Zebra + lightwalletd cached state. - # Which cached state is being saved to the disk is defined by ${{ inputs.disk_prefix }}. - # - # Google Cloud doesn't have an atomic image replacement operation. - # We don't want to delete and re-create the image, because that causes a ~5 minute - # window where might be no recent image. So we add an extra image with a unique name, - # which gets selected because it has a later creation time. - # This also simplifies the process of deleting old images, - # because we don't have to worry about accidentally deleting all the images. - # - # The timestamp makes images from the same commit unique, - # as long as they don't finish in the same second. - # (This is unlikely, because each image created by a workflow has a different name.) - # - # The image name must also be 63 characters or less. - # - # Force the image creation (--force) as the disk is still attached even though is not being - # used by the container. 
- - name: Create image from state disk - run: | - MINIMUM_UPDATE_HEIGHT=$((ORIGINAL_HEIGHT+CACHED_STATE_UPDATE_LIMIT)) - if [[ -z "$UPDATE_SUFFIX" ]] || [[ "$SYNC_HEIGHT" -gt "$MINIMUM_UPDATE_HEIGHT" ]] || [[ "${{ inputs.force_save_to_disk }}" == "true" ]]; then - gcloud compute images create \ - "${{ inputs.disk_prefix }}-${SHORT_GITHUB_REF}-${{ env.GITHUB_SHA_SHORT }}-v${{ env.STATE_VERSION }}-${NETWORK}-${{ inputs.disk_suffix }}${UPDATE_SUFFIX}-${TIME_SUFFIX}" \ - --force \ - --source-disk=${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }} \ - --source-disk-zone=${{ vars.GCP_ZONE }} \ - --storage-location=us \ - --description="Created from commit ${{ env.GITHUB_SHA_SHORT }} with height ${{ env.SYNC_HEIGHT }} and database format ${{ env.DB_VERSION_SUMMARY }}" \ - --labels="height=${{ env.SYNC_HEIGHT }},purpose=${{ inputs.disk_prefix }},commit=${{ env.GITHUB_SHA_SHORT }},state-version=${{ env.STATE_VERSION }},state-running-version=${RUNNING_DB_VERSION},initial-state-disk-version=${INITIAL_DISK_DB_VERSION},network=${NETWORK},target-height-kind=${{ inputs.disk_suffix }},update-flag=${UPDATE_SUFFIX},force-save=${{ inputs.force_save_to_disk }},updated-from-height=${ORIGINAL_HEIGHT},updated-from-disk=${ORIGINAL_DISK_NAME},test-id=${{ inputs.test_id }},app-name=${{ inputs.app_name }}" - else - echo "Skipped cached state update because the new sync height $SYNC_HEIGHT was less than $CACHED_STATE_UPDATE_LIMIT blocks above the original height $ORIGINAL_HEIGHT of $ORIGINAL_DISK_NAME" - fi - - # delete the Google Cloud instance for this test - delete-instance: - name: Delete ${{ inputs.test_id }} instance - runs-on: ubuntu-latest - needs: [ create-state-image ] - # If a disk generation step timeouts (+6 hours) the previous job (creating the image) will be skipped. - # Even if the instance continues running, no image will be created, so it's better to delete it. 
- if: always() - continue-on-error: true - permissions: - contents: 'read' - id-token: 'write' - steps: - - uses: actions/checkout@v4.0.0 - with: - persist-credentials: false - fetch-depth: '2' - - uses: r7kamura/rust-problem-matchers@v1.4.0 - - - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v4 - with: - short-length: 7 - - # Setup gcloud CLI - - name: Authenticate to Google Cloud - id: auth - uses: google-github-actions/auth@v1.1.1 - with: - workload_identity_provider: '${{ vars.GCP_WIF }}' - service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}' - - - name: Set up Cloud SDK - uses: google-github-actions/setup-gcloud@v1.1.1 - - # Deletes the instances that has been recently deployed in the actual commit after all - # previous jobs have run, no matter the outcome of the job. - - name: Delete test instance - continue-on-error: true - run: | - INSTANCE=$(gcloud compute instances list --filter=${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} --format='value(NAME)') - if [ -z "${INSTANCE}" ]; then - echo "No instance to delete" - else - gcloud compute instances delete "${INSTANCE}" --zone "${{ vars.GCP_ZONE }}" --delete-disks all --quiet - fi From 964650fdb618a5af48e7d4868b4555977b202386 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Mon, 25 Sep 2023 21:50:30 +0100 Subject: [PATCH 07/59] fix(workflows): use larger runners --- .github/workflows/ci-coverage.yml | 2 +- .github/workflows/sub-build-docker-image.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci-coverage.yml b/.github/workflows/ci-coverage.yml index eecc3e8aff2..6dccedd08ea 100644 --- a/.github/workflows/ci-coverage.yml +++ b/.github/workflows/ci-coverage.yml @@ -54,7 +54,7 @@ jobs: # - stable builds (typically 30-50 minutes), and # - parameter downloads (an extra 90 minutes, but only when the cache expires) timeout-minutes: 140 - runs-on: ubuntu-latest + runs-on: ubuntu-latest-m steps: - uses: 
actions/checkout@v4.0.0 diff --git a/.github/workflows/sub-build-docker-image.yml b/.github/workflows/sub-build-docker-image.yml index 4c91e1965c4..35718ee4462 100644 --- a/.github/workflows/sub-build-docker-image.yml +++ b/.github/workflows/sub-build-docker-image.yml @@ -53,7 +53,7 @@ jobs: build: name: Build images timeout-minutes: 210 - runs-on: ubuntu-latest + runs-on: ubuntu-latest-l outputs: image_digest: ${{ steps.docker_build.outputs.digest }} image_name: ${{ fromJSON(steps.docker_build.outputs.metadata)['image.name'] }} From 23b76ff1cfb46161e47c39544c8f0592d42ceca5 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Tue, 26 Sep 2023 23:10:24 +0100 Subject: [PATCH 08/59] fix(workflow): remove code already in docker unit-test --- .github/workflows/ci-unit-tests-os.yml | 203 ------------------------- 1 file changed, 203 deletions(-) diff --git a/.github/workflows/ci-unit-tests-os.yml b/.github/workflows/ci-unit-tests-os.yml index fdc81a8cb1b..64c1ee66738 100644 --- a/.github/workflows/ci-unit-tests-os.yml +++ b/.github/workflows/ci-unit-tests-os.yml @@ -325,206 +325,3 @@ jobs: else echo "No unused dependencies found." fi - - ######################################## - ### Build and test Zebra with Docker ### - ######################################## - # Build the docker image used by the tests. - # - # The default network in the Zebra config in the image is mainnet, unless a manually triggered - # workflow or repository variable is configured differently. Testnet jobs change that config to - # testnet when running the image. 
- build: - name: Build CI Docker - uses: ./.github/workflows/sub-build-docker-image.yml - with: - dockerfile_path: ./docker/Dockerfile - dockerfile_target: tests - image_name: ${{ vars.CI_IMAGE_NAME }} - no_cache: ${{ inputs.no_cache || false }} - rust_backtrace: full - rust_lib_backtrace: full - rust_log: info - - # zebrad tests without cached state - - # TODO: make the non-cached-state tests use: - # network: ${{ inputs.network || vars.ZCASH_NETWORK }} - - # Run all the zebra tests, including tests that are ignored by default. - # Skips tests that need a cached state disk or a lightwalletd binary. - # - # - We run all the tests behind the `getblocktemplate-rpcs` feature as a separated step. - # - We activate the gRPC feature to avoid recompiling `zebrad`, but we don't actually run any gRPC tests. - # - # TODO: turn this test and the getblocktemplate test into a matrix, so the jobs use exactly the same diagnostics settings - test-all: - name: Test all - runs-on: ubuntu-latest - needs: build - steps: - - uses: r7kamura/rust-problem-matchers@v1.4.0 - - - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v4 - with: - short-length: 7 - - # Run unit, basic acceptance tests, and ignored tests, only showing command output if the test fails. - # - # If some tests hang, add "-- --nocapture" for just that test, or for all the tests. - - name: Run zebrad tests - run: | - docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} - docker run -e NETWORK --name zebrad-tests --tty ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} cargo test --locked --release --features "lightwalletd-grpc-tests" --workspace -- --include-ignored - env: - NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} - - # zebrad tests without cached state with `getblocktemplate-rpcs` feature - # - # Same as above but we run all the tests behind the `getblocktemplate-rpcs` feature. 
- test-all-getblocktemplate-rpcs: - name: Test all with getblocktemplate-rpcs feature - runs-on: ubuntu-latest - needs: build - steps: - - uses: r7kamura/rust-problem-matchers@v1.4.0 - - - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v4 - with: - short-length: 7 - - - name: Run zebrad tests - run: | - docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} - docker run -e NETWORK --name zebrad-tests --tty -e ${{ inputs.network || vars.ZCASH_NETWORK }} ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} cargo test --locked --release --features "lightwalletd-grpc-tests getblocktemplate-rpcs" --workspace -- --include-ignored - env: - NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} - - # Run state tests with fake activation heights. - # - # This test changes zebra-chain's activation heights, - # which can recompile all the Zebra crates, - # so we want its build products to be cached separately. - # - # Also, we don't want to accidentally use the fake heights in other tests. - # - # (The gRPC feature is a zebrad feature, so it isn't needed here.) 
- test-fake-activation-heights: - name: Test with fake activation heights - runs-on: ubuntu-latest - needs: build - steps: - - uses: r7kamura/rust-problem-matchers@v1.4.0 - - - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v4 - with: - short-length: 7 - - - name: Run tests with fake activation heights - run: | - docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} - docker run -e NETWORK -e TEST_FAKE_ACTIVATION_HEIGHTS --name zebrad-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} cargo test --locked --release --package zebra-state --lib -- --nocapture --include-ignored with_fake_activation_heights - env: - TEST_FAKE_ACTIVATION_HEIGHTS: '1' - NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} - - # Test that Zebra syncs and checkpoints a few thousand blocks from an empty state. - # - # (We activate the gRPC feature to avoid recompiling `zebrad`, but we don't actually run any gRPC tests.) - test-empty-sync: - name: Test checkpoint sync from empty state - runs-on: ubuntu-latest - needs: build - steps: - - uses: r7kamura/rust-problem-matchers@v1.4.0 - - - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v4 - with: - short-length: 7 - - - name: Run zebrad large sync tests - run: | - docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} - docker run -e NETWORK --name zebrad-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} cargo test --locked --release --features lightwalletd-grpc-tests --package zebrad --test acceptance -- --nocapture --include-ignored sync_large_checkpoints_ - env: - NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} - - # Test launching lightwalletd with an empty lightwalletd and Zebra state. - # - # (We activate the gRPC feature to avoid recompiling `zebrad`, but we don't actually run any gRPC tests.) 
- test-lightwalletd-integration: - name: Test integration with lightwalletd - runs-on: ubuntu-latest - needs: build - steps: - - uses: r7kamura/rust-problem-matchers@v1.4.0 - - - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v4 - with: - short-length: 7 - - - name: Run tests with empty lightwalletd launch - run: | - docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} - docker run -e NETWORK -e ZEBRA_TEST_LIGHTWALLETD --name lightwalletd-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} cargo test --locked --release --features lightwalletd-grpc-tests --package zebrad --test acceptance -- --nocapture --include-ignored lightwalletd_integration - env: - ZEBRA_TEST_LIGHTWALLETD: '1' - NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} - - # Test that Zebra works using the default config with the latest Zebra version - test-configuration-file: - name: Test Zebra default Docker config file - timeout-minutes: 15 - runs-on: ubuntu-latest - needs: build - steps: - - uses: r7kamura/rust-problem-matchers@v1.4.0 - - - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v4 - with: - short-length: 7 - - - name: Run tests using the default config - run: | - set -ex - docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} - docker run -e NETWORK --detach --name default-conf-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} zebrad start - EXIT_STATUS=$(docker logs --tail all --follow default-conf-tests 2>&1 | grep -q --extended-regexp --max-count=1 -e 'estimated progress to chain tip.*BeforeOverwinter'; echo $?; ) - docker stop default-conf-tests - docker logs default-conf-tests - exit "$EXIT_STATUS" - env: - NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} - - # Test that Zebra works using the $ZEBRA_CONF_PATH config - test-zebra-conf-path: - name: Test Zebra custom Docker config 
file - timeout-minutes: 15 - runs-on: ubuntu-latest - needs: build - steps: - - uses: r7kamura/rust-problem-matchers@v1.4.0 - - - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v4 - with: - short-length: 7 - - - name: Run tests using the $ZEBRA_CONF_PATH - run: | - set -ex - docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} - docker run -e NETWORK --detach -e ZEBRA_CONF_PATH --name variable-conf-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} -c $ZEBRA_CONF_PATH start - EXIT_STATUS=$(docker logs --tail all --follow variable-conf-tests 2>&1 | grep -q --extended-regexp --max-count=1 -e 'v1.0.0-rc.2.toml'; echo $?; ) - docker stop variable-conf-tests - docker logs variable-conf-tests - exit "$EXIT_STATUS" - env: - ZEBRA_CONF_PATH: 'zebrad/tests/common/configs/v1.0.0-rc.2.toml' - NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} From d5ca031e90d78a44a3bf9312f8ad4e9cceecbd67 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Tue, 26 Sep 2023 23:27:39 +0100 Subject: [PATCH 09/59] fix(unit-tests): start zebra the right way --- .github/workflows/ci-unit-tests-docker.yml | 48 +++++++++------------- 1 file changed, 20 insertions(+), 28 deletions(-) diff --git a/.github/workflows/ci-unit-tests-docker.yml b/.github/workflows/ci-unit-tests-docker.yml index 567fb6fe98c..3d7ab384e1e 100644 --- a/.github/workflows/ci-unit-tests-docker.yml +++ b/.github/workflows/ci-unit-tests-docker.yml @@ -221,14 +221,30 @@ jobs: with: short-length: 7 + # Make sure Zebra can sync at least one full checkpoint on mainnet - name: Run tests using the default config run: | set -ex docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} docker run -e NETWORK --detach --name default-conf-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} zebrad start - EXIT_STATUS=$(docker logs --tail all --follow default-conf-tests 2>&1 
| grep -q --extended-regexp --max-count=1 -e 'estimated progress to chain tip.*BeforeOverwinter'; echo $?; ) + # show the logs, even if the job times out + docker logs --tail all --follow default-conf-tests | \ + tee --output-error=exit /dev/stderr | \ + grep --max-count=1 --extended-regexp --color=always \ + 'net.*=.*Main.*estimated progress to chain tip.*BeforeOverwinter' docker stop default-conf-tests + # get the exit status from docker + EXIT_STATUS=$( \ + docker wait default-conf-tests || \ + docker inspect --format "{{.State.ExitCode}}" default-conf-tests || \ + echo "missing container, or missing exit status for container" \ + ) docker logs default-conf-tests + echo "docker exit status: $EXIT_STATUS" + if [[ "$EXIT_STATUS" = "137" ]]; then + echo "ignoring expected signal status" + exit 0 + fi exit "$EXIT_STATUS" env: NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} @@ -260,32 +276,6 @@ jobs: ZEBRA_CONF_PATH: 'zebrad/tests/common/configs/v1.0.0-rc.2.toml' NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} - # Make sure Zebra can sync at least one full checkpoint on mainnet - - name: Run tests using the default config - run: | - set -ex - docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} - docker run --detach --name default-conf-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} - # show the logs, even if the job times out - docker logs --tail all --follow default-conf-tests | \ - tee --output-error=exit /dev/stderr | \ - grep --max-count=1 --extended-regexp --color=always \ - 'net.*=.*Main.*estimated progress to chain tip.*BeforeOverwinter' - docker stop default-conf-tests - # get the exit status from docker - EXIT_STATUS=$( \ - docker wait default-conf-tests || \ - docker inspect --format "{{.State.ExitCode}}" default-conf-tests || \ - echo "missing container, or missing exit status for container" \ - ) - docker logs default-conf-tests - echo "docker exit status: 
$EXIT_STATUS" - if [[ "$EXIT_STATUS" = "137" ]]; then - echo "ignoring expected signal status" - exit 0 - fi - exit "$EXIT_STATUS" - # Test reconfiguring the docker image for testnet. test-configuration-file-testnet: name: Test testnet Zebra CD Docker config file @@ -305,7 +295,7 @@ jobs: run: | set -ex docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} - docker run --env "NETWORK=Testnet" --detach --name testnet-conf-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} + docker run -e NETWORK --detach --name testnet-conf-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} zebrad start # show the logs, even if the job times out docker logs --tail all --follow testnet-conf-tests | \ tee --output-error=exit /dev/stderr | \ @@ -326,6 +316,8 @@ jobs: exit 0 fi exit "$EXIT_STATUS" + env: + NETWORK: Testnet failure-issue: name: Open or update issues for main branch failures From 572b5938afef6fed492886b3685d0dbea3c0a693 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Tue, 26 Sep 2023 23:29:37 +0100 Subject: [PATCH 10/59] fix: typo in patch name --- .../{ci-unites-tests-os.patch.yml => ci-unit-tests-os.patch.yml} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename .github/workflows/{ci-unites-tests-os.patch.yml => ci-unit-tests-os.patch.yml} (100%) diff --git a/.github/workflows/ci-unites-tests-os.patch.yml b/.github/workflows/ci-unit-tests-os.patch.yml similarity index 100% rename from .github/workflows/ci-unites-tests-os.patch.yml rename to .github/workflows/ci-unit-tests-os.patch.yml From b17c109a3588f659804d874b63d8d08389b0750a Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Tue, 26 Sep 2023 23:48:59 +0100 Subject: [PATCH 11/59] chore: move job to logical order --- .github/workflows/ci-unit-tests-docker.yml | 56 +++++++++++----------- 1 file changed, 28 insertions(+), 28 deletions(-) diff --git a/.github/workflows/ci-unit-tests-docker.yml 
b/.github/workflows/ci-unit-tests-docker.yml index 3d7ab384e1e..c42dd1e7a7b 100644 --- a/.github/workflows/ci-unit-tests-docker.yml +++ b/.github/workflows/ci-unit-tests-docker.yml @@ -249,36 +249,9 @@ jobs: env: NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} - # Test that Zebra works using the $ZEBRA_CONF_PATH config - test-zebra-conf-path: - name: Test Zebra custom Docker config file - timeout-minutes: 15 - runs-on: ubuntu-latest - needs: build - steps: - - uses: r7kamura/rust-problem-matchers@v1.4.0 - - - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v4 - with: - short-length: 7 - - - name: Run tests using the $ZEBRA_CONF_PATH - run: | - set -ex - docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} - docker run -e NETWORK --detach -e ZEBRA_CONF_PATH --name variable-conf-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} -c $ZEBRA_CONF_PATH start - EXIT_STATUS=$(docker logs --tail all --follow variable-conf-tests 2>&1 | grep -q --extended-regexp --max-count=1 -e 'v1.0.0-rc.2.toml'; echo $?; ) - docker stop variable-conf-tests - docker logs variable-conf-tests - exit "$EXIT_STATUS" - env: - ZEBRA_CONF_PATH: 'zebrad/tests/common/configs/v1.0.0-rc.2.toml' - NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} - # Test reconfiguring the docker image for testnet. 
test-configuration-file-testnet: - name: Test testnet Zebra CD Docker config file + name: Test Zebra testnet Docker config file timeout-minutes: 15 runs-on: ubuntu-latest needs: build @@ -319,6 +292,33 @@ jobs: env: NETWORK: Testnet + # Test that Zebra works using the $ZEBRA_CONF_PATH config + test-zebra-conf-path: + name: Test Zebra custom Docker config file + timeout-minutes: 15 + runs-on: ubuntu-latest + needs: build + steps: + - uses: r7kamura/rust-problem-matchers@v1.4.0 + + - name: Inject slug/short variables + uses: rlespinasse/github-slug-action@v4 + with: + short-length: 7 + + - name: Run tests using the $ZEBRA_CONF_PATH + run: | + set -ex + docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} + docker run -e NETWORK --detach -e ZEBRA_CONF_PATH --name variable-conf-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} -c $ZEBRA_CONF_PATH start + EXIT_STATUS=$(docker logs --tail all --follow variable-conf-tests 2>&1 | grep -q --extended-regexp --max-count=1 -e 'v1.0.0-rc.2.toml'; echo $?; ) + docker stop variable-conf-tests + docker logs variable-conf-tests + exit "$EXIT_STATUS" + env: + ZEBRA_CONF_PATH: 'zebrad/tests/common/configs/v1.0.0-rc.2.toml' + NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} + failure-issue: name: Open or update issues for main branch failures # When a new test is added to this workflow, add it to this list. 
From 43f089ddde61ea8c36b940f9499449c8434a03b5 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Thu, 5 Oct 2023 14:41:06 +0100 Subject: [PATCH 12/59] imp(workflows): use better name for gcp tests --- .../ci-integration-tests-gcp.patch.yml | 2 +- .../workflows/ci-integration-tests-gcp.yml | 32 +++++++++---------- .../workflows/ci-unit-tests-docker.patch.yml | 2 +- .github/workflows/ci-unit-tests-docker.yml | 4 +-- ...l => sub-deploy-integration-tests-gcp.yml} | 0 5 files changed, 20 insertions(+), 20 deletions(-) rename .github/workflows/{cd-deploy-integration-tests-gcp.yml => sub-deploy-integration-tests-gcp.yml} (100%) diff --git a/.github/workflows/ci-integration-tests-gcp.patch.yml b/.github/workflows/ci-integration-tests-gcp.patch.yml index 0c76bad9382..9178709dadc 100644 --- a/.github/workflows/ci-integration-tests-gcp.patch.yml +++ b/.github/workflows/ci-integration-tests-gcp.patch.yml @@ -21,7 +21,7 @@ on: - 'docker/**' - '.dockerignore' - '.github/workflows/ci-unit-tests-docker.yml' - - '.github/workflows/cd-deploy-integration-tests-gcp.yml' + - '.github/workflows/sub-deploy-integration-tests-gcp.yml' - '.github/workflows/manual-find-cached-disks.yml' - '.github/workflows/sub-build-docker-image.yml' diff --git a/.github/workflows/ci-integration-tests-gcp.yml b/.github/workflows/ci-integration-tests-gcp.yml index bcd9d4324da..c3f262f7c7a 100644 --- a/.github/workflows/ci-integration-tests-gcp.yml +++ b/.github/workflows/ci-integration-tests-gcp.yml @@ -63,7 +63,7 @@ on: # workflow definitions - 'docker/**' - '.github/workflows/ci-integration-tests-gcp.yml' - - '.github/workflows/cd-deploy-integration-tests-gcp.yml' + - '.github/workflows/sub-deploy-integration-tests-gcp.yml' - '.github/workflows/sub-build-docker-image.yml' - '.github/workflows/manual-find-cached-disks.yml' @@ -87,7 +87,7 @@ on: - 'docker/**' - '.dockerignore' - '.github/workflows/ci-integration-tests-gcp.yml' - - '.github/workflows/cd-deploy-integration-tests-gcp.yml' + - 
'.github/workflows/sub-deploy-integration-tests-gcp.yml' - '.github/workflows/manual-find-cached-disks.yml' - '.github/workflows/sub-build-docker-image.yml' @@ -147,7 +147,7 @@ jobs: regenerate-stateful-disks: name: Zebra checkpoint needs: [ build, get-available-disks ] - uses: ./.github/workflows/cd-deploy-integration-tests-gcp.yml + uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml if: ${{ !fromJSON(needs.get-available-disks.outputs.zebra_checkpoint_disk) || github.event.inputs.regenerate-disks == 'true' }} with: app_name: zebrad @@ -176,7 +176,7 @@ jobs: test-stateful-sync: name: Zebra checkpoint update needs: [ regenerate-stateful-disks, get-available-disks ] - uses: ./.github/workflows/cd-deploy-integration-tests-gcp.yml + uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_checkpoint_disk) || needs.regenerate-stateful-disks.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: zebrad @@ -204,7 +204,7 @@ jobs: test-full-sync: name: Zebra tip needs: [ build, get-available-disks ] - uses: ./.github/workflows/cd-deploy-integration-tests-gcp.yml + uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml if: ${{ github.event_name == 'schedule' || !fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || (github.event.inputs.run-full-sync == 'true' && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet') }} with: app_name: zebrad @@ -247,7 +247,7 @@ jobs: test-update-sync: name: Zebra tip update needs: [ test-full-sync, get-available-disks ] - uses: ./.github/workflows/cd-deploy-integration-tests-gcp.yml + uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && 
github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: zebrad @@ -280,7 +280,7 @@ jobs: generate-checkpoints-mainnet: name: Generate checkpoints mainnet needs: [ test-full-sync, get-available-disks ] - uses: ./.github/workflows/cd-deploy-integration-tests-gcp.yml + uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: zebrad @@ -315,7 +315,7 @@ jobs: test-full-sync-testnet: name: Zebra tip on testnet needs: [ build, get-available-disks-testnet ] - uses: ./.github/workflows/cd-deploy-integration-tests-gcp.yml + uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml if: ${{ (github.event_name == 'schedule' && vars.SCHEDULE_TESTNET_FULL_SYNC == 'true') || !fromJSON(needs.get-available-disks-testnet.outputs.zebra_tip_disk) || (github.event.inputs.run-full-sync == 'true' && (inputs.network || vars.ZCASH_NETWORK) == 'Testnet') }} with: app_name: zebrad @@ -361,7 +361,7 @@ jobs: generate-checkpoints-testnet: name: Generate checkpoints testnet needs: [ test-full-sync-testnet, get-available-disks-testnet ] - uses: ./.github/workflows/cd-deploy-integration-tests-gcp.yml + uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks-testnet.outputs.zebra_tip_disk) || needs.test-full-sync-testnet.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: zebrad @@ -394,7 +394,7 @@ jobs: lightwalletd-full-sync: name: lightwalletd tip needs: [ test-full-sync, 
get-available-disks ] - uses: ./.github/workflows/cd-deploy-integration-tests-gcp.yml + uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml # Currently the lightwalletd tests only work on Mainnet if: ${{ !cancelled() && !failure() && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet' && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && (github.event_name == 'schedule' || !fromJSON(needs.get-available-disks.outputs.lwd_tip_disk) || github.event.inputs.run-lwd-sync == 'true' ) }} with: @@ -434,7 +434,7 @@ jobs: lightwalletd-update-sync: name: lightwalletd tip update needs: [ lightwalletd-full-sync, get-available-disks ] - uses: ./.github/workflows/cd-deploy-integration-tests-gcp.yml + uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet' && (fromJSON(needs.get-available-disks.outputs.lwd_tip_disk) || needs.lightwalletd-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: lightwalletd @@ -466,7 +466,7 @@ jobs: lightwalletd-rpc-test: name: Zebra tip JSON-RPC needs: [ test-full-sync, get-available-disks ] - uses: ./.github/workflows/cd-deploy-integration-tests-gcp.yml + uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet' && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: lightwalletd @@ -491,7 +491,7 @@ jobs: lightwalletd-transactions-test: name: lightwalletd tip send needs: [ lightwalletd-full-sync, get-available-disks ] - uses: 
./.github/workflows/cd-deploy-integration-tests-gcp.yml + uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet' && (fromJSON(needs.get-available-disks.outputs.lwd_tip_disk) || needs.lightwalletd-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: lightwalletd @@ -518,7 +518,7 @@ jobs: lightwalletd-grpc-test: name: lightwalletd GRPC tests needs: [ lightwalletd-full-sync, get-available-disks ] - uses: ./.github/workflows/cd-deploy-integration-tests-gcp.yml + uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (inputs.network || vars.ZCASH_NETWORK) == 'Mainnet' && (fromJSON(needs.get-available-disks.outputs.lwd_tip_disk) || needs.lightwalletd-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: lightwalletd @@ -549,7 +549,7 @@ jobs: get-block-template-test: name: get block template needs: [ test-full-sync, get-available-disks ] - uses: ./.github/workflows/cd-deploy-integration-tests-gcp.yml + uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && (fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: zebrad @@ -575,7 +575,7 @@ jobs: submit-block-test: name: submit block needs: [ test-full-sync, get-available-disks ] - uses: ./.github/workflows/cd-deploy-integration-tests-gcp.yml + uses: ./.github/workflows/sub-deploy-integration-tests-gcp.yml if: ${{ !cancelled() && !failure() && 
(fromJSON(needs.get-available-disks.outputs.zebra_tip_disk) || needs.test-full-sync.result == 'success') && github.event.inputs.regenerate-disks != 'true' && github.event.inputs.run-full-sync != 'true' && github.event.inputs.run-lwd-sync != 'true' }} with: app_name: zebrad diff --git a/.github/workflows/ci-unit-tests-docker.patch.yml b/.github/workflows/ci-unit-tests-docker.patch.yml index 66922ab68ed..e3be49f6b6d 100644 --- a/.github/workflows/ci-unit-tests-docker.patch.yml +++ b/.github/workflows/ci-unit-tests-docker.patch.yml @@ -21,7 +21,7 @@ on: - 'docker/**' - '.dockerignore' - '.github/workflows/ci-unit-tests-docker.yml' - - '.github/workflows/cd-deploy-integration-tests-gcp.yml' + - '.github/workflows/sub-deploy-integration-tests-gcp.yml' - '.github/workflows/manual-find-cached-disks.yml' - '.github/workflows/sub-build-docker-image.yml' diff --git a/.github/workflows/ci-unit-tests-docker.yml b/.github/workflows/ci-unit-tests-docker.yml index c42dd1e7a7b..f4b3b09fc90 100644 --- a/.github/workflows/ci-unit-tests-docker.yml +++ b/.github/workflows/ci-unit-tests-docker.yml @@ -37,7 +37,7 @@ on: # workflow definitions - 'docker/**' - '.github/workflows/ci-unit-tests-docker.yml' - - '.github/workflows/cd-deploy-integration-tests-gcp.yml' + - '.github/workflows/sub-deploy-integration-tests-gcp.yml' - '.github/workflows/sub-build-docker-image.yml' - '.github/workflows/manual-find-cached-disks.yml' @@ -61,7 +61,7 @@ on: - 'docker/**' - '.dockerignore' - '.github/workflows/ci-unit-tests-docker.yml' - - '.github/workflows/cd-deploy-integration-tests-gcp.yml' + - '.github/workflows/sub-deploy-integration-tests-gcp.yml' - '.github/workflows/manual-find-cached-disks.yml' - '.github/workflows/sub-build-docker-image.yml' diff --git a/.github/workflows/cd-deploy-integration-tests-gcp.yml b/.github/workflows/sub-deploy-integration-tests-gcp.yml similarity index 100% rename from .github/workflows/cd-deploy-integration-tests-gcp.yml rename to 
.github/workflows/sub-deploy-integration-tests-gcp.yml From 3dce3690937f827805af8e099880a14e783e56d6 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Thu, 5 Oct 2023 17:11:24 +0100 Subject: [PATCH 13/59] ref(workflow): use a single job to run GCP tests --- .../sub-deploy-integration-tests-gcp.yml | 261 +++--------------- 1 file changed, 32 insertions(+), 229 deletions(-) diff --git a/.github/workflows/sub-deploy-integration-tests-gcp.yml b/.github/workflows/sub-deploy-integration-tests-gcp.yml index 8d47b183586..f8a19870dcd 100644 --- a/.github/workflows/sub-deploy-integration-tests-gcp.yml +++ b/.github/workflows/sub-deploy-integration-tests-gcp.yml @@ -104,127 +104,14 @@ env: CACHED_STATE_UPDATE_LIMIT: 576 jobs: - # set up and launch the test, if it doesn't use any cached state - # each test runs one of the *-with/without-cached-state job series, and skips the other - launch-without-cached-state: - name: Launch ${{ inputs.test_id }} test - if: ${{ !inputs.needs_zebra_state }} - runs-on: zfnd-runners - permissions: - contents: 'read' - id-token: 'write' - steps: - - uses: actions/checkout@v4.0.0 - with: - persist-credentials: false - fetch-depth: '2' - - uses: r7kamura/rust-problem-matchers@v1.4.0 - - - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v4 - with: - short-length: 7 - - # Makes the Zcash network name lowercase. - # - # Labels in GCP are required to be in lowercase, but the blockchain network - # uses sentence case, so we need to downcase ${{ inputs.network }}. - # - # Passes ${{ inputs.network }} to subsequent steps using $NETWORK env variable. 
- - name: Downcase network name for labels - run: | - NETWORK_CAPS="${{ inputs.network }}" - echo "NETWORK=${NETWORK_CAPS,,}" >> "$GITHUB_ENV" - - # Install our SSH secret - - name: Install private SSH key - uses: shimataro/ssh-key-action@v2.5.1 - with: - key: ${{ secrets.GCP_SSH_PRIVATE_KEY }} - name: google_compute_engine - known_hosts: unnecessary - - - name: Generate public SSH key - run: | - sudo apt-get update && sudo apt-get -qq install -y --no-install-recommends openssh-client - ssh-keygen -y -f ~/.ssh/google_compute_engine > ~/.ssh/google_compute_engine.pub - - # Setup gcloud CLI - - name: Authenticate to Google Cloud - id: auth - uses: google-github-actions/auth@v1.1.1 - with: - retries: '3' - workload_identity_provider: '${{ vars.GCP_WIF }}' - service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}' - - - name: Set up Cloud SDK - uses: google-github-actions/setup-gcloud@v1.1.1 - - # Create a Compute Engine virtual machine - - name: Create ${{ inputs.test_id }} GCP compute instance - id: create-instance - run: | - gcloud compute instances create-with-container "${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}" \ - --boot-disk-size 300GB \ - --boot-disk-type pd-ssd \ - --image-project=cos-cloud \ - --image-family=cos-stable \ - --create-disk=name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",device-name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",size=300GB,type=pd-ssd \ - --container-image=gcr.io/google-containers/busybox \ - --machine-type ${{ vars.GCP_LARGE_MACHINE }} \ - --network-interface=subnet=${{ vars.GCP_SUBNETWORK }} \ - --scopes cloud-platform \ - --metadata=google-monitoring-enabled=TRUE,google-logging-enabled=TRUE \ - --metadata-from-file=startup-script=.github/workflows/scripts/gcp-vm-startup-script.sh \ - --labels=app=${{ inputs.app_name }},environment=test,network=${NETWORK},github_ref=${{ env.GITHUB_REF_SLUG_URL }},test=${{ inputs.test_id }} \ - --tags ${{ inputs.app_name }} \ - --zone ${{ 
vars.GCP_ZONE }} - sleep 60 - - # Create a docker volume with the new disk we just created. - # - # SSH into the just created VM, and create a docker volume with the newly created disk. - - name: Create ${{ inputs.test_id }} Docker volume - run: | - gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ - --zone ${{ vars.GCP_ZONE }} \ - --ssh-flag="-o ServerAliveInterval=5" \ - --ssh-flag="-o ConnectionAttempts=20" \ - --ssh-flag="-o ConnectTimeout=5" \ - --command \ - "\ - sudo mkfs.ext4 -v /dev/sdb \ - && \ - sudo docker volume create --driver local --opt type=ext4 --opt device=/dev/sdb \ - ${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }} \ - " - - # Launch the test without any cached state - - name: Launch ${{ inputs.test_id }} test - run: | - gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ - --zone ${{ vars.GCP_ZONE }} \ - --ssh-flag="-o ServerAliveInterval=5" \ - --ssh-flag="-o ConnectionAttempts=20" \ - --ssh-flag="-o ConnectTimeout=5" \ - --command \ - "\ - sudo docker run \ - --name ${{ inputs.test_id }} \ - --tty \ - --detach \ - ${{ inputs.test_variables }} \ - --mount type=volume,src=${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }},dst=${{ inputs.root_state_path }}/${{ inputs.zebra_state_dir }} \ - ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} \ - " - - # set up and launch the test, if it uses cached state - # each test runs one of the *-with/without-cached-state job series, and skips the other - launch-with-cached-state: - name: Launch ${{ inputs.test_id }} test - if: ${{ inputs.needs_zebra_state }} + # Show all the test logs, then follow the logs of the test we just launched, until it finishes. + # Then check the result of the test. + # + # If `inputs.is_long_test` is `true`, the timeout is 5 days, otherwise it's 3 hours. 
+ test-result: + name: Run ${{ inputs.test_id }} test runs-on: zfnd-runners + timeout-minutes: ${{ inputs.is_long_test && 7200 || 180 }} outputs: cached_disk_name: ${{ steps.get-disk-name.outputs.cached_disk_name }} permissions: @@ -295,6 +182,7 @@ jobs: # TODO: move this script into a file, and call it from manual-find-cached-disks.yml as well. - name: Find ${{ inputs.test_id }} cached state disk id: get-disk-name + if: ${{ inputs.needs_zebra_state || inputs.needs_lwd_state }} run: | LOCAL_STATE_VERSION=$(grep -oE "DATABASE_FORMAT_VERSION: .* [0-9]+" "$GITHUB_WORKSPACE/zebra-state/src/constants.rs" | grep -oE "[0-9]+" | tail -n1) echo "STATE_VERSION: $LOCAL_STATE_VERSION" @@ -361,18 +249,21 @@ jobs: echo "STATE_VERSION=$LOCAL_STATE_VERSION" >> "$GITHUB_ENV" echo "CACHED_DISK_NAME=$CACHED_DISK_NAME" >> "$GITHUB_ENV" + echo "DISK_OPTION=image=$CACHED_DISK_NAME," >> "$GITHUB_ENV" # Create a Compute Engine virtual machine and attach a cached state disk using the # $CACHED_DISK_NAME variable as the source image to populate the disk cached state + # if the test needs it. 
- name: Create ${{ inputs.test_id }} GCP compute instance id: create-instance run: | + DISK_OPTION=${{ steps.get-disk-name.outputs.disk_option }} gcloud compute instances create-with-container "${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}" \ --boot-disk-size 300GB \ --boot-disk-type pd-ssd \ --image-project=cos-cloud \ --image-family=cos-stable \ - --create-disk=image=${{ env.CACHED_DISK_NAME }},name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",device-name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",size=300GB,type=pd-ssd \ + --create-disk=${DISK_OPTION}name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",device-name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",size=300GB,type=pd-ssd \ --container-image=gcr.io/google-containers/busybox \ --machine-type ${{ vars.GCP_LARGE_MACHINE }} \ --network-interface=subnet=${{ vars.GCP_SUBNETWORK }} \ @@ -382,9 +273,8 @@ jobs: --labels=app=${{ inputs.app_name }},environment=test,network=${NETWORK},github_ref=${{ env.GITHUB_REF_SLUG_URL }},test=${{ inputs.test_id }} \ --tags ${{ inputs.app_name }} \ --zone ${{ vars.GCP_ZONE }} - sleep 60 - # Create a docker volume with the selected cached state. + # Create a docker volume with the new disk we just created or the cached state. # # SSH into the just created VM and create a docker volume with the recently attached disk. # (The cached state and disk are usually the same size, @@ -398,53 +288,16 @@ jobs: --ssh-flag="-o ConnectTimeout=5" \ --command \ "\ + sudo mkfs.ext4 -v /dev/sdb \ + && \ sudo docker volume create --driver local --opt type=ext4 --opt device=/dev/sdb \ ${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }} \ " - # Launch the test with the previously created Zebra-only cached state. - # Each test runs one of the "Launch test" steps, and skips the other. 
- # - # SSH into the just created VM, and create a Docker container to run the incoming test - # from ${{ inputs.test_id }}, then mount the sudo docker volume created in the previous job. - # - # The disk mounted in the VM is located at /dev/sdb, we mount the root `/` of this disk to the docker - # container in one path: - # - /var/cache/zebrad-cache -> ${{ inputs.root_state_path }}/${{ inputs.zebra_state_dir }} -> $ZEBRA_CACHED_STATE_DIR + # Launch the test with the previously created disk or cached state. # - # This path must match the variable used by the tests in Rust, which are also set in - # `ci-unit-tests-docker.yml` to be able to run this tests. - # - # Although we're mounting the disk root, Zebra will only respect the values from - # $ZEBRA_CACHED_STATE_DIR. The inputs like ${{ inputs.zebra_state_dir }} are only used - # to match that variable paths. - - name: Launch ${{ inputs.test_id }} test - # This step only runs for tests that just read or write a Zebra state. - # - # lightwalletd-full-sync reads Zebra and writes lwd, so it is handled specially. - # TODO: we should find a better logic for this use cases - if: ${{ (inputs.needs_zebra_state && !inputs.needs_lwd_state) && inputs.test_id != 'lwd-full-sync' }} - run: | - gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ - --zone ${{ vars.GCP_ZONE }} \ - --ssh-flag="-o ServerAliveInterval=5" \ - --ssh-flag="-o ConnectionAttempts=20" \ - --ssh-flag="-o ConnectTimeout=5" \ - --command \ - "\ - # Wait for the disk to be attached - while [[ ! 
-e /dev/sdb ]]; do sleep 1; done && \ - sudo docker run \ - --name ${{ inputs.test_id }} \ - --tty \ - --detach \ - ${{ inputs.test_variables }} \ - --mount type=volume,src=${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }},dst=${{ inputs.root_state_path }}/${{ inputs.zebra_state_dir }} \ - ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} \ - " - - # Launch the test with the previously created Lightwalletd and Zebra cached state. - # Each test runs one of the "Launch test" steps, and skips the other. + # This step uses a $MOUNT_FLAGS variable to mount the disk to the docker container. + # If the test needs Lightwalletd state, we add the Lightwalletd state mount to the $MOUNT_FLAGS variable. # # SSH into the just created VM, and create a Docker container to run the incoming test # from ${{ inputs.test_id }}, then mount the sudo docker volume created in the previous job. @@ -454,7 +307,7 @@ jobs: # considerations. # # The disk mounted in the VM is located at /dev/sdb, we mount the root `/` of this disk to the docker - # container in two different paths: + # container, and might have two different paths (if lightwalletd state is needed): # - /var/cache/zebrad-cache -> ${{ inputs.root_state_path }}/${{ inputs.zebra_state_dir }} -> $ZEBRA_CACHED_STATE_DIR # - /var/cache/lwd-cache -> ${{ inputs.root_state_path }}/${{ inputs.lwd_state_dir }} -> $LIGHTWALLETD_DATA_DIR # @@ -462,19 +315,22 @@ jobs: # subdirectories for their data. (But Zebra, lightwalletd, and the test harness must not # delete the whole cache directory.) # - # This paths must match the variables used by the tests in Rust, which are also set in + # This path must match the variable used by the tests in Rust, which are also set in # `ci-unit-tests-docker.yml` to be able to run this tests. 
# # Although we're mounting the disk root to both directories, Zebra and Lightwalletd # will only respect the values from $ZEBRA_CACHED_STATE_DIR and $LIGHTWALLETD_DATA_DIR, - # the inputs like ${{ inputs.lwd_state_dir }} are only used to match those variables paths. + # the inputs like ${{ inputs.zebra_state_dir }} and ${{ inputs.lwd_state_dir }} + # are only used to match those variables paths. - name: Launch ${{ inputs.test_id }} test - # This step only runs for tests that read or write Lightwalletd and Zebra states. - # - # lightwalletd-full-sync reads Zebra and writes lwd, so it is handled specially. - # TODO: we should find a better logic for this use cases - if: ${{ (inputs.needs_zebra_state && inputs.needs_lwd_state) || inputs.test_id == 'lwd-full-sync' }} run: | + MOUNT_FLAGS="--mount type=volume,src=${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }},dst=${{ inputs.root_state_path }}/${{ inputs.zebra_state_dir }}" + + # Check if we need to mount for Lightwalletd state + if [[ "${{ inputs.needs_lwd_state }}" == "true" || "${{ inputs.test_id }}" == "lwd-full-sync" ]]; then + MOUNT_FLAGS="$MOUNT_FLAGS --mount type=volume,src=${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }},dst=${{ inputs.root_state_path }}/${{ inputs.lwd_state_dir }}" + fi + gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ --zone ${{ vars.GCP_ZONE }} \ --ssh-flag="-o ServerAliveInterval=5" \ @@ -489,63 +345,10 @@ jobs: --tty \ --detach \ ${{ inputs.test_variables }} \ - --mount type=volume,src=${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }},dst=${{ inputs.root_state_path }}/${{ inputs.zebra_state_dir }} \ - --mount type=volume,src=${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }},dst=${{ inputs.root_state_path }}/${{ inputs.lwd_state_dir }} \ + $MOUNT_FLAGS \ ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} \ " - # Show all the test logs, then follow the logs of the test we just launched, until it finishes. 
- # Then check the result of the test. - # - # If `inputs.is_long_test` is `true`, the timeout is 5 days, otherwise it's 3 hours. - test-result: - name: Run ${{ inputs.test_id }} test - # We run exactly one of without-cached-state or with-cached-state, and we always skip the other one. - needs: [ launch-with-cached-state, launch-without-cached-state ] - # If the previous job fails, we also want to run and fail this job, - # so that the branch protection rule fails in Mergify and GitHub. - if: ${{ !cancelled() }} - timeout-minutes: ${{ inputs.is_long_test && 7200 || 180 }} - runs-on: zfnd-runners - permissions: - contents: 'read' - id-token: 'write' - steps: - - uses: actions/checkout@v4.0.0 - with: - persist-credentials: false - fetch-depth: '2' - - - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v4 - with: - short-length: 7 - - # Install our SSH secret - - name: Install private SSH key - uses: shimataro/ssh-key-action@v2.5.1 - with: - key: ${{ secrets.GCP_SSH_PRIVATE_KEY }} - name: google_compute_engine - known_hosts: unnecessary - - - name: Generate public SSH key - run: | - sudo apt-get update && sudo apt-get -qq install -y --no-install-recommends openssh-client - ssh-keygen -y -f ~/.ssh/google_compute_engine > ~/.ssh/google_compute_engine.pub - - # Setup gcloud CLI - - name: Authenticate to Google Cloud - id: auth - uses: google-github-actions/auth@v1.1.1 - with: - retries: '3' - workload_identity_provider: '${{ vars.GCP_WIF }}' - service_account: '${{ vars.GCP_DEPLOYMENTS_SA }}' - - - name: Set up Cloud SDK - uses: google-github-actions/setup-gcloud@v1.1.1 - # Show all the logs since the container launched, # following until we see zebrad startup messages. 
# @@ -621,7 +424,7 @@ jobs: create-state-image: name: Create ${{ inputs.test_id }} cached state image runs-on: ubuntu-latest - needs: [ test-result, launch-with-cached-state ] + needs: [ test-result ] # We run exactly one of without-cached-state or with-cached-state, and we always skip the other one. # Normally, if a job is skipped, all the jobs that depend on it are also skipped. # So we need to override the default success() check to make this job run. @@ -864,7 +667,7 @@ jobs: - name: Get original cached state height from google cloud run: | ORIGINAL_HEIGHT="0" - ORIGINAL_DISK_NAME="${{ format('{0}', needs.launch-with-cached-state.outputs.cached_disk_name) }}" + ORIGINAL_DISK_NAME="${{ format('{0}', needs.test-result.outputs.cached_disk_name) }}" if [[ -n "$ORIGINAL_DISK_NAME" ]]; then ORIGINAL_HEIGHT=$(gcloud compute images list --filter="status=READY AND name=$ORIGINAL_DISK_NAME" --format="value(labels.height)") From f4358b273e4efd8e67191f1fff42608d8bbc3d2e Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Thu, 5 Oct 2023 18:36:03 +0100 Subject: [PATCH 14/59] fix(ci): do not format existing cached states if required --- .github/workflows/sub-deploy-integration-tests-gcp.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/workflows/sub-deploy-integration-tests-gcp.yml b/.github/workflows/sub-deploy-integration-tests-gcp.yml index f8a19870dcd..83588ed0731 100644 --- a/.github/workflows/sub-deploy-integration-tests-gcp.yml +++ b/.github/workflows/sub-deploy-integration-tests-gcp.yml @@ -288,7 +288,10 @@ jobs: --ssh-flag="-o ConnectTimeout=5" \ --command \ "\ - sudo mkfs.ext4 -v /dev/sdb \ + # Check if we need to format the disk if no state is needed + if [[ "${{ inputs.needs_zebra_state }}" == "false" || "${{ inputs.needs_lwd_state }}" == "false" ]]; then + sudo mkfs.ext4 -v /dev/sdb + fi \ && \ sudo docker volume create --driver local --opt type=ext4 --opt device=/dev/sdb \ ${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }} \ 
From 46d8bb445230bcf3c8620a441bde438b9750b897 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Thu, 5 Oct 2023 22:20:25 +0100 Subject: [PATCH 15/59] test: wait for the instance to be fully ready --- .github/workflows/sub-deploy-integration-tests-gcp.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/sub-deploy-integration-tests-gcp.yml b/.github/workflows/sub-deploy-integration-tests-gcp.yml index 83588ed0731..d48fddcade7 100644 --- a/.github/workflows/sub-deploy-integration-tests-gcp.yml +++ b/.github/workflows/sub-deploy-integration-tests-gcp.yml @@ -273,6 +273,8 @@ jobs: --labels=app=${{ inputs.app_name }},environment=test,network=${NETWORK},github_ref=${{ env.GITHUB_REF_SLUG_URL }},test=${{ inputs.test_id }} \ --tags ${{ inputs.app_name }} \ --zone ${{ vars.GCP_ZONE }} + # Wait for the instance to be created and fully booted + sleep 90 # Create a docker volume with the new disk we just created or the cached state. # From 6f9d0f1f7be75bbfb83ab1c1bf768105ca2d396a Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Thu, 5 Oct 2023 22:32:36 +0100 Subject: [PATCH 16/59] fix(ci): use correct logic before formatting --- .github/workflows/sub-deploy-integration-tests-gcp.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/sub-deploy-integration-tests-gcp.yml b/.github/workflows/sub-deploy-integration-tests-gcp.yml index d48fddcade7..4683640775f 100644 --- a/.github/workflows/sub-deploy-integration-tests-gcp.yml +++ b/.github/workflows/sub-deploy-integration-tests-gcp.yml @@ -290,8 +290,9 @@ jobs: --ssh-flag="-o ConnectTimeout=5" \ --command \ "\ + set -ex; \ # Check if we need to format the disk if no state is needed - if [[ "${{ inputs.needs_zebra_state }}" == "false" || "${{ inputs.needs_lwd_state }}" == "false" ]]; then + if [[ ${{ inputs.needs_zebra_state }} == false || ${{ inputs.needs_lwd_state }} == false ]]; then sudo mkfs.ext4 -v /dev/sdb fi \ && \ From 6fe96d81dfb2f1f8c3e71c299854d8d69077820b 
Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Thu, 5 Oct 2023 23:06:00 +0100 Subject: [PATCH 17/59] fix: use correct condition --- .github/workflows/sub-deploy-integration-tests-gcp.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/sub-deploy-integration-tests-gcp.yml b/.github/workflows/sub-deploy-integration-tests-gcp.yml index 4683640775f..c2eac40e4f7 100644 --- a/.github/workflows/sub-deploy-integration-tests-gcp.yml +++ b/.github/workflows/sub-deploy-integration-tests-gcp.yml @@ -292,7 +292,7 @@ jobs: "\ set -ex; \ # Check if we need to format the disk if no state is needed - if [[ ${{ inputs.needs_zebra_state }} == false || ${{ inputs.needs_lwd_state }} == false ]]; then + if [[ ${{ inputs.needs_zebra_state }} == false && ${{ inputs.needs_lwd_state }} == false ]]; then sudo mkfs.ext4 -v /dev/sdb fi \ && \ From bd32761ae6f80e62e178c222ae6c8c4430f09f8c Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Thu, 5 Oct 2023 23:49:46 +0100 Subject: [PATCH 18/59] fix: get more information --- .github/workflows/sub-deploy-integration-tests-gcp.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/sub-deploy-integration-tests-gcp.yml b/.github/workflows/sub-deploy-integration-tests-gcp.yml index c2eac40e4f7..f28cd48fc19 100644 --- a/.github/workflows/sub-deploy-integration-tests-gcp.yml +++ b/.github/workflows/sub-deploy-integration-tests-gcp.yml @@ -292,7 +292,7 @@ jobs: "\ set -ex; \ # Check if we need to format the disk if no state is needed - if [[ ${{ inputs.needs_zebra_state }} == false && ${{ inputs.needs_lwd_state }} == false ]]; then + if [[ "${{ inputs.needs_zebra_state }}" == "false" && "${{ inputs.needs_lwd_state }}" == "false" ]]; then sudo mkfs.ext4 -v /dev/sdb fi \ && \ @@ -344,6 +344,7 @@ jobs: --ssh-flag="-o ConnectTimeout=5" \ --command \ "\ + set -ex; \ # Wait for the disk to be attached while [[ ! 
-e /dev/sdb ]]; do sleep 1; done && \ sudo docker run \ @@ -351,7 +352,7 @@ jobs: --tty \ --detach \ ${{ inputs.test_variables }} \ - $MOUNT_FLAGS \ + ${MOUNT_FLAGS} \ ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} \ " From 6cecf7a8dbd385bf9b26aee94d8d251bc41206c8 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Fri, 6 Oct 2023 08:26:01 +0100 Subject: [PATCH 19/59] fix(ci): use better shell handling and upgrade OS --- .../sub-deploy-integration-tests-gcp.yml | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/.github/workflows/sub-deploy-integration-tests-gcp.yml b/.github/workflows/sub-deploy-integration-tests-gcp.yml index f28cd48fc19..27bee656851 100644 --- a/.github/workflows/sub-deploy-integration-tests-gcp.yml +++ b/.github/workflows/sub-deploy-integration-tests-gcp.yml @@ -256,15 +256,15 @@ jobs: # if the test needs it. - name: Create ${{ inputs.test_id }} GCP compute instance id: create-instance + shell: /usr/bin/bash -ex {0} run: | - DISK_OPTION=${{ steps.get-disk-name.outputs.disk_option }} gcloud compute instances create-with-container "${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}" \ --boot-disk-size 300GB \ --boot-disk-type pd-ssd \ --image-project=cos-cloud \ --image-family=cos-stable \ - --create-disk=${DISK_OPTION}name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",device-name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",size=300GB,type=pd-ssd \ - --container-image=gcr.io/google-containers/busybox \ + --create-disk=${{ env.DISK_OPTION }}name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",device-name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",size=300GB,type=pd-ssd \ + --container-image=gcr.io/google-containers/ubuntu \ --machine-type ${{ vars.GCP_LARGE_MACHINE }} \ --network-interface=subnet=${{ vars.GCP_SUBNETWORK }} \ --scopes cloud-platform \ @@ -273,8 +273,8 @@ jobs: --labels=app=${{ inputs.app_name 
}},environment=test,network=${NETWORK},github_ref=${{ env.GITHUB_REF_SLUG_URL }},test=${{ inputs.test_id }} \ --tags ${{ inputs.app_name }} \ --zone ${{ vars.GCP_ZONE }} - # Wait for the instance to be created and fully booted - sleep 90 + env: + DISK_OPTION: ${{ steps.get-disk-name.outputs.disk_option }} # Create a docker volume with the new disk we just created or the cached state. # @@ -282,6 +282,7 @@ jobs: # (The cached state and disk are usually the same size, # but the cached state can be smaller if we just increased the disk size.) - name: Create ${{ inputs.test_id }} Docker volume + shell: /usr/bin/bash -ex {0} run: | gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ --zone ${{ vars.GCP_ZONE }} \ @@ -290,7 +291,6 @@ jobs: --ssh-flag="-o ConnectTimeout=5" \ --command \ "\ - set -ex; \ # Check if we need to format the disk if no state is needed if [[ "${{ inputs.needs_zebra_state }}" == "false" && "${{ inputs.needs_lwd_state }}" == "false" ]]; then sudo mkfs.ext4 -v /dev/sdb @@ -329,6 +329,7 @@ jobs: # the inputs like ${{ inputs.zebra_state_dir }} and ${{ inputs.lwd_state_dir }} # are only used to match those variables paths. - name: Launch ${{ inputs.test_id }} test + shell: /usr/bin/bash -ex {0} run: | MOUNT_FLAGS="--mount type=volume,src=${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }},dst=${{ inputs.root_state_path }}/${{ inputs.zebra_state_dir }}" @@ -344,7 +345,6 @@ jobs: --ssh-flag="-o ConnectTimeout=5" \ --command \ "\ - set -ex; \ # Wait for the disk to be attached while [[ ! -e /dev/sdb ]]; do sleep 1; done && \ sudo docker run \ @@ -398,6 +398,7 @@ jobs: # with that status. # (`docker wait` can also wait for multiple containers, but we only ever wait for a single container.) 
- name: Result of ${{ inputs.test_id }} test + shell: /usr/bin/bash -exo pipefail && trap '' PIPE && {0} run: | gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ --zone ${{ vars.GCP_ZONE }} \ @@ -405,10 +406,6 @@ jobs: --ssh-flag="-o ConnectionAttempts=20" \ --ssh-flag="-o ConnectTimeout=5" \ --command=' \ - set -e; - set -o pipefail; - trap '' PIPE; - sudo docker logs \ --tail all \ --follow \ From 15107e72c5ef54d285184eb76ed7aebf981f7490 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Fri, 6 Oct 2023 09:51:55 +0100 Subject: [PATCH 20/59] fix(ci): use better approach --- .github/workflows/sub-deploy-integration-tests-gcp.yml | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/.github/workflows/sub-deploy-integration-tests-gcp.yml b/.github/workflows/sub-deploy-integration-tests-gcp.yml index 27bee656851..65d68e71c02 100644 --- a/.github/workflows/sub-deploy-integration-tests-gcp.yml +++ b/.github/workflows/sub-deploy-integration-tests-gcp.yml @@ -291,6 +291,10 @@ jobs: --ssh-flag="-o ConnectTimeout=5" \ --command \ "\ + while [[ ! -e /dev/sdb ]] || sudo lsof /dev/sdb &>/dev/null do \ + echo 'Waiting for /dev/sdb to be free...'; \ + sleep 10; \ + done; \ # Check if we need to format the disk if no state is needed if [[ "${{ inputs.needs_zebra_state }}" == "false" && "${{ inputs.needs_lwd_state }}" == "false" ]]; then sudo mkfs.ext4 -v /dev/sdb @@ -345,8 +349,6 @@ jobs: --ssh-flag="-o ConnectTimeout=5" \ --command \ "\ - # Wait for the disk to be attached - while [[ ! -e /dev/sdb ]]; do sleep 1; done && \ sudo docker run \ --name ${{ inputs.test_id }} \ --tty \ @@ -398,7 +400,7 @@ jobs: # with that status. # (`docker wait` can also wait for multiple containers, but we only ever wait for a single container.) 
- name: Result of ${{ inputs.test_id }} test - shell: /usr/bin/bash -exo pipefail && trap '' PIPE && {0} + shell: /usr/bin/bash -exo pipefail {0} run: | gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ --zone ${{ vars.GCP_ZONE }} \ From 97a833e62341328dabeab4e54e943be9186586ae Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Fri, 6 Oct 2023 10:04:18 +0100 Subject: [PATCH 21/59] fix: `$DISK_OPTION` is not being correctly passed --- .github/workflows/sub-deploy-integration-tests-gcp.yml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.github/workflows/sub-deploy-integration-tests-gcp.yml b/.github/workflows/sub-deploy-integration-tests-gcp.yml index 65d68e71c02..9e8ed3b8238 100644 --- a/.github/workflows/sub-deploy-integration-tests-gcp.yml +++ b/.github/workflows/sub-deploy-integration-tests-gcp.yml @@ -263,7 +263,7 @@ jobs: --boot-disk-type pd-ssd \ --image-project=cos-cloud \ --image-family=cos-stable \ - --create-disk=${{ env.DISK_OPTION }}name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",device-name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",size=300GB,type=pd-ssd \ + --create-disk=${DISK_OPTION}name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",device-name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",size=300GB,type=pd-ssd \ --container-image=gcr.io/google-containers/ubuntu \ --machine-type ${{ vars.GCP_LARGE_MACHINE }} \ --network-interface=subnet=${{ vars.GCP_SUBNETWORK }} \ @@ -273,8 +273,6 @@ jobs: --labels=app=${{ inputs.app_name }},environment=test,network=${NETWORK},github_ref=${{ env.GITHUB_REF_SLUG_URL }},test=${{ inputs.test_id }} \ --tags ${{ inputs.app_name }} \ --zone ${{ vars.GCP_ZONE }} - env: - DISK_OPTION: ${{ steps.get-disk-name.outputs.disk_option }} # Create a docker volume with the new disk we just created or the cached state. 
# From 2cf694ff25eca616ec953602447c52f738822f03 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Fri, 6 Oct 2023 10:20:24 +0100 Subject: [PATCH 22/59] fix typo --- .github/workflows/sub-deploy-integration-tests-gcp.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/sub-deploy-integration-tests-gcp.yml b/.github/workflows/sub-deploy-integration-tests-gcp.yml index 9e8ed3b8238..a64fd18fcde 100644 --- a/.github/workflows/sub-deploy-integration-tests-gcp.yml +++ b/.github/workflows/sub-deploy-integration-tests-gcp.yml @@ -292,7 +292,7 @@ jobs: while [[ ! -e /dev/sdb ]] || sudo lsof /dev/sdb &>/dev/null do \ echo 'Waiting for /dev/sdb to be free...'; \ sleep 10; \ - done; \ + done \ # Check if we need to format the disk if no state is needed if [[ "${{ inputs.needs_zebra_state }}" == "false" && "${{ inputs.needs_lwd_state }}" == "false" ]]; then sudo mkfs.ext4 -v /dev/sdb From a57cbae0afccc9cdedbcdc19cb01757e8775f6fa Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Fri, 6 Oct 2023 10:30:51 +0100 Subject: [PATCH 23/59] fix: more typos --- .github/workflows/sub-deploy-integration-tests-gcp.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/sub-deploy-integration-tests-gcp.yml b/.github/workflows/sub-deploy-integration-tests-gcp.yml index a64fd18fcde..32e03576ab7 100644 --- a/.github/workflows/sub-deploy-integration-tests-gcp.yml +++ b/.github/workflows/sub-deploy-integration-tests-gcp.yml @@ -289,10 +289,10 @@ jobs: --ssh-flag="-o ConnectTimeout=5" \ --command \ "\ - while [[ ! -e /dev/sdb ]] || sudo lsof /dev/sdb &>/dev/null do \ + while [[ ! 
-e /dev/sdb ]] || sudo lsof /dev/sdb &>/dev/null; do \ echo 'Waiting for /dev/sdb to be free...'; \ sleep 10; \ - done \ + done; \ # Check if we need to format the disk if no state is needed if [[ "${{ inputs.needs_zebra_state }}" == "false" && "${{ inputs.needs_lwd_state }}" == "false" ]]; then sudo mkfs.ext4 -v /dev/sdb From 2fed739312cd821bf0cfef37fc2a31f3ea93db05 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Fri, 6 Oct 2023 10:32:42 +0100 Subject: [PATCH 24/59] fix: use busybox --- .github/workflows/sub-deploy-integration-tests-gcp.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/sub-deploy-integration-tests-gcp.yml b/.github/workflows/sub-deploy-integration-tests-gcp.yml index 32e03576ab7..b9aac11820e 100644 --- a/.github/workflows/sub-deploy-integration-tests-gcp.yml +++ b/.github/workflows/sub-deploy-integration-tests-gcp.yml @@ -264,7 +264,7 @@ jobs: --image-project=cos-cloud \ --image-family=cos-stable \ --create-disk=${DISK_OPTION}name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",device-name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",size=300GB,type=pd-ssd \ - --container-image=gcr.io/google-containers/ubuntu \ + --container-image=gcr.io/google-containers/busybox \ --machine-type ${{ vars.GCP_LARGE_MACHINE }} \ --network-interface=subnet=${{ vars.GCP_SUBNETWORK }} \ --scopes cloud-platform \ From be484823dfc32879d3ce447b93838792afb9137f Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Fri, 6 Oct 2023 11:37:03 +0100 Subject: [PATCH 25/59] fix: mount Docker volume at run and not before --- .../sub-deploy-integration-tests-gcp.yml | 21 ++++++------------- 1 file changed, 6 insertions(+), 15 deletions(-) diff --git a/.github/workflows/sub-deploy-integration-tests-gcp.yml b/.github/workflows/sub-deploy-integration-tests-gcp.yml index b9aac11820e..4984b877566 100644 --- a/.github/workflows/sub-deploy-integration-tests-gcp.yml +++ b/.github/workflows/sub-deploy-integration-tests-gcp.yml @@ 
-274,12 +274,9 @@ jobs: --tags ${{ inputs.app_name }} \ --zone ${{ vars.GCP_ZONE }} - # Create a docker volume with the new disk we just created or the cached state. - # - # SSH into the just created VM and create a docker volume with the recently attached disk. - # (The cached state and disk are usually the same size, - # but the cached state can be smaller if we just increased the disk size.) - - name: Create ${{ inputs.test_id }} Docker volume + # Format the mounted disk if the test doesn't use a cached state. + - name: Format ${{ inputs.test_id }} volume + if: ${{ !inputs.needs_zebra_state || !inputs.needs_lwd_state }} shell: /usr/bin/bash -ex {0} run: | gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ @@ -293,13 +290,7 @@ jobs: echo 'Waiting for /dev/sdb to be free...'; \ sleep 10; \ done; \ - # Check if we need to format the disk if no state is needed - if [[ "${{ inputs.needs_zebra_state }}" == "false" && "${{ inputs.needs_lwd_state }}" == "false" ]]; then - sudo mkfs.ext4 -v /dev/sdb - fi \ - && \ - sudo docker volume create --driver local --opt type=ext4 --opt device=/dev/sdb \ - ${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }} \ + sudo mkfs.ext4 -v /dev/sdb \ " # Launch the test with the previously created disk or cached state. 
@@ -333,11 +324,11 @@ jobs: - name: Launch ${{ inputs.test_id }} test shell: /usr/bin/bash -ex {0} run: | - MOUNT_FLAGS="--mount type=volume,src=${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }},dst=${{ inputs.root_state_path }}/${{ inputs.zebra_state_dir }}" + MOUNT_FLAGS="--mount type=volume,volume-driver=local,volume-opt=device=/dev/sdb,volume-opt=type=ext4,dst=${{ inputs.root_state_path }}/${{ inputs.zebra_state_dir }}" # Check if we need to mount for Lightwalletd state if [[ "${{ inputs.needs_lwd_state }}" == "true" || "${{ inputs.test_id }}" == "lwd-full-sync" ]]; then - MOUNT_FLAGS="$MOUNT_FLAGS --mount type=volume,src=${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }},dst=${{ inputs.root_state_path }}/${{ inputs.lwd_state_dir }}" + MOUNT_FLAGS="$MOUNT_FLAGS --mount type=volume,volume-driver=local,volume-opt=device=/dev/sdb,volume-opt=type=ext4,dst=${{ inputs.root_state_path }}/${{ inputs.lwd_state_dir }}" fi gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ From 82ea71faea82b79c088aa06f484a072f34327674 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Fri, 6 Oct 2023 12:23:41 +0100 Subject: [PATCH 26/59] fix: use correct condition and simpler while --- .github/workflows/sub-deploy-integration-tests-gcp.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/sub-deploy-integration-tests-gcp.yml b/.github/workflows/sub-deploy-integration-tests-gcp.yml index 4984b877566..70d869e26be 100644 --- a/.github/workflows/sub-deploy-integration-tests-gcp.yml +++ b/.github/workflows/sub-deploy-integration-tests-gcp.yml @@ -276,7 +276,7 @@ jobs: # Format the mounted disk if the test doesn't use a cached state. 
- name: Format ${{ inputs.test_id }} volume - if: ${{ !inputs.needs_zebra_state || !inputs.needs_lwd_state }} + if: ${{ !inputs.needs_zebra_state && !inputs.needs_lwd_state }} shell: /usr/bin/bash -ex {0} run: | gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ @@ -286,7 +286,7 @@ jobs: --ssh-flag="-o ConnectTimeout=5" \ --command \ "\ - while [[ ! -e /dev/sdb ]] || sudo lsof /dev/sdb &>/dev/null; do \ + while sudo lsof /dev/sdb; do \ echo 'Waiting for /dev/sdb to be free...'; \ sleep 10; \ done; \ From f6001961b2d9c48d5d8525683b9756f21cfc1263 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Wed, 11 Oct 2023 20:35:34 +0100 Subject: [PATCH 27/59] add: missing merge changes --- .github/workflows/ci-unit-tests-docker.yml | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/.github/workflows/ci-unit-tests-docker.yml b/.github/workflows/ci-unit-tests-docker.yml index 66d9e8f42d3..f9ee194f49b 100644 --- a/.github/workflows/ci-unit-tests-docker.yml +++ b/.github/workflows/ci-unit-tests-docker.yml @@ -209,7 +209,7 @@ jobs: # Test that Zebra works using the default config with the latest Zebra version. 
test-configuration-file: - name: Test Zebra CD Docker config file + name: Test Zebra default Docker config file timeout-minutes: 15 runs-on: ubuntu-latest needs: build @@ -221,13 +221,10 @@ jobs: with: short-length: 7 - # Make sure Zebra can sync at least one full checkpoint on mainnet - name: Run tests using the default config - shell: /usr/bin/bash -exo pipefail {0} run: | docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} docker run -e NETWORK --detach --name default-conf-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} zebrad start - # Use a subshell to handle the broken pipe error gracefully ( trap "" PIPE; @@ -255,11 +252,11 @@ jobs: echo "An error occurred while processing the logs."; exit 1; env: - NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} + NETWORK: Mainnet # Test reconfiguring the docker image for testnet. test-configuration-file-testnet: - name: Test testnet Zebra CD Docker config file + name: Test Zebra testnet Docker config file timeout-minutes: 15 runs-on: ubuntu-latest needs: build @@ -273,7 +270,6 @@ jobs: # Make sure Zebra can sync the genesis block on testnet - name: Run tests using a testnet config - shell: /usr/bin/bash -exo pipefail {0} run: | docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} docker run -e NETWORK --detach --name testnet-conf-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} zebrad start @@ -325,7 +321,7 @@ jobs: run: | set -ex docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} - docker run -e NETWORK --detach -e ZEBRA_CONF_PATH --name variable-conf-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} -c $ZEBRA_CONF_PATH start + docker run -e NETWORK --detach -e ZEBRA_CONF_PATH --name variable-conf-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} 
zebrad start EXIT_STATUS=$(docker logs --tail all --follow variable-conf-tests 2>&1 | grep -q --extended-regexp --max-count=1 -e 'v1.0.0-rc.2.toml'; echo $?; ) docker stop variable-conf-tests docker logs variable-conf-tests @@ -356,4 +352,3 @@ jobs: # If there is already an open issue with this label, any failures become comments on that issue. always-create-new-issue: false github-token: ${{ secrets.GITHUB_TOKEN }} - From 3db9e6c89733c5baf6c573a3d555a4f8c9178016 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Wed, 11 Oct 2023 20:37:08 +0100 Subject: [PATCH 28/59] chore: use better name for find-disks --- .github/workflows/ci-integration-tests-gcp.patch.yml | 2 +- .github/workflows/ci-integration-tests-gcp.yml | 12 ++++++------ .github/workflows/ci-unit-tests-docker.patch.yml | 2 +- .github/workflows/ci-unit-tests-docker.yml | 4 ++-- .../workflows/sub-deploy-integration-tests-gcp.yml | 2 +- ...nd-cached-disks.yml => sub-find-cached-disks.yml} | 0 6 files changed, 11 insertions(+), 11 deletions(-) rename .github/workflows/{manual-find-cached-disks.yml => sub-find-cached-disks.yml} (100%) diff --git a/.github/workflows/ci-integration-tests-gcp.patch.yml b/.github/workflows/ci-integration-tests-gcp.patch.yml index 770d745c1e0..f671429b5bf 100644 --- a/.github/workflows/ci-integration-tests-gcp.patch.yml +++ b/.github/workflows/ci-integration-tests-gcp.patch.yml @@ -22,7 +22,7 @@ on: - '.dockerignore' - '.github/workflows/ci-unit-tests-docker.yml' - '.github/workflows/sub-deploy-integration-tests-gcp.yml' - - '.github/workflows/manual-find-cached-disks.yml' + - '.github/workflows/sub-find-cached-disks.yml' - '.github/workflows/sub-build-docker-image.yml' jobs: diff --git a/.github/workflows/ci-integration-tests-gcp.yml b/.github/workflows/ci-integration-tests-gcp.yml index fc03b54c57c..1132762c210 100644 --- a/.github/workflows/ci-integration-tests-gcp.yml +++ b/.github/workflows/ci-integration-tests-gcp.yml @@ -65,7 +65,7 @@ on: - 
'.github/workflows/ci-integration-tests-gcp.yml' - '.github/workflows/sub-deploy-integration-tests-gcp.yml' - '.github/workflows/sub-build-docker-image.yml' - - '.github/workflows/manual-find-cached-disks.yml' + - '.github/workflows/sub-find-cached-disks.yml' push: branches: @@ -88,7 +88,7 @@ on: - '.dockerignore' - '.github/workflows/ci-integration-tests-gcp.yml' - '.github/workflows/sub-deploy-integration-tests-gcp.yml' - - '.github/workflows/manual-find-cached-disks.yml' + - '.github/workflows/sub-find-cached-disks.yml' - '.github/workflows/sub-build-docker-image.yml' jobs: @@ -101,20 +101,20 @@ jobs: # The default network is mainnet unless a manually triggered workflow or repository variable # is configured differently. # - # The outputs for this job have the same names as the workflow outputs in manual-find-cached-disks.yml + # The outputs for this job have the same names as the workflow outputs in sub-find-cached-disks.yml get-available-disks: name: Check if cached state disks exist for ${{ inputs.network || vars.ZCASH_NETWORK }} - uses: ./.github/workflows/manual-find-cached-disks.yml + uses: ./.github/workflows/sub-find-cached-disks.yml with: network: ${{ inputs.network || vars.ZCASH_NETWORK }} # Check if the cached state disks used by the tests are available for testnet. # - # The outputs for this job have the same names as the workflow outputs in manual-find-cached-disks.yml + # The outputs for this job have the same names as the workflow outputs in sub-find-cached-disks.yml # Some outputs are ignored, because we don't run those jobs on testnet. 
get-available-disks-testnet: name: Check if cached state disks exist for testnet - uses: ./.github/workflows/manual-find-cached-disks.yml + uses: ./.github/workflows/sub-find-cached-disks.yml with: network: 'Testnet' diff --git a/.github/workflows/ci-unit-tests-docker.patch.yml b/.github/workflows/ci-unit-tests-docker.patch.yml index e3be49f6b6d..28f9e2cdd21 100644 --- a/.github/workflows/ci-unit-tests-docker.patch.yml +++ b/.github/workflows/ci-unit-tests-docker.patch.yml @@ -22,7 +22,7 @@ on: - '.dockerignore' - '.github/workflows/ci-unit-tests-docker.yml' - '.github/workflows/sub-deploy-integration-tests-gcp.yml' - - '.github/workflows/manual-find-cached-disks.yml' + - '.github/workflows/sub-find-cached-disks.yml' - '.github/workflows/sub-build-docker-image.yml' jobs: diff --git a/.github/workflows/ci-unit-tests-docker.yml b/.github/workflows/ci-unit-tests-docker.yml index f9ee194f49b..12806a035e0 100644 --- a/.github/workflows/ci-unit-tests-docker.yml +++ b/.github/workflows/ci-unit-tests-docker.yml @@ -39,7 +39,7 @@ on: - '.github/workflows/ci-unit-tests-docker.yml' - '.github/workflows/sub-deploy-integration-tests-gcp.yml' - '.github/workflows/sub-build-docker-image.yml' - - '.github/workflows/manual-find-cached-disks.yml' + - '.github/workflows/sub-find-cached-disks.yml' push: branches: @@ -62,7 +62,7 @@ on: - '.dockerignore' - '.github/workflows/ci-unit-tests-docker.yml' - '.github/workflows/sub-deploy-integration-tests-gcp.yml' - - '.github/workflows/manual-find-cached-disks.yml' + - '.github/workflows/sub-find-cached-disks.yml' - '.github/workflows/sub-build-docker-image.yml' jobs: diff --git a/.github/workflows/sub-deploy-integration-tests-gcp.yml b/.github/workflows/sub-deploy-integration-tests-gcp.yml index 65e7eb8c645..e718eaa43af 100644 --- a/.github/workflows/sub-deploy-integration-tests-gcp.yml +++ b/.github/workflows/sub-deploy-integration-tests-gcp.yml @@ -311,7 +311,7 @@ jobs: # Passes the disk name to subsequent steps using $CACHED_DISK_NAME 
env variable # Passes the state version to subsequent steps using $STATE_VERSION env variable # - # TODO: move this script into a file, and call it from manual-find-cached-disks.yml as well. + # TODO: move this script into a file, and call it from sub-find-cached-disks.yml as well. - name: Find ${{ inputs.test_id }} cached state disk id: get-disk-name run: | diff --git a/.github/workflows/manual-find-cached-disks.yml b/.github/workflows/sub-find-cached-disks.yml similarity index 100% rename from .github/workflows/manual-find-cached-disks.yml rename to .github/workflows/sub-find-cached-disks.yml From 2871b2fad13fe6195c1c44495105d6351fc28bf7 Mon Sep 17 00:00:00 2001 From: arya2 Date: Wed, 11 Oct 2023 15:39:35 -0400 Subject: [PATCH 29/59] Adds zs_iter_opts method --- .../src/service/finalized_state/disk_db.rs | 35 ++++++++++++++++++- 1 file changed, 34 insertions(+), 1 deletion(-) diff --git a/zebra-state/src/service/finalized_state/disk_db.rs b/zebra-state/src/service/finalized_state/disk_db.rs index 3772fb7a789..1ca37fc5cc4 100644 --- a/zebra-state/src/service/finalized_state/disk_db.rs +++ b/zebra-state/src/service/finalized_state/disk_db.rs @@ -21,6 +21,7 @@ use std::{ use itertools::Itertools; use rlimit::increase_nofile_limit; +use rocksdb::ReadOptions; use zebra_chain::parameters::Network; use crate::{ @@ -495,11 +496,12 @@ impl DiskDb { let range = (start_bound, end_bound); let mode = Self::zs_iter_mode(&range, reverse); + let opts = Self::zs_iter_opts(&range); // Reading multiple items from iterators has caused database hangs, // in previous RocksDB versions self.db - .iterator_cf(cf, mode) + .iterator_cf_opt(cf, opts, mode) .map(|result| result.expect("unexpected database failure")) .map(|(key, value)| (key.to_vec(), value)) // Skip excluded "from" bound and empty ranges. 
The `mode` already skips keys @@ -514,6 +516,37 @@ impl DiskDb { .map(|(key, value)| (K::from_bytes(key), V::from_bytes(value))) } + /// Returns the RocksDB ReadOptions with a lower and upper bound for a range. + fn zs_iter_opts(range: &R) -> ReadOptions + where + R: RangeBounds>, + { + use std::ops::Bound::*; + + let mut opts = ReadOptions::default(); + + if let Included(bound) | Excluded(bound) = range.start_bound() { + opts.set_iterate_lower_bound(bound.clone()); + }; + + match range.end_bound().cloned() { + Included(mut bound) => { + // Increment the last byte in the vector that is not u8::MAX, or + // skip adding an upper bound if every byte is u8::MAX + if let Some(increment_idx) = bound.iter().rposition(|&v| v != u8::MAX) { + bound[increment_idx] += 1; + opts.set_iterate_upper_bound(bound); + } + } + Excluded(bound) => { + opts.set_iterate_upper_bound(bound); + } + Unbounded => {} + }; + + opts + } + /// Returns the RocksDB iterator "from" mode for `range`. /// /// RocksDB iterators are ordered by increasing key bytes by default. 
From d815ae5292328846ffcb852193eeff00f97456be Mon Sep 17 00:00:00 2001 From: arya2 Date: Wed, 11 Oct 2023 15:50:14 -0400 Subject: [PATCH 30/59] uses checked_add --- zebra-state/src/service/finalized_state/disk_db.rs | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/zebra-state/src/service/finalized_state/disk_db.rs b/zebra-state/src/service/finalized_state/disk_db.rs index 1ca37fc5cc4..fb750aca2db 100644 --- a/zebra-state/src/service/finalized_state/disk_db.rs +++ b/zebra-state/src/service/finalized_state/disk_db.rs @@ -534,7 +534,13 @@ impl DiskDb { // Increment the last byte in the vector that is not u8::MAX, or // skip adding an upper bound if every byte is u8::MAX if let Some(increment_idx) = bound.iter().rposition(|&v| v != u8::MAX) { - bound[increment_idx] += 1; + let increment_byte = bound + .get_mut(increment_idx) + .expect("index should be in bounds"); + *increment_byte = increment_byte + .checked_add(1) + .expect("adding 1 should succeed"); + opts.set_iterate_upper_bound(bound); } } From 49123208d0347283e03d97577a0ed19e6c3bc3c0 Mon Sep 17 00:00:00 2001 From: arya2 Date: Wed, 11 Oct 2023 16:05:47 -0400 Subject: [PATCH 31/59] uses zs_range_iter for other read methods --- .../src/service/finalized_state/disk_db.rs | 59 ++++++------------- .../finalized_state/zebra_db/shielded.rs | 12 ++-- 2 files changed, 23 insertions(+), 48 deletions(-) diff --git a/zebra-state/src/service/finalized_state/disk_db.rs b/zebra-state/src/service/finalized_state/disk_db.rs index fb750aca2db..806cc9536d4 100644 --- a/zebra-state/src/service/finalized_state/disk_db.rs +++ b/zebra-state/src/service/finalized_state/disk_db.rs @@ -195,7 +195,7 @@ pub trait ReadDisk { fn zs_first_key_value(&self, cf: &C) -> Option<(K, V)> where C: rocksdb::AsColumnFamilyRef, - K: FromDisk, + K: IntoDisk + FromDisk, V: FromDisk; /// Returns the highest key in `cf`, and the corresponding value. 
@@ -204,7 +204,7 @@ pub trait ReadDisk { fn zs_last_key_value(&self, cf: &C) -> Option<(K, V)> where C: rocksdb::AsColumnFamilyRef, - K: FromDisk, + K: IntoDisk + FromDisk, V: FromDisk; /// Returns the first key greater than or equal to `lower_bound` in `cf`, @@ -322,34 +322,22 @@ impl ReadDisk for DiskDb { fn zs_first_key_value(&self, cf: &C) -> Option<(K, V)> where C: rocksdb::AsColumnFamilyRef, - K: FromDisk, + K: IntoDisk + FromDisk, V: FromDisk, { // Reading individual values from iterators does not seem to cause database hangs. - self.db - .iterator_cf(cf, rocksdb::IteratorMode::Start) - .next()? - .map(|(key_bytes, value_bytes)| { - Some((K::from_bytes(key_bytes), V::from_bytes(value_bytes))) - }) - .expect("unexpected database failure") + self.zs_range_iter(cf, .., false).next() } #[allow(clippy::unwrap_in_result)] fn zs_last_key_value(&self, cf: &C) -> Option<(K, V)> where C: rocksdb::AsColumnFamilyRef, - K: FromDisk, + K: IntoDisk + FromDisk, V: FromDisk, { // Reading individual values from iterators does not seem to cause database hangs. - self.db - .iterator_cf(cf, rocksdb::IteratorMode::End) - .next()? - .map(|(key_bytes, value_bytes)| { - Some((K::from_bytes(key_bytes), V::from_bytes(value_bytes))) - }) - .expect("unexpected database failure") + self.zs_range_iter(cf, .., true).next() } #[allow(clippy::unwrap_in_result)] @@ -359,17 +347,8 @@ impl ReadDisk for DiskDb { K: IntoDisk + FromDisk, V: FromDisk, { - let lower_bound = lower_bound.as_bytes(); - let from = rocksdb::IteratorMode::From(lower_bound.as_ref(), rocksdb::Direction::Forward); - // Reading individual values from iterators does not seem to cause database hangs. - self.db - .iterator_cf(cf, from) - .next()? 
- .map(|(key_bytes, value_bytes)| { - Some((K::from_bytes(key_bytes), V::from_bytes(value_bytes))) - }) - .expect("unexpected database failure") + self.zs_range_iter(cf, lower_bound.., false).next() } #[allow(clippy::unwrap_in_result)] @@ -379,17 +358,8 @@ impl ReadDisk for DiskDb { K: IntoDisk + FromDisk, V: FromDisk, { - let upper_bound = upper_bound.as_bytes(); - let from = rocksdb::IteratorMode::From(upper_bound.as_ref(), rocksdb::Direction::Reverse); - // Reading individual values from iterators does not seem to cause database hangs. - self.db - .iterator_cf(cf, from) - .next()? - .map(|(key_bytes, value_bytes)| { - Some((K::from_bytes(key_bytes), V::from_bytes(value_bytes))) - }) - .expect("unexpected database failure") + self.zs_range_iter(cf, ..=upper_bound, false).next() } fn zs_items_in_range_ordered(&self, cf: &C, range: R) -> BTreeMap @@ -399,7 +369,7 @@ impl ReadDisk for DiskDb { V: FromDisk, R: RangeBounds, { - self.zs_range_iter(cf, range).collect() + self.zs_range_iter(cf, range, false).collect() } fn zs_items_in_range_unordered(&self, cf: &C, range: R) -> HashMap @@ -409,7 +379,7 @@ impl ReadDisk for DiskDb { V: FromDisk, R: RangeBounds, { - self.zs_range_iter(cf, range).collect() + self.zs_range_iter(cf, range, false).collect() } } @@ -432,14 +402,19 @@ impl DiskDb { /// Returns an iterator over the items in `cf` in `range`. /// /// Holding this iterator open might delay block commit transactions. - pub fn zs_range_iter(&self, cf: &C, range: R) -> impl Iterator + '_ + pub fn zs_range_iter( + &self, + cf: &C, + range: R, + reverse: bool, + ) -> impl Iterator + '_ where C: rocksdb::AsColumnFamilyRef, K: IntoDisk + FromDisk, V: FromDisk, R: RangeBounds, { - self.zs_range_iter_with_direction(cf, range, false) + self.zs_range_iter_with_direction(cf, range, reverse) } /// Returns a reverse iterator over the items in `cf` in `range`. 
diff --git a/zebra-state/src/service/finalized_state/zebra_db/shielded.rs b/zebra-state/src/service/finalized_state/zebra_db/shielded.rs index 75b5db8da64..59788640a33 100644 --- a/zebra-state/src/service/finalized_state/zebra_db/shielded.rs +++ b/zebra-state/src/service/finalized_state/zebra_db/shielded.rs @@ -209,7 +209,7 @@ impl ZebraDb { R: std::ops::RangeBounds, { let sapling_trees = self.db.cf_handle("sapling_note_commitment_tree").unwrap(); - self.db.zs_range_iter(&sapling_trees, range) + self.db.zs_range_iter(&sapling_trees, range, false) } /// Returns the Sapling note commitment trees in the reversed range, in decreasing height order. @@ -282,7 +282,7 @@ impl ZebraDb { if let Some(exclusive_end_bound) = exclusive_end_bound { list = self .db - .zs_range_iter(&sapling_subtrees, start_index..exclusive_end_bound) + .zs_range_iter(&sapling_subtrees, start_index..exclusive_end_bound, false) .collect(); } else { // If there is no end bound, just return all the trees. @@ -291,7 +291,7 @@ impl ZebraDb { // the trees run out.) list = self .db - .zs_range_iter(&sapling_subtrees, start_index..) + .zs_range_iter(&sapling_subtrees, start_index.., false) .collect(); } @@ -376,7 +376,7 @@ impl ZebraDb { R: std::ops::RangeBounds, { let orchard_trees = self.db.cf_handle("orchard_note_commitment_tree").unwrap(); - self.db.zs_range_iter(&orchard_trees, range) + self.db.zs_range_iter(&orchard_trees, range, false) } /// Returns the Orchard note commitment trees in the reversed range, in decreasing height order. @@ -449,7 +449,7 @@ impl ZebraDb { if let Some(exclusive_end_bound) = exclusive_end_bound { list = self .db - .zs_range_iter(&orchard_subtrees, start_index..exclusive_end_bound) + .zs_range_iter(&orchard_subtrees, start_index..exclusive_end_bound, false) .collect(); } else { // If there is no end bound, just return all the trees. @@ -458,7 +458,7 @@ impl ZebraDb { // the trees run out.) list = self .db - .zs_range_iter(&orchard_subtrees, start_index..) 
+ .zs_range_iter(&orchard_subtrees, start_index.., false) .collect(); } From eac74b8e20f06826a546716f51044af111ae3dcd Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Wed, 11 Oct 2023 21:39:21 +0100 Subject: [PATCH 32/59] fix(ci): use the `entrypoint.sh` to change the Network --- .github/workflows/ci-unit-tests-docker.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/ci-unit-tests-docker.yml b/.github/workflows/ci-unit-tests-docker.yml index 12806a035e0..1dba4a69b2b 100644 --- a/.github/workflows/ci-unit-tests-docker.yml +++ b/.github/workflows/ci-unit-tests-docker.yml @@ -301,6 +301,7 @@ jobs: echo "An error occurred while processing the logs."; exit 1; env: + ZEBRA_CONF_PATH: '/etc/zebrad/zebrad.toml' NETWORK: Testnet # Test that Zebra works using the $ZEBRA_CONF_PATH config From ecb938bb2aff495381812453fcaf4f524df29e54 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Wed, 11 Oct 2023 22:50:54 +0100 Subject: [PATCH 33/59] fix(ci): add missing `ZEBRA_CONF_PATH` variable --- .github/workflows/ci-unit-tests-docker.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci-unit-tests-docker.yml b/.github/workflows/ci-unit-tests-docker.yml index 1dba4a69b2b..8e1bacbde07 100644 --- a/.github/workflows/ci-unit-tests-docker.yml +++ b/.github/workflows/ci-unit-tests-docker.yml @@ -272,7 +272,7 @@ jobs: - name: Run tests using a testnet config run: | docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} - docker run -e NETWORK --detach --name testnet-conf-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} zebrad start + docker run -e NETWORK -e ZEBRA_CONF_PATH --detach --name testnet-conf-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} zebrad start # Use a subshell to handle the broken pipe error gracefully ( trap "" PIPE; From defb314e794b2b1a106de7c102390114e074d7da Mon Sep 17 00:00:00 2001 From: 
Gustavo Valverde Date: Wed, 11 Oct 2023 23:25:17 +0100 Subject: [PATCH 34/59] fix(ci): considerate new updates to jobs --- .../sub-deploy-integration-tests-gcp.yml | 59 +++++++++---------- 1 file changed, 28 insertions(+), 31 deletions(-) diff --git a/.github/workflows/sub-deploy-integration-tests-gcp.yml b/.github/workflows/sub-deploy-integration-tests-gcp.yml index cb1f62f59c6..7cffe34a85a 100644 --- a/.github/workflows/sub-deploy-integration-tests-gcp.yml +++ b/.github/workflows/sub-deploy-integration-tests-gcp.yml @@ -257,14 +257,14 @@ jobs: # if the test needs it. - name: Create ${{ inputs.test_id }} GCP compute instance id: create-instance - shell: /usr/bin/bash -ex {0} + shell: /usr/bin/bash -x {0} run: | gcloud compute instances create-with-container "${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }}" \ --boot-disk-size 50GB \ --boot-disk-type pd-ssd \ --image-project=cos-cloud \ --image-family=cos-stable \ - --create-disk=${DISK_OPTION}name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",device-name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",size=300GB,type=pd-ssd \ + --create-disk=${DISK_OPTION}name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",device-name="${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }}",size=400GB,type=pd-ssd \ --container-image=gcr.io/google-containers/busybox \ --machine-type ${{ vars.GCP_LARGE_MACHINE }} \ --network-interface=subnet=${{ vars.GCP_SUBNETWORK }} \ @@ -285,14 +285,12 @@ jobs: --ssh-flag="-o ServerAliveInterval=5" \ --ssh-flag="-o ConnectionAttempts=20" \ --ssh-flag="-o ConnectTimeout=5" \ - --command \ - "\ - while sudo lsof /dev/sdb; do \ - echo 'Waiting for /dev/sdb to be free...'; \ - sleep 10; \ - done; \ - sudo mkfs.ext4 -v /dev/sdb \ - " + --command=' \ + set -ex; + # Extract the correct disk name based on the device-name + DISK_NAME=$(ls -l /dev/disk/by-id | grep -oE "google-${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }} -> ../../[^ ]+" | grep -oE "/[^/]+$" | 
cut -c 2-); + sudo mkfs.ext4 -v /dev/$DISK_NAME \ + ' # Launch the test with the previously created disk or cached state. # @@ -325,22 +323,26 @@ jobs: # the inputs like ${{ inputs.zebra_state_dir }} and ${{ inputs.lwd_state_dir }} # are only used to match those variables paths. - name: Launch ${{ inputs.test_id }} test - shell: /usr/bin/bash -ex {0} + id: launch-test + shell: /usr/bin/bash -x {0} run: | - MOUNT_FLAGS="--mount type=volume,volume-driver=local,volume-opt=device=/dev/sdb,volume-opt=type=ext4,dst=${{ inputs.root_state_path }}/${{ inputs.zebra_state_dir }}" - - # Check if we need to mount for Lightwalletd state - if [[ "${{ inputs.needs_lwd_state }}" == "true" || "${{ inputs.test_id }}" == "lwd-full-sync" ]]; then - MOUNT_FLAGS="$MOUNT_FLAGS --mount type=volume,volume-driver=local,volume-opt=device=/dev/sdb,volume-opt=type=ext4,dst=${{ inputs.root_state_path }}/${{ inputs.lwd_state_dir }}" - fi - gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ --zone ${{ vars.GCP_ZONE }} \ --ssh-flag="-o ServerAliveInterval=5" \ --ssh-flag="-o ConnectionAttempts=20" \ --ssh-flag="-o ConnectTimeout=5" \ - --command \ - '\ + --command=' \ + + # Extract the correct disk name based on the device-name + DISK_NAME=$(ls -l /dev/disk/by-id | grep -oE "google-${{ inputs.test_id }}-${{ env.GITHUB_SHA_SHORT }} -> ../../[^ ]+" | grep -oE "/[^/]+$" | cut -c 2-) + + MOUNT_FLAGS="--mount type=volume,volume-driver=local,volume-opt=device=/dev/$DISK_NAME,volume-opt=type=ext4,dst=${{ inputs.root_state_path }}/${{ inputs.zebra_state_dir }}" + + # Check if we need to mount for Lightwalletd state + if [[ "${{ inputs.needs_lwd_state }}" == "true" || "${{ inputs.test_id }}" == "lwd-full-sync" ]]; then + MOUNT_FLAGS="$MOUNT_FLAGS --mount type=volume,volume-driver=local,volume-opt=device=/dev/$DISK_NAME,volume-opt=type=ext4,dst=${{ inputs.root_state_path }}/${{ inputs.lwd_state_dir }}" + fi + sudo docker run \ --name ${{ inputs.test_id }} \ 
--tty \ @@ -352,8 +354,8 @@ jobs: # Show debug logs if previous job failed - name: Show debug logs if previous job failed - if: ${{ failure() && (inputs.needs_zebra_state && inputs.needs_lwd_state) || inputs.test_id == 'lwd-full-sync' }} - shell: /usr/bin/bash -exo pipefail {0} + if: ${{ failure() }} + shell: /usr/bin/bash -x {0} run: | gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ --zone ${{ vars.GCP_ZONE }} \ @@ -380,7 +382,7 @@ jobs: # # Errors in the tests are caught by the final test status job. - name: Check startup logs for ${{ inputs.test_id }} - shell: /usr/bin/bash -exo pipefail {0} + shell: /usr/bin/bash -x {0} run: | gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ --zone ${{ vars.GCP_ZONE }} \ @@ -388,10 +390,6 @@ jobs: --ssh-flag="-o ConnectionAttempts=20" \ --ssh-flag="-o ConnectTimeout=5" \ --command=' \ - trap "" PIPE; - - # Temporarily disable "set -e" to handle the broken pipe error gracefully - set +e; sudo docker logs \ --tail all \ --follow \ @@ -413,7 +411,7 @@ jobs: # with that status. # (`docker wait` can also wait for multiple containers, but we only ever wait for a single container.) - name: Result of ${{ inputs.test_id }} test - shell: /usr/bin/bash -exo pipefail {0} + shell: /usr/bin/bash -x {0} run: | gcloud compute ssh ${{ inputs.test_id }}-${{ env.GITHUB_REF_SLUG_URL }}-${{ env.GITHUB_SHA_SHORT }} \ --zone ${{ vars.GCP_ZONE }} \ @@ -429,7 +427,6 @@ jobs: grep --max-count=1 --extended-regexp --color=always \ "test result: .*ok.* [1-9][0-9]* passed.*finished in"; LOGS_EXIT_STATUS=$?; - set -e; EXIT_STATUS=$(sudo docker wait ${{ inputs.test_id }} || echo "Error retrieving exit status"); echo "sudo docker exit status: $EXIT_STATUS"; @@ -555,7 +552,7 @@ jobs: # Passes the versions to subsequent steps using the $INITIAL_DISK_DB_VERSION, # $RUNNING_DB_VERSION, and $DB_VERSION_SUMMARY env variables. 
- name: Get database versions from logs - shell: /usr/bin/bash -exo pipefail {0} + shell: /usr/bin/bash -x {0} run: | INITIAL_DISK_DB_VERSION="" RUNNING_DB_VERSION="" @@ -645,7 +642,7 @@ jobs: # # Passes the sync height to subsequent steps using the $SYNC_HEIGHT env variable. - name: Get sync height from logs - shell: /usr/bin/bash -exo pipefail {0} + shell: /usr/bin/bash -x {0} run: | SYNC_HEIGHT="" From 6fe2870d269fd6427119d3d75aae3d1e40e4b54a Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Wed, 11 Oct 2023 23:45:58 +0100 Subject: [PATCH 35/59] fix(ci): allow to build the entrypoint file with testnet --- .github/workflows/ci-unit-tests-docker.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci-unit-tests-docker.yml b/.github/workflows/ci-unit-tests-docker.yml index 8e1bacbde07..997be2a1b01 100644 --- a/.github/workflows/ci-unit-tests-docker.yml +++ b/.github/workflows/ci-unit-tests-docker.yml @@ -272,7 +272,7 @@ jobs: - name: Run tests using a testnet config run: | docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} - docker run -e NETWORK -e ZEBRA_CONF_PATH --detach --name testnet-conf-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} zebrad start + docker run -e NETWORK -e ZEBRA_CONF_PATH -e ENTRYPOINT_FEATURES='' --detach --name testnet-conf-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} zebrad start # Use a subshell to handle the broken pipe error gracefully ( trap "" PIPE; From 97e7a14b6c1611297ae5df0005c2d9e020ec7e3e Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Thu, 12 Oct 2023 00:17:01 +0100 Subject: [PATCH 36/59] fix(entrypoint): allow to create a dir and file with a single variable --- docker/entrypoint.sh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/docker/entrypoint.sh b/docker/entrypoint.sh index 66b8f586586..1271e01a982 100755 --- a/docker/entrypoint.sh +++ 
b/docker/entrypoint.sh @@ -90,8 +90,7 @@ fi # Users have to opt-in to additional functionality by setting environmental variables. if [[ -n "${ZEBRA_CONF_PATH}" ]] && [[ ! -f "${ZEBRA_CONF_PATH}" ]] && [[ -z "${ENTRYPOINT_FEATURES}" ]]; then # Create the conf path and file - mkdir -p "${ZEBRA_CONF_DIR}" || { echo "Error creating directory ${ZEBRA_CONF_DIR}"; exit 1; } - touch "${ZEBRA_CONF_PATH}" || { echo "Error creating file ${ZEBRA_CONF_PATH}"; exit 1; } + (mkdir -p "$(dirname "${ZEBRA_CONF_PATH}")" && touch "${ZEBRA_CONF_PATH}") || { echo "Error creating file ${ZEBRA_CONF_PATH}"; exit 1; } # Populate the conf file cat < "${ZEBRA_CONF_PATH}" [network] From ec92cf8357de37e8b0b051b3cf2d4dcd00be56c7 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Thu, 12 Oct 2023 00:31:11 +0100 Subject: [PATCH 37/59] fix(ci): add missing jobs to `failure-issue` --- .github/workflows/ci-integration-tests-gcp.yml | 2 +- .github/workflows/ci-unit-tests-docker.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci-integration-tests-gcp.yml b/.github/workflows/ci-integration-tests-gcp.yml index 1132762c210..f58e35bb9d3 100644 --- a/.github/workflows/ci-integration-tests-gcp.yml +++ b/.github/workflows/ci-integration-tests-gcp.yml @@ -596,7 +596,7 @@ jobs: # # This list is for reliable tests that are run on the `main` branch. # Testnet jobs are not in this list, because we expect testnet to fail occasionally. 
- needs: [ regenerate-stateful-disks, test-full-sync, lightwalletd-full-sync, test-stateful-sync, test-update-sync, checkpoints-mainnet, lightwalletd-update-sync, lightwalletd-rpc-test, lightwalletd-transactions-test, lightwalletd-grpc-test, get-block-template-test, submit-block-test ] + needs: [ regenerate-stateful-disks, test-stateful-sync, test-full-sync, test-update-sync, checkpoints-mainnet, test-full-sync-testnet, checkpoints-testnet, lightwalletd-full-sync, lightwalletd-update-sync, lightwalletd-rpc-test, lightwalletd-transactions-test, lightwalletd-grpc-test, get-block-template-test, submit-block-test ] # Only open tickets for failed scheduled jobs, manual workflow runs, or `main` branch merges. # (PR statuses are already reported in the PR jobs list, and checked by Mergify.) if: (failure() && github.event.pull_request == null) || (cancelled() && github.event.pull_request == null) diff --git a/.github/workflows/ci-unit-tests-docker.yml b/.github/workflows/ci-unit-tests-docker.yml index 1dba4a69b2b..41493dc1b31 100644 --- a/.github/workflows/ci-unit-tests-docker.yml +++ b/.github/workflows/ci-unit-tests-docker.yml @@ -337,7 +337,7 @@ jobs: # # This list is for reliable tests that are run on the `main` branch. # Testnet jobs are not in this list, because we expect testnet to fail occasionally. - needs: [ test-all, test-all-getblocktemplate-rpcs, test-fake-activation-heights, test-empty-sync, test-lightwalletd-integration, test-configuration-file, test-zebra-conf-path, test-configuration-file-testnet ] + needs: [ test-all, test-all-getblocktemplate-rpcs, test-fake-activation-heights, test-empty-sync, test-lightwalletd-integration, test-configuration-file, test-configuration-file-testnet, test-zebra-conf-path ] # Only open tickets for failed scheduled jobs, manual workflow runs, or `main` branch merges. # (PR statuses are already reported in the PR jobs list, and checked by Mergify.) # TODO: if a job times out, we want to create a ticket. 
Does failure() do that? Or do we need cancelled()? From 1ce960b2342c37d01db85855285ff6db01c3a36b Mon Sep 17 00:00:00 2001 From: arya2 Date: Wed, 11 Oct 2023 19:49:47 -0400 Subject: [PATCH 38/59] fixes bug --- zebra-state/src/service/finalized_state/disk_db.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zebra-state/src/service/finalized_state/disk_db.rs b/zebra-state/src/service/finalized_state/disk_db.rs index 806cc9536d4..68944d79ce1 100644 --- a/zebra-state/src/service/finalized_state/disk_db.rs +++ b/zebra-state/src/service/finalized_state/disk_db.rs @@ -359,7 +359,7 @@ impl ReadDisk for DiskDb { V: FromDisk, { // Reading individual values from iterators does not seem to cause database hangs. - self.zs_range_iter(cf, ..=upper_bound, false).next() + self.zs_range_iter(cf, ..=upper_bound, true).next() } fn zs_items_in_range_ordered(&self, cf: &C, range: R) -> BTreeMap From bd80289b0fecf4e08d723f9a53a27597634500c9 Mon Sep 17 00:00:00 2001 From: arya2 Date: Wed, 11 Oct 2023 22:05:08 -0400 Subject: [PATCH 39/59] Fixes bug in zs_iter_opts --- .../src/service/finalized_state/disk_db.rs | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/zebra-state/src/service/finalized_state/disk_db.rs b/zebra-state/src/service/finalized_state/disk_db.rs index 68944d79ce1..b4a791e009e 100644 --- a/zebra-state/src/service/finalized_state/disk_db.rs +++ b/zebra-state/src/service/finalized_state/disk_db.rs @@ -506,16 +506,16 @@ impl DiskDb { match range.end_bound().cloned() { Included(mut bound) => { - // Increment the last byte in the vector that is not u8::MAX, or - // skip adding an upper bound if every byte is u8::MAX - if let Some(increment_idx) = bound.iter().rposition(|&v| v != u8::MAX) { - let increment_byte = bound - .get_mut(increment_idx) - .expect("index should be in bounds"); - *increment_byte = increment_byte - .checked_add(1) - .expect("adding 1 should succeed"); - + // Skip adding an upper bound if every byte is 
u8::MAX, or + // increment the last byte in the upper bound that is less than u8::MAX, + // and clear any bytes after it to increment the big-endian number this + // string represents to RocksDB. + let is_max_key = bound.iter_mut().rev().all(|v| { + *v = v.wrapping_add(1); + v == &0 + }); + + if !is_max_key { opts.set_iterate_upper_bound(bound); } } From a703d1ce996256b02f13d707d51cde17d8d33c64 Mon Sep 17 00:00:00 2001 From: arya2 Date: Wed, 11 Oct 2023 23:11:51 -0400 Subject: [PATCH 40/59] Adds test & updates method docs --- .../src/service/finalized_state/disk_db.rs | 44 +++++++++++++------ .../service/finalized_state/disk_db/tests.rs | 25 +++++++++++ 2 files changed, 56 insertions(+), 13 deletions(-) diff --git a/zebra-state/src/service/finalized_state/disk_db.rs b/zebra-state/src/service/finalized_state/disk_db.rs index b4a791e009e..8016fa3de8b 100644 --- a/zebra-state/src/service/finalized_state/disk_db.rs +++ b/zebra-state/src/service/finalized_state/disk_db.rs @@ -401,6 +401,10 @@ impl DiskWriteBatch { impl DiskDb { /// Returns an iterator over the items in `cf` in `range`. /// + /// Accepts a `reverse` argument and creates the iterator with an [`IteratorMode`](rocksdb::IteratorMode) + /// of [`End`](rocksdb::IteratorMode::End), or [`From`](rocksdb::IteratorMode::From) + /// with [`Direction::Reverse`](rocksdb::Direction::Reverse). + /// /// Holding this iterator open might delay block commit transactions. 
pub fn zs_range_iter( &self, @@ -496,15 +500,33 @@ impl DiskDb { where R: RangeBounds>, { - use std::ops::Bound::*; - let mut opts = ReadOptions::default(); + let (lower_bound, upper_bound) = Self::zs_iter_bounds(range); + + if let Some(bound) = lower_bound { + opts.set_iterate_lower_bound(bound); + }; + + if let Some(bound) = upper_bound { + opts.set_iterate_upper_bound(bound); + }; + + opts + } - if let Included(bound) | Excluded(bound) = range.start_bound() { - opts.set_iterate_lower_bound(bound.clone()); + /// Returns a lower and upper iterate bounds for a range. + fn zs_iter_bounds(range: &R) -> (Option>, Option>) + where + R: RangeBounds>, + { + use std::ops::Bound::*; + + let lower_bound = match range.start_bound() { + Included(bound) | Excluded(bound) => Some(bound.clone()), + Unbounded => None, }; - match range.end_bound().cloned() { + let upper_bound = match range.end_bound().cloned() { Included(mut bound) => { // Skip adding an upper bound if every byte is u8::MAX, or // increment the last byte in the upper bound that is less than u8::MAX, @@ -515,17 +537,13 @@ impl DiskDb { v == &0 }); - if !is_max_key { - opts.set_iterate_upper_bound(bound); - } + (!is_max_key).then_some(bound) } - Excluded(bound) => { - opts.set_iterate_upper_bound(bound); - } - Unbounded => {} + Excluded(bound) => Some(bound), + Unbounded => None, }; - opts + (lower_bound, upper_bound) } /// Returns the RocksDB iterator "from" mode for `range`. diff --git a/zebra-state/src/service/finalized_state/disk_db/tests.rs b/zebra-state/src/service/finalized_state/disk_db/tests.rs index 17613e8b3b5..2b5567217a8 100644 --- a/zebra-state/src/service/finalized_state/disk_db/tests.rs +++ b/zebra-state/src/service/finalized_state/disk_db/tests.rs @@ -24,3 +24,28 @@ impl DiskDb { rocksdb::DB::list_cf(&opts, path) } } + +/// Check that the sprout tree database serialization format has not changed. 
+#[test] +fn zs_iter_opts_increments_key_by_one() { + let _init_guard = zebra_test::init(); + + // let (config, network) = Default::default(); + // let db = DiskDb::new(&config, network); + + let keys: [u32; 13] = [ + 0, 1, 200, 255, 256, 257, 65535, 65536, 65537, 16777215, 16777216, 16777217, 16777218, + ]; + + for key in keys { + let (_, upper_bound_bytes) = DiskDb::zs_iter_bounds(&..=key.to_be_bytes().to_vec()); + let upper_bound_bytes = upper_bound_bytes.expect("there should be an upper bound"); + let upper_bound = u32::from_be_bytes(upper_bound_bytes.try_into().unwrap()); + let expected_upper_bound = key + 1; + + assert_eq!( + expected_upper_bound, upper_bound, + "the upper bound should be 1 greater than the original key" + ); + } +} From 7814d5fde5bcd7bfaf6fbcef2a159bcfd871e3e2 Mon Sep 17 00:00:00 2001 From: arya2 Date: Wed, 11 Oct 2023 23:20:21 -0400 Subject: [PATCH 41/59] updates docs --- zebra-state/src/service/finalized_state/disk_db.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/zebra-state/src/service/finalized_state/disk_db.rs b/zebra-state/src/service/finalized_state/disk_db.rs index 8016fa3de8b..b88fdefe130 100644 --- a/zebra-state/src/service/finalized_state/disk_db.rs +++ b/zebra-state/src/service/finalized_state/disk_db.rs @@ -515,6 +515,11 @@ impl DiskDb { } /// Returns a lower and upper iterate bounds for a range. + /// + /// Note: Since upper iterate bounds are always exclusive in RocksDB, this method + /// will increment the upper bound by 1 if the end bound of the provided range + /// is inclusive, or will return an upper bound of `None` if the end bound of a + /// provided range is inclusive and already the max key for that column family. 
fn zs_iter_bounds(range: &R) -> (Option>, Option>) where R: RangeBounds>, @@ -531,7 +536,7 @@ impl DiskDb { // Skip adding an upper bound if every byte is u8::MAX, or // increment the last byte in the upper bound that is less than u8::MAX, // and clear any bytes after it to increment the big-endian number this - // string represents to RocksDB. + // Vec represents to RocksDB. let is_max_key = bound.iter_mut().rev().all(|v| { *v = v.wrapping_add(1); v == &0 From b3fc1cd4138fe00bda3652b3a785791811fcf30a Mon Sep 17 00:00:00 2001 From: Arya Date: Wed, 11 Oct 2023 23:28:05 -0400 Subject: [PATCH 42/59] Update zebra-state/src/service/finalized_state/disk_db/tests.rs --- zebra-state/src/service/finalized_state/disk_db/tests.rs | 3 --- 1 file changed, 3 deletions(-) diff --git a/zebra-state/src/service/finalized_state/disk_db/tests.rs b/zebra-state/src/service/finalized_state/disk_db/tests.rs index 2b5567217a8..c2d1a04d323 100644 --- a/zebra-state/src/service/finalized_state/disk_db/tests.rs +++ b/zebra-state/src/service/finalized_state/disk_db/tests.rs @@ -30,9 +30,6 @@ impl DiskDb { fn zs_iter_opts_increments_key_by_one() { let _init_guard = zebra_test::init(); - // let (config, network) = Default::default(); - // let db = DiskDb::new(&config, network); - let keys: [u32; 13] = [ 0, 1, 200, 255, 256, 257, 65535, 65536, 65537, 16777215, 16777216, 16777217, 16777218, ]; From 59dba97c7292bebbbad07407f4ab7ff38253992f Mon Sep 17 00:00:00 2001 From: arya2 Date: Thu, 12 Oct 2023 13:53:50 -0400 Subject: [PATCH 43/59] Corrects code comment --- zebra-state/src/service/finalized_state/disk_db/tests.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/zebra-state/src/service/finalized_state/disk_db/tests.rs b/zebra-state/src/service/finalized_state/disk_db/tests.rs index c2d1a04d323..4ba22c39ac7 100644 --- a/zebra-state/src/service/finalized_state/disk_db/tests.rs +++ b/zebra-state/src/service/finalized_state/disk_db/tests.rs @@ -25,7 +25,7 @@ impl DiskDb { } } -/// 
Check that the sprout tree database serialization format has not changed. +/// Check that zs_iter_opts returns an upper bound one greater than provided inclusive end bounds. #[test] fn zs_iter_opts_increments_key_by_one() { let _init_guard = zebra_test::init(); From f3eac7cf2b3335437718dda12294289a61241edb Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Thu, 12 Oct 2023 21:21:38 +0100 Subject: [PATCH 44/59] Apply suggestions from code review Co-authored-by: teor --- .github/workflows/sub-deploy-integration-tests-gcp.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/sub-deploy-integration-tests-gcp.yml b/.github/workflows/sub-deploy-integration-tests-gcp.yml index 7cffe34a85a..d0654aaa42b 100644 --- a/.github/workflows/sub-deploy-integration-tests-gcp.yml +++ b/.github/workflows/sub-deploy-integration-tests-gcp.yml @@ -315,7 +315,7 @@ jobs: # subdirectories for their data. (But Zebra, lightwalletd, and the test harness must not # delete the whole cache directory.) # - # This path must match the variable used by the tests in Rust, which are also set in + # These paths must match the variables used by the tests in Rust, which are also set in # `ci-unit-tests-docker.yml` to be able to run this tests. # # Although we're mounting the disk root to both directories, Zebra and Lightwalletd @@ -339,6 +339,7 @@ jobs: MOUNT_FLAGS="--mount type=volume,volume-driver=local,volume-opt=device=/dev/$DISK_NAME,volume-opt=type=ext4,dst=${{ inputs.root_state_path }}/${{ inputs.zebra_state_dir }}" # Check if we need to mount for Lightwalletd state + # lightwalletd-full-sync reads Zebra and writes lwd, so it is handled specially. 
if [[ "${{ inputs.needs_lwd_state }}" == "true" || "${{ inputs.test_id }}" == "lwd-full-sync" ]]; then MOUNT_FLAGS="$MOUNT_FLAGS --mount type=volume,volume-driver=local,volume-opt=device=/dev/$DISK_NAME,volume-opt=type=ext4,dst=${{ inputs.root_state_path }}/${{ inputs.lwd_state_dir }}" fi From a32b3742ebf75221d3a60d7a1c033c7fe3e0a789 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Thu, 12 Oct 2023 21:28:02 +0100 Subject: [PATCH 45/59] fix(ci): use better comment --- .github/workflows/sub-deploy-integration-tests-gcp.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/sub-deploy-integration-tests-gcp.yml b/.github/workflows/sub-deploy-integration-tests-gcp.yml index d0654aaa42b..336bbe6b360 100644 --- a/.github/workflows/sub-deploy-integration-tests-gcp.yml +++ b/.github/workflows/sub-deploy-integration-tests-gcp.yml @@ -304,7 +304,7 @@ jobs: # VM and to the container might require more steps in this workflow, and additional # considerations. # - # The disk mounted in the VM is located at /dev/sdb, we mount the root `/` of this disk to the docker + # The disk mounted in the VM is located at /dev/$DISK_NAME, we mount the root `/` of this disk to the docker # container, and might have two different paths (if lightwalletd state is needed): # - /var/cache/zebrad-cache -> ${{ inputs.root_state_path }}/${{ inputs.zebra_state_dir }} -> $ZEBRA_CACHED_STATE_DIR # - /var/cache/lwd-cache -> ${{ inputs.root_state_path }}/${{ inputs.lwd_state_dir }} -> $LIGHTWALLETD_DATA_DIR From fcf171f10be906f0c77041401ad1b59d5adadbaf Mon Sep 17 00:00:00 2001 From: arya2 Date: Thu, 12 Oct 2023 23:57:29 -0400 Subject: [PATCH 46/59] adds support for variable-sized keys --- zebra-state/src/service/finalized_state/disk_db.rs | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/zebra-state/src/service/finalized_state/disk_db.rs b/zebra-state/src/service/finalized_state/disk_db.rs index b88fdefe130..7ee2554b226 100644 --- 
a/zebra-state/src/service/finalized_state/disk_db.rs +++ b/zebra-state/src/service/finalized_state/disk_db.rs @@ -537,12 +537,17 @@ impl DiskDb { // increment the last byte in the upper bound that is less than u8::MAX, // and clear any bytes after it to increment the big-endian number this // Vec represents to RocksDB. - let is_max_key = bound.iter_mut().rev().all(|v| { + let is_zero = bound.iter_mut().rev().all(|v| { *v = v.wrapping_add(1); v == &0 }); - (!is_max_key).then_some(bound) + if is_zero { + bound.push(0); + *bound.get_mut(0).expect("should have at least 1 element") += 1; + } + + Some(bound) } Excluded(bound) => Some(bound), Unbounded => None, From 600aa5f6087dc0b6af9d9fcce54b614cedaee5c4 Mon Sep 17 00:00:00 2001 From: arya2 Date: Fri, 13 Oct 2023 00:14:36 -0400 Subject: [PATCH 47/59] adds test case --- .../service/finalized_state/disk_db/tests.rs | 32 ++++++++++++++++--- 1 file changed, 27 insertions(+), 5 deletions(-) diff --git a/zebra-state/src/service/finalized_state/disk_db/tests.rs b/zebra-state/src/service/finalized_state/disk_db/tests.rs index 4ba22c39ac7..ebfa44ede75 100644 --- a/zebra-state/src/service/finalized_state/disk_db/tests.rs +++ b/zebra-state/src/service/finalized_state/disk_db/tests.rs @@ -30,19 +30,41 @@ impl DiskDb { fn zs_iter_opts_increments_key_by_one() { let _init_guard = zebra_test::init(); - let keys: [u32; 13] = [ - 0, 1, 200, 255, 256, 257, 65535, 65536, 65537, 16777215, 16777216, 16777217, 16777218, + let keys: [u32; 14] = [ + 0, + 1, + 200, + 255, + 256, + 257, + 65535, + 65536, + 65537, + 16777215, + 16777216, + 16777217, + 16777218, + u32::MAX, ]; for key in keys { - let (_, upper_bound_bytes) = DiskDb::zs_iter_bounds(&..=key.to_be_bytes().to_vec()); - let upper_bound_bytes = upper_bound_bytes.expect("there should be an upper bound"); + let (_, bytes) = DiskDb::zs_iter_bounds(&..=key.to_be_bytes().to_vec()); + let mut bytes = bytes.expect("there should be an upper bound"); + let upper_bound_bytes = 
bytes.split_off(bytes.len() - 4); let upper_bound = u32::from_be_bytes(upper_bound_bytes.try_into().unwrap()); - let expected_upper_bound = key + 1; + let expected_upper_bound = key.wrapping_add(1); assert_eq!( expected_upper_bound, upper_bound, "the upper bound should be 1 greater than the original key" ); + + if expected_upper_bound == 0 { + assert_eq!( + bytes, + vec![1], + "there should be an extra byte with a value of 1" + ); + } } } From 43ae06e7387d5b34b8c16f141b1017a595b5bd52 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Tue, 17 Oct 2023 12:18:28 +0100 Subject: [PATCH 48/59] refactor: test config file in CI and CD with a reusable workflow --- .github/workflows/cd-deploy-nodes-gcp.yml | 49 ++++++++ .github/workflows/ci-unit-tests-docker.yml | 123 ++++---------------- .github/workflows/sub-test-zebra-config.yml | 75 ++++++++++++ 3 files changed, 148 insertions(+), 99 deletions(-) create mode 100644 .github/workflows/sub-test-zebra-config.yml diff --git a/.github/workflows/cd-deploy-nodes-gcp.yml b/.github/workflows/cd-deploy-nodes-gcp.yml index d22db72832c..75078d19192 100644 --- a/.github/workflows/cd-deploy-nodes-gcp.yml +++ b/.github/workflows/cd-deploy-nodes-gcp.yml @@ -96,6 +96,55 @@ jobs: no_cache: ${{ inputs.no_cache || false }} rust_log: info + # Test that Zebra works using the default config with the latest Zebra version. + test-configuration-file: + name: Test Zebra default Docker config file + timeout-minutes: 15 + runs-on: ubuntu-latest + needs: build + steps: + - name: Test Zebra with default config file + uses: ./.github/workflows/test-zebra-config-files.yml + with: + test_id: 'default-conf-tests' + test_description: 'Test Zebra default Docker config file' + grep_patterns: '-e "net.*=.*Main.*estimated progress to chain tip.*BeforeOverwinter"' + test_variables: '-e NETWORK' + network: 'Mainnet' + + # Test reconfiguring the docker image for testnet. 
+ test-configuration-file-testnet: + name: Test Zebra testnet Docker config file + timeout-minutes: 15 + runs-on: ubuntu-latest + needs: build + steps: + # Make sure Zebra can sync the genesis block on testnet + - name: Test Zebra with Testnet config file + uses: ./.github/workflows/test-zebra-config-files.yml + with: + test_id: 'testnet-conf-tests' + test_description: 'Test Zebra testnet Docker config file' + grep_patterns: '-e "net.*=.*Test.*estimated progress to chain tip.*Genesis" -e "net.*=.*Test.*estimated progress to chain tip.*BeforeOverwinter"' + test_variables: '-e NETWORK -e ZEBRA_CONF_PATH="/etc/zebrad/zebrad.toml"' + network: 'Testnet' + + # Test that Zebra works using $ZEBRA_CONF_PATH config + test-zebra-conf-path: + name: Test Zebra custom Docker config file + timeout-minutes: 15 + runs-on: ubuntu-latest + needs: build + steps: + - name: Test Zebra with $ZEBRA_CONF_PATH config file + uses: ./.github/workflows/test-zebra-config-files.yml + with: + test_id: 'variable-conf-tests' + test_description: 'Test Zebra custom Docker config file' + grep_patterns: '-e "v1.0.0-rc.2.toml"' + test_variables: '-e NETWORK -e ZEBRA_CONF_PATH="zebrad/tests/common/configs/v1.0.0-rc.2.toml"' + network: ${{ inputs.network || vars.ZCASH_NETWORK }} + # Deploy Managed Instance Groups (MiGs) for Mainnet and Testnet, # with one node in the configured GCP region. 
# diff --git a/.github/workflows/ci-unit-tests-docker.yml b/.github/workflows/ci-unit-tests-docker.yml index 997be2a1b01..83563398537 100644 --- a/.github/workflows/ci-unit-tests-docker.yml +++ b/.github/workflows/ci-unit-tests-docker.yml @@ -214,122 +214,47 @@ jobs: runs-on: ubuntu-latest needs: build steps: - - uses: r7kamura/rust-problem-matchers@v1.4.0 - - - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v4 + - name: Test Zebra with default config file + uses: ./.github/workflows/test-zebra-config-files.yml with: - short-length: 7 - - - name: Run tests using the default config - run: | - docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} - docker run -e NETWORK --detach --name default-conf-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} zebrad start - # Use a subshell to handle the broken pipe error gracefully - ( - trap "" PIPE; - docker logs \ - --tail all \ - --follow \ - default-conf-tests | \ - tee --output-error=exit /dev/stderr | \ - grep --max-count=1 --extended-regexp --color=always \ - -e "net.*=.*Main.*estimated progress to chain tip.*BeforeOverwinter" - ) || true - LOGS_EXIT_STATUS=$? - - docker stop default-conf-tests - - EXIT_STATUS=$(docker wait default-conf-tests || echo "Error retrieving exit status"); - echo "docker exit status: $EXIT_STATUS"; + test_id: 'default-conf-tests' + test_description: 'Test Zebra default Docker config file' + grep_patterns: '-e "net.*=.*Main.*estimated progress to chain tip.*BeforeOverwinter"' + test_variables: '-e NETWORK' + network: 'Mainnet' - # If grep found the pattern, exit with the Docker container exit status - if [ $LOGS_EXIT_STATUS -eq 0 ]; then - exit $EXIT_STATUS; - fi - - # Handle other potential errors here - echo "An error occurred while processing the logs."; - exit 1; - env: - NETWORK: Mainnet - - # Test reconfiguring the docker image for testnet. 
+ # Test reconfiguring the docker image for testnet. test-configuration-file-testnet: name: Test Zebra testnet Docker config file timeout-minutes: 15 runs-on: ubuntu-latest needs: build steps: - - uses: r7kamura/rust-problem-matchers@v1.4.0 - - - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v4 - with: - short-length: 7 - # Make sure Zebra can sync the genesis block on testnet - - name: Run tests using a testnet config - run: | - docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} - docker run -e NETWORK -e ZEBRA_CONF_PATH -e ENTRYPOINT_FEATURES='' --detach --name testnet-conf-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} zebrad start - # Use a subshell to handle the broken pipe error gracefully - ( - trap "" PIPE; - docker logs \ - --tail all \ - --follow \ - testnet-conf-tests | \ - tee --output-error=exit /dev/stderr | \ - grep --max-count=1 --extended-regexp --color=always \ - -e "net.*=.*Test.*estimated progress to chain tip.*Genesis" \ - -e "net.*=.*Test.*estimated progress to chain tip.*BeforeOverwinter"; - ) || true - LOGS_EXIT_STATUS=$? 
- - docker stop testnet-conf-tests - - EXIT_STATUS=$(docker wait testnet-conf-tests || echo "Error retrieving exit status"); - echo "docker exit status: $EXIT_STATUS"; - - # If grep found the pattern, exit with the Docker container exit status - if [ $LOGS_EXIT_STATUS -eq 0 ]; then - exit $EXIT_STATUS; - fi - - # Handle other potential errors here - echo "An error occurred while processing the logs."; - exit 1; - env: - ZEBRA_CONF_PATH: '/etc/zebrad/zebrad.toml' - NETWORK: Testnet + - name: Test Zebra with Testnet config file + uses: ./.github/workflows/test-zebra-config-files.yml + with: + test_id: 'testnet-conf-tests' + test_description: 'Test Zebra testnet Docker config file' + grep_patterns: '-e "net.*=.*Test.*estimated progress to chain tip.*Genesis" -e "net.*=.*Test.*estimated progress to chain tip.*BeforeOverwinter"' + test_variables: '-e NETWORK -e ZEBRA_CONF_PATH="/etc/zebrad/zebrad.toml"' + network: 'Testnet' - # Test that Zebra works using the $ZEBRA_CONF_PATH config + # Test that Zebra works using $ZEBRA_CONF_PATH config test-zebra-conf-path: name: Test Zebra custom Docker config file timeout-minutes: 15 runs-on: ubuntu-latest needs: build steps: - - uses: r7kamura/rust-problem-matchers@v1.4.0 - - - name: Inject slug/short variables - uses: rlespinasse/github-slug-action@v4 + - name: Test Zebra with $ZEBRA_CONF_PATH config file + uses: ./.github/workflows/test-zebra-config-files.yml with: - short-length: 7 - - - name: Run tests using the $ZEBRA_CONF_PATH - run: | - set -ex - docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} - docker run -e NETWORK --detach -e ZEBRA_CONF_PATH --name variable-conf-tests -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} zebrad start - EXIT_STATUS=$(docker logs --tail all --follow variable-conf-tests 2>&1 | grep -q --extended-regexp --max-count=1 -e 'v1.0.0-rc.2.toml'; echo $?; ) - docker stop variable-conf-tests - docker logs variable-conf-tests - 
exit "$EXIT_STATUS" - env: - ZEBRA_CONF_PATH: 'zebrad/tests/common/configs/v1.0.0-rc.2.toml' - NETWORK: ${{ inputs.network || vars.ZCASH_NETWORK }} + test_id: 'variable-conf-tests' + test_description: 'Test Zebra custom Docker config file' + grep_patterns: '-e "v1.0.0-rc.2.toml"' + test_variables: '-e NETWORK -e ZEBRA_CONF_PATH="zebrad/tests/common/configs/v1.0.0-rc.2.toml"' + network: ${{ inputs.network || vars.ZCASH_NETWORK }} failure-issue: name: Open or update issues for main branch failures diff --git a/.github/workflows/sub-test-zebra-config.yml b/.github/workflows/sub-test-zebra-config.yml new file mode 100644 index 00000000000..31e9c73eceb --- /dev/null +++ b/.github/workflows/sub-test-zebra-config.yml @@ -0,0 +1,75 @@ +name: Test Zebra Config Files + +on: + workflow_call: + inputs: + # Status and logging + test_id: + required: true + type: string + description: 'Unique identifier for the test' + test_description: + required: true + type: string + description: 'Explains what the test does' + grep_patterns: + required: true + type: string + description: 'Patterns to grep for in the logs' + + # Test selection and parameters + test_variables: + required: true + type: string + description: 'Environmental variables used to select and configure the test' + network: + required: false + type: string + default: Mainnet + description: 'Zcash network to test against' + +jobs: + test-zebra-config: + name: Test Zebra Docker Config + timeout-minutes: 15 + runs-on: ubuntu-latest + steps: + - uses: r7kamura/rust-problem-matchers@v1.4.0 + + - name: Inject slug/short variables + uses: rlespinasse/github-slug-action@v4 + with: + short-length: 7 + + - name: Run ${{ inputs.test_id }} config test + run: | + docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} + docker run ${{ inputs.test_variables }} --detach --name ${{ inputs.test_id }} -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} zebrad start + # Use a 
subshell to handle the broken pipe error gracefully + ( + trap "" PIPE; + docker logs \ + --tail all \ + --follow \ + ${{ inputs.test_id }} | \ + tee --output-error=exit /dev/stderr | \ + grep --max-count=1 --extended-regexp --color=always \ + ${{ inputs.grep_patterns }} + ) || true + LOGS_EXIT_STATUS=$? + + docker stop ${{ inputs.test_id }} + + EXIT_STATUS=$(docker wait ${{ inputs.test_id }} || echo "Error retrieving exit status"); + echo "docker exit status: $EXIT_STATUS"; + + # If grep found the pattern, exit with the Docker container exit status + if [ $LOGS_EXIT_STATUS -eq 0 ]; then + exit $EXIT_STATUS; + fi + + # Handle other potential errors here + echo "An error occurred while processing the logs."; + exit 1; + env: + NETWORK: '${{ inputs.network }}' From 408234082b4369ea7ea2904426d1622caf8c6293 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Tue, 17 Oct 2023 12:52:03 +0100 Subject: [PATCH 49/59] fix(ci): wrong name used --- .github/workflows/cd-deploy-nodes-gcp.yml | 6 +++--- .github/workflows/ci-unit-tests-docker.yml | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/cd-deploy-nodes-gcp.yml b/.github/workflows/cd-deploy-nodes-gcp.yml index 75078d19192..8310427bfe9 100644 --- a/.github/workflows/cd-deploy-nodes-gcp.yml +++ b/.github/workflows/cd-deploy-nodes-gcp.yml @@ -104,7 +104,7 @@ jobs: needs: build steps: - name: Test Zebra with default config file - uses: ./.github/workflows/test-zebra-config-files.yml + uses: ./.github/workflows/sub-test-zebra-config.yml with: test_id: 'default-conf-tests' test_description: 'Test Zebra default Docker config file' @@ -121,7 +121,7 @@ jobs: steps: # Make sure Zebra can sync the genesis block on testnet - name: Test Zebra with Testnet config file - uses: ./.github/workflows/test-zebra-config-files.yml + uses: ./.github/workflows/sub-test-zebra-config.yml with: test_id: 'testnet-conf-tests' test_description: 'Test Zebra testnet Docker config file' @@ -137,7 +137,7 @@ jobs: 
needs: build steps: - name: Test Zebra with $ZEBRA_CONF_PATH config file - uses: ./.github/workflows/test-zebra-config-files.yml + uses: ./.github/workflows/sub-test-zebra-config.yml with: test_id: 'variable-conf-tests' test_description: 'Test Zebra custom Docker config file' diff --git a/.github/workflows/ci-unit-tests-docker.yml b/.github/workflows/ci-unit-tests-docker.yml index 83563398537..ac7ba27aa8d 100644 --- a/.github/workflows/ci-unit-tests-docker.yml +++ b/.github/workflows/ci-unit-tests-docker.yml @@ -215,7 +215,7 @@ jobs: needs: build steps: - name: Test Zebra with default config file - uses: ./.github/workflows/test-zebra-config-files.yml + uses: ./.github/workflows/sub-test-zebra-config.yml with: test_id: 'default-conf-tests' test_description: 'Test Zebra default Docker config file' @@ -232,7 +232,7 @@ jobs: steps: # Make sure Zebra can sync the genesis block on testnet - name: Test Zebra with Testnet config file - uses: ./.github/workflows/test-zebra-config-files.yml + uses: ./.github/workflows/sub-test-zebra-config.yml with: test_id: 'testnet-conf-tests' test_description: 'Test Zebra testnet Docker config file' @@ -248,7 +248,7 @@ jobs: needs: build steps: - name: Test Zebra with $ZEBRA_CONF_PATH config file - uses: ./.github/workflows/test-zebra-config-files.yml + uses: ./.github/workflows/sub-test-zebra-config.yml with: test_id: 'variable-conf-tests' test_description: 'Test Zebra custom Docker config file' From 2bbed8022784f51eec2508f154edd471a2d72e73 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Tue, 17 Oct 2023 15:19:12 +0100 Subject: [PATCH 50/59] fix(ci): use checkout --- .github/workflows/sub-test-zebra-config.yml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/.github/workflows/sub-test-zebra-config.yml b/.github/workflows/sub-test-zebra-config.yml index 31e9c73eceb..1202a8e4d69 100644 --- a/.github/workflows/sub-test-zebra-config.yml +++ b/.github/workflows/sub-test-zebra-config.yml @@ -34,13 +34,17 @@ jobs: 
timeout-minutes: 15 runs-on: ubuntu-latest steps: - - uses: r7kamura/rust-problem-matchers@v1.4.0 + - uses: actions/checkout@v4.1.0 + with: + persist-credentials: false - name: Inject slug/short variables uses: rlespinasse/github-slug-action@v4 with: short-length: 7 + - uses: r7kamura/rust-problem-matchers@v1.4.0 + - name: Run ${{ inputs.test_id }} config test run: | docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} From e7f617095040277842157105cbacb5cea1d59a53 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Tue, 17 Oct 2023 18:29:21 +0100 Subject: [PATCH 51/59] fix(ci): improve docker config tests --- .github/workflows/cd-deploy-nodes-gcp.yml | 55 ++++++++------------- .github/workflows/ci-unit-tests-docker.yml | 53 +++++++------------- .github/workflows/sub-test-zebra-config.yml | 4 -- 3 files changed, 39 insertions(+), 73 deletions(-) diff --git a/.github/workflows/cd-deploy-nodes-gcp.yml b/.github/workflows/cd-deploy-nodes-gcp.yml index 8310427bfe9..a631d00b93c 100644 --- a/.github/workflows/cd-deploy-nodes-gcp.yml +++ b/.github/workflows/cd-deploy-nodes-gcp.yml @@ -99,51 +99,36 @@ jobs: # Test that Zebra works using the default config with the latest Zebra version. 
test-configuration-file: name: Test Zebra default Docker config file - timeout-minutes: 15 - runs-on: ubuntu-latest needs: build - steps: - - name: Test Zebra with default config file - uses: ./.github/workflows/sub-test-zebra-config.yml - with: - test_id: 'default-conf-tests' - test_description: 'Test Zebra default Docker config file' - grep_patterns: '-e "net.*=.*Main.*estimated progress to chain tip.*BeforeOverwinter"' - test_variables: '-e NETWORK' - network: 'Mainnet' + uses: ./.github/workflows/sub-test-zebra-config.yml + with: + test_id: 'default-conf-tests' + grep_patterns: '-e "net.*=.*Main.*estimated progress to chain tip.*BeforeOverwinter"' + test_variables: '-e NETWORK' + network: 'Mainnet' # Test reconfiguring the the docker image for tesnet. test-configuration-file-testnet: name: Test Zebra testnet Docker config file - timeout-minutes: 15 - runs-on: ubuntu-latest needs: build - steps: - # Make sure Zebra can sync the genesis block on testnet - - name: Test Zebra with Testnet config file - uses: ./.github/workflows/sub-test-zebra-config.yml - with: - test_id: 'testnet-conf-tests' - test_description: 'Test Zebra testnet Docker config file' - grep_patterns: '-e "net.*=.*Test.*estimated progress to chain tip.*Genesis" -e "net.*=.*Test.*estimated progress to chain tip.*BeforeOverwinter"' - test_variables: '-e NETWORK -e ZEBRA_CONF_PATH="/etc/zebrad/zebrad.toml"' - network: 'Testnet' + # Make sure Zebra can sync the genesis block on testnet + uses: ./.github/workflows/sub-test-zebra-config.yml + with: + test_id: 'testnet-conf-tests' + grep_patterns: '-e "net.*=.*Test.*estimated progress to chain tip.*Genesis" -e "net.*=.*Test.*estimated progress to chain tip.*BeforeOverwinter"' + test_variables: '-e NETWORK -e ZEBRA_CONF_PATH="/etc/zebrad/zebrad.toml"' + network: 'Testnet' # Test that Zebra works using $ZEBRA_CONF_PATH config test-zebra-conf-path: name: Test Zebra custom Docker config file - timeout-minutes: 15 - runs-on: ubuntu-latest needs: build - steps: 
- - name: Test Zebra with $ZEBRA_CONF_PATH config file - uses: ./.github/workflows/sub-test-zebra-config.yml - with: - test_id: 'variable-conf-tests' - test_description: 'Test Zebra custom Docker config file' - grep_patterns: '-e "v1.0.0-rc.2.toml"' - test_variables: '-e NETWORK -e ZEBRA_CONF_PATH="zebrad/tests/common/configs/v1.0.0-rc.2.toml"' - network: ${{ inputs.network || vars.ZCASH_NETWORK }} + uses: ./.github/workflows/sub-test-zebra-config.yml + with: + test_id: 'variable-conf-tests' + grep_patterns: '-e "v1.0.0-rc.2.toml"' + test_variables: '-e NETWORK -e ZEBRA_CONF_PATH="zebrad/tests/common/configs/v1.0.0-rc.2.toml"' + network: ${{ inputs.network || vars.ZCASH_NETWORK }} # Deploy Managed Instance Groups (MiGs) for Mainnet and Testnet, # with one node in the configured GCP region. @@ -165,7 +150,7 @@ jobs: matrix: network: [Mainnet, Testnet] name: Deploy ${{ matrix.network }} nodes - needs: [ build, versioning ] + needs: [ build, versioning, test-configuration-file, test-configuration-file-testnet, test-zebra-conf-path ] runs-on: ubuntu-latest timeout-minutes: 60 permissions: diff --git a/.github/workflows/ci-unit-tests-docker.yml b/.github/workflows/ci-unit-tests-docker.yml index ac7ba27aa8d..2ec9bdc195b 100644 --- a/.github/workflows/ci-unit-tests-docker.yml +++ b/.github/workflows/ci-unit-tests-docker.yml @@ -210,51 +210,36 @@ jobs: # Test that Zebra works using the default config with the latest Zebra version. 
test-configuration-file: name: Test Zebra default Docker config file - timeout-minutes: 15 - runs-on: ubuntu-latest needs: build - steps: - - name: Test Zebra with default config file - uses: ./.github/workflows/sub-test-zebra-config.yml - with: - test_id: 'default-conf-tests' - test_description: 'Test Zebra default Docker config file' - grep_patterns: '-e "net.*=.*Main.*estimated progress to chain tip.*BeforeOverwinter"' - test_variables: '-e NETWORK' - network: 'Mainnet' + uses: ./.github/workflows/sub-test-zebra-config.yml + with: + test_id: 'default-conf-tests' + grep_patterns: '-e "net.*=.*Main.*estimated progress to chain tip.*BeforeOverwinter"' + test_variables: '-e NETWORK' + network: 'Mainnet' # Test reconfiguring the the docker image for tesnet. test-configuration-file-testnet: name: Test Zebra testnet Docker config file - timeout-minutes: 15 - runs-on: ubuntu-latest needs: build - steps: - # Make sure Zebra can sync the genesis block on testnet - - name: Test Zebra with Testnet config file - uses: ./.github/workflows/sub-test-zebra-config.yml - with: - test_id: 'testnet-conf-tests' - test_description: 'Test Zebra testnet Docker config file' - grep_patterns: '-e "net.*=.*Test.*estimated progress to chain tip.*Genesis" -e "net.*=.*Test.*estimated progress to chain tip.*BeforeOverwinter"' - test_variables: '-e NETWORK -e ZEBRA_CONF_PATH="/etc/zebrad/zebrad.toml"' - network: 'Testnet' + # Make sure Zebra can sync the genesis block on testnet + uses: ./.github/workflows/sub-test-zebra-config.yml + with: + test_id: 'testnet-conf-tests' + grep_patterns: '-e "net.*=.*Test.*estimated progress to chain tip.*Genesis" -e "net.*=.*Test.*estimated progress to chain tip.*BeforeOverwinter"' + test_variables: '-e NETWORK -e ZEBRA_CONF_PATH="/etc/zebrad/zebrad.toml"' + network: 'Testnet' # Test that Zebra works using $ZEBRA_CONF_PATH config test-zebra-conf-path: name: Test Zebra custom Docker config file - timeout-minutes: 15 - runs-on: ubuntu-latest needs: build - steps: 
- - name: Test Zebra with $ZEBRA_CONF_PATH config file - uses: ./.github/workflows/sub-test-zebra-config.yml - with: - test_id: 'variable-conf-tests' - test_description: 'Test Zebra custom Docker config file' - grep_patterns: '-e "v1.0.0-rc.2.toml"' - test_variables: '-e NETWORK -e ZEBRA_CONF_PATH="zebrad/tests/common/configs/v1.0.0-rc.2.toml"' - network: ${{ inputs.network || vars.ZCASH_NETWORK }} + uses: ./.github/workflows/sub-test-zebra-config.yml + with: + test_id: 'variable-conf-tests' + grep_patterns: '-e "v1.0.0-rc.2.toml"' + test_variables: '-e NETWORK -e ZEBRA_CONF_PATH="zebrad/tests/common/configs/v1.0.0-rc.2.toml"' + network: ${{ inputs.network || vars.ZCASH_NETWORK }} failure-issue: name: Open or update issues for main branch failures diff --git a/.github/workflows/sub-test-zebra-config.yml b/.github/workflows/sub-test-zebra-config.yml index 1202a8e4d69..29f88a33b17 100644 --- a/.github/workflows/sub-test-zebra-config.yml +++ b/.github/workflows/sub-test-zebra-config.yml @@ -8,10 +8,6 @@ on: required: true type: string description: 'Unique identifier for the test' - test_description: - required: true - type: string - description: 'Explains what the test does' grep_patterns: required: true type: string From 77b744d8217a26a32bcdc1f53baa6f482b05d66e Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Tue, 17 Oct 2023 19:13:06 +0100 Subject: [PATCH 52/59] fix(ci): use better name for protection rules --- .github/workflows/cd-deploy-nodes-gcp.yml | 6 +++--- .github/workflows/ci-unit-tests-docker.yml | 15 ++++++++------- .github/workflows/sub-test-zebra-config.yml | 6 +++--- 3 files changed, 14 insertions(+), 13 deletions(-) diff --git a/.github/workflows/cd-deploy-nodes-gcp.yml b/.github/workflows/cd-deploy-nodes-gcp.yml index a631d00b93c..b57aa37e94a 100644 --- a/.github/workflows/cd-deploy-nodes-gcp.yml +++ b/.github/workflows/cd-deploy-nodes-gcp.yml @@ -102,7 +102,7 @@ jobs: needs: build uses: ./.github/workflows/sub-test-zebra-config.yml with: - 
test_id: 'default-conf-tests' + test_id: 'default-conf' grep_patterns: '-e "net.*=.*Main.*estimated progress to chain tip.*BeforeOverwinter"' test_variables: '-e NETWORK' network: 'Mainnet' @@ -114,7 +114,7 @@ jobs: # Make sure Zebra can sync the genesis block on testnet uses: ./.github/workflows/sub-test-zebra-config.yml with: - test_id: 'testnet-conf-tests' + test_id: 'testnet-conf' grep_patterns: '-e "net.*=.*Test.*estimated progress to chain tip.*Genesis" -e "net.*=.*Test.*estimated progress to chain tip.*BeforeOverwinter"' test_variables: '-e NETWORK -e ZEBRA_CONF_PATH="/etc/zebrad/zebrad.toml"' network: 'Testnet' @@ -125,7 +125,7 @@ jobs: needs: build uses: ./.github/workflows/sub-test-zebra-config.yml with: - test_id: 'variable-conf-tests' + test_id: 'custom-conf' grep_patterns: '-e "v1.0.0-rc.2.toml"' test_variables: '-e NETWORK -e ZEBRA_CONF_PATH="zebrad/tests/common/configs/v1.0.0-rc.2.toml"' network: ${{ inputs.network || vars.ZCASH_NETWORK }} diff --git a/.github/workflows/ci-unit-tests-docker.yml b/.github/workflows/ci-unit-tests-docker.yml index 2ec9bdc195b..8bbb18e33a7 100644 --- a/.github/workflows/ci-unit-tests-docker.yml +++ b/.github/workflows/ci-unit-tests-docker.yml @@ -209,34 +209,35 @@ jobs: # Test that Zebra works using the default config with the latest Zebra version. test-configuration-file: - name: Test Zebra default Docker config file + name: Test default config file needs: build uses: ./.github/workflows/sub-test-zebra-config.yml with: - test_id: 'default-conf-tests' + test_id: 'default-conf' grep_patterns: '-e "net.*=.*Main.*estimated progress to chain tip.*BeforeOverwinter"' test_variables: '-e NETWORK' network: 'Mainnet' # Test reconfiguring the the docker image for tesnet. 
test-configuration-file-testnet: - name: Test Zebra testnet Docker config file + name: Test testnet config file needs: build # Make sure Zebra can sync the genesis block on testnet uses: ./.github/workflows/sub-test-zebra-config.yml with: - test_id: 'testnet-conf-tests' + test_id: 'testnet-conf' grep_patterns: '-e "net.*=.*Test.*estimated progress to chain tip.*Genesis" -e "net.*=.*Test.*estimated progress to chain tip.*BeforeOverwinter"' - test_variables: '-e NETWORK -e ZEBRA_CONF_PATH="/etc/zebrad/zebrad.toml"' + # TODO: improve the entrypoint to avoid using `ENTRYPOINT_FEATURES=""` + test_variables: '-e NETWORK -e ZEBRA_CONF_PATH="/etc/zebrad/zebrad.toml" -e ENTRYPOINT_FEATURES=""' network: 'Testnet' # Test that Zebra works using $ZEBRA_CONF_PATH config test-zebra-conf-path: - name: Test Zebra custom Docker config file + name: Test custom config file needs: build uses: ./.github/workflows/sub-test-zebra-config.yml with: - test_id: 'variable-conf-tests' + test_id: 'custom-conf' grep_patterns: '-e "v1.0.0-rc.2.toml"' test_variables: '-e NETWORK -e ZEBRA_CONF_PATH="zebrad/tests/common/configs/v1.0.0-rc.2.toml"' network: ${{ inputs.network || vars.ZCASH_NETWORK }} diff --git a/.github/workflows/sub-test-zebra-config.yml b/.github/workflows/sub-test-zebra-config.yml index 29f88a33b17..8aa9fd87e6d 100644 --- a/.github/workflows/sub-test-zebra-config.yml +++ b/.github/workflows/sub-test-zebra-config.yml @@ -25,8 +25,8 @@ on: description: 'Zcash network to test against' jobs: - test-zebra-config: - name: Test Zebra Docker Config + test-docker-config: + name: Test ${{ inputs.test_id }} in Docker timeout-minutes: 15 runs-on: ubuntu-latest steps: @@ -41,7 +41,7 @@ jobs: - uses: r7kamura/rust-problem-matchers@v1.4.0 - - name: Run ${{ inputs.test_id }} config test + - name: Run ${{ inputs.test_id }} test run: | docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} docker run ${{ inputs.test_variables }} --detach --name ${{ inputs.test_id 
}} -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} zebrad start From 4d2de6ffdf7ecca93734308076625135de7c2c00 Mon Sep 17 00:00:00 2001 From: arya2 Date: Tue, 17 Oct 2023 17:57:58 -0400 Subject: [PATCH 53/59] Updates docs --- zebra-state/src/service/finalized_state/disk_db.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/zebra-state/src/service/finalized_state/disk_db.rs b/zebra-state/src/service/finalized_state/disk_db.rs index 7ee2554b226..8f0e1aa6c9f 100644 --- a/zebra-state/src/service/finalized_state/disk_db.rs +++ b/zebra-state/src/service/finalized_state/disk_db.rs @@ -518,8 +518,7 @@ impl DiskDb { /// /// Note: Since upper iterate bounds are always exclusive in RocksDB, this method /// will increment the upper bound by 1 if the end bound of the provided range - /// is inclusive, or will return an upper bound of `None` if the end bound of a - /// provided range is inclusive and already the max key for that column family. + /// is inclusive. fn zs_iter_bounds(range: &R) -> (Option>, Option>) where R: RangeBounds>, @@ -533,8 +532,7 @@ impl DiskDb { let upper_bound = match range.end_bound().cloned() { Included(mut bound) => { - // Skip adding an upper bound if every byte is u8::MAX, or - // increment the last byte in the upper bound that is less than u8::MAX, + // Increment the last byte in the upper bound that is less than u8::MAX, // and clear any bytes after it to increment the big-endian number this // Vec represents to RocksDB. 
let is_zero = bound.iter_mut().rev().all(|v| { From 9cf67fbb702870f89a95dc98b9e4ccaf6d321332 Mon Sep 17 00:00:00 2001 From: arya2 Date: Tue, 17 Oct 2023 18:07:00 -0400 Subject: [PATCH 54/59] Applies suggestions from code review --- .../src/service/finalized_state/disk_db.rs | 19 +++++++++---------- .../service/finalized_state/disk_db/tests.rs | 2 +- 2 files changed, 10 insertions(+), 11 deletions(-) diff --git a/zebra-state/src/service/finalized_state/disk_db.rs b/zebra-state/src/service/finalized_state/disk_db.rs index 8f0e1aa6c9f..c042ea6db0d 100644 --- a/zebra-state/src/service/finalized_state/disk_db.rs +++ b/zebra-state/src/service/finalized_state/disk_db.rs @@ -401,9 +401,9 @@ impl DiskWriteBatch { impl DiskDb { /// Returns an iterator over the items in `cf` in `range`. /// - /// Accepts a `reverse` argument and creates the iterator with an [`IteratorMode`](rocksdb::IteratorMode) - /// of [`End`](rocksdb::IteratorMode::End), or [`From`](rocksdb::IteratorMode::From) - /// with [`Direction::Reverse`](rocksdb::Direction::Reverse). + /// Accepts a `reverse` argument. If it is `true`, creates the iterator with an + /// [`IteratorMode`](rocksdb::IteratorMode) of [`End`](rocksdb::IteratorMode::End), or + /// [`From`](rocksdb::IteratorMode::From) with [`Direction::Reverse`](rocksdb::Direction::Reverse). /// /// Holding this iterator open might delay block commit transactions. pub fn zs_range_iter( @@ -532,17 +532,16 @@ impl DiskDb { let upper_bound = match range.end_bound().cloned() { Included(mut bound) => { - // Increment the last byte in the upper bound that is less than u8::MAX, - // and clear any bytes after it to increment the big-endian number this - // Vec represents to RocksDB. - let is_zero = bound.iter_mut().rev().all(|v| { + // Increment the last byte in the upper bound that is less than u8::MAX, and + // clear any bytes after it to increment the next key in lexicographic order + // (next big-endian number) this Vec represents to RocksDB. 
+ let is_wrapped_overflow = bound.iter_mut().rev().all(|v| { *v = v.wrapping_add(1); v == &0 }); - if is_zero { - bound.push(0); - *bound.get_mut(0).expect("should have at least 1 element") += 1; + if is_wrapped_overflow { + bound.insert(0, 0x01) } Some(bound) diff --git a/zebra-state/src/service/finalized_state/disk_db/tests.rs b/zebra-state/src/service/finalized_state/disk_db/tests.rs index ebfa44ede75..8e71ce64d09 100644 --- a/zebra-state/src/service/finalized_state/disk_db/tests.rs +++ b/zebra-state/src/service/finalized_state/disk_db/tests.rs @@ -51,7 +51,7 @@ fn zs_iter_opts_increments_key_by_one() { let (_, bytes) = DiskDb::zs_iter_bounds(&..=key.to_be_bytes().to_vec()); let mut bytes = bytes.expect("there should be an upper bound"); let upper_bound_bytes = bytes.split_off(bytes.len() - 4); - let upper_bound = u32::from_be_bytes(upper_bound_bytes.try_into().unwrap()); + let upper_bound = u32::from_be_bytes(upper_bound_bytes.try_into().expect("no added bytes")); let expected_upper_bound = key.wrapping_add(1); assert_eq!( From 1c595fb1a5a68d7f72ea6226e725a21bc6861401 Mon Sep 17 00:00:00 2001 From: teor Date: Wed, 18 Oct 2023 13:40:29 +1000 Subject: [PATCH 55/59] Add extra checks --- .../src/service/finalized_state/disk_db/tests.rs | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/zebra-state/src/service/finalized_state/disk_db/tests.rs b/zebra-state/src/service/finalized_state/disk_db/tests.rs index 8e71ce64d09..e7c9a26919e 100644 --- a/zebra-state/src/service/finalized_state/disk_db/tests.rs +++ b/zebra-state/src/service/finalized_state/disk_db/tests.rs @@ -30,6 +30,7 @@ impl DiskDb { fn zs_iter_opts_increments_key_by_one() { let _init_guard = zebra_test::init(); + // TODO: add an empty key (`()` type or `[]` when serialized) test case let keys: [u32; 14] = [ 0, 1, @@ -65,6 +66,18 @@ fn zs_iter_opts_increments_key_by_one() { vec![1], "there should be an extra byte with a value of 1" ); + } else { + assert_eq!( + key.len(), + bytes.len(), + 
"there should be no extra bytes" + ); } + + assert_ne!( + bytes[0], + 0x00, + "there must be at least one byte, and the first byte can't be zero" + ); } } From f64456220234c529579744d48e63eecc64e4d3a0 Mon Sep 17 00:00:00 2001 From: teor Date: Wed, 18 Oct 2023 14:11:17 +1000 Subject: [PATCH 56/59] Fix test code and rustfmt --- zebra-state/src/service/finalized_state/disk_db/tests.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/zebra-state/src/service/finalized_state/disk_db/tests.rs b/zebra-state/src/service/finalized_state/disk_db/tests.rs index e7c9a26919e..c52053ba050 100644 --- a/zebra-state/src/service/finalized_state/disk_db/tests.rs +++ b/zebra-state/src/service/finalized_state/disk_db/tests.rs @@ -68,15 +68,14 @@ fn zs_iter_opts_increments_key_by_one() { ); } else { assert_eq!( - key.len(), + key.to_be_bytes().len(), bytes.len(), "there should be no extra bytes" ); } assert_ne!( - bytes[0], - 0x00, + bytes[0], 0x00, "there must be at least one byte, and the first byte can't be zero" ); } From cbca3343f4c272db2ca49e11d076f6ff70595878 Mon Sep 17 00:00:00 2001 From: arya2 Date: Wed, 18 Oct 2023 01:00:50 -0400 Subject: [PATCH 57/59] fixes test --- .../service/finalized_state/disk_db/tests.rs | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/zebra-state/src/service/finalized_state/disk_db/tests.rs b/zebra-state/src/service/finalized_state/disk_db/tests.rs index c52053ba050..0fd8f857f9c 100644 --- a/zebra-state/src/service/finalized_state/disk_db/tests.rs +++ b/zebra-state/src/service/finalized_state/disk_db/tests.rs @@ -50,9 +50,9 @@ fn zs_iter_opts_increments_key_by_one() { for key in keys { let (_, bytes) = DiskDb::zs_iter_bounds(&..=key.to_be_bytes().to_vec()); - let mut bytes = bytes.expect("there should be an upper bound"); - let upper_bound_bytes = bytes.split_off(bytes.len() - 4); - let upper_bound = u32::from_be_bytes(upper_bound_bytes.try_into().expect("no added bytes")); + let mut 
extra_bytes = bytes.expect("there should be an upper bound"); + let bytes = extra_bytes.split_off(extra_bytes.len() - 4); + let upper_bound = u32::from_be_bytes(bytes.clone().try_into().expect("should be 4 bytes")); let expected_upper_bound = key.wrapping_add(1); assert_eq!( @@ -62,21 +62,18 @@ fn zs_iter_opts_increments_key_by_one() { if expected_upper_bound == 0 { assert_eq!( - bytes, + extra_bytes, vec![1], "there should be an extra byte with a value of 1" ); } else { - assert_eq!( - key.to_be_bytes().len(), - bytes.len(), - "there should be no extra bytes" - ); + assert_eq!(extra_bytes.len(), 0, "there should be no extra bytes"); } assert_ne!( - bytes[0], 0x00, - "there must be at least one byte, and the first byte can't be zero" + extra_bytes.last().expect("there must be at least one byte"), + &0, + "the last byte can't be zero" ); } } From da0afea7c44888cd041895e18c02ddadd7e2e5fd Mon Sep 17 00:00:00 2001 From: arya2 Date: Wed, 18 Oct 2023 01:04:10 -0400 Subject: [PATCH 58/59] fixes test --- zebra-state/src/service/finalized_state/disk_db/tests.rs | 6 ------ 1 file changed, 6 deletions(-) diff --git a/zebra-state/src/service/finalized_state/disk_db/tests.rs b/zebra-state/src/service/finalized_state/disk_db/tests.rs index 0fd8f857f9c..20fecbbf127 100644 --- a/zebra-state/src/service/finalized_state/disk_db/tests.rs +++ b/zebra-state/src/service/finalized_state/disk_db/tests.rs @@ -69,11 +69,5 @@ fn zs_iter_opts_increments_key_by_one() { } else { assert_eq!(extra_bytes.len(), 0, "there should be no extra bytes"); } - - assert_ne!( - extra_bytes.last().expect("there must be at least one byte"), - &0, - "the last byte can't be zero" - ); } } From 9afdbebd31743ea3f64ecfaca52a0ecf51565644 Mon Sep 17 00:00:00 2001 From: Gustavo Valverde Date: Wed, 18 Oct 2023 11:15:43 +0100 Subject: [PATCH 59/59] chore: missing merge conflicts --- .github/workflows/sub-test-zebra-config.yml | 8 -------- 1 file changed, 8 deletions(-) diff --git 
a/.github/workflows/sub-test-zebra-config.yml b/.github/workflows/sub-test-zebra-config.yml index bbe76124389..d8e856f0748 100644 --- a/.github/workflows/sub-test-zebra-config.yml +++ b/.github/workflows/sub-test-zebra-config.yml @@ -14,13 +14,10 @@ on: description: 'Patterns to grep for in the logs' # Test selection and parameters -<<<<<<< HEAD -======= docker_image: required: true type: string description: 'Docker image to test' ->>>>>>> main test_variables: required: true type: string @@ -50,13 +47,8 @@ jobs: - name: Run ${{ inputs.test_id }} test run: | -<<<<<<< HEAD - docker pull ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} - docker run ${{ inputs.test_variables }} --detach --name ${{ inputs.test_id }} -t ${{ vars.GAR_BASE }}/${{ vars.CI_IMAGE_NAME }}:sha-${{ env.GITHUB_SHA_SHORT }} zebrad start -======= docker pull ${{ inputs.docker_image }} docker run ${{ inputs.test_variables }} --detach --name ${{ inputs.test_id }} -t ${{ inputs.docker_image }} zebrad start ->>>>>>> main # Use a subshell to handle the broken pipe error gracefully ( trap "" PIPE;