name: Nvqc regression tests
concurrency:
# Only one integration test workflow may run at a time, since it involves pushing/cleaning up a test image (same tag).
group: ${{ github.workflow }}${{ github.event.workflow_run.name }}
cancel-in-progress: false
# Run on request and every day at 3 AM UTC
on:
workflow_dispatch:
inputs:
cudaq_test_image:
type: string
required: false
default: 'nvcr.io/nvidia/nightly/cuda-quantum:latest' # If changed, update env defaults, too
description: 'CUDA Quantum image to run the tests in. Defaults to the latest CUDA Quantum nightly image'
commit_sha:
type: string
required: false
description: 'Commit SHA to pull the code (examples/tests) for testing. Defaults to the commit associated with the CUDA Quantum Docker image if left blank'
workflow_id:
type: string
required: false
description: 'Workflow ID to retrieve the Python wheels for testing. Defaults to the wheels produced by the Publishing workflow associated with the latest nightly CUDA Quantum Docker image if left blank'
python_version:
type: choice
required: true
default: '3.10' # If changed, update env defaults, too
description: 'Python version for the wheel test'
options:
- '3.8'
- '3.9'
- '3.10'
- '3.11'
schedule:
- cron: '0 3 * * *'
env:
# If vars below are changed, it is recommended to also update the
# workflow_dispatch defaults above so they stay in sync.
cudaq_test_image: nvcr.io/nvidia/nightly/cuda-quantum:latest
python_version: '3.10'
jobs:
# We need this job purely to choose the container image values because the
# `env` context is unavailable outside of "steps" contexts.
setup:
name: Set variables
runs-on: ubuntu-latest
outputs:
cudaq_test_image: ${{ steps.vars.outputs.cudaq_test_image }}
steps:
- name: Set variables
id: vars
run: |
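# Prefer the workflow_dispatch input; fall back to the env default (e.g., for scheduled runs).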
echo "cudaq_test_image=${{ inputs.cudaq_test_image || env.cudaq_test_image }}" >> $GITHUB_OUTPUT
metadata:
name: Retrieve commit info
runs-on: ubuntu-latest
needs: setup
environment: backend-validation
container:
image: ${{ needs.setup.outputs.cudaq_test_image }}
options: --user root
outputs:
cudaq_commit: ${{ steps.commit-sha.outputs.sha }}
steps:
- name: Get commit SHA
id: commit-sha
run: |
if [ -n "${{ inputs.commit_sha }}" ]; then
echo "sha=${{ inputs.commit_sha }}" >> $GITHUB_OUTPUT
else
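# The image's build_info.txt records the source commit as 'source-sha: <sha>'.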
echo "sha=$(cat $CUDA_QUANTUM_PATH/build_info.txt | grep -o 'source-sha: \S*' | cut -d ' ' -f 2)" >> $GITHUB_OUTPUT
fi
nvqc_integration_docker_test:
name: NVQC integration test using Docker image
runs-on: ubuntu-latest
needs: [setup, metadata]
environment: backend-validation
container:
image: ${{ needs.setup.outputs.cudaq_test_image }}
options: --user root
steps:
- name: Get code
uses: actions/checkout@v4
with:
ref: ${{ needs.metadata.outputs.cudaq_commit }}
fetch-depth: 1
- name: Submit to NVQC
run: |
echo "### Submit to NVQC" >> $GITHUB_STEP_SUMMARY
export NVQC_API_KEY="${{ secrets.NVQC_PROD_SERVICE_KEY }}"
set +e # Allow script to keep going through errors
test_err_sum=0
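# Tally failures here and report them all together at the end of this step.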
# Test all nvq++ execution tests
for filename in $(find targettests/execution/ -name '*.cpp'); do
echo "$filename"
# Only run tests that require execution (not a syntax-only check)
if grep -q "ifndef SYNTAX_CHECK" "$filename"; then
nvq++ -v "$filename" --target nvqc
test_status=$?
if [ $test_status -eq 0 ]; then
./a.out
test_status=$?
if [ $test_status -eq 0 ]; then
echo ":white_check_mark: Successfully ran test: $filename" >> $GITHUB_STEP_SUMMARY
else
echo ":x: Test failed (failed to execute): $filename" >> $GITHUB_STEP_SUMMARY
test_err_sum=$((test_err_sum+1))
fi
else
echo ":x: Test failed (failed to compile): $filename" >> $GITHUB_STEP_SUMMARY
test_err_sum=$((test_err_sum+1))
fi
fi
done
# Test all remote-sim tests
for filename in $(find targettests/Remote-Sim -name '*.cpp'); do
# unsupport_args and compile_errors are compile error tests
# pauli_word: https://github.com/NVIDIA/cuda-quantum/issues/1957
# nested_vectors: related to vector of pauli_words (https://github.com/NVIDIA/cuda-quantum/issues/1957)
# custom_operation: https://github.com/NVIDIA/cuda-quantum/issues/1985
# return_values: only supported in 0.8 NVQC service.
if [[ "$filename" != *"unsupport_args"* ]] && [[ "$filename" != *"state_overlap"* ]] && [[ "$filename" != *"compile_errors"* ]] && [[ "$filename" != *"nested_vectors"* ]] && [[ "$filename" != *"pauli_word"* ]] && [[ "$filename" != *"custom_operation"* ]] && [[ "$filename" != *"return_values"* ]]; then
echo "$filename"
nvqc_config=""
# Look for a --remote-mqpu-auto-launch flag to determine the number of QPUs
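# (e.g., a test header such as "// RUN: nvq++ %s --remote-mqpu-auto-launch 2"; illustrative.)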
num_qpus=$(grep -oP -m 1 '^//\s*RUN:\s*nvq++.+--remote-mqpu-auto-launch\s+\K\S+' "$filename")
if [ -n "$num_qpus" ]; then
echo "Intended to run on '$num_qpus' QPUs."
nvqc_config="$nvqc_config --nvqc-nqpus $num_qpus"
fi
nvq++ -v "$filename" --target nvqc $nvqc_config
test_status=$?
if [ $test_status -eq 0 ]; then
./a.out
test_status=$?
if [ $test_status -eq 0 ]; then
echo ":white_check_mark: Successfully ran test: $filename" >> $GITHUB_STEP_SUMMARY
else
echo ":x: Test failed (failed to execute): $filename" >> $GITHUB_STEP_SUMMARY
test_err_sum=$((test_err_sum+1))
fi
else
echo ":x: Test failed (failed to compile): $filename" >> $GITHUB_STEP_SUMMARY
test_err_sum=$((test_err_sum+1))
fi
fi
done
# Test C++ examples with NVQC
for filename in $(find examples/cpp/ -name '*.cpp'); do
if [[ "$filename" == *"nvqc"* ]]; then
echo "$filename"
nvqc_config=""
# Look for a --nvqc-backend flag to nvq++ in the comment block
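# (e.g., a header such as "// nvq++ ... --nvqc-backend tensornet"; backend name illustrative.)
# Note: sed '/^$/,$d' keeps only the leading comment block, i.e., everything before the first blank line.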
nvqc_backend=$(sed -e '/^$/,$d' "$filename" | grep -oP -m 1 '^//\s*nvq++.+--nvqc-backend\s+\K\S+')
if [ -n "$nvqc_backend" ]; then
echo "Intended for execution on '$nvqc_backend' backend."
nvqc_config="$nvqc_config --nvqc-backend $nvqc_backend"
fi
# Look for a --nvqc-nqpus flag to nvq++ in the comment block
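# (e.g., "// nvq++ ... --nvqc-nqpus 2"; count illustrative.)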
num_qpus=$(sed -e '/^$/,$d' "$filename" | grep -oP -m 1 '^//\s*nvq++.+--nvqc-nqpus\s+\K\S+')
if [ -n "$num_qpus" ]; then
echo "Intended to run on '$num_qpus' QPUs."
nvqc_config="$nvqc_config --nvqc-nqpus $num_qpus"
fi
nvq++ -v "$filename" --target nvqc $nvqc_config
test_status=$?
if [ $test_status -eq 0 ]; then
./a.out
test_status=$?
if [ $test_status -eq 0 ]; then
echo ":white_check_mark: Successfully ran test: $filename" >> $GITHUB_STEP_SUMMARY
else
echo ":x: Test failed (failed to execute): $filename" >> $GITHUB_STEP_SUMMARY
test_err_sum=$((test_err_sum+1))
fi
else
echo ":x: Test failed (failed to compile): $filename" >> $GITHUB_STEP_SUMMARY
test_err_sum=$((test_err_sum+1))
fi
fi
done
# Test NVQC Python examples + Python MLIR execution tests (not IR tests)
python3 -m pip install pytest
for ex in $(find examples/python python/tests/mlir/target -name '*.py' -not -path '*/python/tutorials/*'); do
filename=$(basename -- "$ex")
filename="${filename%.*}"
echo "Testing $filename:"
if [[ "$ex" == *"nvqc"* ]]; then
# This is an NVQC example
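# Such examples configure the nvqc target themselves, so no extra flags are needed (stdout discarded).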
python3 "$ex" 1> /dev/null
test_status=$?
if [ $test_status -eq 0 ]; then
echo ":white_check_mark: Successfully ran test: $ex" >> $GITHUB_STEP_SUMMARY
else
echo ":x: Test failed (failed to execute): $ex" >> $GITHUB_STEP_SUMMARY
test_err_sum=$((test_err_sum+1))
fi
else
# Only run examples that are not target-specific (e.g., ionq, iqm)
if ! grep -q "set_target" "$ex"; then
# Use --target command line option to run these examples with nvqc
python3 "$ex" --target nvqc 1> /dev/null
test_status=$?
if [ $test_status -eq 0 ]; then
echo ":white_check_mark: Successfully ran test: $ex" >> $GITHUB_STEP_SUMMARY
else
echo ":x: Test failed (failed to execute): $ex" >> $GITHUB_STEP_SUMMARY
test_err_sum=$((test_err_sum+1))
fi
fi
fi
done
set -e # Re-enable exit code error checking
if [ $test_err_sum -ne 0 ]; then
echo "::error::${test_err_sum} tests failed. See step summary for a list of failures"
exit 1
fi
shell: bash
nvqc_integration_wheel_test:
name: NVQC integration test using Python wheels
runs-on: ubuntu-latest
needs: [metadata]
environment: backend-validation
steps:
- name: Get code
uses: actions/checkout@v4
with:
ref: ${{ needs.metadata.outputs.cudaq_commit }}
fetch-depth: 1
- name: Install wheel
id: install_wheel
run: |
python_version=${{ inputs.python_version || env.python_version }}
workflow_id=${{ inputs.workflow_id }}
# Helper to get a *valid* Publishing run ID for a commit hash.
# Note: runs where the 'CUDA Quantum Python wheels' jobs were skipped are not considered.
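# Usage: get_publishing_run_id [commit-sha] -- prints the first run ID whose jobs include wheel builds.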
function get_publishing_run_id {
# Find all Publishing runs; we'll inspect their jobs' status below
if [[ -z "$1" ]]; then
publishing_run_ids=$(gh run -R NVIDIA/cuda-quantum list --workflow Publishing --json databaseId --jq .[].databaseId)
else
publishing_run_ids=$(gh run -R NVIDIA/cuda-quantum list --commit $1 --workflow Publishing --json databaseId --jq .[].databaseId)
fi
for run_id in $publishing_run_ids ; do
# Look into its jobs: if the "CUDA Quantum Python wheels" matrix build was performed,
# then there are multiple jobs named like "CUDA Quantum Python wheels (python_arm64...)"
cuda_wheel_build_jobs=$(gh run -R NVIDIA/cuda-quantum view $run_id --jq '.jobs.[] | select(.name | startswith("CUDA Quantum Python wheels (python_")).name' --json jobs)
if [ ! -z "$cuda_wheel_build_jobs" ]; then
# This is a valid run that produces wheel artifacts
echo $run_id
break
fi
done
}
if [ -z "${workflow_id}" ]; then
workflow_id=$(get_publishing_run_id ${{ needs.metadata.outputs.cudaq_commit }})
fi
if [ ! -z "$workflow_id" ]; then
echo "Using artifacts from workflow id $workflow_id"
# Allow errors when trying to download wheel artifacts since they might have expired.
set +e
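# The wheels artifact is expected to be named "x86_64-py<python-version>-wheels".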
gh run -R NVIDIA/cuda-quantum download $workflow_id --name "x86_64-py$python_version-wheels"
retVal=$?
set -e
if [ $retVal -ne 0 ]; then
echo "Failed to download wheels artifact from Publishing workflow run Id $workflow_id. Perhaps the artifacts have been expired."
# This is allowed since there might be a period where no Publishing workflow is run (e.g., no PR merged to main).
echo "skipped=true" >> $GITHUB_OUTPUT
exit 0
fi
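# Strip the dot from the version (e.g., 3.10 -> 310) to match the wheel's cp310-style tag.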
python_version_filename="${python_version//.}"
# Install Python and the wheel
sudo apt-get update && sudo apt-get install -y --no-install-recommends python$python_version python3-pip
wheelfile=$(find . -name "cuda_quantum*cp$python_version_filename*x86_64.whl")
python$python_version -m pip install $wheelfile
echo "skipped=false" >> $GITHUB_OUTPUT
else
echo "Failed to retrieve Publishing workflow run Id for commit ${{ needs.metadata.outputs.cudaq_commit }}"
exit 1
fi
env:
GH_TOKEN: ${{ github.token }}
- name: Test NVQC
if: ${{ steps.install_wheel.outputs.skipped != 'true' }}
run: |
echo "### Submit to NVQC from Python wheels" >> $GITHUB_STEP_SUMMARY
python_version=${{ inputs.python_version || env.python_version }}
export NVQC_API_KEY="${{ secrets.NVQC_PROD_SERVICE_KEY }}"
set +e # Allow script to keep going through errors
python$python_version -m pip install pytest
test_err_sum=0
for ex in $(find examples/python python/tests/mlir/target -name '*.py' -not -path '*/python/tutorials/*'); do
filename=$(basename -- "$ex")
filename="${filename%.*}"
echo "Testing $filename:"
if [[ "$ex" == *"nvqc"* ]]; then
python$python_version "$ex" 1> /dev/null
test_status=$?
if [ $test_status -eq 0 ]; then
echo ":white_check_mark: Successfully ran test: $ex" >> $GITHUB_STEP_SUMMARY
else
echo ":x: Test failed (failed to execute): $ex" >> $GITHUB_STEP_SUMMARY
test_err_sum=$((test_err_sum+1))
fi
else
# Only run examples that are not target-specific (e.g., ionq, iqm)
if ! grep -q "set_target" "$ex"; then
# Use --target command line option to run these examples with nvqc
python$python_version "$ex" --target nvqc 1> /dev/null
test_status=$?
if [ $test_status -eq 0 ]; then
echo ":white_check_mark: Successfully ran test: $ex" >> $GITHUB_STEP_SUMMARY
else
echo ":x: Test failed (failed to execute): $ex" >> $GITHUB_STEP_SUMMARY
test_err_sum=$((test_err_sum+1))
fi
fi
fi
done
set -e # Re-enable exit code error checking
if [ $test_err_sum -ne 0 ]; then
echo "::error::${test_err_sum} tests failed. See step summary for a list of failures"
exit 1
fi