diff --git a/.github/workflows/build_packages.yml b/.github/workflows/build_packages.yml new file mode 100644 index 0000000000..10c7c35459 --- /dev/null +++ b/.github/workflows/build_packages.yml @@ -0,0 +1,263 @@ +on: + workflow_call: + inputs: + devdeps_image: + required: false + type: string + devdeps_cache: + required: false + type: string + devdeps_archive: + required: false + type: string + environment: + required: false + type: string + +name: Packages # do not change name without updating workflow_run triggers + +jobs: + metadata: + name: Metadata + runs-on: ubuntu-latest + permissions: + contents: read + + outputs: + image_name: ${{ steps.metadata.outputs.image_name }} + image_tag: ${{ steps.metadata.outputs.image_tag }} + + steps: + - name: Determine metadata + id: metadata + run: | + repo_owner=${{ github.repository_owner }} + image_name=${{ vars.registry || 'ghcr.io' }}/${repo_owner,,}/cuda-quantum + if ${{ github.event.pull_request.number != '' }}; then + image_tag=pr-${{ github.event.pull_request.number }} + elif ${{ github.ref_type == 'branch' && github.ref_name == 'main' }}; then + image_tag=latest + elif ${{ github.ref_type == 'tag' || startsWith(github.ref_name, 'releases/') }}; then + image_tag=`echo ${{ github.ref_name }} | egrep -o "([0-9]{1,}\.)+[0-9]{1,}"` + else + image_tag=`echo ${{ github.ref_name }} | tr '/' '-'` + fi + + echo "image_name=$image_name" >> $GITHUB_OUTPUT + echo "image_tag=$image_tag" >> $GITHUB_OUTPUT + + release_build: + name: Release build + runs-on: ubuntu-latest + permissions: + contents: read + + outputs: + cudaqdev_cache: ${{ steps.cudaq_build.outputs.cudaqdev_cache }} + cudaqdev_tarfile: ${{ steps.cudaq_build.outputs.cudaqdev_tarfile }} + + steps: + - name: Checkout repository + uses: actions/checkout@v3 + + - name: Restore environment + if: inputs.devdeps_cache && inputs.devdeps_archive + id: restore + uses: actions/cache/restore@v3 + with: + path: ${{ inputs.devdeps_archive }} + key: ${{ inputs.devdeps_cache }} + 
fail-on-cache-miss: true + + - name: Build CUDA Quantum + id: cudaq_build + run: | + if ${{ steps.restore.outcome != 'skipped' }}; then + base_image=`docker load --input ${{ inputs.devdeps_archive }} | grep -o 'Loaded image: \S*:\S*' | cut -d ' ' -f 3` + elif ${{ inputs.devdeps_image != '' }}; then + docker pull ${{ inputs.devdeps_image }} + base_image=${{ inputs.devdeps_image }} + else + echo "Missing configuration for development dependencies. Either specify the image (i.e. provide devdeps_image) or cache (i.e. provide devdeps_cache and devdeps_archive) that should be used for the build." >> $GITHUB_STEP_SUMMARY + exit 1 + fi + + docker build -t cuda-quantum-dev:local -f docker/build/cudaqdev.Dockerfile . \ + --build-arg base_image=$base_image \ + --build-arg install="CMAKE_BUILD_TYPE=Release FORCE_COMPILE_GPU_COMPONENTS=true" + + cache_id=`docker inspect cuda-quantum-dev:local --format='{{.Config.Labels}}' | grep -o image.version:[A-Za-z0-9_-]* | cut -d ":" -f 2` + cudaqdev_cache=tar-cudaqdev-${cache_id}-${{ github.sha }} + + cudaqdev_tarfile=/tmp/cudaqdev.tar + docker save cuda-quantum-dev:local > $cudaqdev_tarfile + + echo "cudaqdev_cache=$cudaqdev_cache" >> $GITHUB_OUTPUT + echo "cudaqdev_tarfile=$cudaqdev_tarfile" >> $GITHUB_OUTPUT + + - name: Cache cuda-quantum-dev image + uses: actions/cache/save@v3 + with: + path: ${{ steps.cudaq_build.outputs.cudaqdev_tarfile }} + key: ${{ steps.cudaq_build.outputs.cudaqdev_cache }} + + docker_image: + name: Docker image + runs-on: ubuntu-latest + needs: [release_build, metadata] + permissions: + contents: read + + outputs: + tar_cache: ${{ steps.cudaq_image.outputs.tar_cache }} + tar_archive: ${{ steps.cudaq_image.outputs.tar_archive }} + + steps: + - name: Checkout repository + uses: actions/checkout@v3 + + - name: Restore release build + uses: actions/cache/restore@v3 + with: + path: ${{ needs.release_build.outputs.cudaqdev_tarfile }} + key: ${{ needs.release_build.outputs.cudaqdev_cache }} + fail-on-cache-miss: true 
+ + - name: Build cuda-quantum image + id: cudaq_image + run: | + image_name=${{ needs.metadata.outputs.image_name }} + image_tag=${{ needs.metadata.outputs.image_tag }} + tar_archive=/tmp/cuda-quantum.tar + + docker load --input ${{ needs.release_build.outputs.cudaqdev_tarfile }} + docker build -t $image_name:$image_tag -f docker/release/cudaq.Dockerfile . \ + --build-arg dev_image=cuda-quantum-dev --build-arg dev_tag=local \ + --build-arg release_version=$image_tag + + rm -rf "${{ needs.release_build.outputs.cudaqdev_tarfile }}" + docker image rm cuda-quantum-dev:local + docker builder prune --all --force + docker save $image_name:$image_tag > $tar_archive + + echo "tar_archive=$tar_archive" >> $GITHUB_OUTPUT + echo "tar_cache=tar-cuda-quantum-${{ github.sha }}" >> $GITHUB_OUTPUT + + - name: Cache cuda-quantum image + uses: actions/cache/save@v3 + with: + path: ${{ steps.cudaq_image.outputs.tar_archive }} + key: ${{ steps.cudaq_image.outputs.tar_cache }} + + - name: Validate cuda-quantum image + run: | + docker run --rm -dit --name cuda-quantum ${{ needs.metadata.outputs.image_name }}:${{ needs.metadata.outputs.image_tag }} + docker cp scripts/validate_container.sh cuda-quantum:"/home/cudaq/validate_container.sh" + docker exec -e TERM=xterm cuda-quantum bash validate_container.sh dm > /tmp/validation.out + docker stop cuda-quantum + + - name: Create job summary + run: | + echo "## Validation" >> $GITHUB_STEP_SUMMARY + echo "The validation of the cuda-quantum image produced the following output:" >> $GITHUB_STEP_SUMMARY + echo '```text' >> $GITHUB_STEP_SUMMARY + cat /tmp/validation.out >> $GITHUB_STEP_SUMMARY + echo '```' >> $GITHUB_STEP_SUMMARY + + deployment: + name: Deployment + if: inputs.environment + needs: docker_image + uses: ./.github/workflows/deploy_to_registry.yml + with: + environment: ${{ inputs.environment }} + cache_key: ${{ needs.docker_image.outputs.tar_cache }} + tar_archive: ${{ needs.docker_image.outputs.tar_archive }} + + documentation: + 
name: Documentation + runs-on: ubuntu-latest + needs: [release_build, metadata] + permissions: + contents: read + + steps: + - name: Checkout repository + uses: actions/checkout@v3 + + - name: Restore release build + uses: actions/cache/restore@v3 + with: + path: ${{ needs.release_build.outputs.cudaqdev_tarfile }} + key: ${{ needs.release_build.outputs.cudaqdev_cache }} + fail-on-cache-miss: true + + - name: Build documentation + id: docs_build + run: | + docs_version="CUDA_QUANTUM_VERSION=${{ needs.metadata.outputs.image_tag }}" + + docker load --input ${{ needs.release_build.outputs.cudaqdev_tarfile }} + docker run --rm -dit --name cuda-quantum-dev cuda-quantum-dev:local + (docker exec cuda-quantum-dev bash -c "export $docs_version && bash scripts/build_docs.sh" && built=true) || built=false + if $built; then docker cp cuda-quantum-dev:"/usr/local/cudaq/docs/." docs; \ + else docker cp cuda-quantum-dev:"/workspaces/cuda-quantum/build/." /tmp/build; fi + docker stop cuda-quantum-dev + if $built; then `exit 0`; else `exit 1`; fi + + html_files=`find docs/api/ -type f -name "*.html"` + json="{\"html_files\":[]}" + for file in $html_files; do + file=\'$file\' + json=`echo $json | jq ".html_files |= . 
+ [\"$file\"]"` + done + echo "json=$(echo $json)" >> $GITHUB_OUTPUT + + - name: Upload build artifacts + if: failure() + uses: actions/upload-artifact@v3 + with: + name: build + path: /tmp/build + retention-days: 1 + + - name: Upload documentation + if: success() + uses: actions/upload-artifact@v3 + with: + name: cuda_quantum_docs # changing the artifact name requires updating other workflows + path: docs + retention-days: 30 + if-no-files-found: error + + - name: Spell check HTML documentation + if: success() + continue-on-error: true # to be removed once we update all docs for this check to pass + uses: rojopolis/spellcheck-github-actions@0.30.0 + with: + config_path: '.github/workflows/config/spellcheck_config.yml' + task_name: html + source_files: ${{ join(fromJSON(steps.docs_build.outputs.json).html_files, ' ') }} + + clean_up: + name: Prepare cache clean-up + runs-on: ubuntu-latest + needs: [release_build, docker_image, documentation] + if: always() # need to clean up even if the workflow is cancelled or fails + + steps: + - name: Save cache keys + id: workflow_inputs + run: | + keys=${{ needs.release_build.outputs.cudaqdev_cache }} + if ${{ inputs.environment == '' }}; then + keys+=" ${{ needs.docker_image.outputs.tar_cache }}" + fi + echo "$keys" >> cache_keys.txt + + - uses: actions/upload-artifact@v3 + with: + name: cache_keys_packages + path: cache_keys.txt + retention-days: 1 + if-no-files-found: error diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 0000000000..0ad46b2892 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,91 @@ +on: + workflow_dispatch: + inputs: + export_environment: + type: boolean + description: Export the build environment as tar artifact that can be imported with Docker. 
+ pull_request: + branches: + - 'main' + - 'releases/*' + +name: CI # do not change name without updating workflow_run triggers + +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + +jobs: + setup: + name: Load dependencies + strategy: + matrix: + toolchain: [llvm, clang16, gcc12] + fail-fast: false + uses: ./.github/workflows/dev_environment.yml + with: + dockerfile: build/devdeps.Dockerfile + toolchain: ${{ matrix.toolchain }} + # needed only for the cloudposse GitHub action + matrix_key: ${{ matrix.toolchain }} + + # This job is needed only when using the cloudposse GitHub action to read + # the output of a matrix job. This is a workaround due to current GitHub + # limitations that may not be needed if the work started here concludes: + # https://github.com/actions/runner/pull/2477 + config: + name: Configure build + runs-on: ubuntu-latest + needs: setup + + outputs: + json: "${{ steps.read_json.outputs.result }}" + + steps: + - uses: cloudposse/github-action-matrix-outputs-read@0.1.1 + id: read_json + with: + matrix-step-name: dev_environment + + build_and_test: + name: Build and test + needs: config + strategy: + matrix: + toolchain: [llvm, clang16, gcc12] + fail-fast: false + uses: ./.github/workflows/test_in_devenv.yml + with: + devdeps_cache: ${{ fromJson(needs.config.outputs.json).cache_key[format('{0}', matrix.toolchain)] }} + devdeps_archive: ${{ fromJson(needs.config.outputs.json).tar_archive[format('{0}', matrix.toolchain)] }} + export_environment: ${{ github.event_name == 'workflow_dispatch' && inputs.export_environment == 'true' }} + + docker_image: + name: Create Packages + needs: config + uses: ./.github/workflows/build_packages.yml + with: + devdeps_cache: ${{ fromJson(needs.config.outputs.json).cache_key.llvm }} + devdeps_archive: ${{ fromJson(needs.config.outputs.json).tar_archive.llvm }} + + clean_up: + name: Prepare cache clean-up + runs-on: ubuntu-latest + needs: 
[config, build_and_test, docker_image] + if: always() # need to clean up even if the workflow is cancelled or fails + + steps: + - name: Save cache keys + id: workflow_inputs + run: | + set -e + key_matrix='${{ needs.config.outputs.json }}' + keys=`echo $key_matrix | jq '.cache_key | to_entries | .[].value' --raw-output` + echo "$keys" >> cache_keys.txt + + - uses: actions/upload-artifact@v3 + with: + name: cache_keys_ci + path: cache_keys.txt + retention-days: 1 + if-no-files-found: error diff --git a/.github/workflows/cla_assistant.yml b/.github/workflows/cla_assistant.yml index cd0c26d20a..9ca003c520 100644 --- a/.github/workflows/cla_assistant.yml +++ b/.github/workflows/cla_assistant.yml @@ -15,8 +15,8 @@ jobs: if: (github.event.comment.body == 'recheck' || github.event.comment.body == 'I have read the Contributor License Agreement and I hereby accept the Terms.') || github.event_name == 'pull_request_target' uses: cla-assistant/github-action@v2.1.3-beta env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - PERSONAL_ACCESS_TOKEN : ${{ secrets.CLA_BOT_ACCESS_TOKEN }} + GITHUB_TOKEN: ${{ github.token }} + PERSONAL_ACCESS_TOKEN : ${{ secrets.REPO_BOT_ACCESS_TOKEN }} with: remote-organization-name: NVIDIA remote-repository-name: cuda-quantum diff --git a/.github/workflows/clean_caches.yml b/.github/workflows/clean_caches.yml new file mode 100644 index 0000000000..7d9c0775df --- /dev/null +++ b/.github/workflows/clean_caches.yml @@ -0,0 +1,134 @@ +on: + workflow_dispatch: + inputs: + cache_type: + type: choice + description: What kind of caches to delete. Note that *all* caches of that type will be deleted. + options: + - branch + - tar + - selective + required: false + default: selective + cache_keys: + type: string + description: Space separated string of cache keys to delete. 
+ required: false + workflow_run: + workflows: + - CI + - Packages + types: + - completed + delete: + pull_request_target: + types: + - closed + +name: Clean GitHub caches + +jobs: + automatic_cleanup: + name: Clean up GitHub caches produced by other workflows + if: github.event_name == 'workflow_run' + permissions: write-all + runs-on: ubuntu-latest + + steps: + - name: Download cache keys + id: artifacts + run: | + artifacts_url=${{ github.event.workflow_run.artifacts_url }} + artifacts=$(gh api $artifacts_url -q '.artifacts[] | {name: .name, url: .archive_download_url}') + + for artifact in `echo "$artifacts"`; do + name=`echo $artifact | jq -r '.name'` + if [ "${name#cache_key}" != "${name}" ]; then + url=`echo $artifact | jq -r '.url'` + gh api $url > cache_keys.zip + unzip -d cache_keys cache_keys.zip + for file in `find cache_keys/ -type f`; do + keys+=" `cat $file`" + done + rm -rf cache_keys cache_keys.zip + fi + done + echo "cache_keys='$(echo $keys)'" >> $GITHUB_OUTPUT + env: + GH_TOKEN: ${{ github.token }} + + - run: | + gh extension install actions/gh-actions-cache + for key in `echo ${{ steps.artifacts.outputs.cache_keys }}`; do + echo "Deleting cache $key" + gh actions-cache delete $key -R ${{ github.repository }} --confirm + done + env: + GH_TOKEN: ${{ github.token }} + + selective_cleanup: + name: Clean up selective GitHub caches + if: (github.event_name == 'workflow_dispatch' && inputs.cache_keys != '' ) + runs-on: ubuntu-latest + permissions: write-all + + steps: + - run: | + gh extension install actions/gh-actions-cache + for key in ${{ inputs.cache_keys }}; do + echo "Deleting cache $key" + gh actions-cache delete $key -R ${{ github.repository }} --confirm + done + env: + GH_TOKEN: ${{ github.token }} + + tar_cleanup: + name: Clean up Github caches with tar archives + if: (github.event_name == 'workflow_dispatch' && inputs.cache_type == 'tar' ) + runs-on: ubuntu-latest + permissions: write-all + + steps: + - run: | + gh extension install 
actions/gh-actions-cache + keys=$(gh actions-cache list -R ${{ github.repository }} --key tar- | cut -f 1 ) + for key in $keys; do + echo "Deleting cache $key" + gh actions-cache delete $key -R ${{ github.repository }} --confirm + done + env: + GH_TOKEN: ${{ github.token }} + + branch_cleanup: + name: Clean up branch specific GitHub caches + if: (github.event_name == 'workflow_dispatch' && inputs.cache_type == 'branch' ) || (github.event_name == 'delete' && github.event.ref_type == 'branch') + runs-on: ubuntu-latest + permissions: write-all + + steps: + - run: | + gh extension install actions/gh-actions-cache + keys=$(gh actions-cache list -R ${{ github.repository }} -B ${{ github.event.ref }} | cut -f 1 ) + for key in $keys; do + echo "Deleting cache $key" + gh actions-cache delete $key -R ${{ github.repository }} --confirm + done + env: + GH_TOKEN: ${{ github.token }} + + pr_cleanup: + name: Clean up PR related GitHub caches + if: github.event_name == 'pull_request_target' + runs-on: ubuntu-latest + permissions: write-all + + steps: + - run: | + gh extension install actions/gh-actions-cache + keys=$(gh actions-cache list -R ${{ github.repository }} -B refs/pull/${{ github.event.number }}/merge | cut -f 1 ) + for key in $keys; do + echo "Deleting cache $key" + gh actions-cache delete $key -R ${{ github.repository }} --confirm + done + env: + GH_TOKEN: ${{ github.token }} diff --git a/.github/workflows/command_dispatch.yml b/.github/workflows/command_dispatch.yml new file mode 100644 index 0000000000..f4748c2c41 --- /dev/null +++ b/.github/workflows/command_dispatch.yml @@ -0,0 +1,112 @@ +on: + issue_comment: # applies to PR and issue comments + types: [created, edited] + +name: Slash command dispatch + +jobs: + preprocessing: + name: Check for slash command + runs-on: ubuntu-latest + permissions: + # keep this limited to read permissions for the content only! 
+ contents: read + + outputs: + react: ${{ steps.parse.outputs.react }} + command_type: ${{ steps.parse.outputs.command_type }} + + steps: + - uses: actions/checkout@v3 + - id: parse + run: | + if ${{ startsWith(github.event.comment.body, '/') }}; then + echo "react=true" >> $GITHUB_OUTPUT + command=$(echo "${{ github.event.comment.body }}" | head -n 1 | xargs | cut -d " " -f 1) + + json_config=`cat .github/workflows/config/command_dispatch_config.json | sed 's/{{.*}}//g'` + entries=`echo "$json_config" | jq -c '.[]'` + for entry in $entries; do + command_name=`echo $entry | jq -r '.command'` + if [ "${command:1}" == "$command_name" ]; then + command_suffix=`echo $entry | jq -r '.event_type_suffix'` + echo "command_type=${command:1}${command_suffix:--command}" >> $GITHUB_OUTPUT + fi + done + fi + + command_dispatch: + name: Dispatch slash commands + needs: preprocessing + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v3 + + - name: Update configuration + id: config + run: | + pr_url=$(printf '%s\n' "${{ github.event.issue.pull_request.url }}" | sed -e 's/[\/&]/\\&/g') + github_repo=$(printf '%s\n' "${{ github.repository }}" | sed -e 's/[\/&]/\\&/g') + cat .github/workflows/config/command_dispatch_config.json \ + | sed "s/.{{[ ]*github.event.issue.pull_request.url[ ]*}}/$pr_url/g" \ + | sed "s/.{{[ ]*github.event.comment.id[ ]*}}/${{ github.event.comment.id }}/g" \ + | sed "s/.{{[ ]*github.repository[ ]*}}/$github_repo/g" > /tmp/command_dispatch_config.json + + - name: React to comment + if: needs.preprocessing.outputs.react == 'true' + uses: peter-evans/create-or-update-comment@v3 + with: + comment-id: ${{ github.event.comment.id }} + body: | + > ${{ github.event.comment.body }} + + **Command Bot:** Processing... 
+ edit-mode: replace + reactions: eyes + + - name: Dispatch + id: dispatch + uses: peter-evans/slash-command-dispatch@v3 + with: + token: ${{ github.token }} + config-from-file: /tmp/command_dispatch_config.json + continue-on-error: true + + - name: Edit comment with error message + if: steps.dispatch.outputs.error-message + uses: peter-evans/create-or-update-comment@v3 + with: + comment-id: ${{ github.event.comment.id }} + body: | + > ${{ github.event.comment.body }} + + **Command Bot:** Could not process command. + ${{ steps.dispatch.outputs.error-message }} + # We replace the original comment to avoid an infinite loop + edit-mode: replace + reactions-edit-mode: replace + reactions: | + + - name: Indicate dispatch failure + if: ${{ needs.preprocessing.outputs.react == 'true' && steps.dispatch.outcome == 'failure' && ! steps.dispatch.outputs.error-message }} + uses: peter-evans/create-or-update-comment@v3 + with: + comment-id: ${{ github.event.comment.id }} + body: | + > ${{ github.event.comment.body }} + + **Command Bot:** Failed to dispatch command. For more information about available commands, use the command /help. + # We replace the original comment to avoid an infinite loop + edit-mode: replace + reactions-edit-mode: replace + reactions: | + + - name: Indicate successful dispatch + if: ${{ needs.preprocessing.outputs.command_type != '' && steps.dispatch.outcome == 'success' && ! steps.dispatch.outputs.error-message }} + uses: peter-evans/create-or-update-comment@v3 + with: + comment-id: ${{ github.event.comment.id }} + body: | + The launched workflow can be found [here](https://github.com/${{ github.event.repository.full_name }}/actions/workflows/${{ needs.preprocessing.outputs.command_type }}.yml). 
+ edit-mode: append diff --git a/.github/workflows/config/command_dispatch_config.json b/.github/workflows/config/command_dispatch_config.json new file mode 100644 index 0000000000..71b7431c29 --- /dev/null +++ b/.github/workflows/config/command_dispatch_config.json @@ -0,0 +1,30 @@ +[ + { + "command": "help", + "event_type_suffix": "_command", + "issue_type": "both", + "dispatch_type": "repository", + "permission": "read", + "allow_edits": true + }, + { + "command": "show_context", + "event_type_suffix": "_command", + "issue_type": "both", + "dispatch_type": "repository", + "permission": "read", + "allow_edits": true + }, + { + "command": "create_cache", + "event_type_suffix": "_command", + "issue_type": "pull-request", + "dispatch_type": "workflow", + "permission": "write", + "allow_edits": true, + "static_args": [ + "pr_url=${{ github.event.issue.pull_request.url }}", + "comment_id=${{ github.event.comment.id }}" + ] + } +] diff --git a/.github/workflows/config/md_link_check_config.json b/.github/workflows/config/md_link_check_config.json new file mode 100644 index 0000000000..b0a33a6f3d --- /dev/null +++ b/.github/workflows/config/md_link_check_config.json @@ -0,0 +1,19 @@ +{ + "retryOn429": true, + "retryCount": 5, + "fallbackRetryDelay": "30s", + "aliveStatusCodes": [200, 206], + "httpHeaders": [ + { + "urls": ["https://github.com/", "https://guides.github.com/", "https://help.github.com/", "https://docs.github.com/"], + "headers": { + "Accept-Encoding": "zstd, br, gzip, deflate" + } + } + ], + "ignorePatterns": [ + { + "pattern": "^https://www.gnu.org/prep/standards/standards.html" + } + ] +} \ No newline at end of file diff --git a/.github/workflows/config/spellcheck_config.yml b/.github/workflows/config/spellcheck_config.yml new file mode 100644 index 0000000000..6898779607 --- /dev/null +++ b/.github/workflows/config/spellcheck_config.yml @@ -0,0 +1,235 @@ +spellchecker: aspell +matrix: + +- name: markdown + sources: + - '**/*.md' + expect_match: false + 
aspell: + lang: en + d: en_US + dictionary: + wordlists: + - .github/workflows/config/spelling_allowlist.txt + pipeline: + - pyspelling.filters.markdown: + markdown_extensions: + - pymdownx.superfences: + disable_indented_code_blocks: true + - pyspelling.filters.html: + comments: false + attributes: + - title + - alt + ignores: + - ':matches(code, pre)' + - 'code' + - 'pre' + - pyspelling.filters.context: + context_visible_first: true + delimiters: + # Ignore word prefixes, e.g. "pre-" in pre-computed + - open: '(^|\s)[a-z]+-' + close: '(?=.)' + # Ignore paths of files (recognized by them having a file extension) + - open: '([A-Za-z_\\\/]+\.)+' + close: '($|(?=[^a-z]))' + content: '\S+?' + +- name: rst + sources: + - '**/*.rst' + expect_match: false + aspell: + lang: en + d: en_US + dictionary: + wordlists: + - .github/workflows/config/spelling_allowlist.txt + pipeline: + - pyspelling.filters.markdown: + markdown_extensions: + - markdown.extensions.extra: + - pyspelling.filters.html: + comments: false + attributes: + - title + - alt + ignores: + - ':matches(code, pre)' + - 'code' + - 'pre' + - pyspelling.filters.context: + context_visible_first: true + delimiters: + # Ignore lines that start with two dots + - open: '^\s*\.\.' + close: '$' + # Ignore words between two colons, e.g. :maxdepth: + - open: '(^|\s):' + close: ':(\s|$)' + content: '[A-Za-z_-]*?' + # Ignore word prefixes, e.g. "pre-" in pre-computed + - open: '(^|\s)[a-z]+-' + close: '(?=.)' + +- name: html + sources: + - '**/*.html' + expect_match: true + aspell: + lang: en + d: en_US + dictionary: + wordlists: + - .github/workflows/config/spelling_allowlist.txt + pipeline: + - pyspelling.filters.html: + comments: false + attributes: [] + ignores: + - nav + - code + - pre + - pyspelling.filters.context: + context_visible_first: true + delimiters: + # Ignore word prefixes, e.g. 
"pre-" in pre-computed + - open: '(^|\s)[a-z]+-' + close: '(?=.)' + +- name: cxx_headers + sources: + - '**/*.h' + expect_match: false + aspell: + lang: en + d: en_US + dictionary: + wordlists: + - .github/workflows/config/spelling_allowlist.txt + - .github/workflows/config/spelling_allowlist_cxx.txt + pipeline: + - pyspelling.filters.url: + - pyspelling.filters.context: + context_visible_first: true + escapes: '\\[\\`~]' + delimiters: + # Ignore comments of the form `// namespace ...` since they are used to annotate the closing namespace + - open: '\/{2}\s*namespace\s+' + close: '($|\s)' + # Ignore comments of the form `/*...*/` since they are used to annotate argument names + - open: '\/\*' + close: '\*\/' + content: '\S*?' + # Ignore end of line comments + - open: '(^|\s)\/\/\s' + close: '$' + - pyspelling.filters.cpp: + block_comments: true + line_comments: true + group_comments: true + strings: false + - pyspelling.filters.context: + context_visible_first: true + escapes: '\\[\\`~]' + delimiters: + # Ignore multiline content between three or more backticks + - open: '(?s)(?P\s*`{3,})\S*\s*$' + close: '(?P=open)$' + # Ignore content between inline backticks + - open: '(?P`+)' + close: '(?P=open)' + # Ignore words that start with an @, e.g. @param used in doc comments + - open: '@\S' + close: '(\s|$)' + # Ignore words before and after double columns + - open: '([a-zA-Z0-9_])*::' + close: '([^a-zA-Z0-9_:]|$)' + # Ignore words that contain any underscores, numbers, or uppercase letters + - open: '[a-zA-Z0-9]+([A-Z0-9_])' + close: '([^a-zA-Z0-9_]|$)' + # Ignore words that start with uppercase letters or an underscore + - open: '(^|\s)([A-Z]|_)' + close: '($|\s|-)' + # Ignore word prefixes, e.g. 
"pre-" in pre-computed + - open: '(^|\s)[a-z]+-' + close: '(?=.)' + +- name: cxx_examples + sources: + - 'docs/sphinx/examples/**/*.cpp' + expect_match: false + aspell: + lang: en + d: en_US + dictionary: + wordlists: + - .github/workflows/config/spelling_allowlist.txt + pipeline: + - pyspelling.filters.url: + - pyspelling.filters.context: + context_visible_first: true + escapes: '\\[\\`~]' + delimiters: + # Ignore comments of the form `// namespace ...` since they are used to annotate the closing namespace + - open: '\/{2}\s*namespace\s+' + close: '($|\s)' + # Ignore comments of the form `/*...*/` since they are used to annotate argument names + - open: '\/\*' + close: '\*\/' + content: '\S*?' + - pyspelling.filters.cpp: + block_comments: true + line_comments: true + group_comments: true + strings: false + - pyspelling.filters.context: + context_visible_first: true + escapes: '\\[\\`~]' + delimiters: + # Ignore multiline content between three or more backticks + - open: '(?s)(?P\s*`{3,})\S*\s*$' + close: '(?P=open)$' + # Ignore content between inline backticks + - open: '(?P`+)' + close: '(?P=open)' + # Ignore words that start with an @, e.g. @param used in doc comments + - open: '@\S' + close: '(\s|$)' + # Ignore word prefixes, e.g. 
"pre-" in pre-computed + - open: '(^|\s)[a-z]+-' + close: '(?=.)' + +- name: python + sources: + - '**/*.py' + expect_match: false + aspell: + lang: en + d: en_US + dictionary: + wordlists: + - .github/workflows/config/spelling_allowlist.txt + pipeline: + - pyspelling.filters.url: + - pyspelling.filters.python: + docstrings: true + comments: true + strings: false + - pyspelling.filters.context: + context_visible_first: true + escapes: '\\[\\`~]' + delimiters: + # Ignore multiline content between three or more backticks + - open: '(?s)(?P\s*`{3,})\S*\s*$' + close: '(?P=open)$' + # Ignore content between inline backticks + - open: '(?P`+)' + close: '(?P=open)' + # Ignore words that start with uppercase letters or an underscore + - open: '(^|\s)([A-Z]|_)' + close: '($|\s|-)' + # Ignore word prefixes, e.g. "pre-" in pre-computed + - open: '(^|\s)[a-z]+-' + close: '(?=.)' diff --git a/.github/workflows/config/spelling_allowlist.txt b/.github/workflows/config/spelling_allowlist.txt new file mode 100644 index 0000000000..eed59e4f5e --- /dev/null +++ b/.github/workflows/config/spelling_allowlist.txt @@ -0,0 +1,153 @@ +API +APIs +AST +ASTConsumers +BFGS +CLA +CMake +COBYLA +CPU +CPUs +CUDA +CUDAQ +CUTN +DGX +FP +FileCheck +Fourier +GHCR +GPU +GPUs +GitHub +Hadamard +Hadamards +Hamiltonian +Hamiltonians +JIT +Kraus +LLVM +MLIR +MPI +Max-Cut +NGC +NUM +NVIDIA +NVQIR +Namespaces +OMP +OMPI +OpenMP +OpenMPI +OpenQASM +Optimizers +Pauli +Paulis +Pimpl +QAOA +QIR +QIS +QPU +QPUs +QTX +Quake +Toffoli +VQE +Vazirani +WSL +adjoint +al +ansatz +auxillary +backend +backends +baz +bitcode +bitstring +bitstrings +buildable +callable +callables +canonicalization +canonicalize +canonicalizer +canonicalizes +codebase +composable +controlled +coprocessor +coprocessors +copyable +cpp +cuQuantum +cuStateVec +cuTensorNet +deallocate +deallocated +deallocates +deallocation +deallocations +desc +deserialize +destructor +discretization +discretize +discretized +eigenstate +enqueues +ensmallen 
+entangler +entanglers +enum +et +executables +gcc +getitem +init +inlining +instantiation +instantiations +iter +json +len +lfoo +mpiexec +mpirun +namespace +natively +nvidia +nvq +parallelization +precompute +precomputed +prepend +qpu +quantize +quantized +qubit +qubits +qudit +qudits +runtime +str +struct +structs +subclass +subclassed +subclasses +subclassing +subfolder +submodule +subscriptable +subtype +subtypes +subtyping +symplectic +tablegen +templated +toolchain +unitaries +unitary +unoptimized +variadic +variational +workflow diff --git a/.github/workflows/config/spelling_allowlist_cxx.txt b/.github/workflows/config/spelling_allowlist_cxx.txt new file mode 100644 index 0000000000..5c85f6de4b --- /dev/null +++ b/.github/workflows/config/spelling_allowlist_cxx.txt @@ -0,0 +1,4 @@ +clang +gcc +nullary +typedef diff --git a/.github/workflows/create_cache_command.yml b/.github/workflows/create_cache_command.yml new file mode 100644 index 0000000000..55330591b4 --- /dev/null +++ b/.github/workflows/create_cache_command.yml @@ -0,0 +1,73 @@ +on: + workflow_dispatch: + inputs: + pr_url: + required: true + type: string + description: 'The url of the pull_request json object that contains the information about the PR on which the slash command was triggered.' + comment_id: + required: false + type: string + description: 'The id of the comment that contains the slash command that triggered this workflow.' 
+ +name: Create CI cache + +jobs: + config: + name: Configure build + runs-on: ubuntu-latest + + outputs: + pull_request_number: ${{ steps.pr_info.outputs.pull_request_number }} + + steps: + - name: Download PR info + id: pr_info + run: | + pr_info=`wget -O - ${{ inputs.pr_url }}` + pr_nr=`echo $pr_info | jq -r '.number'` + head_label=`echo $pr_info | jq -r '.head.label'` + head_sha=`echo $pr_info | jq -r '.head.sha'` + base_ref=`echo $pr_info | jq -r '.base.ref'` + + echo "target_branch=$base_ref" >> $GITHUB_OUTPUT + echo "source_label=$head_label" >> $GITHUB_OUTPUT + echo "source_sha=$head_sha" >> $GITHUB_OUTPUT + echo "pull_request_number=$pr_nr" >> $GITHUB_OUTPUT + + - name: Add reaction + if: inputs.comment_id + uses: peter-evans/create-or-update-comment@v3 + with: + comment-id: ${{ inputs.comment_id }} + body: | + Running workflow from ${{ github.ref_type }} `${{ github.ref_name }}`. The created cache will be owned by that branch. + Checking out source code from head `${{ steps.pr_info.outputs.source_label }}` (sha: ${{ steps.pr_info.outputs.source_sha }}). 
+ edit-mode: append + + run_ci: + name: Create caches + needs: config + strategy: + matrix: + toolchain: [llvm, clang16, gcc12] + fail-fast: false + uses: ./.github/workflows/dev_environment.yml + with: + dockerfile: build/devdeps.Dockerfile + toolchain: ${{ matrix.toolchain }} + create_local_cache: true + registry_cache_from: ${{ needs.config.outputs.target_branch }} + pull_request_number: ${{ needs.config.outputs.pull_request_number }} + + finalize: + name: Indicate completion + runs-on: ubuntu-latest + steps: + - name: Add reaction to comment + if: inputs.comment_id + uses: peter-evans/create-or-update-comment@v3 + with: + comment-id: ${{ inputs.comment_id }} + reactions-edit-mode: append + reactions: hooray \ No newline at end of file diff --git a/.github/workflows/deploy_to_registry.yml b/.github/workflows/deploy_to_registry.yml new file mode 100644 index 0000000000..b26350a8a1 --- /dev/null +++ b/.github/workflows/deploy_to_registry.yml @@ -0,0 +1,69 @@ +on: + workflow_call: + inputs: + environment: + required: true + type: string + cache_key: + required: true + type: string + tar_archive: + required: true + type: string + outputs: + image_hash: + description: "The the name and digest of the deployed docker image that can be used to retrieve it independently of any tag updates." 
+        value: ${{ jobs.deployment.outputs.image_hash }}
+
+name: Deploy image to container registry
+
+jobs:
+  deployment:
+    name: Deployment
+    runs-on: ubuntu-latest
+    permissions: write-all
+
+    outputs:
+      image_hash: ${{ steps.push_image.outputs.image_hash }}
+
+    environment:
+      name: ${{ inputs.environment }}
+      url: ${{ vars.deployment_url }}
+
+    steps:
+      - name: Load tar cache
+        uses: actions/cache/restore@v3
+        with:
+          path: ${{ inputs.tar_archive }}
+          key: ${{ inputs.cache_key }}
+          fail-on-cache-miss: true
+
+      - name: Log in to the container registry
+        uses: docker/login-action@v2
+        if: vars.registry != ''
+        with:
+          registry: ${{ vars.registry }}
+          username: ${{ github.actor }}
+          password: ${{ github.token }}
+
+      - name: Push image
+        id: push_image
+        run: |
+          # Note that this may change the digest compared to the digest produced during build
+          # (the saved docker format has its own manifest that doesn't necessarily have the same bit-by-bit format...)
+          # Load the archive once and capture the loaded image name from the output.
+          loaded=`docker load --input ${{ inputs.tar_archive }} | grep -o 'Loaded image: \S*:\S*' | cut -d ' ' -f 3`
+
+          image_name=`echo $loaded | cut -d ":" -f 1`
+          digest_id=`docker push $image_name --all-tags | grep -o 'digest:\ssha256:\S*' | cut -d ":" -f 3`
+
+          image_hash=${image_name}@sha256:$digest_id
+          echo "image_hash=$image_hash" >> $GITHUB_OUTPUT
+
+      - name: Clean up
+        run: |
+          gh extension install actions/gh-actions-cache
+          echo "Deleting cache ${{ inputs.cache_key }}"
+          gh actions-cache delete ${{ inputs.cache_key }} -R ${{ github.repository }} --confirm
+        env:
+          GH_TOKEN: ${{ github.token }}
diff --git a/.github/workflows/deployments.yml b/.github/workflows/deployments.yml
new file mode 100644
index 0000000000..c3f60d2608
--- /dev/null
+++ b/.github/workflows/deployments.yml
@@ -0,0 +1,82 @@
+on:
+  workflow_dispatch:
+    inputs:
+      update_registry_cache:
+        type: boolean
+        description: Create or update the build caches on the container registry.
+        required: false
+        default: false
+  # We need write permissions for packages to update the build caches of GHCR.
+  # Make sure no external code is checked out as part of this workflow.
+  pull_request_target:
+    types:
+      - closed
+    branches:
+      - 'main'
+      - 'releases/*'
+  # We update the build caches during the pull_request_target event,
+  # and deploy the images during pushes to branches with deployment permissions.
+  push:
+    branches:
+      - 'main'
+      - 'releases/*'
+
+name: Deployments # do not change name without updating workflow_run triggers
+
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.base.ref || github.ref_name }} # github.event.ref_name does not exist for push events
+  cancel-in-progress: false
+
+jobs:
+  devdeps:
+    if: github.event_name != 'pull_request_target' || github.event.pull_request.merged == true
+    strategy:
+      matrix:
+        toolchain: [llvm, clang16, gcc12]
+      fail-fast: false
+    uses: ./.github/workflows/dev_environment.yml
+    with:
+      dockerfile: build/devdeps.Dockerfile
+      toolchain: ${{ matrix.toolchain }}
+      matrix_key: ${{ matrix.toolchain }}
+      registry_cache_update: ${{ github.event_name == 'workflow_dispatch' && inputs.update_registry_cache }} # boolean input; comparing to the string 'true' would always be false
+      registry_cache_update_only: ${{ github.event.pull_request.merged == true }}
+      environment: ghcr-deployment
+
+  # This job is needed only when using the cloudposse GitHub action to read
+  # the output of a matrix job. 
This is a workaround due to current GitHub + # limitations that may not be needed if the work started here concludes: + # https://github.com/actions/runner/pull/2477 + config: + if: github.event_name != 'pull_request_target' + name: Configure build + runs-on: ubuntu-latest + needs: devdeps + + outputs: + json: "${{ steps.read_json.outputs.result }}" + base_image: ${{ fromJson(steps.read_json.outputs.result).image_hash.llvm }} + + steps: + - uses: cloudposse/github-action-matrix-outputs-read@0.1.1 + id: read_json + with: + matrix-step-name: dev_environment + + extdevdeps: + if: github.event_name != 'pull_request_target' + uses: ./.github/workflows/dev_environment.yml + needs: config + with: + dockerfile: build/devdeps.ext.Dockerfile + toolchain: llvm + base_image: ${{ needs.config.outputs.base_image }} + environment: ghcr-deployment + + docker_image: + name: Create Packages + needs: extdevdeps + uses: ./.github/workflows/build_packages.yml + with: + devdeps_image: ${{ needs.extdevdeps.outputs.image_hash }} + environment: ghcr-deployment diff --git a/.github/workflows/dev_environment.yml b/.github/workflows/dev_environment.yml new file mode 100644 index 0000000000..c7748e608b --- /dev/null +++ b/.github/workflows/dev_environment.yml @@ -0,0 +1,301 @@ +on: + workflow_call: + inputs: + dockerfile: + required: true + type: string + build_target: + required: false + type: string + registry_cache_from: + required: false + type: string + local_cache_from: + required: false + type: string + create_local_cache: + required: false + type: boolean + default: false + registry_cache_update: + required: false + type: boolean + default: false + registry_cache_update_only: + required: false + type: boolean + default: false + additional_build_caches: + required: false + type: string + additional_local_caches: + required: false + type: string + base_image: + required: false + type: string + toolchain: + required: false + type: string + matrix_key: + required: false + type: string + 
pull_request_number: + required: false + type: string + description: The issue number of the pull request to check out. Permits to run the workflow from a different branch than the PR branch. + environment: + required: false + type: string + outputs: + image_hash: + description: "The name and digest of the docker image that was deployed to the registry, which can be used to retrieve it independently of any tag updates." + value: ${{ jobs.finalize.outputs.image_hash }} + cache_key: + description: "The cache key to retrieve a tar archive containing the built image(s)." + value: ${{ jobs.finalize.outputs.cache_key }} + tar_archive: + description: "The location of the tar archive in the cache." + value: ${{ jobs.finalize.outputs.tar_archive }} + build_cache: + description: "The location from which the build cache can be loaded in subsequent builds." + value: ${{ jobs.finalize.outputs.build_cache }} + +name: CUDA Quantum cached dev images + +jobs: + metadata: + name: Metadata + runs-on: ubuntu-latest + permissions: + contents: read + + outputs: + dockerfile: ${{ steps.build_info.outputs.dockerfile }} + owner: ${{ steps.build_info.outputs.owner }} + image_name: ${{ steps.build_info.outputs.image_name }} + image_title: ${{ steps.build_info.outputs.image_title }} + image_id: ${{ steps.build_info.outputs.image_id }} + image_tags: ${{ steps.metadata.outputs.tags }} + image_labels: ${{ steps.metadata.outputs.labels }} + llvm_commit: ${{ steps.build_info.outputs.llvm_commit }} + + steps: + - name: Checkout repository + uses: actions/checkout@v3 + with: + ref: "${{ (inputs.pull_request_number != '' && format('refs/pull/{0}/merge', inputs.pull_request_number)) || '' }}" + + - name: Determine build arguments + id: build_info + run: | + repo_owner=${{ github.repository_owner }} + build_target=${{ inputs.build_target }} + image_id=`basename ${{ inputs.dockerfile }} .Dockerfile`${build_target:+.$build_target} + image_title=cuda-quantum-`echo $image_id | cut -d "." 
-f 1` + image_name=${{ vars.registry || 'ghcr.io' }}/${repo_owner,,}/$image_title + toolchain=${{ inputs.toolchain }} + tag_prefix=${toolchain:+$toolchain-} + tag_suffix=`echo $image_id | cut -s -d "." -f 2- | xargs -I "%" echo .% | tr . -` + + if ${{ inputs.pull_request_number != '' }}; then + custom_tags="type=raw,value=pr-${{ inputs.pull_request_number }}" + elif ${{ github.event.pull_request.merged == true }}; then + custom_tags="type=raw,value=${{ github.event.pull_request.base.ref }}" + fi + + echo "image_name=$image_name" >> $GITHUB_OUTPUT + echo "image_title=$image_title" >> $GITHUB_OUTPUT + echo "image_id=$image_id" >> $GITHUB_OUTPUT + echo "tag_prefix=$tag_prefix" >> $GITHUB_OUTPUT + echo "tag_suffix=$tag_suffix" >> $GITHUB_OUTPUT + echo "custom_tags=$custom_tags" >> $GITHUB_OUTPUT + echo "dockerfile=${{ inputs.dockerfile }}" >> $GITHUB_OUTPUT + echo "owner=${repo_owner,,}" >> $GITHUB_OUTPUT + echo "llvm_commit=$(git rev-parse @:./tpls/llvm)" >> $GITHUB_OUTPUT + + - name: Extract metadata for Docker image + id: metadata + uses: docker/metadata-action@v4 + with: + images: ${{ steps.build_info.outputs.image_name }} + flavor: | + latest=false + prefix=${{ steps.build_info.outputs.tag_prefix }},onlatest=true + suffix=${{ steps.build_info.outputs.tag_suffix }},onlatest=true + tags: | + # workflow dispatch is covered by these + type=schedule,enable=${{ inputs.pull_request_number == '' }},pattern=nightly + type=ref,enable=${{ inputs.pull_request_number == '' }},event=branch + type=ref,enable=${{ inputs.pull_request_number == '' }},prefix=${{ steps.build_info.outputs.tag_prefix }}pr-,event=pr + type=ref,enable=${{ inputs.pull_request_number == '' }},event=tag + ${{ steps.build_info.outputs.custom_tags }} + labels: | + org.opencontainers.image.title=${{ steps.build_info.outputs.image_title }} + org.opencontainers.image.description=Dev tools for building and testing CUDA Quantum + + build: + name: Caching + runs-on: ubuntu-latest + needs: metadata + 
timeout-minutes: 600 + permissions: + contents: read + packages: write + + outputs: + tar_cache: ${{ steps.cache.outputs.tar_cache }} + tar_archive: ${{ steps.cache.outputs.tar_archive }} + build_cache: ${{ steps.cache.outputs.build_cache }} + + steps: + - name: Checkout repository + uses: actions/checkout@v3 + with: + ref: "${{ (inputs.pull_request_number != '' && format('refs/pull/{0}/merge', inputs.pull_request_number)) || '' }}" + + - name: Set up buildx runner + uses: docker/setup-buildx-action@v2 + + - name: Log in to GitHub CR + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ github.token }} + + - name: Create cache locations + id: cache + run: | + toolchain=${{ inputs.toolchain }} + registry_cache=ghcr.io/${{ needs.metadata.outputs.owner }}/buildcache-cuda-quantum + nvidia_registry_cache=ghcr.io/nvidia/buildcache-cuda-quantum + registry_cache_base=${{ inputs.registry_cache_from || github.event.pull_request.base.ref || 'main' }} + cache_id=$(echo ${{ needs.metadata.outputs.image_id }} | tr . 
-)${toolchain:+-$toolchain} + + local_buildcache_path="/tmp/.buildcache-${cache_id}" + local_buildcache_key_suffix="-$(git rev-parse HEAD)" + if ${{ inputs.pull_request_number != '' }}; then + local_buildcache_key="${{ inputs.pull_request_number }}/merge-cuda-quantum-${cache_id}" + else + local_cache_from=${{ inputs.local_cache_from || github.ref_name }} + if ${{ inputs.local_cache_from == '' && github.event.pull_request.merged == true }}; then + local_cache_from=${{ github.event.pull_request.number }}/merge + fi + local_buildcache_key="${local_cache_from}-cuda-quantum-${cache_id}" + fi + + cache_from_gh="type=local,src=${local_buildcache_path}" + cache_from_registry="type=registry,ref=${registry_cache}-${cache_id}:${registry_cache_base}" + cache_from_nvidia_registry="type=registry,ref=${nvidia_registry_cache}-${cache_id}:${registry_cache_base}" + if ${{ inputs.registry_cache_update || inputs.registry_cache_update_only }}; then + build_cache="type=registry,ref=${registry_cache}-${cache_id}:${{ github.ref_name }}" + cache_to="${build_cache},mode=max,ignore-error=false" + elif ${{ inputs.create_local_cache }}; then + # In general, using the build cache from the registry/parent branch is the quickest. + # We hence create a build cache only upon request. + build_cache="$local_buildcache_key" + cache_to="type=local,dest=${local_buildcache_path}-new,mode=max,ignore-error=true" + fi + + echo "local_buildcache_key=$local_buildcache_key" >> $GITHUB_OUTPUT + echo "local_buildcache_key_suffix=$local_buildcache_key_suffix" >> $GITHUB_OUTPUT + echo "local_buildcache_path=$local_buildcache_path" >> $GITHUB_OUTPUT + echo "cache_from_gh=$cache_from_gh" >> $GITHUB_OUTPUT + echo "cache_from_registry=$cache_from_registry" >> $GITHUB_OUTPUT + echo "cache_from_nvidia_registry=$cache_from_nvidia_registry" >> $GITHUB_OUTPUT + echo "cache_to=$cache_to" >> $GITHUB_OUTPUT + echo "build_cache=$build_cache" >> $GITHUB_OUTPUT + if ${{ ! 
inputs.registry_cache_update_only }}; then + echo "tar_cache=tar-${cache_id}${local_buildcache_key_suffix}" >> $GITHUB_OUTPUT + echo "tar_archive=/tmp/${{ needs.metadata.outputs.image_id }}.tar" >> $GITHUB_OUTPUT + fi + + - name: Check out local cache + uses: actions/cache/restore@v3 + with: + path: ${{ steps.cache.outputs.local_buildcache_path }} + key: ${{ steps.cache.outputs.local_buildcache_key }}${{ steps.cache.outputs.local_buildcache_key_suffix }} + restore-keys: | + ${{ inputs.additional_local_caches }} + ${{ steps.cache.outputs.local_buildcache_key }} + + - name: Build ${{ needs.metadata.outputs.image_title }} image + id: build_image + uses: docker/build-push-action@v4 + with: + context: . + file: ./docker/${{ needs.metadata.outputs.dockerfile }} + target: ${{ inputs.build_target }} + build-args: | + base_image=${{ inputs.base_image }} + toolchain=${{ inputs.toolchain }} + llvm_commit=${{ needs.metadata.outputs.llvm_commit }} + load: false + tags: ${{ needs.metadata.outputs.image_tags }} + labels: ${{ needs.metadata.outputs.image_labels }} + platforms: linux/amd64 + cache-from: | + ${{ inputs.additional_build_caches }} + ${{ steps.cache.outputs.cache_from_gh }} + ${{ steps.cache.outputs.cache_from_registry }} + ${{ steps.cache.outputs.cache_from_nvidia_registry }} + cache-to: ${{ steps.cache.outputs.cache_to }} + outputs: type=docker,dest=${{ steps.cache.outputs.tar_archive }} + + # See also https://github.com/moby/buildkit/issues/1896 + - name: Clean up local cache + run: | + rm -rf "${{ steps.cache.outputs.local_buildcache_path }}" + build_cache="${{ steps.cache.outputs.local_buildcache_path }}-new" + if [ -d "$build_cache" ]; then + mv "$build_cache" "${{ steps.cache.outputs.local_buildcache_path }}" + fi + + - name: Upload build cache + if: inputs.create_local_cache + uses: actions/cache/save@v3 + with: + path: ${{ steps.cache.outputs.local_buildcache_path }} + key: ${{ steps.cache.outputs.local_buildcache_key }}${{ 
steps.cache.outputs.local_buildcache_key_suffix }} + + - name: Cache ${{ needs.metadata.outputs.image_title }} image + uses: actions/cache/save@v3 + if: ${{ ! inputs.registry_cache_update_only }} + with: + path: ${{ steps.cache.outputs.tar_archive }} + key: ${{ steps.cache.outputs.tar_cache }} + + deployment: + name: Deployment + if: ${{ inputs.environment && ! inputs.registry_cache_update_only }} + needs: build + uses: ./.github/workflows/deploy_to_registry.yml + with: + environment: ${{ inputs.environment }} + cache_key: ${{ needs.build.outputs.tar_cache }} + tar_archive: ${{ needs.build.outputs.tar_archive }} + + finalize: + name: Finalize + runs-on: ubuntu-latest + if: always() && !cancelled() + needs: [metadata, build, deployment] + + outputs: + image_hash: ${{ fromJson(steps.write_json.outputs.result).image_hash }} + cache_key: ${{ fromJson(steps.write_json.outputs.result).cache_key }} + tar_archive: ${{ fromJson(steps.write_json.outputs.result).tar_archive }} + build_cache: ${{ fromJson(steps.write_json.outputs.result).build_cache }} + + steps: + - uses: cloudposse/github-action-matrix-outputs-write@0.3.0 + id: write_json + with: + matrix-step-name: ${{ inputs.matrix_key && 'dev_environment' }} + matrix-key: ${{ inputs.matrix_key }} + outputs: | + image_hash: ${{ needs.deployment.outputs.image_hash }} + cache_key: ${{ needs.build.outputs.tar_cache }} + tar_archive: ${{ needs.build.outputs.tar_archive }} + build_cache: ${{ needs.build.outputs.build_cache }} diff --git a/.github/workflows/help_command.yml b/.github/workflows/help_command.yml new file mode 100644 index 0000000000..8a57714f8c --- /dev/null +++ b/.github/workflows/help_command.yml @@ -0,0 +1,47 @@ +on: + repository_dispatch: + types: [help_command] + +name: Show the slash commands help + +jobs: + example: + if: ${{ github.event.client_payload.github.job == 'command_dispatch' }} + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v3 + + - name: Read config + 
id: config
+        run: |
+          json_config=`cat .github/workflows/config/command_dispatch_config.json | sed 's/{{.*}}//g'`
+          entries=`echo "$json_config" | jq -c '.[]'`
+          nl=$'\n'
+          help_text="The following commands are available as slash commands:${nl}${nl}"
+          # Read one JSON object per line; a plain `for entry in $entries` would
+          # word-split entries whose string values contain spaces.
+          while IFS= read -r entry; do
+            command=`echo $entry | jq -r '.command'`
+            permission=`echo $entry | jq -r '.permission'`
+            issue_type=`echo $entry | jq -r '.issue_type'`
+            help_text+="**/$command**:${nl}"
+            help_text+="required permissions: ${permission:-write}${nl}"
+            help_text+="valid as comment on pull requests and/or issues: ${issue_type:-both}"
+            help_text+="${nl}${nl}"
+          done <<< "$entries"
+          # Multi-line values must be written to GITHUB_OUTPUT with a heredoc-style
+          # delimiter; a plain key=value line is truncated at the first newline.
+          echo "help_text<<MULTILINE_OUTPUT" >> $GITHUB_OUTPUT
+          echo "$help_text" >> $GITHUB_OUTPUT
+          echo "MULTILINE_OUTPUT" >> $GITHUB_OUTPUT
+
+      - name: Show help
+        uses: peter-evans/create-or-update-comment@v3
+        with:
+          repository: ${{ github.event.client_payload.github.payload.repository.full_name }}
+          comment-id: ${{ github.event.client_payload.github.payload.comment.id }}
+          body: |
+            ${{ steps.config.outputs.help_text }}
+
+            Available command descriptions:
+            Command | Description
+            --- | ---
+            /help | Shows the slash commands that can be used by commenting on an issue or PR.
+            /show_context | Shows the context available to slash commands.
+            /create_cache [ref=...] | Creates a CI build cache for the PR owned by the specified ref (main by default).
+          edit-mode: append
diff --git a/.github/workflows/publish_docs.yml b/.github/workflows/publish_docs.yml
new file mode 100644
index 0000000000..f9a00db112
--- /dev/null
+++ b/.github/workflows/publish_docs.yml
@@ -0,0 +1,94 @@
+on:
+  workflow_dispatch:
+    inputs:
+      artifacts_url:
+        required: true
+        type: string
+        description: 'The url to the artifacts that contain the docs to publish.'
+      artifact_name:
+        required: false
+        type: string
+        default: cuda_quantum_docs
+        description: 'The name of the artifact that contains the docs to publish.'
+      version:
+        required: true
+        type: string
+        description: 'The version that the documentation corresponds to, e.g. 0.3.1 or latest.'
+  workflow_run:
+    branches:
+      - 'main'
+      - 'releases/*'
+    workflows:
+      - Deployments
+    types:
+      - completed
+
+name: Docs publishing
+
+concurrency:
+  group: ${{ github.workflow }} # only one docs publishing can be run at a time, since all docs are published to the same location!
+ cancel-in-progress: false + +jobs: + publish_docs: + name: Publish documentation + runs-on: ubuntu-latest + if: github.event_name == 'workflow_dispatch' || github.event.workflow_run.conclusion == 'success' + + permissions: + contents: write + + environment: + name: github-pages + url: ${{ vars.deployment_url }} + + steps: + - name: Checkout repository + uses: actions/checkout@v3 + with: + ref: ${{ vars.live_branch }} + token: ${{ secrets.REPO_BOT_ACCESS_TOKEN }} + + - name: Download docs artifact + id: artifacts + run: | + if ${{ github.event_name == 'workflow_dispatch' }}; then + target_folder=${{ inputs.version }} + elif ${{ github.event.workflow_run.head_branch == 'main' }}; then + target_folder=latest + else + target_folder=`echo ${{ github.event.workflow_run.head_branch }} | egrep -o "([0-9]{1,}\.)+[0-9]{1,}"` + fi + + artifacts_url=${{ (github.event_name == 'workflow_dispatch' && inputs.artifacts_url) || github.event.workflow_run.artifacts_url }} + artifacts=$(gh api $artifacts_url -q '.artifacts[] | {name: .name, url: .archive_download_url}') + artifact_name=${{ (github.event_name == 'workflow_dispatch' && inputs.artifact_name) || 'cuda_quantum_docs' }} + + status=1 + for artifact in `echo "$artifacts"`; do + name=`echo $artifact | jq -r '.name'` + if [ "$name" == "$artifact_name" ]; then + url=`echo $artifact | jq -r '.url'` + gh api $url > cuda_quantum_docs.zip + + rm -rf "$target_folder" + unzip -d "$target_folder" cuda_quantum_docs.zip + rm -rf cuda_quantum_docs.zip + + git config --global user.name "cuda-quantum-bot" + git config --global user.email "cuda-quantum-bot@users.noreply.github.com" + git add "$target_folder" + + if ${{ github.event_name == 'workflow_dispatch' }}; then + git commit -m "Docs update triggered manually for version ${{ inputs.version }} (artifacts url: ${{ inputs.artifacts_url }}, artifact name: ${{ inputs.artifact_name }})." 
+ else + git commit --allow-empty -m "Docs update triggered by deployment on head branch ${{ github.event.workflow_run.head_branch }}, commit ${{ github.event.workflow_run.head_sha }}." + fi + git push + + status=0 + fi + done + exit $status + env: + GH_TOKEN: ${{ secrets.REPO_BOT_ACCESS_TOKEN }} diff --git a/.github/workflows/repo_checks.yml b/.github/workflows/repo_checks.yml new file mode 100644 index 0000000000..e8469c7e60 --- /dev/null +++ b/.github/workflows/repo_checks.yml @@ -0,0 +1,235 @@ +on: + workflow_dispatch: + pull_request: + +name: "Basic content checks" + +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + +jobs: + links: + runs-on: ubuntu-latest + name: "Check links" + steps: + - name: Checkout repository + uses: actions/checkout@v3 + + - name: Markdown files + uses: gaurav-nelson/github-action-markdown-link-check@v1 + with: + use-verbose-mode: "yes" + config-file: ".github/workflows/config/md_link_check_config.json" + + license_headers: + name: Check license headers + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: apache/skywalking-eyes/header@v0.4.0 + with: + config: .licenserc.yaml + token: '' # with the appropriate permission license eye can add comments on the PR + + filters: + name: Filter files to check + runs-on: ubuntu-latest + + outputs: + json: ${{ steps.files.outputs.json }} + + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: ${{ (github.event_name == 'pull_request' && '0') || '1' }} + + - id: files + run: | + if ${{ github.event_name == 'pull_request' }}; then + diff_base=${{ github.event.pull_request.base.sha }} + git rev-list HEAD..$diff_base # fails the step if something went wrong + list_files="git diff --diff-filter=d --name-only $diff_base --" + + echo "## Changed Files" >> $GITHUB_STEP_SUMMARY + echo "The following files contain changes:" >> $GITHUB_STEP_SUMMARY + for file in `$list_files`; do + echo "$file" >> 
$GITHUB_STEP_SUMMARY + done + else + list_files="git ls-files" + fi + + json="{\"files\":{}, \"patterns\":{}}" + function create_output { + json=`echo $json | jq ".patterns |= . + {"$1":[]}"` + for pattern in $2; do + pattern=\'$pattern\' + json=`echo $json | jq ".patterns.$1 |= . + [\"$pattern\"]"` + done + + json=`echo $json | jq ".files |= . + {"$1":[]}"` + for file in `echo $2 | xargs $list_files`; do + file=\'$file\' + json=`echo $json | jq ".files.$1 |= . + [\"$file\"]"` + done + } + + create_output cxx '*.cpp *.h *.hpp :!:test :!:tpls :!:**/nlopt-src/*' + create_output cxx_headers '*.h *.hpp :!:test :!:tpls :!:**/nlopt-src/*' + create_output cxx_examples 'docs/sphinx/examples/**/*.cpp' + create_output python '*.py :!:python/tests :!:test :!:tpls :!:docs/sphinx/conf.py' + create_output markdown '*.md :!:tpls' + create_output rst '*.rst :!:tpls' + echo "json=$(echo $json)" >> $GITHUB_OUTPUT + + formatting: + name: Check code formatting + runs-on: ubuntu-latest + needs: filters + + steps: + - uses: actions/checkout@v3 + + - name: C++ + run: | + echo "## C++ Formatting" >> $GITHUB_STEP_SUMMARY + files=(${{ join(fromJSON(needs.filters.outputs.json).files.cxx, ' ') }}) + + if [ "${#files[@]}" -gt "0" ]; then + wget -qO- https://apt.llvm.org/llvm-snapshot.gpg.key | sudo tee /etc/apt/trusted.gpg.d/apt.llvm.org.asc + sudo add-apt-repository "deb http://apt.llvm.org/jammy/ llvm-toolchain-jammy-16 main" + sudo apt-get update && sudo apt-get install -y --no-install-recommends clang-format-16 + + echo ${files[@]} | xargs clang-format-16 -i + echo ${{ join(fromJSON(needs.filters.outputs.json).patterns.cxx, ' ') }} \ + | xargs git diff --diff-filter=d -- > /tmp/clang-format.patch + else + echo "No files to check." 
>> $GITHUB_STEP_SUMMARY + exit 0 + fi + + if [ -s /tmp/clang-format.patch ]; then + echo "The following formatting changes need to be applied:" >> $GITHUB_STEP_SUMMARY + echo '```text' >> $GITHUB_STEP_SUMMARY + cat /tmp/clang-format.patch >> $GITHUB_STEP_SUMMARY + echo '```' >> $GITHUB_STEP_SUMMARY + exit 1 + else + echo "Totally checked ${#files[@]} files. All files are formatted as expected." >> $GITHUB_STEP_SUMMARY + fi + + - name: Python + run: | + echo "## Python Formatting" >> $GITHUB_STEP_SUMMARY + files=(${{ join(fromJSON(needs.filters.outputs.json).files.python, ' ') }}) + + if [ "${#files[@]}" -gt "0" ]; then + pip install yapf + echo ${files[@]} | xargs yapf --style google --recursive -i + echo ${{ join(fromJSON(needs.filters.outputs.json).patterns.python, ' ') }} \ + | xargs git diff --diff-filter=d -- > /tmp/yapf.patch + else + echo "No files to check." >> $GITHUB_STEP_SUMMARY + exit 0 + fi + + if [ -s /tmp/yapf.patch ]; then + echo "The following formatting changes need to be applied:" >> $GITHUB_STEP_SUMMARY + echo '```text' >> $GITHUB_STEP_SUMMARY + cat /tmp/yapf.patch >> $GITHUB_STEP_SUMMARY + echo '```' >> $GITHUB_STEP_SUMMARY + exit 1 + else + echo "Totally checked ${#files[@]} files. All files are formatted as expected." >> $GITHUB_STEP_SUMMARY + fi + + - name: Markdown + uses: nosborn/github-action-markdown-cli@v3.2.0 + with: + files: . + config_file: '.github/workflows/config/md_lint_config.yml' + + spelling: + name: Check spelling + runs-on: ubuntu-latest + needs: filters + + steps: + - uses: actions/checkout@v3 + + - name: Check spelling allowlist + run: | + for file in `ls .github/workflows/config/spelling_allowlist*.txt`; do + sorted_allowlist=`cat $file | sort` + if [ "$sorted_allowlist" != "$(cat $file)" ]; then + echo "Expecting spelling allowlist in $file to be sorted." 
+ exit 1 + fi + done + + - name: "Markdown files" + uses: rojopolis/spellcheck-github-actions@0.30.0 + with: + config_path: '.github/workflows/config/spellcheck_config.yml' + output_file: markdown_spellcheck.txt + task_name: markdown + source_files: ${{ join(fromJSON(needs.filters.outputs.json).files.markdown, ' ') || '*.nonexistent' }} + + - name: "reStructuredText files" + uses: rojopolis/spellcheck-github-actions@0.30.0 + with: + config_path: '.github/workflows/config/spellcheck_config.yml' + output_file: rst_spellcheck.txt + task_name: rst + source_files: ${{ join(fromJSON(needs.filters.outputs.json).files.rst, ' ') || '*.nonexistent' }} + + - name: "C++ files (headers)" + uses: rojopolis/spellcheck-github-actions@0.30.0 + with: + config_path: '.github/workflows/config/spellcheck_config.yml' + output_file: cxx_headers_spellcheck.txt + task_name: cxx_headers + source_files: ${{ join(fromJSON(needs.filters.outputs.json).files.cxx_headers, ' ') || '*.nonexistent' }} + + - name: "C++ files (examples)" + uses: rojopolis/spellcheck-github-actions@0.30.0 + with: + config_path: '.github/workflows/config/spellcheck_config.yml' + output_file: cxx_examples_spellcheck.txt + task_name: cxx_examples + source_files: ${{ join(fromJSON(needs.filters.outputs.json).files.cxx_examples, ' ') || '*.nonexistent' }} + + - name: "Python files" + uses: rojopolis/spellcheck-github-actions@0.30.0 + with: + config_path: '.github/workflows/config/spellcheck_config.yml' + output_file: python_spellcheck.txt + task_name: python + source_files: ${{ join(fromJSON(needs.filters.outputs.json).files.python, ' ') || '*.nonexistent' }} + + - name: Create summary + run: | + function create_summary { + status=`cat $2_spellcheck.txt | grep "Spelling check" | cut -d ' ' -f 3 | tr -d '!'` + if [ "$status" == "passed" ]; then echo 0; else echo 1; fi + + echo "## $1 Check" >> $GITHUB_STEP_SUMMARY + echo "Spell check ${status}." 
>> $GITHUB_STEP_SUMMARY
+
+            echo "Output for $1 files:" >> $GITHUB_STEP_SUMMARY
+            echo '```text' >> $GITHUB_STEP_SUMMARY
+            cat $2_spellcheck.txt >> $GITHUB_STEP_SUMMARY
+            echo '```' >> $GITHUB_STEP_SUMMARY
+          }
+
+          md_status=`create_summary Markdown markdown`
+          rst_status=`create_summary reStructuredText rst`
+          # Use distinct variables so a headers failure is not masked by the examples result.
+          cxx_headers_status=`create_summary "C++ Headers" cxx_headers`
+          cxx_examples_status=`create_summary "C++ Examples" cxx_examples`
+          py_status=`create_summary Python python`
+
+          if [ ! "$md_status" -eq 0 ]; then exit 1; fi
+          if [ ! "$rst_status" -eq 0 ]; then exit 2; fi
+          if [ ! "$cxx_headers_status" -eq 0 ]; then exit 3; fi
+          if [ ! "$cxx_examples_status" -eq 0 ]; then exit 4; fi
+          if [ ! "$py_status" -eq 0 ]; then exit 5; fi
diff --git a/.github/workflows/show_context_command.yml b/.github/workflows/show_context_command.yml
new file mode 100644
index 0000000000..7ddbecae11
--- /dev/null
+++ b/.github/workflows/show_context_command.yml
@@ -0,0 +1,22 @@
+on:
+  repository_dispatch:
+    types: [show_context_command]
+
+name: Show the context available to slash commands
+
+jobs:
+  example:
+    if: ${{ github.event.client_payload.github.job == 'command_dispatch' }}
+    runs-on: ubuntu-latest
+    steps:
+      - name: Show context
+        uses: peter-evans/create-or-update-comment@v3
+        with:
+          repository: ${{ github.event.client_payload.github.payload.repository.full_name }}
+          comment-id: ${{ github.event.client_payload.github.payload.comment.id }}
+          body: |
+            Full context available to slash commands via github.event.client_payload:
+            ```json
+            ${{ toJson(github.event.client_payload) }}
+            ```
+          edit-mode: append
diff --git a/.github/workflows/test_in_devenv.yml b/.github/workflows/test_in_devenv.yml
new file mode 100644
index 0000000000..633605d949
--- /dev/null
+++ b/.github/workflows/test_in_devenv.yml
@@ -0,0 +1,88 @@
+on:
+  workflow_call:
+    inputs:
+      devdeps_cache:
+        required: true
+        type: string
+      devdeps_archive:
+        required: true
+        type: string
+      export_environment:
+        required: false
+        type: boolean
+
+name: Run CI within the dev environment container
+
+jobs: + build_and_test: + name: Dev environment (Debug) + runs-on: ubuntu-latest + permissions: + contents: read + + steps: + - name: Checkout repository + uses: actions/checkout@v3 + + - name: Restore environment + uses: actions/cache/restore@v3 + with: + path: ${{ inputs.devdeps_archive }} + key: ${{ inputs.devdeps_cache }} + fail-on-cache-miss: true + + - name: Build CUDA Quantum + id: cudaq_build + run: | + loaded=`docker load --input ${{ inputs.devdeps_archive }} | grep -o 'Loaded image: \S*:\S*' | cut -d ' ' -f 3` + devdeps_image=`echo $loaded | cut -d ":" -f 1` + devdeps_tag=`echo $loaded | cut -d ":" -f 2` + + docker build -t cuda-quantum-dev:local -f docker/build/cudaqdev.Dockerfile . \ + --build-arg base_image=${devdeps_image}:${devdeps_tag} \ + --build-arg install="CMAKE_BUILD_TYPE=Debug" + + tag_prefix=`echo $devdeps_tag | cut -d "_" -f 1` + echo "tag_prefix=$tag_prefix" >> $GITHUB_OUTPUT + + - name: Test CUDA Quantum + uses: addnab/docker-run-action@v3 + with: + image: cuda-quantum-dev:local + shell: bash + run: | + cd $CUDAQ_REPO_ROOT + ctest --output-on-failure --test-dir build -E ctest-nvqpp + RESULT_A=$? + /opt/llvm/bin/llvm-lit -v --param nvqpp_site_config=build/test/lit.site.cfg.py build/test + RESULT_B=$? 
+ if [ $RESULT_A -eq 0 ] && [ $RESULT_B -eq 0 ] + then + exit 0 + else + echo "ctest failure status = " $RESULT_A + echo "llvm-lit failure status = " $RESULT_B + exit 1 + fi + + - name: Save environment + id: env_save + if: inputs.export_environment + run: | + output_directory=/tmp + filename=${{ steps.cudaq_build.outputs.tag_prefix }}_build + + docker run --name cuda-quantum-dev cuda-quantum-dev:local + docker export cuda-quantum-dev > $output_directory/$filename.tar + docker rm -f cuda-quantum-dev + + echo "filename=$filename" >> $GITHUB_OUTPUT + echo "output_directory=$output_directory" >> $GITHUB_OUTPUT + + - name: Upload environment + uses: actions/upload-artifact@v3 + if: inputs.export_environment + with: + name: ${{ steps.env_save.outputs.filename }} + path: ${{ steps.env_save.outputs.output_directory }}/${{ steps.env_save.outputs.filename }}.tar + retention-days: 1 \ No newline at end of file diff --git a/Contributing.md b/Contributing.md index a71dff9c7d..bd6243c03c 100644 --- a/Contributing.md +++ b/Contributing.md @@ -12,9 +12,8 @@ of contribution, it will fall into three categories: 1. Share your work built upon CUDA Quantum: - We would love to hear more about your work! Please share with us on - [NVIDIA/cudaq GitHub - Discussions](https://github.com/NVIDIA/cudaq/discussions) or consider + We would love to hear more about your work! Please share with us on [GitHub + Discussions](https://github.com/NVIDIA/cuda-quantum/discussions) or consider contributing to our [examples](./docs/sphinx/examples/)! We also take any CUDA Quantum related questions on this forum. diff --git a/Overview.md b/Overview.md index 5d4ea10d63..ea61536a83 100644 --- a/Overview.md +++ b/Overview.md @@ -1,8 +1,8 @@ # Architecture Overview This document give a high-level overview of the CUDA Quantum codebase. If you -want to familiarize yourself with the code on this repo, this is the document -for you. 
+want to familiarize yourself with the code on this repository, this is the +document for you. ## Bird's Eye View @@ -32,11 +32,11 @@ the CC dialect. The dialects for Quake, QTX, and CC are defined in -[Quake](https://github.com/NVIDIA/cuda-quantum/blob/main/include/cudaq/Optimizer/Dialect/Quake/QuakeOps.td) +[Quake](https://github.com/NVIDIA/cuda-quantum/blob/releases/v0.3.0/include/cudaq/Optimizer/Dialect/Quake/QuakeOps.td) -[QTX](https://github.com/NVIDIA/cuda-quantum/blob/main/include/cudaq/Optimizer/Dialect/QTX/QTXOps.td) +[QTX](https://github.com/NVIDIA/cuda-quantum/blob/releases/v0.3.0/include/cudaq/Optimizer/Dialect/QTX/QTXOps.td) -[CC](https://github.com/NVIDIA/cuda-quantum/blob/main/include/cudaq/Optimizer/Dialect/CC/CCOps.td) +[CC](https://github.com/NVIDIA/cuda-quantum/blob/releases/v0.3.0/include/cudaq/Optimizer/Dialect/CC/CCOps.td) We have designed the compiler to be modular, and the compiler workflow itself is composed of a set of tools (executables) that achieve a specific task. @@ -136,13 +136,13 @@ process daemon. ### `tools/qpud` This folder implements the `qpud` executable. `qpud` is meant to serve as a -separate daemon process that emulates the true host, classical driver cpu, +separate daemon process that emulates the true host, classical driver CPU, quantum register architectural separation. It implements a client/server model and accepts Quake code, JIT compiles it, and enables its execution on local emulators, or physical remote vendor quantum computers. -This folder contains a TargetBackend type with specializations that target the -nvqir, quantinuum, and rigetti backends. +This folder contains a `TargetBackend` type with specializations that target the +`nvqir`, `quantinuum`, and `rigetti` backends. 
### `tools/cudaq-quake` diff --git a/docker/build/cudaqdev.Dockerfile b/docker/build/cudaqdev.Dockerfile new file mode 100644 index 0000000000..567fb3ca2a --- /dev/null +++ b/docker/build/cudaqdev.Dockerfile @@ -0,0 +1,56 @@ +# ============================================================================ # +# Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. # +# ============================================================================ # + +# Usage: +# Build from the repo root with +# docker build -t nvidia/cuda-quantum-dev:latest -f docker/build/cudaqdev.Dockerfile . +# +# If a custom base image is used, then that image (i.e. the build environment) must +# 1) have all the necessary build dependencies installed +# 2) define the LLVM_INSTALL_PREFIX environment variable indicating where the +# LLVM binaries that CUDA Quantum depends on are installed +# 3) set the CC and CXX environment variable to use the same compiler toolchain +# as the LLVM dependencies have been built with. + +# To keep the default build environment image to a reasonable size, it does not +# contain the necessary dependencies to develop GPU-based components. You may hence +# see a message along the lines of "no GPU detected" during the CUDA Quantum build. +# Please install the necessary prerequisites listed in the CUDA Quantum build script, +# or use a suitable base image, to enable developing these components. +ARG base_image=ghcr.io/nvidia/cuda-quantum-devdeps:llvm-main +FROM $base_image +
+ENV CUDAQ_REPO_ROOT=/workspaces/cuda-quantum +ENV CUDAQ_INSTALL_PREFIX=/usr/local/cudaq +ENV PATH="$CUDAQ_INSTALL_PREFIX/bin:${PATH}" +ENV PYTHONPATH="$CUDAQ_INSTALL_PREFIX:${PYTHONPATH}" +
+ARG workspace=.
+ARG destination="$CUDAQ_REPO_ROOT" +ADD "$workspace" "$destination" +WORKDIR "$destination" + +# Configuring a base image that contains the necessary dependencies for GPU +# accelerated components and passing a build argument +# install="CMAKE_BUILD_TYPE=Release FORCE_COMPILE_GPU_COMPONENTS=true" +# creates a dev image that can be used as argument to docker/release/cudaq.Dockerfile +# to create the released cuda-quantum image. +ARG install= +RUN if [ -n "$install" ]; \ + then \ + expected_prefix=$CUDAQ_INSTALL_PREFIX; \ + export $install; \ + bash scripts/build_cudaq.sh -v; \ + if [ ! "$?" -eq "0" ]; then \ + exit 1; \ + elif [ "$CUDAQ_INSTALL_PREFIX" != "$expected_prefix" ]; then \ + mkdir -p "$expected_prefix"; \ + mv "$CUDAQ_INSTALL_PREFIX"/* "$expected_prefix"; \ + rmdir "$CUDAQ_INSTALL_PREFIX"; \ + fi \ + fi diff --git a/docker/build/devdeps.Dockerfile b/docker/build/devdeps.Dockerfile new file mode 100644 index 0000000000..cb3f9c2161 --- /dev/null +++ b/docker/build/devdeps.Dockerfile @@ -0,0 +1,145 @@ +# ============================================================================ # +# Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. # +# ============================================================================ # + +# This file builds the development environment that contains the necessary development +# dependencies for building and testing CUDA Quantum. This does not include the CUDA, OpenMPI +# and other dependencies that some of the simulator backends require. These backends +# will be omitted from the build if this environment is used. +# +# Usage: +# Must be built from the repo root with: +# docker build -t ghcr.io/nvidia/cuda-quantum-devdeps:${toolchain}-latest -f docker/build/devdeps.Dockerfile --build-arg toolchain=$toolchain . 
+# +# The variable $toolchain indicates which compiler toolchain to build the LLVM libraries with. +# The toolchain used to build the LLVM binaries that CUDA Quantum depends on must be used to build +# CUDA Quantum. This image sets the CC and CXX environment variables to use that toolchain. +# Currently, llvm (default), clang16, clang15, gcc12, and gcc11 are supported. To use a different +# toolchain, add support for it to the install_toolchain.sh script. If the toolchain is set to llvm, +# then the toolchain will be built from source. + +# Build additional tools needed for CUDA Quantum documentation generation. +FROM ubuntu:22.04 as doxygenbuild +RUN apt-get update && apt-get install -y wget unzip make cmake flex bison gcc g++ python3 \ + && wget https://github.com/doxygen/doxygen/archive/9a5686aeebff882ebda518151bc5df9d757ea5f7.zip -q -O repo.zip \ + && unzip repo.zip && mv doxygen* repo && rm repo.zip \ + && cmake -G "Unix Makefiles" repo && cmake --build . --target install --config Release \ + && rm -rf repo && apt-get remove -y wget unzip make cmake flex bison gcc g++ python3 \ + && apt-get autoremove -y --purge && apt-get clean && rm -rf /var/lib/apt/lists/* + +FROM ubuntu:22.04 as llvmbuild +SHELL ["/bin/bash", "-c"] + +ARG llvm_commit +ARG toolchain=llvm + +# When a dialogue box would be needed during install, assume default configurations. +# Set here to avoid setting it for all install commands. +# Given as arg to make sure that this value is only set during build but not in the launched container. 
+ARG DEBIAN_FRONTEND=noninteractive +RUN apt-get update && apt-get install -y --no-install-recommends \ + ca-certificates openssl apt-utils \ + && apt-get autoremove -y --purge && apt-get clean && rm -rf /var/lib/apt/lists/* + +# Install prerequisites for building LLVM +RUN apt-get update && apt-get install -y --no-install-recommends \ + ninja-build cmake python3 \ + && apt-get autoremove -y --purge && apt-get clean && rm -rf /var/lib/apt/lists/* + +# Clone the LLVM source code +RUN apt-get update && apt-get install -y --no-install-recommends git \ + && mkdir /llvm-project && cd /llvm-project && git init \ + && git remote add origin https://github.com/llvm/llvm-project \ + && git fetch origin --depth=1 $llvm_commit && git reset --hard FETCH_HEAD \ + && apt-get autoremove -y --purge && apt-get clean && rm -rf /var/lib/apt/lists/* + +# Build the LLVM libraries and compiler toolchain needed to build CUDA Quantum; +# The safest option to avoid any compatibility issues is to build an application using these libraries +# with the same compiler toolchain that the libraries were compiled with. +# Since the llvm libraries needed to build CUDA Quantum include the compiler toolchain, we can build +# CUDA Quantum itself with that compiler as well. This is done when llvm is specified as the desired +# toolchain. For more information about compatibility between different C++ compilers, see e.g. 
+# - Itanium C++ ABI and C++ Standard Library implementations +# - https://libcxx.llvm.org/ +# - https://clang.llvm.org/docs/MSVCCompatibility.html +# - https://clang.llvm.org/docs/Toolchain.html +# - https://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html +# - https://gcc.gnu.org/onlinedocs/gcc/Code-Gen-Options.html#Code%20Gen%20Options +# - https://gcc.gnu.org/onlinedocs/gcc/C_002b_002b-Dialect-Options.html#C_002b_002b-Dialect-Options +ADD ./scripts/install_toolchain.sh /scripts/install_toolchain.sh +ADD ./scripts/build_llvm.sh /scripts/build_llvm.sh +RUN LLVM_INSTALL_PREFIX=/opt/llvm LLVM_SOURCE=/llvm-project \ + source scripts/install_toolchain.sh -e /opt/llvm/bootstrap -t ${toolchain} +RUN source /opt/llvm/bootstrap/init_command.sh && \ + LLVM_INSTALL_PREFIX=/opt/llvm \ + bash /scripts/build_llvm.sh -s /llvm-project -c Release -v \ + && rm -rf /llvm-project + +# We use a newer version of cmake that is only available via the Kitware apt repository. +FROM ubuntu:22.04 as cmakebuild +RUN apt-get update && apt-get install -y wget unzip make gcc g++ libssl-dev \ + && wget https://github.com/Kitware/CMake/releases/download/v3.26.3/cmake-3.26.3.zip -q \ + && unzip cmake-3.26.3.zip && rm cmake-3.26.3.zip \ + && cd cmake-3.26.3 && ./bootstrap --prefix=/usr/local/cmake-3.26/ \ + && make -j$(nproc) && make -j$(nproc) install && cd .. \ + && rm -rf cmake-3.26.3 && apt-get remove -y wget unzip make gcc g++ libssl-dev \ + && apt-get autoremove -y --purge && apt-get clean && rm -rf /var/lib/apt/lists/* + +FROM ubuntu:22.04 +SHELL ["/bin/bash", "-c"] + +# When a dialogue box would be needed during install, assume default configurations. +# Set here to avoid setting it for all install commands. +# Given as arg to make sure that this value is only set during build but not in the launched container. +ARG DEBIAN_FRONTEND=noninteractive +ENV HOME=/home SHELL=/bin/bash LANG=C.UTF-8 LC_ALL=C.UTF-8 + +# Copy over doxygen. 
+COPY --from=doxygenbuild /usr/local/bin/doxygen /usr/local/bin/doxygen +ENV PATH="${PATH}:/usr/local/bin" + +# Copy over the llvm build dependencies. +COPY --from=llvmbuild /opt/llvm /opt/llvm +ENV LLVM_INSTALL_PREFIX=/opt/llvm +ENV PATH="$PATH:$LLVM_INSTALL_PREFIX/bin/" + +# Install the C++ standard library. We could alternatively build libc++ +# as part of the LLVM build and compile against that instead of libstdc++. +RUN apt-get update && apt-get install -y --no-install-recommends libstdc++-12-dev \ + && apt-get autoremove -y --purge && apt-get clean && rm -rf /var/lib/apt/lists/* +ENV CPLUS_INCLUDE_PATH="$CPLUS_INCLUDE_PATH:/usr/include/c++/11/:/usr/include/x86_64-linux-gnu/c++/11" + +# Install the C/C++ compiler toolchain with which the LLVM dependencies have +# been built. CUDA Quantum needs to be built with that same toolchain. We use +# a wrapper script so that the path that we set CC and CXX to is independent +# on the installed toolchain. Unfortunately, a symbolic link won't work. +# Using update-alternatives for c++ and cc could maybe be a better option. +RUN source "$LLVM_INSTALL_PREFIX/bootstrap/init_command.sh" \ + && echo -e '#!/bin/bash\n"'$CC'" "$@"' > "$LLVM_INSTALL_PREFIX/bootstrap/cc" \ + && echo -e '#!/bin/bash\n"'$CXX'" "$@"' > "$LLVM_INSTALL_PREFIX/bootstrap/cxx" \ + && chmod +x "$LLVM_INSTALL_PREFIX/bootstrap/cc" \ + && chmod +x "$LLVM_INSTALL_PREFIX/bootstrap/cxx" +ENV CC="$LLVM_INSTALL_PREFIX/bootstrap/cc" +ENV CXX="$LLVM_INSTALL_PREFIX/bootstrap/cxx" + +# Install additional dependencies required to build and test CUDA Quantum. 
+COPY --from=cmakebuild /usr/local/cmake-3.26/ /usr/local/cmake-3.26/ +ENV PATH="${PATH}:/usr/local/cmake-3.26/bin" +RUN apt-get update && apt-get install -y --no-install-recommends \ + git ninja-build libcurl4-openssl-dev libssl-dev \ + python3 python3-pip libpython3-dev \ + libblas-dev \ + && python3 -m pip install --no-cache-dir \ + lit pytest numpy \ + fastapi uvicorn pydantic llvmlite \ + openfermionpyscf \ + && apt-get autoremove -y --purge && apt-get clean && rm -rf /var/lib/apt/lists/* + +# Install additional tools for CUDA Quantum documentation generation. +RUN python3 -m pip install --no-cache-dir \ + sphinx==5.3.0 sphinx_rtd_theme==1.2.0 sphinx-reredirects==0.1.2 \ + enum-tools[sphinx] breathe==4.34.0 myst-parser==1.0.0 diff --git a/docker/build/devdeps.ext.Dockerfile b/docker/build/devdeps.ext.Dockerfile new file mode 100644 index 0000000000..b1ab53fc38 --- /dev/null +++ b/docker/build/devdeps.ext.Dockerfile @@ -0,0 +1,305 @@ +# ============================================================================ # +# Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. # +# ============================================================================ # + +# This file extends the CUDA Quantum development dependencies to include the necessary +# dependencies for GPU components and backends. This image includes an OpenMPI +# installation as well as the configured CUDA packages. Which CUDA packages are +# included is defined by the cuda_packages argument. +# +# Usage: +# Must be built from the repo root with: +# docker build -t ghcr.io/nvidia/cuda-quantum-devdeps:${toolchain}-ext -f docker/build/devdeps.ext.Dockerfile . 
+# +# The variable $toolchain should indicate which compiler toolchain the development environment +# which this image extends is configure with; see also docker/build/devdeps.Dockerfile. + +ARG base_image=ghcr.io/nvidia/cuda-quantum-devdeps:gcc12-main + +FROM nvidia/cuda:11.8.0-devel-ubuntu22.04 as ompibuild +SHELL ["/bin/bash", "-c"] +ARG DEBIAN_FRONTEND=noninteractive + +ENV CUDA_INSTALL_PREFIX=/usr/local/cuda-11.8 +ENV COMMON_COMPILER_FLAGS="-march=x86-64-v3 -mtune=generic -O2 -pipe" + +# 1 - Install basic tools needed for the builds + +RUN apt-get update && apt-get install -y --no-install-recommends \ + gcc g++ gfortran python3 python3-pip \ + libcurl4-openssl-dev libssl-dev liblapack-dev libpython3-dev \ + bzip2 make sudo vim curl git wget \ + && pip install --no-cache-dir numpy \ + && apt-get autoremove -y --purge && apt-get clean && rm -rf /var/lib/apt/lists/* + +# 2 - Install SLURM PMI2 version 21.08.8 + +ENV PMI_INSTALL_PREFIX=/usr/local/pmi +RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp https://download.schedmd.com/slurm/slurm-21.08.8.tar.bz2 \ + && tar -x -f /var/tmp/slurm-21.08.8.tar.bz2 -C /var/tmp -j && cd /var/tmp/slurm-21.08.8 \ + && CC=gcc CFLAGS="$COMMON_COMPILER_FLAGS" \ + CXX=g++ CXXFLAGS="$COMMON_COMPILER_FLAGS" \ + F77=gfortran F90=gfortran FFLAGS="$COMMON_COMPILER_FLAGS" \ + FC=gfortran FCFLAGS="$COMMON_COMPILER_FLAGS" \ + LDFLAGS=-Wl,--as-needed \ + ./configure --prefix="$PMI_INSTALL_PREFIX" \ + && make -C contribs/pmi2 install \ + && rm -rf /var/tmp/slurm-21.08.8 /var/tmp/slurm-21.08.8.tar.bz2 + +# 3 - Install Mellanox OFED version 5.3-1.0.0.1 + +RUN wget -qO - https://www.mellanox.com/downloads/ofed/RPM-GPG-KEY-Mellanox | apt-key add - \ + && mkdir -p /etc/apt/sources.list.d && wget -q -nc --no-check-certificate -P /etc/apt/sources.list.d https://linux.mellanox.com/public/repo/mlnx_ofed/5.3-1.0.0.1/ubuntu20.04/mellanox_mlnx_ofed.list \ + && apt-get update -y && apt-get install -y --no-install-recommends \ + 
ibverbs-providers ibverbs-utils \ + libibmad-dev libibmad5 libibumad-dev libibumad3 \ + libibverbs-dev libibverbs1 \ + librdmacm-dev librdmacm1 \ + && apt-get autoremove -y --purge && apt-get clean && rm -rf /var/lib/apt/lists/* + +# 4 - Install GDRCOPY version 2.1 + +ENV GDRCOPY_INSTALL_PREFIX=/usr/local/gdrcopy +RUN apt-get update -y && apt-get install -y --no-install-recommends \ + autoconf automake \ + libgcrypt20-dev libnuma-dev libtool \ + && mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp https://github.com/NVIDIA/gdrcopy/archive/v2.1.tar.gz \ + && tar -x -f /var/tmp/v2.1.tar.gz -C /var/tmp -z && cd /var/tmp/gdrcopy-2.1 \ + && mkdir -p "$GDRCOPY_INSTALL_PREFIX/include" "$GDRCOPY_INSTALL_PREFIX/lib64" \ + && make PREFIX="$GDRCOPY_INSTALL_PREFIX" lib lib_install \ + && echo "$GDRCOPY_INSTALL_PREFIX/lib64" >> /etc/ld.so.conf.d/hpccm.conf && ldconfig \ + && rm -rf /var/tmp/gdrcopy-2.1 /var/tmp/v2.1.tar.gz \ + && apt-get autoremove -y --purge && apt-get clean && rm -rf /var/lib/apt/lists/* + +ENV CPATH="$GDRCOPY_INSTALL_PREFIX/include:$CPATH" +ENV LIBRARY_PATH="$GDRCOPY_INSTALL_PREFIX/lib64:$LIBRARY_PATH" + +# 5 - Install UCX version v1.13.1 + +ENV UCX_INSTALL_PREFIX=/usr/local/ucx +RUN mkdir -p /var/tmp && cd /var/tmp \ + && git clone https://github.com/openucx/ucx.git ucx && cd /var/tmp/ucx \ + && git checkout v1.13.1 \ + && ./autogen.sh \ + && CC=gcc CFLAGS="$COMMON_COMPILER_FLAGS" \ + CXX=g++ CXXFLAGS="$COMMON_COMPILER_FLAGS" \ + F77=gfortran F90=gfortran FFLAGS="$COMMON_COMPILER_FLAGS" \ + FC=gfortran FCFLAGS="$COMMON_COMPILER_FLAGS" \ + LDFLAGS=-Wl,--as-needed \ + ./configure --prefix="$UCX_INSTALL_PREFIX" \ + --with-cuda="$CUDA_INSTALL_PREFIX" --with-gdrcopy="$GDRCOPY_INSTALL_PREFIX" \ + --disable-assertions --disable-backtrace-detail --disable-debug \ + --disable-params-check --disable-static \ + --disable-doxygen-doc --disable-logging \ + --enable-mt \ + && make -j$(nproc) && make -j$(nproc) install \ + && rm -rf /var/tmp/ucx + +# 6 - 
Install MUNGE version 0.5.14 + +ENV MUNGE_INSTALL_PREFIX=/usr/local/munge +RUN mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp https://github.com/dun/munge/releases/download/munge-0.5.14/munge-0.5.14.tar.xz \ + && tar -x -f /var/tmp/munge-0.5.14.tar.xz -C /var/tmp -J && cd /var/tmp/munge-0.5.14 \ + && CC=gcc CFLAGS="$COMMON_COMPILER_FLAGS" \ + CXX=g++ CXXFLAGS="$COMMON_COMPILER_FLAGS" \ + F77=gfortran F90=gfortran FFLAGS="$COMMON_COMPILER_FLAGS" \ + FC=gfortran FCFLAGS="$COMMON_COMPILER_FLAGS" \ + LDFLAGS=-Wl,--as-needed \ + ./configure --prefix="$MUNGE_INSTALL_PREFIX" \ + && make -j$(nproc) && make -j$(nproc) install \ + && rm -rf /var/tmp/munge-0.5.14 /var/tmp/munge-0.5.14.tar.xz + +# 7 - Install PMIX version 3.2.3 + +ENV PMIX_INSTALL_PREFIX=/usr/local/pmix +RUN apt-get update -y && apt-get install -y --no-install-recommends \ + hwloc libevent-dev \ + && mkdir -p /var/tmp && wget -q -nc --no-check-certificate -P /var/tmp https://github.com/openpmix/openpmix/releases/download/v3.2.3/pmix-3.2.3.tar.gz \ + && tar -x -f /var/tmp/pmix-3.2.3.tar.gz -C /var/tmp -z && cd /var/tmp/pmix-3.2.3 \ + && CC=gcc CFLAGS="$COMMON_COMPILER_FLAGS" \ + CXX=g++ CXXFLAGS="$COMMON_COMPILER_FLAGS" \ + F77=gfortran F90=gfortran FFLAGS="$COMMON_COMPILER_FLAGS" \ + FC=gfortran FCFLAGS="$COMMON_COMPILER_FLAGS" \ + LDFLAGS=-Wl,--as-needed \ + ./configure --prefix="$PMIX_INSTALL_PREFIX" \ + --with-munge="$MUNGE_INSTALL_PREFIX" \ + && make -j$(nproc) && make -j$(nproc) install \ + && rm -rf /var/tmp/pmix-3.2.3 /var/tmp/pmix-3.2.3.tar.gz \ + && apt-get autoremove -y --purge && apt-get clean && rm -rf /var/lib/apt/lists/* + +ENV CPATH="$PMIX_INSTALL_PREFIX/include:$CPATH" \ + LD_LIBRARY_PATH="$PMIX_INSTALL_PREFIX/lib:$LD_LIBRARY_PATH" \ + PATH="$PMIX_INSTALL_PREFIX/bin:$PATH" + +# 8 - Install OMPI version 4.1.4 + +ENV OPENMPI_INSTALL_PREFIX=/usr/local/openmpi +RUN apt-get update -y && apt-get install -y --no-install-recommends \ + flex openssh-client \ + && mkdir -p /var/tmp 
&& cd /var/tmp \ + && git clone https://github.com/open-mpi/ompi.git ompi && cd /var/tmp/ompi \ + && git checkout v4.1.4 \ + && ./autogen.pl \ + && CC=gcc CFLAGS="$COMMON_COMPILER_FLAGS" \ + CXX=g++ CXXFLAGS="$COMMON_COMPILER_FLAGS" \ + F77=gfortran F90=gfortran FFLAGS="$COMMON_COMPILER_FLAGS" \ + FC=gfortran FCFLAGS="$COMMON_COMPILER_FLAGS" \ + LDFLAGS=-Wl,--as-needed \ + ./configure --prefix="$OPENMPI_INSTALL_PREFIX" \ + --disable-getpwuid --disable-static \ + --disable-debug --disable-mem-debug --disable-mem-profile --disable-memchecker \ + --enable-mca-no-build=btl-uct --enable-mpi1-compatibility --enable-oshmem \ + --with-cuda="$CUDA_INSTALL_PREFIX" \ + --with-slurm --with-pmi="$PMI_INSTALL_PREFIX" \ + --with-pmix="$PMIX_INSTALL_PREFIX" \ + --with-ucx="$UCX_INSTALL_PREFIX" \ + --without-verbs \ + && make -j$(nproc) && make -j$(nproc) install \ + && rm -rf /var/tmp/ompi \ + && apt-get autoremove -y --purge && apt-get clean && rm -rf /var/lib/apt/lists/* + +# Build the final image that has CUDA Quantum and all its dev dependencies installed, as well as +# OpenMPI, its dependencies, and additional tools for developing CUDA Quantum backends and extensions. +FROM $base_image +SHELL ["/bin/bash", "-c"] + +# When a dialogue box would be needed during install, assume default configurations. +# Set here to avoid setting it for all install commands. +# Given as arg to make sure that this value is only set during build but not in the launched container. +ARG DEBIAN_FRONTEND=noninteractive +RUN apt update && apt-get install -y --no-install-recommends \ + ca-certificates openssl wget \ + && apt-get autoremove -y --purge && apt-get clean && rm -rf /var/lib/apt/lists/* + +# Install Mellanox OFED runtime dependencies. 
+ +RUN apt-get update && apt-get install -y --no-install-recommends gnupg \ + && wget -qO - https://www.mellanox.com/downloads/ofed/RPM-GPG-KEY-Mellanox | apt-key add - \ + && mkdir -p /etc/apt/sources.list.d && wget -q -nc --no-check-certificate -P /etc/apt/sources.list.d https://linux.mellanox.com/public/repo/mlnx_ofed/5.3-1.0.0.1/ubuntu20.04/mellanox_mlnx_ofed.list \ + && apt-get update -y && apt-get install -y --no-install-recommends \ + ibverbs-providers ibverbs-utils \ + libibmad5 libibumad3 libibverbs1 librdmacm1 \ + && apt-get remove -y gnupg \ + && apt-get autoremove -y --purge && apt-get clean && rm -rf /var/lib/apt/lists/* + +# Copy over SLURM PMI2. + +COPY --from=ompibuild /usr/local/pmi /usr/local/pmi +ENV PMI_INSTALL_PREFIX=/usr/local/pmi +ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$PMIX_INSTALL_PREFIX/lib" + +# Copy over GDRCOPY and install runtime dependencies. + +COPY --from=ompibuild /usr/local/gdrcopy /usr/local/gdrcopy +ENV GDRCOPY_INSTALL_PREFIX=/usr/local/gdrcopy +ENV CPATH="$GDRCOPY_INSTALL_PREFIX/include:$CPATH" +ENV LIBRARY_PATH="$GDRCOPY_INSTALL_PREFIX/lib64:$LIBRARY_PATH" + +RUN echo "$GDRCOPY_INSTALL_PREFIX/lib64" >> /etc/ld.so.conf.d/hpccm.conf && ldconfig \ + && apt-get update -y && apt-get install -y --no-install-recommends \ + libgcrypt20 libnuma1 \ + && apt-get autoremove -y --purge && apt-get clean && rm -rf /var/lib/apt/lists/* + +# Copy over UCX. + +COPY --from=ompibuild /usr/local/ucx /usr/local/ucx +ENV UCX_INSTALL_PREFIX=/usr/local/ucx +ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$UCX_INSTALL_PREFIX/lib" + +# Copy over MUNGE. + +COPY --from=ompibuild /usr/local/munge /usr/local/munge +ENV MUNGE_INSTALL_PREFIX=/usr/local/munge + +# Copy over PMIX and install runtime dependencies. 
+ +COPY --from=ompibuild /usr/local/pmix /usr/local/pmix +ENV PMIX_INSTALL_PREFIX=/usr/local/pmix +ENV PATH="$PMIX_INSTALL_PREFIX/bin:$PATH" +ENV CPATH="$PMIX_INSTALL_PREFIX/include:$CPATH" +ENV LD_LIBRARY_PATH="$PMIX_INSTALL_PREFIX/lib:$LD_LIBRARY_PATH" + +RUN apt-get update -y && apt-get install -y --no-install-recommends \ + hwloc libevent-dev \ + && apt-get autoremove -y --purge && apt-get clean && rm -rf /var/lib/apt/lists/* + +# Copy over OpenMPI and install runtime dependencies. + +COPY --from=ompibuild /usr/local/openmpi /usr/local/openmpi +ENV OPENMPI_INSTALL_PREFIX=/usr/local/openmpi +ENV MPI_HOME="$OPENMPI_INSTALL_PREFIX" +ENV MPI_ROOT="$OPENMPI_INSTALL_PREFIX" +ENV PATH="$PATH:$OPENMPI_INSTALL_PREFIX/bin" +ENV CPATH="$OPENMPI_INSTALL_PREFIX/include:/usr/local/ofed/5.0-0/include:$CPATH" +ENV LIBRARY_PATH="/usr/local/ofed/5.0-0/lib:$LIBRARY_PATH" +ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$OPENMPI_INSTALL_PREFIX/lib" + +RUN echo "/usr/local/openmpi/lib" >> /etc/ld.so.conf.d/hpccm.conf && ldconfig \ + && apt-get update -y && apt-get install -y --no-install-recommends \ + flex openssh-client \ + && apt-get autoremove -y --purge && apt-get clean && rm -rf /var/lib/apt/lists/* + +# Set some configurations in the form of environment variables. + +ENV OMPI_MCA_btl=^smcuda,vader,tcp,uct,openib +ENV OMPI_MCA_pml=ucx +ENV UCX_IB_PCI_RELAXED_ORDERING=on +ENV UCX_MAX_RNDV_RAILS=1 +ENV UCX_MEMTYPE_CACHE=n +ENV UCX_TLS=rc,cuda_copy,cuda_ipc,gdr_copy,sm + +# Install cuQuantum libraries. 
+ +RUN apt-get update && apt-get install -y --no-install-recommends xz-utils \ + && wget https://developer.download.nvidia.com/compute/cuquantum/redist/cuquantum/linux-x86_64/cuquantum-linux-x86_64-22.11.0.13-archive.tar.xz \ + && tar xf cuquantum-linux-x86_64-22.11.0.13-archive.tar.xz \ + && mkdir -p /opt/nvidia && mv cuquantum-linux-x86_64-22.11.0.13-archive /opt/nvidia/cuquantum \ + && cd / && rm -rf cuquantum-linux-x86_64-22.11.0.13-archive.tar.xz \ + && apt-get remove -y xz-utils \ + && apt-get autoremove -y --purge && apt-get clean && rm -rf /var/lib/apt/lists/* + +ENV CUQUANTUM_INSTALL_PREFIX=/opt/nvidia/cuquantum +ENV LD_LIBRARY_PATH="$CUQUANTUM_INSTALL_PREFIX/lib:$LD_LIBRARY_PATH" + +# Install cuTensor libraries. +RUN apt-get update && apt-get install -y --no-install-recommends xz-utils \ + && wget https://developer.download.nvidia.com/compute/cutensor/redist/libcutensor/linux-x86_64/libcutensor-linux-x86_64-1.6.2.3-archive.tar.xz \ + && tar xf libcutensor-linux-x86_64-1.6.2.3-archive.tar.xz && cd libcutensor-linux-x86_64-1.6.2.3-archive \ + && mkdir -p /opt/nvidia/cutensor && mv include /opt/nvidia/cutensor/ && mv lib/11 /opt/nvidia/cutensor/lib \ + && cd / && rm -rf libcutensor-linux-x86_64-1.6.2.3-archive* \ + && apt-get remove -y xz-utils \ + && apt-get autoremove -y --purge && apt-get clean && rm -rf /var/lib/apt/lists/* + +ENV CUTENSOR_INSTALL_PREFIX=/opt/nvidia/cutensor +ENV LD_LIBRARY_PATH="$CUTENSOR_INSTALL_PREFIX/lib:$LD_LIBRARY_PATH" + +# Install CUDA 11.8. 
+ +ARG cuda_packages="cuda-cudart-11-8 cuda-compiler-11-8 libcublas-dev-11-8" +RUN wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-keyring_1.0-1_all.deb \ + && dpkg -i cuda-keyring_1.0-1_all.deb \ + && apt-get update && apt-get install -y --no-install-recommends $cuda_packages \ + && rm cuda-keyring_1.0-1_all.deb \ + && apt-get autoremove -y --purge && apt-get clean && rm -rf /var/lib/apt/lists/* + +# The installation of CUDA above creates files that will be injected upon launching the container +# with the --gpu=all flag. This creates issues upon container launch. We hence remove these files. +# As long as the container is launched with the --gpu=all flag, the GPUs remain accessible and CUDA +# is fully functional. See also https://github.com/NVIDIA/nvidia-docker/issues/1699. +RUN rm -rf \ + /usr/lib/x86_64-linux-gnu/libcuda.so* \ + /usr/lib/x86_64-linux-gnu/libnvcuvid.so* \ + /usr/lib/x86_64-linux-gnu/libnvidia-*.so* \ + /usr/lib/firmware \ + /usr/local/cuda/compat/lib + +ENV CUDA_INSTALL_PREFIX=/usr/local/cuda-11.8 +ENV CUDA_HOME="$CUDA_INSTALL_PREFIX" +ENV CUDA_ROOT="$CUDA_INSTALL_PREFIX" +ENV CUDA_PATH="$CUDA_INSTALL_PREFIX" +ENV PATH="${CUDA_INSTALL_PREFIX}/lib64/:${PATH}:${CUDA_INSTALL_PREFIX}/bin" +ENV LD_LIBRARY_PATH="${CUDA_INSTALL_PREFIX}/lib64:${CUDA_INSTALL_PREFIX}/extras/CUPTI/lib64:${LD_LIBRARY_PATH}" diff --git a/docker/release/README.md b/docker/release/README.md new file mode 100644 index 0000000000..b858a9711c --- /dev/null +++ b/docker/release/README.md @@ -0,0 +1,22 @@ +# Welcome to CUDA Quantum + +The [CUDA Quantum](https://developer.nvidia.com/cuda-quantum) toolset for hybrid +quantum-classical computers enables integration and programming of quantum +processing units (QPUs), GPUs, and CPUs in one system. + +This Docker image contains all necessary tools for application +development using CUDA Quantum in C++ or Python. 
This includes an installation +of the NVQ++ compiler, the CUDA Quantum runtime, as well as a selection of +integrated CPU and GPU backends for rapid application development and testing. +Additional software can be installed into the container using the password +`cuda-quantum`. + +The image includes a folder with examples in C++ and Python in the home +directory. You can find more information about CUDA Quantum including a link to +the documentation on our [GitHub +repository](https://github.com/NVIDIA/cuda-quantum). + +The CUDA Quantum installation in this image is licensed under [Apache License +2.0](https://www.apache.org/licenses/LICENSE-2.0). More information about the +license and third party libraries can be found in the LICENSE and NOTICE files +of the CUDA Quantum installation folder defined by `CUDA_QUANTUM_PATH`. diff --git a/docker/release/cudaq.Dockerfile b/docker/release/cudaq.Dockerfile new file mode 100644 index 0000000000..7000f31af3 --- /dev/null +++ b/docker/release/cudaq.Dockerfile @@ -0,0 +1,127 @@ +# ============================================================================ # +# Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. # +# All rights reserved. # +# # +# This source code and the accompanying materials are made available under # +# the terms of the Apache License 2.0 which accompanies this distribution. # +# ============================================================================ # + +# This file builds an image that contains a CUDA Quantum installation and all necessary runtime +# dependencies for using CUDA Quantum. +# +# This image requires specifying an image as argument that contains a CUDA Quantum installation +# along with its development dependencies. This file then copies that installation into a more +# minimal runtime environment. +# A suitable dev image can be obtained by building docker/build/cudaqdev.Dockerfile. 
+# +# Usage: +# Must be built from the repo root with: +# docker build -t ghcr.io/nvidia/cuda-quantum:latest -f docker/release/cudaq.Dockerfile . +# +# The build argument dev_image defines the CUDA Quantum dev image to use, and the argument +# dev_tag defines the tag of that image. + +ARG dev_image=nvidia/cuda-quantum-dev +ARG dev_tag=latest +FROM $dev_image:$dev_tag as cudaqbuild + +# Unfortunately, there is no way to use the environment variables defined in the dev image +# to determine where to copy files from. See also e.g. https://github.com/moby/moby/issues/37345 +# The rather ugly workaround to achieve encapsulation is to make a copy here where we have +# access to the environment variables, so that the hardcoded paths in this file don't need to +# match the paths in the dev image. +RUN if [ "$LLVM_INSTALL_PREFIX" != "/usr/local/llvm" ]; then mv "$LLVM_INSTALL_PREFIX" /usr/local/llvm; fi +RUN if [ "$CUDAQ_INSTALL_PREFIX" != "/usr/local/cudaq" ]; then mv "$CUDAQ_INSTALL_PREFIX" /usr/local/cudaq; fi +RUN mkdir -p /usr/local/cuquantum && \ + if [ "$CUQUANTUM_INSTALL_PREFIX" != "/usr/local/cuquantum" ] && [ -d "$CUQUANTUM_INSTALL_PREFIX" ]; then \ + mv "$CUQUANTUM_INSTALL_PREFIX"/* /usr/local/cuquantum; \ + fi + +FROM ubuntu:22.04 +SHELL ["/bin/bash", "-c"] +ENV SHELL=/bin/bash LANG=C.UTF-8 LC_ALL=C.UTF-8 + +# When a dialogue box would be needed during install, assume default configurations. +# Set here to avoid setting it for all install commands. +# Given as arg to make sure that this value is only set during build but not in the launched container. +ARG DEBIAN_FRONTEND=noninteractive +RUN apt-get update && apt-get install -y --no-install-recommends \ + ca-certificates openssl wget git sudo vim \ + && apt-get autoremove -y --purge && apt-get clean && rm -rf /var/lib/apt/lists/* + +# Install CUDA Quantum runtime dependencies. 
+ +RUN apt-get update && apt-get install -y --no-install-recommends \ + python3 python3-pip libpython3-dev \ + libstdc++-12-dev \ + libcurl4-openssl-dev libssl-dev \ + && apt-get autoremove -y && apt-get clean && rm -rf /var/lib/apt/lists/* \ + && python3 -m pip install --no-cache-dir numpy \ + && ln -s /bin/python3 /bin/python + +ENV CPLUS_INCLUDE_PATH="$CPLUS_INCLUDE_PATH:/usr/include/c++/11/:/usr/include/x86_64-linux-gnu/c++/11" + +# Copy over the CUDA Quantum installation, and the necessary compiler tools. + +ARG release_version= +ENV CUDA_QUANTUM_VERSION=$release_version +ENV CUDA_QUANTUM_PATH="/opt/nvidia/cudaq" + +COPY --from=cudaqbuild "/usr/local/llvm/bin/clang++" "$CUDA_QUANTUM_PATH/llvm/bin/clang++" +COPY --from=cudaqbuild "/usr/local/llvm/lib/clang" "$CUDA_QUANTUM_PATH/llvm/lib/clang" +COPY --from=cudaqbuild "/usr/local/llvm/bin/llc" "$CUDA_QUANTUM_PATH/llvm/bin/llc" +COPY --from=cudaqbuild "/usr/local/llvm/bin/lld" "$CUDA_QUANTUM_PATH/llvm/bin/lld" +COPY --from=cudaqbuild "/usr/local/llvm/bin/ld.lld" "$CUDA_QUANTUM_PATH/llvm/bin/ld.lld" +COPY --from=cudaqbuild "/usr/local/cuquantum/" "$CUDA_QUANTUM_PATH/cuquantum/" +COPY --from=cudaqbuild "/usr/local/cudaq/" "$CUDA_QUANTUM_PATH" + +ENV PATH "${PATH}:$CUDA_QUANTUM_PATH/bin" +ENV PYTHONPATH "${PYTHONPATH}:$CUDA_QUANTUM_PATH" +ENV LD_LIBRARY_PATH="$LD_LIBRARY_PATH:$CUDA_QUANTUM_PATH/lib" + +# Install additional runtime dependencies for optional components if present. 
+ +RUN if [ -n "$(ls -A $CUDA_QUANTUM_PATH/cuquantum)" ]; then \ + wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-keyring_1.0-1_all.deb \ + && dpkg -i cuda-keyring_1.0-1_all.deb \ + && apt-get update && apt-get install -y --no-install-recommends cuda-runtime-11-8 \ + && rm cuda-keyring_1.0-1_all.deb \ + && apt-get autoremove -y --purge && apt-get clean && rm -rf /var/lib/apt/lists/*; \ + fi + +ENV LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:/usr/local/cuda-11.8/lib64:/usr/local/cuda-11.8/extras/CUPTI/lib64" + +# For now, the CUDA Quantum build hardcodes certain paths and hence expects to find its +# dependencies in specific locations. While a relocatable installation of CUDA Quantum should +# be a good/better option in the future, for now we make sure to copy the dependencies to the +# expected locations. The CUDA Quantum installation contains an XML file that lists these. +RUN rdom () { local IFS=\> ; read -d \< E C ;} && \ + while rdom; do \ + if [ "$E" = "LLVM_INSTALL_PREFIX" ]; then \ + mkdir -p "$C" && mv "$CUDA_QUANTUM_PATH/llvm"/* "$C"; \ + elif [ "$E" = "CUQUANTUM_INSTALL_PREFIX" ] && [ -n "$(ls -A $CUDA_QUANTUM_PATH/cuquantum)" ]; then \ + mkdir -p "$C" && mv "$CUDA_QUANTUM_PATH/cuquantum"/* "$C"; \ + fi \ + done < "$CUDA_QUANTUM_PATH/build_config.xml" + +# Include additional readmes and samples that are distributed with the image. 
+ +ARG COPYRIGHT_NOTICE="=========================\n\ + NVIDIA CUDA Quantum \n\ +=========================\n\n\ +Version: ${CUDA_QUANTUM_VERSION}\n\n\ +Copyright (c) 2023 NVIDIA Corporation & Affiliates \n\ +All rights reserved.\n" +RUN echo -e "$COPYRIGHT_NOTICE" > "$CUDA_QUANTUM_PATH/Copyright.txt" +RUN echo 'cat "$CUDA_QUANTUM_PATH/Copyright.txt"' > /etc/profile.d/welcome.sh + +# Create cudaq user + +RUN useradd -m cudaq && echo "cudaq:cuda-quantum" | chpasswd && adduser cudaq sudo +ADD ./docs/sphinx/examples/ /home/cudaq/examples/ +ADD ./docker/release/README.md /home/cudaq/README.md +RUN chown -R cudaq /home/cudaq && chgrp -R cudaq /home/cudaq + +USER cudaq +WORKDIR /home/cudaq +ENTRYPOINT ["bash", "-l"] diff --git a/docs/README.md b/docs/README.md index 17405ffd5b..c2f4a7f9a1 100644 --- a/docs/README.md +++ b/docs/README.md @@ -68,8 +68,6 @@ Additional links that may be helpful that are not listed above: objects](https://www.sphinx-doc.org/en/master/usage/restructuredtext/domains.html#cross-referencing) - [Sphinx configuration options](https://www.sphinx-doc.org/en/master/usage/configuration.html) -- [Common Sphinx warnings and - fixes](https://developer.mantidproject.org/Standards/DocumentationGuideForDevs.html#common-warnings-and-fixes) - [Syntax highlighting in inline code](https://sphinxawesome.xyz/demo/inline-code/#syntax-highlighting-in-inline-code) - [Test examples in Python diff --git a/docs/sphinx/conf.py b/docs/sphinx/conf.py index a8cc81825a..514c3983e4 100644 --- a/docs/sphinx/conf.py +++ b/docs/sphinx/conf.py @@ -18,6 +18,7 @@ # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. 
+import os import sphinx_rtd_theme # -- Project information ----------------------------------------------------- @@ -27,14 +28,10 @@ author = 'NVIDIA Corporation & Affiliates' # The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. +# |version| used in various places throughout the docs. # The short X.Y version. -version = '0.3' #__version__ - -# The full version, including alpha/beta/rc tags. -release = '0.3' #__version__ +version = os.getenv("CUDA_QUANTUM_VERSION", "0.3.0") # -- General configuration --------------------------------------------------- @@ -44,7 +41,6 @@ extensions = [ # 'sphinx.ext.imgmath', 'sphinx.ext.ifconfig', - 'breathe', 'sphinx.ext.autodoc', # to get documentation from python doc comments 'sphinx.ext.autosummary', 'sphinx.ext.autosectionlabel', @@ -54,6 +50,8 @@ #'sphinx.ext.mathjax', 'sphinx.ext.napoleon', # support google/numpy style docstrings #'sphinx.ext.linkcode', + 'sphinx_reredirects', + 'breathe', 'enum_tools.autoenum', # for pretty-print Python enums 'myst_parser', # for including markdown files ] @@ -139,6 +137,10 @@ def setup(app): 'numpy': ('https://numpy.org/doc/stable/', None), } +redirects = { + "versions": "../latest/releases.html" +} + nitpick_ignore = [ ('cpp:identifier', 'GlobalRegisterName'), ('cpp:identifier', 'CountsDictionary::iterator'), diff --git a/docs/sphinx/index.rst b/docs/sphinx/index.rst index bf6bdd788d..0262ec80e3 100644 --- a/docs/sphinx/index.rst +++ b/docs/sphinx/index.rst @@ -3,6 +3,7 @@ CUDA Quantum ************ Welcome to the CUDA Quantum documentation page! +You are browsing the documentation for version |version| of CUDA Quantum. You can find documentation for all released versions :doc:`here <versions>`. **CUDA Quantum** is a single-source, modern C++ programming model and compiler platform for the quantum acceleration of existing heterogeneous computing architectures. 
@@ -18,3 +19,4 @@ for the quantum acceleration of existing heterogeneous computing architectures. Simulator Backends Specifications API Reference + Other Versions diff --git a/docs/sphinx/install.rst b/docs/sphinx/install.rst index 53614e8390..4c8c67b796 100644 --- a/docs/sphinx/install.rst +++ b/docs/sphinx/install.rst @@ -4,11 +4,15 @@ CUDA Quantum Open Beta Installation Docker Image -------------------- -Install the Public Beta Docker Image +Install the Docker Image ++++++++++++++++++++++++++++++++++++ -This public beta release of CUDA Quantum is being deployed via -a provided Docker image. The name of the image is :code:`nvcr.io/nvidia/cuda-quantum:0.3.0`, -and it has been built for :code:`x86_64,amd64` platforms. + +Docker images for all CUDA Quantum releases are available on the `NGC Container Registry`_. +The image for the latest version under development is built from source on our `GitHub repository `__. + +.. _NGC Container Registry: https://catalog.ngc.nvidia.com/orgs/nvidia/containers/cuda-quantum + +To download the 0.3.0 release from NGC, for example, use the command .. code-block:: console @@ -23,12 +27,11 @@ The container can be run using the following command .. code-block:: console - docker run -it --name cuda-quantum nvcr.io/nvidia/cuda-quantum:0.3.0 + docker run -it --name cuda-quantum -This will give you terminal access to the created container, but you are free to attach -an existing VSCode IDE to it. +replacing :code:`` with the name and tag of the image you downloaded. -(what you'll see) +This will give you terminal access to the created container, for example .. code-block:: console @@ -40,13 +43,11 @@ an existing VSCode IDE to it. NVIDIA CUDA Quantum ========================= - CUDA Quantum Version 0.3.0 + Version: 0.3.0 Copyright (c) 2023 NVIDIA Corporation & Affiliates All rights reserved. 
- cudaq@container:~$ ls - README.md examples cudaq@container:~$ ls examples/ cpp python @@ -77,12 +78,12 @@ To do so, install the `Dev Containers extension`_: .. image:: _static/devContainersExtension.png Follow the steps :ref:`above` to start the container. -Open VS Code and navigate to the Remote Explorer. You should see the running cuda-quantum dev container listed there. +Open VS Code and navigate to the Remote Explorer. You should see the running cuda-quantum development container listed there. .. image:: _static/attachToDevContainer.png Click on :code:`Attach to Container`. A new VS Code instance will open in that container. Open the `/home/cudaq` -folder to see the README and the CUDA Quantum examples that are included in the container. To run the examples, +folder to see the `README.md` file and the CUDA Quantum examples that are included in the container. To run the examples, open a terminal by going to the Terminal menu and select :code:`New Terminal`. .. image:: _static/openTerminal.png @@ -100,75 +101,16 @@ or run the Python examples using the Python interpreter. VS Code extensions that you have installed locally, such as e.g. an extension for Jupyter notebooks, may not be automatically active in the container environment. You may need to install your preferred - extension in the container environment for all of your dev tools to be available. + extension in the container environment for all of your development tools to be available. Build CUDA Quantum from Source ------------------------------ -Here we will assume a Ubuntu 22.04 system. Adjust the package manager calls -for your distribution. Make sure that recent versions `cmake` and `ninja` installed. -The build also requires a recent version of `clang/clang++` or `gcc/g++` -(must have C++20 support). - -Get the basic compilers you'll need via apt-get -+++++++++++++++++++++++++++++++++++++++++++++++ -.. 
code:: bash - - apt-get update && apt-get install -y --no-install-recommends gcc g++ - -On Ubuntu 22.04 this will get you GCC 11. - -Get cuQuantum (optional) -++++++++++++++++++++++++ - -.. code:: bash - - wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/cuda-keyring_1.0-1_all.deb \ - dpkg -i cuda-keyring_1.0-1_all.deb - apt-get update && apt-get -y install cuquantum cuquantum-dev - -Get LLVM / Clang / MLIR -++++++++++++++++++++++++ - -You will need the same version of LLVM as our submodule in `tpls/llvm`. - -.. code:: bash - - mkdir llvm-project && cd llvm-project - git init - git remote add origin https://github.com/llvm/llvm-project - # note this will change as the project evolves, - # Must be == to the hash we use for the tpls/llvm submodule. - git fetch origin --depth=1 c0b45fef155fbe3f17f9a6f99074682c69545488 - git reset --hard FETCH_HEAD - mkdir build && cd build - cmake .. -G Ninja - -DLLVM_TARGETS_TO_BUILD="host" \ - -DCMAKE_INSTALL_PREFIX=/opt/llvm/ - -DLLVM_ENABLE_PROJECTS="clang;mlir" - -DCMAKE_BUILD_TYPE=Release - -DLLVM_ENABLE_ASSERTIONS=ON - -DLLVM_INSTALL_UTILS=TRUE - ninja install - # This is needed for FileCheck tests. - cp bin/llvm-lit /opt/llvm/bin/ - -Build CUDA Quantum -++++++++++++++++++ -You must use the same compiler that you compiled LLVM with to compile CUDA Quantum. - -.. code:: bash - - git clone https://github.com/NVIDIA/cuda-quantum && cd cuda-quantum - mkdir build && cd build - cmake .. -G -DCMAKE_INSTALL_PREFIX=$HOME/.cudaq - -DLLVM_DIR=/path/to/llvm/lib/cmake/llvm - -DCUDAQ_ENABLE_PYTHON=TRUE - \# (optional, if cuquantum is installed) - -DCUSTATEVEC=/opt/nvidia/cuquantum - ninja install - ctest +For more information about building CUDA Quantum from source, +we refer to the `CUDA Quantum GitHub repository`_. + +.. 
_CUDA Quantum GitHub repository: https://github.com/NVIDIA/cuda-quantum/blob/main/Building.md Next Steps ---------- @@ -176,7 +118,7 @@ With the CUDA Quantum Docker image installed and a container up and running, che Using CUDA Quantum page_. To run the examples codes in the container, checkout the Compiling and Executing section here_. -Once in the VSCode IDE or in the terminal for the container in headless mode, you'll +Once in the VS Code IDE or in the terminal for the container in headless mode, you'll notice there is an :code:`examples/` folder. These examples are provided to get you started with CUDA Quantum and understanding the programming and execution model. Start of by trying to compile a simple one, like :code:`examples/cpp/basics/static_kernel.cpp` diff --git a/docs/sphinx/releases.rst b/docs/sphinx/releases.rst new file mode 100644 index 0000000000..dda9f01d41 --- /dev/null +++ b/docs/sphinx/releases.rst @@ -0,0 +1,13 @@ +************************ +CUDA Quantum Releases +************************ + +0.3.0 +---------- + +The 0.3.0 release of CUDA Quantum is available as a Docker image for `linux/amd64` platforms. + +- `Download `__ +- `Documentation `__ + +.. TODO: add release notes for official releases. diff --git a/docs/sphinx/versions.rst b/docs/sphinx/versions.rst new file mode 100644 index 0000000000..f87d12d02c --- /dev/null +++ b/docs/sphinx/versions.rst @@ -0,0 +1,6 @@ +************************ +CUDA Quantum Versions +************************ + +.. 
include:: releases.rst + :start-line: 4 diff --git a/scripts/build_cudaq.sh b/scripts/build_cudaq.sh index 62dc636a96..fd825ad566 100644 --- a/scripts/build_cudaq.sh +++ b/scripts/build_cudaq.sh @@ -20,10 +20,18 @@ # CUQUANTUM_INSTALL_PREFIX=/path/to/dir bash scripts/build_cudaq.sh # # Prerequisites: -# - git, ninja-build, cmake, python3, libpython3-dev, libstdc++-11-dev, libblas-dev (all available via apt install) +# - git, ninja-build, cmake, python3, libpython3-dev, libstdc++-12-dev, libblas-dev (all available via apt install) # - LLVM binaries, libraries, and headers as built by scripts/build_llvm.sh. # - To include simulator backends that use cuQuantum the packages cuquantum and cuquantum-dev are needed. # - Additional python dependencies for running and testing: lit pytest numpy (available via pip install) +# - Additional dependencies for GPU-accelerated components: cuquantum, cutensor, cuda-11-8 +# +# Note: +# The CUDA Quantum build automatically detects whether GPUs are available and will +# only include any GPU based components if they are. It is possible to override this +# behavior and force building GPU components even if no GPU is detected by setting the +# FORCE_COMPILE_GPU_COMPONENTS environment variable to true. This is useful primarily +# when building docker images since GPUs may not be accessible during build. LLVM_INSTALL_PREFIX=${LLVM_INSTALL_PREFIX:-/opt/llvm} CUQUANTUM_INSTALL_PREFIX=${CUQUANTUM_INSTALL_PREFIX:-/opt/nvidia/cuquantum} @@ -31,14 +39,17 @@ CUDAQ_INSTALL_PREFIX=${CUDAQ_INSTALL_PREFIX:-"$HOME/.cudaq"} # Process command line arguments (return 0 2>/dev/null) && is_sourced=true || is_sourced=false -build_configuration=Release +build_configuration=${CMAKE_BUILD_TYPE:-Release} +verbose=false __optind__=$OPTIND OPTIND=1 -while getopts ":c:" opt; do +while getopts ":c:v" opt; do case $opt in c) build_configuration="$OPTARG" ;; + v) verbose=true + ;; \?) 
echo "Invalid command line option -$OPTARG" >&2 if $is_sourced; then return 1; else exit 1; fi ;; @@ -80,7 +91,7 @@ fi cuda_version=`nvcc --version 2>/dev/null | grep -o 'release [0-9]*\.[0-9]*' | cut -d ' ' -f 2` cuda_major=`echo $cuda_version | cut -d '.' -f 1` cuda_minor=`echo $cuda_version | cut -d '.' -f 2` -if [ ! -x "$(command -v nvidia-smi)" ] && [ "$COMPILE_GPU_BACKENDS" != "true" ] ; then # the second check here is to avoid having to use https://discuss.huggingface.co/t/how-to-deal-with-no-gpu-during-docker-build-time/28544 +if [ ! -x "$(command -v nvidia-smi)" ] && [ "$FORCE_COMPILE_GPU_COMPONENTS" != "true" ] ; then # the second check here is to avoid having to use https://discuss.huggingface.co/t/how-to-deal-with-no-gpu-during-docker-build-time/28544 echo "No GPU detected - GPU backends will be omitted from the build." custatevec_flag="" elif [ "$cuda_version" = "" ] || [ "$cuda_major" -lt "11" ] || ([ "$cuda_minor" -lt "8" ] && [ "$cuda_major" -eq "11" ]); then @@ -104,14 +115,20 @@ mkdir -p "$CUDAQ_INSTALL_PREFIX/bin" mkdir -p "$working_dir/build" && cd "$working_dir/build" && rm -rf * mkdir -p logs && rm -rf logs/* +# Determine linker and linker flags +cmake_common_linker_flags_init="" +if [ -x "$(command -v "$LLVM_INSTALL_PREFIX/bin/ld.lld")" ]; then + echo "Configuring nvq++ to use the lld linker by default." + NVQPP_LD_PATH="$LLVM_INSTALL_PREFIX/bin/ld.lld" +fi + # Generate CMake files # (utils are needed for custom testing tools, e.g. CircuitCheck) -cmake_common_linker_flags_init="" -llvm_dir="$llvm_lib_dir/cmake/llvm" -echo "Preparing CUDA Quantum build with LLVM_DIR=$llvm_dir..." -cmake -G Ninja "$repo_root" \ +echo "Preparing CUDA Quantum build with LLVM installation in $LLVM_INSTALL_PREFIX..." 
+cmake_args="-G Ninja "$repo_root" \ + -DCMAKE_COMPILE_WARNING_AS_ERROR=OFF \ + -DCMAKE_INSTALL_PREFIX="$CUDAQ_INSTALL_PREFIX" \ - -DLLVM_DIR="$llvm_dir" \ + -DLLVM_DIR="$llvm_lib_dir/cmake/llvm" \ -DNVQPP_LD_PATH="$NVQPP_LD_PATH" \ -DCMAKE_BUILD_TYPE=$build_configuration \ -DCMAKE_EXPORT_COMPILE_COMMANDS=ON \ @@ -120,18 +137,37 @@ cmake -G Ninja "$repo_root" \ -DCMAKE_EXE_LINKER_FLAGS_INIT="$cmake_common_linker_flags_init" \ -DCMAKE_MODULE_LINKER_FLAGS_INIT="$cmake_common_linker_flags_init" \ -DCMAKE_SHARED_LINKER_FLAGS_INIT="$cmake_common_linker_flags_init" \ - $custatevec_flag 2> logs/cmake_error.txt 1> logs/cmake_output.txt + $custatevec_flag" +if $verbose; then + cmake $cmake_args +else + cmake $cmake_args 2> logs/cmake_error.txt 1> logs/cmake_output.txt +fi # Build and install CUDAQ echo "Building CUDA Quantum with configuration $build_configuration..." logs_dir=`pwd`/logs -echo "The progress of the build is being logged to $logs_dir/ninja_output.txt." -ninja install 2> "$logs_dir/ninja_error.txt" 1> "$logs_dir/ninja_output.txt" +if $verbose; then + ninja install +else + echo "The progress of the build is being logged to $logs_dir/ninja_output.txt." + ninja install 2> "$logs_dir/ninja_error.txt" 1> "$logs_dir/ninja_output.txt" +fi + if [ ! "$?" -eq "0" ]; then - echo "Build failed. Please check the files in the $logs_dir directory." + echo "Build failed. Please check the console output or the files in the $logs_dir directory." cd "$working_dir" && if $is_sourced; then return 1; else exit 1; fi else cp "$repo_root/LICENSE" "$CUDAQ_INSTALL_PREFIX/LICENSE" cp "$repo_root/NOTICE" "$CUDAQ_INSTALL_PREFIX/NOTICE" + + # The CUDA Quantum installation as built above is not fully self-contained; + # It will, in particular, break if the LLVM tools are not in the expected location. + # We save any system configurations that are assumed by the installation with the installation. 
+ echo "" > "$CUDAQ_INSTALL_PREFIX/build_config.xml" + echo "$LLVM_INSTALL_PREFIX" >> "$CUDAQ_INSTALL_PREFIX/build_config.xml" + echo "$CUQUANTUM_INSTALL_PREFIX" >> "$CUDAQ_INSTALL_PREFIX/build_config.xml" + echo "" >> "$CUDAQ_INSTALL_PREFIX/build_config.xml" + cd "$working_dir" && echo "Installed CUDA Quantum in directory: $CUDAQ_INSTALL_PREFIX" fi diff --git a/scripts/build_docs.sh b/scripts/build_docs.sh index 66fc59ea36..8b2adfe3f5 100644 --- a/scripts/build_docs.sh +++ b/scripts/build_docs.sh @@ -154,11 +154,14 @@ cp -r "$doxygen_output_dir" sphinx/_doxygen/ # cp -r "$dialect_output_dir" sphinx/_mdgen/ # uncomment once we use the content from those files rm -rf "$sphinx_output_dir" -sphinx-build -n -W --keep-going -b html sphinx "$sphinx_output_dir" -j auto 2> "$logs_dir/sphinx_error.txt" 1> "$logs_dir/sphinx_output.txt" +sphinx-build -v -n -W --keep-going -b html sphinx "$sphinx_output_dir" -j auto 2> "$logs_dir/sphinx_error.txt" 1> "$logs_dir/sphinx_output.txt" sphinx_exit_code=$? if [ ! "$sphinx_exit_code" -eq "0" ]; then echo "Failed to generate documentation using sphinx-build." echo "Sphinx exit code: $sphinx_exit_code" + echo "======== logs ========" + cat "$logs_dir/sphinx_output.txt" "$logs_dir/sphinx_error.txt" + echo "======================" docs_exit_code=12 fi @@ -168,6 +171,7 @@ rm -rf sphinx/_mdgen/ mkdir -p "$DOCS_INSTALL_PREFIX" if [ "$docs_exit_code" -eq "0" ]; then cp -r "$sphinx_output_dir"/* "$DOCS_INSTALL_PREFIX" + touch "$DOCS_INSTALL_PREFIX/.nojekyll" echo "Documentation was generated in $DOCS_INSTALL_PREFIX." 
echo "To browse it, open this url in a browser: file://$DOCS_INSTALL_PREFIX/index.html" else diff --git a/scripts/build_llvm.sh b/scripts/build_llvm.sh index 05dd7e9624..4cd38973e4 100644 --- a/scripts/build_llvm.sh +++ b/scripts/build_llvm.sh @@ -25,10 +25,11 @@ LLVM_INSTALL_PREFIX=${LLVM_INSTALL_PREFIX:-$HOME/.llvm} (return 0 2>/dev/null) && is_sourced=true || is_sourced=false build_configuration=Release llvm_projects="clang;lld;mlir" +verbose=false __optind__=$OPTIND OPTIND=1 -while getopts ":c:s:p:" opt; do +while getopts ":c:s:p:v" opt; do case $opt in c) build_configuration="$OPTARG" ;; @@ -36,6 +37,8 @@ while getopts ":c:s:p:" opt; do ;; p) llvm_projects="$OPTARG" ;; + v) verbose=true + ;; \?) echo "Invalid command line option -$OPTARG" >&2 if $is_sourced; then return 1; else exit 1; fi ;; @@ -48,7 +51,7 @@ working_dir=`pwd` if [ "$llvm_source" = "" ]; then cd $(git rev-parse --show-toplevel) echo "Cloning LLVM submodule..." - git submodule update --init --recursive --recommend-shallow tpls/llvm + git submodule update --init --recursive --recommend-shallow --single-branch tpls/llvm llvm_source=tpls/llvm fi @@ -60,22 +63,68 @@ mkdir -p "$LLVM_INSTALL_PREFIX" mkdir -p "$llvm_source/build" && cd "$llvm_source/build" && rm -rf * mkdir -p logs && rm -rf logs/* -# Generate CMake files +# Specify which components we need to keep the size of the LLVM build down echo "Preparing LLVM build..." 
-cmake -G Ninja ../llvm \ +projects=(`echo $llvm_projects | tr ';' ' '`) +llvm_projects=`printf "%s;" "${projects[@]}"` +if [ -z "${llvm_projects##*clang;*}" ]; then + echo "- including Clang components" + llvm_components+="clang;clang-format;clang-cmake-exports;clang-headers;clang-libraries;clang-resource-headers;" + projects=("${projects[@]/clang}") +fi +if [ -z "${llvm_projects##*mlir;*}" ]; then + echo "- including MLIR components" + llvm_components+="mlir-cmake-exports;mlir-headers;mlir-libraries;mlir-tblgen;" + projects=("${projects[@]/mlir}") +fi +if [ -z "${llvm_projects##*lld;*}" ]; then + echo "- including LLD components" + llvm_components+="lld;" + projects=("${projects[@]/lld}") +fi +echo "- including general tools and components" +llvm_components+="cmake-exports;llvm-headers;llvm-libraries;" +llvm_components+="llvm-config;llvm-ar;llc;FileCheck;count;not;" + +if [ "$(echo ${projects[*]} | xargs)" != "" ]; then + echo "- including additional projects "$(echo "${projects[*]}" | xargs | tr ' ' ',') + unset llvm_components + install_target=install +else + install_target=install-distribution-stripped +fi + +# Generate CMake files +cmake_args="-G Ninja ../llvm \ -DLLVM_TARGETS_TO_BUILD="host" \ + -DCMAKE_BUILD_TYPE=$build_configuration \ -DCMAKE_INSTALL_PREFIX="$LLVM_INSTALL_PREFIX" \ -DLLVM_ENABLE_PROJECTS="$llvm_projects" \ - -DCMAKE_BUILD_TYPE=$build_configuration \ + -DLLVM_DISTRIBUTION_COMPONENTS=$llvm_components \ -DLLVM_ENABLE_ASSERTIONS=ON \ + -DLLVM_OPTIMIZED_TABLEGEN=ON \ -DCMAKE_EXPORT_COMPILE_COMMANDS=ON \ - -DLLVM_INSTALL_UTILS=TRUE 2> logs/cmake_error.txt 1> logs/cmake_output.txt + -DLLVM_BUILD_EXAMPLES=OFF \ + -DLLVM_ENABLE_OCAMLDOC=OFF \ + -DLLVM_ENABLE_BINDINGS=OFF \ + -DLLVM_INSTALL_UTILS=ON" +if $verbose; then + cmake $cmake_args +else + cmake $cmake_args 2> logs/cmake_error.txt 1> logs/cmake_output.txt +fi # Build and install clang in a folder echo "Building LLVM with configuration $build_configuration..." 
-echo "The progress of the build is being logged to `pwd`/logs/ninja_output.txt." -ninja install 2> logs/ninja_error.txt 1> logs/ninja_output.txt -status=$? +if $verbose; then + ninja $install_target + status=$? +else + echo "The progress of the build is being logged to `pwd`/logs/ninja_output.txt." + ninja $install_target 2> logs/ninja_error.txt 1> logs/ninja_output.txt + status=$? +fi + if [ "$status" = "" ] || [ ! "$status" -eq "0" ]; then echo "Build failed. Please check the files in the `pwd`/logs directory." cd "$working_dir" && if $is_sourced; then return 1; else exit 1; fi diff --git a/scripts/install_toolchain.sh b/scripts/install_toolchain.sh index 5008d2ea76..fab4b3fb90 100644 --- a/scripts/install_toolchain.sh +++ b/scripts/install_toolchain.sh @@ -16,14 +16,14 @@ # -or- # source scripts/install_toolchain.sh -t -e path/to/dir # -# where cam be either gcc11, clang15, or llvm. +# where can be either llvm, clang16, clang15, gcc12, or gcc11. # The -e option creates a init_command.sh file in the given directory that # can be used to reinstall the same toolchain if needed. 
(return 0 2>/dev/null) && is_sourced=true || is_sourced=false __optind__=$OPTIND OPTIND=1 -toolchain=gcc11 +toolchain=gcc12 while getopts ":t:e:" opt; do case $opt in t) toolchain="$OPTARG" @@ -46,26 +46,47 @@ function temp_install_if_command_unknown { if [ "$toolchain" = "gcc11" ] ; then - apt-get update && apt-get install -y --no-install-recommends gcc g++ + apt-get update && apt-get install -y --no-install-recommends gcc-11 g++-11 CC=/usr/bin/gcc-11 && CXX=/usr/bin/g++-11 +elif [ "$toolchain" = "gcc12" ] ; then + + apt-get update && apt-get install -y --no-install-recommends gcc-12 g++-12 + CC=/usr/bin/gcc-12 && CXX=/usr/bin/g++-12 + elif [ "$toolchain" = "clang15" ]; then - apt-get update && apt-get install -y --no-install-recommends wget gnupg - wget -O - https://apt.llvm.org/llvm-snapshot.gpg.key | apt-key add - + apt-get update + temp_install_if_command_unknown wget wget + temp_install_if_command_unknown gpg gnupg + temp_install_if_command_unknown add-apt-repository software-properties-common + + wget -qO- https://apt.llvm.org/llvm-snapshot.gpg.key | tee /etc/apt/trusted.gpg.d/apt.llvm.org.asc add-apt-repository "deb http://apt.llvm.org/jammy/ llvm-toolchain-jammy-15 main" - apt-get update && apt-get install -y --no-install-recommends llvm-15 clang-15 terminfo + apt-get update && apt-get install -y --no-install-recommends clang-15 CC=/usr/lib/llvm-15/bin/clang && CXX=/usr/lib/llvm-15/bin/clang++ +elif [ "$toolchain" = "clang16" ]; then + + apt-get update + temp_install_if_command_unknown wget wget + temp_install_if_command_unknown gpg gnupg + temp_install_if_command_unknown add-apt-repository software-properties-common + + wget -qO- https://apt.llvm.org/llvm-snapshot.gpg.key | tee /etc/apt/trusted.gpg.d/apt.llvm.org.asc + add-apt-repository "deb http://apt.llvm.org/jammy/ llvm-toolchain-jammy-16 main" + apt-get update && apt-get install -y --no-install-recommends clang-16 + CC=/usr/lib/llvm-16/bin/clang && CXX=/usr/lib/llvm-16/bin/clang++ + elif [ 
"$toolchain" = "llvm" ]; then # We build the llvm toolchain against libstdc++ for now rather than building the runtime libraries as well. - apt-get update && apt-get install -y --no-install-recommends libstdc++-11-dev + apt-get update && apt-get install -y --no-install-recommends libstdc++-12-dev LLVM_INSTALL_PREFIX=${LLVM_INSTALL_PREFIX:-/opt/llvm} if [ ! -f "$LLVM_INSTALL_PREFIX/bin/clang" ] || [ ! -f "$LLVM_INSTALL_PREFIX/bin/clang++" ] || [ ! -f "$LLVM_INSTALL_PREFIX/bin/ld.lld" ]; then - this_file_dir=`dirname "$(readlink -f "${BASH_SOURCE[0]}")"` # alternatively, we could pass the script path instead of llvm-toolchain + this_file_dir=`dirname "$(readlink -f "${BASH_SOURCE[0]}")"` if [ ! -d "$LLVM_SOURCE" ]; then mkdir -p "$HOME/.llvm_project" llvm_tmp_dir=`mktemp -d -p "$HOME/.llvm_project"` && LLVM_SOURCE="$llvm_tmp_dir" @@ -73,19 +94,24 @@ elif [ "$toolchain" = "llvm" ]; then git clone -b main --single-branch --depth 1 https://github.com/llvm/llvm-project "$LLVM_SOURCE" fi + # We use the clang to bootstrap the llvm build since it is faster than gcc. 
+ temp_install_if_command_unknown wget wget + temp_install_if_command_unknown gpg gnupg + temp_install_if_command_unknown add-apt-repository software-properties-common + wget -qO- https://apt.llvm.org/llvm-snapshot.gpg.key | tee /etc/apt/trusted.gpg.d/apt.llvm.org.asc + add-apt-repository "deb http://apt.llvm.org/jammy/ llvm-toolchain-jammy-16 main" + apt-get update && temp_install_if_command_unknown clang-16 clang-16 + temp_install_if_command_unknown ninja ninja-build temp_install_if_command_unknown cmake cmake - temp_install_if_command_unknown gcc gcc - temp_install_if_command_unknown g++ g++ - LLVM_INSTALL_PREFIX="$LLVM_INSTALL_PREFIX" bash "$this_file_dir/build_llvm.sh" -s "$LLVM_SOURCE" -c Release -p "clang;lld" + LLVM_INSTALL_PREFIX="$LLVM_INSTALL_PREFIX" \ + CC=/usr/lib/llvm-16/bin/clang CXX=/usr/lib/llvm-16/bin/clang++ \ + bash "$this_file_dir/build_llvm.sh" -s "$LLVM_SOURCE" -c Release -p "clang;lld" if [ -d "$llvm_tmp_dir" ]; then + echo "The build logs have been moved to $LLVM_INSTALL_PREFIX/logs." + mkdir -p "$LLVM_INSTALL_PREFIX/logs" && mv "$llvm_tmp_dir/build/logs"/* "$LLVM_INSTALL_PREFIX/logs/" rm -rf "$llvm_tmp_dir" fi - - if [ "$APT_UNINSTALL" != "" ]; then - echo "Uninstalling packages used for bootstrapping: $APT_UNINSTALL" - apt-get remove -y $APT_UNINSTALL && apt-get autoremove -y - fi fi CC="$LLVM_INSTALL_PREFIX/bin/clang" && CXX="$LLVM_INSTALL_PREFIX/bin/clang++" @@ -104,11 +130,16 @@ elif [ "$toolchain" = "llvm" ]; then else echo "The requested toolchain cannot be installed by this script." - echo "Supported toolchains: llvm, clang15, gcc11." + echo "Supported toolchains: llvm, clang16, clang15, gcc12, gcc11." 
if $is_sourced; then return 1; else exit 1; fi fi +if [ "$APT_UNINSTALL" != "" ]; then + echo "Uninstalling packages used for bootstrapping: $APT_UNINSTALL" + apt-get remove -y $APT_UNINSTALL && apt-get autoremove -y +fi + if [ -x "$(command -v "$CC")" ] && [ -x "$(command -v "$CXX")" ]; then apt-get autoremove -y --purge && apt-get clean && rm -rf /var/lib/apt/lists/* export CC="$CC" && export CXX="$CXX" @@ -123,5 +154,6 @@ if [ -x "$(command -v "$CC")" ] && [ -x "$(command -v "$CXX")" ]; then fi else echo "Failed to install $toolchain toolchain." + unset CC && unset CXX if $is_sourced; then return 10; else exit 10; fi fi diff --git a/scripts/validate_container.sh b/scripts/validate_container.sh index 6ec6bb8d54..792721a4d0 100644 --- a/scripts/validate_container.sh +++ b/scripts/validate_container.sh @@ -18,6 +18,48 @@ failed=0 skipped=0 samples=0 +requested_backends=`\ + echo "default" + for target in $@; \ + do echo "$target"; \ + done` + +available_backends=`\ + echo "default" + for file in $(ls $CUDA_QUANTUM_PATH/platforms/*.config); \ + do basename $file | cut -d "." -f 1; \ + done` + +missing_backend=false +if [ $# -eq 0 ] +then + requested_backends="$available_backends" +else + for t in $requested_backends + do + echo $available_backends | grep -w -q $t + if [ ! $? -eq 0 ]; + then + echo "No backend configuration found for $t." + missing_backend=true + fi + done +fi + +echo +echo "Detected backends:" +echo "$available_backends" +echo +echo "Testing backends:" +echo "$requested_backends" +echo + +if $missing_backend; +then + echo "Abort due to missing backend configuration." 
+ exit 1 +fi + echo "=============================" echo "== Python Tests ==" echo "=============================" @@ -51,28 +93,31 @@ do echo "Testing $filename:" echo "Source: $ex" let "samples+=1" - for t in "" "dm" "cuquantum" "cuquantum_mgpu" "tensornet"; + for t in $requested_backends do - if [[ "$ex" == *"cuquantum"* ]] && [ "$t" = "" ]; + if [[ "$ex" == *"cuquantum"* ]]; then let "skipped+=1" - if [ "$t" = "" ]; then - echo "Skipping default target."; - else - echo "Skipping target $t."; - fi - elif [[ "$ex" != *"nois"* ]] && [ "$t" = "dm" ]; + echo "Skipping $t target."; + + elif [[ "$ex" != *"nois"* ]] && [ "$t" == "density-matrix-cpu" ]; then let "skipped+=1" - echo "Skipping target dm." + echo "Skipping $t target." + else - if [ "$t" = "" ]; then - echo "Testing on default target..." - else - echo "Testing on target $t..." + echo "Testing on $t target..." + if [ "$t" == "default" ]; then + if [[ "$ex" == *"mid_circuit"* ]]; + then + nvq++ --enable-mlir $ex + else + nvq++ $ex + fi + else + nvq++ $ex --qpu $t fi - nvq++ $ex -qpu $t - ./a.out 1> /dev/null + ./a.out &> /dev/null status=$? echo "Exited with code $status" if [ "$status" -eq "0" ]; then @@ -80,7 +125,7 @@ do else let "failed+=1" fi - rm a.out + rm a.out &> /dev/null fi done echo "=============================" @@ -92,4 +137,4 @@ echo "Total passed: $passed" echo "Total failed: $failed" echo "Skipped: $skipped" echo "=============================" -if [ "$failed" -eq "0" ]; then exit 0; else exit 1; fi +if [ "$failed" -eq "0" ]; then exit 0; else exit 10; fi \ No newline at end of file diff --git a/test/AST-Quake/auto_kernel-1.cpp b/test/AST-Quake/auto_kernel-1.cpp deleted file mode 100644 index f86a29423b..0000000000 --- a/test/AST-Quake/auto_kernel-1.cpp +++ /dev/null @@ -1,35 +0,0 @@ -/*************************************************************** -*- C++ -*- *** - * Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. * - * All rights reserved. 
* - * * - * This source code and the accompanying materials are made available under * - * the terms of the Apache License 2.0 which accompanies this distribution. * - *******************************************************************************/ - -// RUN: cudaq-quake %s | FileCheck %s - -// Simple test using a type inferenced return value type. - -#include -#include - -struct ak1 { - auto operator()(int i) __qpu__ { - cudaq::qreg q(2); - auto vec = mz(q); - return vec[0]; - } -}; - -// CHECK-LABEL: func.func @__nvqpp__mlirgen__ak1 -// CHECK-SAME: (%[[VAL_0:.*]]: i32) -> i1 attributes { -// CHECK: %[[VAL_13:.*]] = quake.mz(%{{.*}} : !quake.qvec) : !cc.stdvec -// CHECK: %[[VAL_14:.*]] = arith.constant 0 : i32 -// CHECK: %[[VAL_15:.*]] = arith.extsi %[[VAL_14]] : i32 to i64 -// CHECK: %[[VAL_16:.*]] = cc.stdvec_data %[[VAL_13]] : (!cc.stdvec) -> !llvm.ptr -// CHECK: %[[VAL_17:.*]] = llvm.getelementptr %[[VAL_16]][%[[VAL_15]]] : (!llvm.ptr, i64) -> !llvm.ptr -// CHECK: %[[VAL_18:.*]] = llvm.load %[[VAL_17]] : !llvm.ptr -// CHECK: return %[[VAL_18]] : i1 -// CHECK: } -// CHECK-NOT: func.func private @_ZNKSt14_Bit_referencecvbEv() -> i1 - diff --git a/test/AST-Quake/measure_bell.cpp b/test/AST-Quake/measure_bell.cpp deleted file mode 100644 index 483cf799b3..0000000000 --- a/test/AST-Quake/measure_bell.cpp +++ /dev/null @@ -1,265 +0,0 @@ -/*************************************************************** -*- C++ -*- *** - * Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. * - * All rights reserved. * - * * - * This source code and the accompanying materials are made available under * - * the terms of the Apache License 2.0 which accompanies this distribution. 
* - ******************************************************************************/ - -// RUN: cudaq-quake %s | FileCheck %s - -#include - -struct bell { - void operator()(int num_iters) __qpu__ { - cudaq::qreg q(2); - int n = 0; - for (int i = 0; i < num_iters; i++) { - h(q[0]); - x(q[0], q[1]); - auto results = mz(q); - bool r0 = results[0]; - if (r0 == results[1]) { - n++; - } - } - } -}; - -int main() { bell{}(100); } - -// CHECK-LABEL: func.func @__nvqpp__mlirgen__bell( -// CHECK-SAME: %[[VAL_0:.*]]: i32) -// CHECK: %[[VAL_1:.*]] = memref.alloca() : memref -// CHECK: memref.store %[[VAL_0]], %[[VAL_1]][] : memref -// CHECK: %[[VAL_2:.*]] = arith.constant 2 : i32 -// CHECK: %[[VAL_3:.*]] = arith.extsi %[[VAL_2]] : i32 to i64 -// CHECK: %[[VAL_4:.*]] = quake.alloca(%[[VAL_3]] : i64) : !quake.qvec -// CHECK: %[[VAL_5:.*]] = arith.constant 0 : i32 -// CHECK: %[[VAL_6:.*]] = memref.alloca() : memref -// CHECK: memref.store %[[VAL_5]], %[[VAL_6]][] : memref -// CHECK: cc.scope { -// CHECK: %[[VAL_7:.*]] = arith.constant 0 : i32 -// CHECK: %[[VAL_8:.*]] = memref.alloca() : memref -// CHECK: memref.store %[[VAL_7]], %[[VAL_8]][] : memref -// CHECK: cc.loop while { -// CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_8]][] : memref -// CHECK: %[[VAL_10:.*]] = memref.load %[[VAL_1]][] : memref -// CHECK: %[[VAL_11:.*]] = arith.cmpi slt, %[[VAL_9]], %[[VAL_10]] : i32 -// CHECK: cc.condition %[[VAL_11]] -// CHECK: } do { -// CHECK: cc.scope { -// CHECK: %[[VAL_12:.*]] = arith.constant 0 : i32 -// CHECK: %[[VAL_13:.*]] = arith.extsi %[[VAL_12]] : i32 to i64 -// CHECK: %[[VAL_14:.*]] = quake.qextract %[[VAL_4]]{{\[}}%[[VAL_13]]] : !quake.qvec[i64] -> !quake.qref -// CHECK: quake.h (%[[VAL_14]]) -// CHECK: %[[VAL_15:.*]] = arith.constant 0 : i32 -// CHECK: %[[VAL_16:.*]] = arith.extsi %[[VAL_15]] : i32 to i64 -// CHECK: %[[VAL_17:.*]] = quake.qextract %[[VAL_4]]{{\[}}%[[VAL_16]]] : !quake.qvec[i64] -> !quake.qref -// CHECK: %[[VAL_18:.*]] = arith.constant 1 : i32 -// CHECK: 
%[[VAL_19:.*]] = arith.extsi %[[VAL_18]] : i32 to i64 -// CHECK: %[[VAL_20:.*]] = quake.qextract %[[VAL_4]]{{\[}}%[[VAL_19]]] : !quake.qvec[i64] -> !quake.qref -// CHECK: quake.x {{\[}}%[[VAL_17]] : !quake.qref] (%[[VAL_20]]) -// CHECK: %[[VAL_21:.*]] = quake.mz(%[[VAL_4]] : !quake.qvec) : !cc.stdvec -// CHECK: %[[VAL_22:.*]] = arith.constant 0 : i32 -// CHECK: %[[VAL_23:.*]] = arith.extsi %[[VAL_22]] : i32 to i64 -// CHECK: %[[VAL_24:.*]] = cc.stdvec_data %[[VAL_21]] : (!cc.stdvec) -> !llvm.ptr -// CHECK: %[[VAL_25:.*]] = llvm.getelementptr %[[VAL_24]]{{\[}}%[[VAL_23]]] : (!llvm.ptr, i64) -> !llvm.ptr -// CHECK: %[[VAL_26:.*]] = llvm.load %[[VAL_25]] : !llvm.ptr -// CHECK: %[[VAL_27:.*]] = memref.alloca() : memref -// CHECK: memref.store %[[VAL_26]], %[[VAL_27]][] : memref -// CHECK: %[[VAL_28:.*]] = memref.load %[[VAL_27]][] : memref -// CHECK: %[[VAL_29:.*]] = arith.extui %[[VAL_28]] : i1 to i32 -// CHECK: %[[VAL_30:.*]] = arith.constant 1 : i32 -// CHECK: %[[VAL_31:.*]] = arith.extsi %[[VAL_30]] : i32 to i64 -// CHECK: %[[VAL_32:.*]] = cc.stdvec_data %[[VAL_21]] : (!cc.stdvec) -> !llvm.ptr -// CHECK: %[[VAL_33:.*]] = llvm.getelementptr %[[VAL_32]]{{\[}}%[[VAL_31]]] : (!llvm.ptr, i64) -> !llvm.ptr -// CHECK: %[[VAL_34:.*]] = llvm.load %[[VAL_33]] : !llvm.ptr -// CHECK: %[[VAL_35:.*]] = arith.extui %[[VAL_34]] : i1 to i32 -// CHECK: %[[VAL_36:.*]] = arith.cmpi eq, %[[VAL_29]], %[[VAL_35]] : i32 -// CHECK: cc.if(%[[VAL_36]]) { -// CHECK: cc.scope { -// CHECK: %[[VAL_37:.*]] = memref.load %[[VAL_6]][] : memref -// CHECK: %[[VAL_38:.*]] = arith.constant 1 : i32 -// CHECK: %[[VAL_39:.*]] = arith.addi %[[VAL_37]], %[[VAL_38]] : i32 -// CHECK: memref.store %[[VAL_39]], %[[VAL_6]][] : memref -// CHECK: } -// CHECK: } -// CHECK: } -// CHECK: cc.continue -// CHECK: } step { -// CHECK: %[[VAL_40:.*]] = memref.load %[[VAL_8]][] : memref -// CHECK: %[[VAL_41:.*]] = arith.constant 1 : i32 -// CHECK: %[[VAL_42:.*]] = arith.addi %[[VAL_40]], %[[VAL_41]] : i32 -// CHECK: 
memref.store %[[VAL_42]], %[[VAL_8]][] : memref -// CHECK: } -// CHECK: } -// CHECK: return -// CHECK: } - -struct libertybell { - void operator()(int num_iters) __qpu__ { - cudaq::qreg q(2); - int n = 0; - for (int i = 0; i < num_iters; i++) { - h(q[0]); - x(q[0], q[1]); - auto results = mz(q); - if (results[0] == results[1]) { - n++; - } - } - } -}; - -struct tinkerbell { - void operator()(int num_iters) __qpu__ { - cudaq::qreg q(2); - int n = 0; - for (int i = 0; i < num_iters; i++) { - h(q[0]); - x(q[0], q[1]); - auto results = mz(q); - auto r0 = results[0]; - auto r1 = results[1]; - if (r0 == r1) { - n++; - } - } - } -}; - -// CHECK-LABEL: func.func @__nvqpp__mlirgen__libertybell( -// CHECK-SAME: %[[VAL_0:.*]]: i32) -// CHECK: %[[VAL_1:.*]] = memref.alloca() : memref -// CHECK: memref.store %[[VAL_0]], %[[VAL_1]][] : memref -// CHECK: %[[VAL_2:.*]] = arith.constant 2 : i32 -// CHECK: %[[VAL_3:.*]] = arith.extsi %[[VAL_2]] : i32 to i64 -// CHECK: %[[VAL_4:.*]] = quake.alloca(%[[VAL_3]] : i64) : !quake.qvec -// CHECK: %[[VAL_5:.*]] = arith.constant 0 : i32 -// CHECK: %[[VAL_6:.*]] = memref.alloca() : memref -// CHECK: memref.store %[[VAL_5]], %[[VAL_6]][] : memref -// CHECK: cc.scope { -// CHECK: %[[VAL_7:.*]] = arith.constant 0 : i32 -// CHECK: %[[VAL_8:.*]] = memref.alloca() : memref -// CHECK: memref.store %[[VAL_7]], %[[VAL_8]][] : memref -// CHECK: cc.loop while { -// CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_8]][] : memref -// CHECK: %[[VAL_10:.*]] = memref.load %[[VAL_1]][] : memref -// CHECK: %[[VAL_11:.*]] = arith.cmpi slt, %[[VAL_9]], %[[VAL_10]] : i32 -// CHECK: cc.condition %[[VAL_11]] -// CHECK: } do { -// CHECK: cc.scope { -// CHECK: %[[VAL_12:.*]] = arith.constant 0 : i32 -// CHECK: %[[VAL_13:.*]] = arith.extsi %[[VAL_12]] : i32 to i64 -// CHECK: %[[VAL_14:.*]] = quake.qextract %[[VAL_4]]{{\[}}%[[VAL_13]]] : !quake.qvec[i64] -> !quake.qref -// CHECK: quake.h (%[[VAL_14]]) -// CHECK: %[[VAL_15:.*]] = arith.constant 0 : i32 -// CHECK: %[[VAL_16:.*]] 
= arith.extsi %[[VAL_15]] : i32 to i64 -// CHECK: %[[VAL_17:.*]] = quake.qextract %[[VAL_4]]{{\[}}%[[VAL_16]]] : !quake.qvec[i64] -> !quake.qref -// CHECK: %[[VAL_18:.*]] = arith.constant 1 : i32 -// CHECK: %[[VAL_19:.*]] = arith.extsi %[[VAL_18]] : i32 to i64 -// CHECK: %[[VAL_20:.*]] = quake.qextract %[[VAL_4]]{{\[}}%[[VAL_19]]] : !quake.qvec[i64] -> !quake.qref -// CHECK: quake.x {{\[}}%[[VAL_17]] : !quake.qref] (%[[VAL_20]]) -// CHECK: %[[VAL_21:.*]] = quake.mz(%[[VAL_4]] : !quake.qvec) : !cc.stdvec -// CHECK: %[[VAL_22:.*]] = arith.constant 0 : i32 -// CHECK: %[[VAL_23:.*]] = arith.extsi %[[VAL_22]] : i32 to i64 -// CHECK: %[[VAL_24:.*]] = cc.stdvec_data %[[VAL_21]] : (!cc.stdvec) -> !llvm.ptr -// CHECK: %[[VAL_25:.*]] = llvm.getelementptr %[[VAL_24]]{{\[}}%[[VAL_23]]] : (!llvm.ptr, i64) -> !llvm.ptr -// CHECK: %[[VAL_26:.*]] = llvm.load %[[VAL_25]] : !llvm.ptr -// CHECK: %[[VAL_27:.*]] = arith.constant 1 : i32 -// CHECK: %[[VAL_28:.*]] = arith.extsi %[[VAL_27]] : i32 to i64 -// CHECK: %[[VAL_29:.*]] = cc.stdvec_data %[[VAL_21]] : (!cc.stdvec) -> !llvm.ptr -// CHECK: %[[VAL_30:.*]] = llvm.getelementptr %[[VAL_29]]{{\[}}%[[VAL_28]]] : (!llvm.ptr, i64) -> !llvm.ptr -// CHECK: %[[VAL_31:.*]] = llvm.load %[[VAL_30]] : !llvm.ptr -// CHECK: %[[VAL_32:.*]] = arith.cmpi eq, %[[VAL_31]], %[[VAL_26]] : i1 -// CHECK: cc.if(%[[VAL_32]]) { -// CHECK: cc.scope { -// CHECK: %[[VAL_33:.*]] = memref.load %[[VAL_6]][] : memref -// CHECK: %[[VAL_34:.*]] = arith.constant 1 : i32 -// CHECK: %[[VAL_35:.*]] = arith.addi %[[VAL_33]], %[[VAL_34]] : i32 -// CHECK: memref.store %[[VAL_35]], %[[VAL_6]][] : memref -// CHECK: } -// CHECK: } -// CHECK: } -// CHECK: cc.continue -// CHECK: } step { -// CHECK: %[[VAL_36:.*]] = memref.load %[[VAL_8]][] : memref -// CHECK: %[[VAL_37:.*]] = arith.constant 1 : i32 -// CHECK: %[[VAL_38:.*]] = arith.addi %[[VAL_36]], %[[VAL_37]] : i32 -// CHECK: memref.store %[[VAL_38]], %[[VAL_8]][] : memref -// CHECK: } -// CHECK: } -// CHECK: return -// CHECK: } 
- -// CHECK-LABEL: func.func @__nvqpp__mlirgen__tinkerbell( -// CHECK-SAME: %[[VAL_0:.*]]: i32) attributes -// CHECK: %[[VAL_1:.*]] = memref.alloca() : memref -// CHECK: memref.store %[[VAL_0]], %[[VAL_1]][] : memref -// CHECK: %[[VAL_2:.*]] = arith.constant 2 : i32 -// CHECK: %[[VAL_3:.*]] = arith.extsi %[[VAL_2]] : i32 to i64 -// CHECK: %[[VAL_4:.*]] = quake.alloca(%[[VAL_3]] : i64) : !quake.qvec -// CHECK: %[[VAL_5:.*]] = arith.constant 0 : i32 -// CHECK: %[[VAL_6:.*]] = memref.alloca() : memref -// CHECK: memref.store %[[VAL_5]], %[[VAL_6]][] : memref -// CHECK: cc.scope { -// CHECK: %[[VAL_7:.*]] = arith.constant 0 : i32 -// CHECK: %[[VAL_8:.*]] = memref.alloca() : memref -// CHECK: memref.store %[[VAL_7]], %[[VAL_8]][] : memref -// CHECK: cc.loop while { -// CHECK: %[[VAL_9:.*]] = memref.load %[[VAL_8]][] : memref -// CHECK: %[[VAL_10:.*]] = memref.load %[[VAL_1]][] : memref -// CHECK: %[[VAL_11:.*]] = arith.cmpi slt, %[[VAL_9]], %[[VAL_10]] : i32 -// CHECK: cc.condition %[[VAL_11]] -// CHECK: } do { -// CHECK: cc.scope { -// CHECK: %[[VAL_12:.*]] = arith.constant 0 : i32 -// CHECK: %[[VAL_13:.*]] = arith.extsi %[[VAL_12]] : i32 to i64 -// CHECK: %[[VAL_14:.*]] = quake.qextract %[[VAL_4]]{{\[}}%[[VAL_13]]] : !quake.qvec[i64] -> !quake.qref -// CHECK: quake.h (%[[VAL_14]]) -// CHECK: %[[VAL_15:.*]] = arith.constant 0 : i32 -// CHECK: %[[VAL_16:.*]] = arith.extsi %[[VAL_15]] : i32 to i64 -// CHECK: %[[VAL_17:.*]] = quake.qextract %[[VAL_4]]{{\[}}%[[VAL_16]]] : !quake.qvec[i64] -> !quake.qref -// CHECK: %[[VAL_18:.*]] = arith.constant 1 : i32 -// CHECK: %[[VAL_19:.*]] = arith.extsi %[[VAL_18]] : i32 to i64 -// CHECK: %[[VAL_20:.*]] = quake.qextract %[[VAL_4]]{{\[}}%[[VAL_19]]] : !quake.qvec[i64] -> !quake.qref -// CHECK: quake.x {{\[}}%[[VAL_17]] : !quake.qref] (%[[VAL_20]]) -// CHECK: %[[VAL_21:.*]] = quake.mz(%[[VAL_4]] : !quake.qvec) : !cc.stdvec -// CHECK: %[[VAL_22:.*]] = arith.constant 0 : i32 -// CHECK: %[[VAL_23:.*]] = arith.extsi %[[VAL_22]] : i32 to 
i64 -// CHECK: %[[VAL_24:.*]] = cc.stdvec_data %[[VAL_21]] : (!cc.stdvec) -> !llvm.ptr -// CHECK: %[[VAL_25:.*]] = llvm.getelementptr %[[VAL_24]]{{\[}}%[[VAL_23]]] : (!llvm.ptr, i64) -> !llvm.ptr -// CHECK: %[[VAL_26:.*]] = llvm.load %[[VAL_25]] : !llvm.ptr -// CHECK: %[[VAL_27:.*]] = memref.alloca() : memref -// CHECK: memref.store %[[VAL_26]], %[[VAL_27]][] : memref -// CHECK: %[[VAL_28:.*]] = arith.constant 1 : i32 -// CHECK: %[[VAL_29:.*]] = arith.extsi %[[VAL_28]] : i32 to i64 -// CHECK: %[[VAL_30:.*]] = cc.stdvec_data %[[VAL_21]] : (!cc.stdvec) -> !llvm.ptr -// CHECK: %[[VAL_31:.*]] = llvm.getelementptr %[[VAL_30]]{{\[}}%[[VAL_29]]] : (!llvm.ptr, i64) -> !llvm.ptr -// CHECK: %[[VAL_32:.*]] = llvm.load %[[VAL_31]] : !llvm.ptr -// CHECK: %[[VAL_33:.*]] = memref.alloca() : memref -// CHECK: memref.store %[[VAL_32]], %[[VAL_33]][] : memref -// CHECK: %[[VAL_34:.*]] = memref.load %[[VAL_33]][] : memref -// CHECK: %[[VAL_35:.*]] = memref.load %[[VAL_27]][] : memref -// CHECK: %[[VAL_36:.*]] = arith.cmpi eq, %[[VAL_34]], %[[VAL_35]] : i1 -// CHECK: cc.if(%[[VAL_36]]) { -// CHECK: cc.scope { -// CHECK: %[[VAL_37:.*]] = memref.load %[[VAL_6]][] : memref -// CHECK: %[[VAL_38:.*]] = arith.constant 1 : i32 -// CHECK: %[[VAL_39:.*]] = arith.addi %[[VAL_37]], %[[VAL_38]] : i32 -// CHECK: memref.store %[[VAL_39]], %[[VAL_6]][] : memref -// CHECK: } -// CHECK: } -// CHECK: } -// CHECK: cc.continue -// CHECK: } step { -// CHECK: %[[VAL_40:.*]] = memref.load %[[VAL_8]][] : memref -// CHECK: %[[VAL_41:.*]] = arith.constant 1 : i32 -// CHECK: %[[VAL_42:.*]] = arith.addi %[[VAL_40]], %[[VAL_41]] : i32 -// CHECK: memref.store %[[VAL_42]], %[[VAL_8]][] : memref -// CHECK: } -// CHECK: } -// CHECK: return -// CHECK: } - diff --git a/test/AST-Quake/vector_bool.cpp b/test/AST-Quake/vector_bool.cpp deleted file mode 100644 index 409e388fad..0000000000 --- a/test/AST-Quake/vector_bool.cpp +++ /dev/null @@ -1,34 +0,0 @@ -/*************************************************************** 
-*- C++ -*- *** - * Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. * - * All rights reserved. * - * * - * This source code and the accompanying materials are made available under * - * the terms of the Apache License 2.0 which accompanies this distribution. * - *******************************************************************************/ - -// RUN: cudaq-quake %s | FileCheck %s - -// Simple test using a std::vector operator. - -#include - -struct t1 { - bool operator()(std::vector d) __qpu__ { - cudaq::qreg q(2); - auto vec = mz(q); - return vec[0]; - } -}; - -// CHECK-LABEL: func.func @__nvqpp__mlirgen__t1 -// CHECK-SAME: (%[[VAL_0:.*]]: !cc.stdvec) -> i1 attributes {{{.*}}"cudaq-entrypoint"{{.*}}} { -// CHECK: %[[VAL_13:.*]] = quake.mz(%{{.*}} : !quake.qvec) : !cc.stdvec -// CHECK: %[[VAL_14:.*]] = arith.constant 0 : i32 -// CHECK: %[[VAL_15:.*]] = arith.extsi %[[VAL_14]] : i32 to i64 -// CHECK: %[[VAL_16:.*]] = cc.stdvec_data %[[VAL_13]] : (!cc.stdvec) -> !llvm.ptr -// CHECK: %[[VAL_17:.*]] = llvm.getelementptr %[[VAL_16]][%[[VAL_15]]] : (!llvm.ptr, i64) -> !llvm.ptr -// CHECK: %[[VAL_18:.*]] = llvm.load %[[VAL_17]] : !llvm.ptr -// CHECK: return %[[VAL_18]] : i1 -// CHECK: } -// CHECK-NOT: func.func private @_ZNKSt14_Bit_referencecvbEv() -> i1 - diff --git a/test/QTX/Transforms/user-provided-pass.qtx b/test/QTX/Transforms/user-provided-pass.qtx deleted file mode 100644 index b17fb242de..0000000000 --- a/test/QTX/Transforms/user-provided-pass.qtx +++ /dev/null @@ -1,26 +0,0 @@ -// ========================================================================== // -// Copyright (c) 2022 - 2023 NVIDIA Corporation & Affiliates. // -// All rights reserved. // -// // -// This source code and the accompanying materials are made available under // -// the terms of the Apache License 2.0 which accompanies this distribution. 
// -// ========================================================================== // - -// RUN: cudaq-opt --load-pass-plugin %cudaq_lib_dir/HelloWorldQTXPass%cudaq_plugin_ext %s -cudaq-hello-world-qtx | FileCheck %s - -module attributes {qtx.mangled_name_map = {__nvqpp__mlirgen__lambda._ZTSZ4mainE3$_0 = "_ZZ4mainENK3$_0clEv"}} { - qtx.circuit @__nvqpp__mlirgen__lambda._ZTSZ4mainE3$_0() { - %0 = alloca : !qtx.wire - %1 = h %0 : !qtx.wire - %bits, %new_targets = mz %1 : !qtx.wire -> !qtx.wire - return - } -} - -// CHECK-LABEL: qtx.circuit @__nvqpp__mlirgen__lambda._ZTSZ4mainE3$_0() { -// CHECK: %[[VAL_0:.*]] = alloca : !qtx.wire -// CHECK: %[[VAL_1:.*]] = h %[[VAL_0]] : !qtx.wire -// CHECK: %[[VAL_2:.*]], %[[VAL_3:.*]] = mz %[[VAL_1]] : !qtx.wire -> !qtx.wire -// CHECK: return -// CHECK: } -